diff --git a/acuity-metadata.json b/acuity-metadata.json new file mode 100644 index 00000000000..4d652a51092 --- /dev/null +++ b/acuity-metadata.json @@ -0,0 +1,2630 @@ +[ + { + "name": "a_times_b_plus_c", + "attributes": [], + "inputs": [ + { "name": "A" }, + { "name": "B" }, + { "name": "C" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "abs", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "add", + "attributes": [], + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "constants": [], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "addn", + "attributes": [], + "inputs": [], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "argmin", + "attributes": [ + { "name": "axis", "default": -1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "base_input_layer", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "batch2space", + "attributes": [ + { "name": "block_shape", "default": [ 2, 2 ] }, + { "name": "block_crops", "default": [ [ 0, 0 ], [ 0, 0 ] ] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "batchnorm_single", + "category": "Normalization", + "attributes": [ + { "name": "eps", "default": 0.0001 } + ], + "inputs": [ + { "name": "input" }, + { "name": "mean" }, + { "name": "variance" } + ], + "constants": [ + { "name": "bias" }, + { "name": "scale" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "batchnormalize", + "category": "Normalization", + "attributes": [ + { "name": "eps", "default": 0.0001 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "beta" }, + { "name": "gamma" }, + { "name": "mean" }, + { "name": "variance" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "capsule_norm", + "category": "Normalization", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "cast", + "attributes": [ + { "name": "in_data_type", "default": 0 }, + { "name": "out_data_type", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "clipbyvalue", + "attributes": [ + { "name": "clip_value_min", "default": 0 }, + { "name": "clip_value_max", "default": 255 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "concat", + "category": "Tensor", + "attributes": [ + { "name": "dim", "default": 1 } + ], + "inputs": [], + "constants": [], + "outputs": [ + { "name": "out" } + ] + }, + { + "name": "concatshift", + "category": "Tensor", + "attributes": [ + { "name": "dim", "default": 1 }, + { "name": "keep_size", "default": 1 } + ], + "inputs": [], + "constants": [], + "outputs": [ + { "name": "data" }, + { "name": "shifted_data" } + ] + }, + { + "name": "continuationindicator", + "attributes": [ + { "name": "time_step", "default": 0 }, + { "name": "batch_size", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "conv1d", + "category": "Layer", + "attributes": [ + { "name": "weights", "default": 1 
}, + { "name": "padding", "default": "VALID" }, + { "name": "bias", "default": true }, + { "name": "group_number", "default": 1 }, + { "name": "ksize", "default": 1 }, + { "name": "stride", "default": 1 }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "dilation", "default": [ 1, 1, 1 ] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "conv2d_op", + "category": "Layer", + "attributes": [ + { "name": "padding", "default": "VALID" }, + { "name": "group_number", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_h", "default": 0 }, + { "name": "pad_w", "default": 0 }, + { "name": "dilation", "default": [ 1, 1, 1 ] }, + { "name": "pad_method", "default": "auto" }, + { "name": "pad", "default": [ 0, 0, 0, 0 ] }, + { "name": "pad_h_b", "default": 0 }, + { "name": "pad_w_r", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "conv3d", + "category": "Layer", + "attributes": [ + { "name": "weights", "default": 1 }, + { "name": "bias", "default": false }, + { "name": "group_number", "default": 1 }, + { "name": "ksize_d", "default": 1 }, + { "name": "ksize_h", "default": 1 }, + { "name": "ksize_w", "default": 1 }, + { "name": "stride_d", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "padding", "default": "VALID" }, + { "name": "pad_method", "default": "padding_const" }, + { "name": "pad", "default": [ 0, 0, 0, 0, 0, 0 ] }, + { "name": "dilation", "default": [ 1, 1, 1, 1, 1 ] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "convolution", + "category": "Layer", + "attributes": [ + { "name": "weights", "default": 1 }, + { "name": "padding", "default": "VALID" }, + { "name": "bias", "default": true }, + { "name": "group_number", "default": 1 }, + { "name": "regularize", "default": false }, + { "name": "ksize_h", "default": 1 }, + { "name": "ksize_w", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_h", "default": 0 }, + { "name": "pad_w", "default": 0 }, + { "name": "dilation", "default": [ 1, 1, 1, 1 ] }, + { "name": "pad_method", "default": "auto" }, + { "name": "pad", "default": [ 0, 0, 0, 0 ] }, + { "name": "pad_h_b", "default": 0 }, + { "name": "pad_w_r", "default": 0 }, + { "name": "multiplier", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "crop_image", + "attributes": [ + { "name": "crop_size", "default": [] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "cropandresize", + "category": "Layer", + "attributes": [ + { "name": "num_crop_boxes", "default": 0 }, + { "name": "crop_size", "default": [] }, + { "name": "resize_method", "default": "bilinear" } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "boxes" }, + { "name": "box_ind" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ctc_loss_layer", + "attributes": [ + { "name": "time_major", "default": false } + ], + "inputs": [ + { 
"name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "customlayer", + "attributes": [], + "inputs": [], + "constants": [], + "outputs": [] + }, + { + "name": "deconvolution", + "category": "Layer", + "attributes": [ + { "name": "weights", "default": 1 }, + { "name": "padding", "default": "VALID" }, + { "name": "bias", "default": true }, + { "name": "group_number", "default": 1 }, + { "name": "regularize", "default": false }, + { "name": "ksize_h", "default": 1 }, + { "name": "ksize_w", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_h", "default": 0 }, + { "name": "pad_w", "default": 0 }, + { "name": "pad_method", "default": "auto" }, + { "name": "pad", "default": [ 0, 0, 0, 0 ] }, + { "name": "pad_h_b", "default": 0 }, + { "name": "pad_w_r", "default": 0 }, + { "name": "output_shape", "default": [] }, + { "name": "output_padding_h", "default": 0 }, + { "name": "output_padding_w", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "depth2space", + "attributes": [ + { "name": "block_size", "default": 2 }, + { "name": "mode", "default": "DCR" } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "depthwise_conv1d", + "category": "Layer", + "attributes": [ + { "name": "weights", "default": 1 }, + { "name": "padding", "default": "VALID" }, + { "name": "bias", "default": true }, + { "name": "group_number", "default": 2 }, + { "name": "ksize", "default": 1 }, + { "name": "stride", "default": 1 }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "dilation", "default": [ 1, 1, 1 ] }, + { "name": "multiplier", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "depthwise_conv2d_op", + "category": "Layer", + "attributes": [ + { "name": "padding", "default": "VALID" }, + { "name": "ksize_h", "default": 1 }, + { "name": "ksize_w", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "dilation", "default": [ 1, 1, 1, 1 ] }, + { "name": "multiplier", "default": 1 }, + { "name": "pad_method", "default": "auto" }, + { "name": "pad", "default": [ 0, 0, 0, 0 ] }, + { "name": "pad_h", "default": 0 }, + { "name": "pad_w", "default": 0 }, + { "name": "pad_h_b", "default": 0 }, + { "name": "pad_w_r", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "depthwise_convolution", + "category": "Layer", + "attributes": [ + { "name": "weights", "default": 1 }, + { "name": "padding", "default": "VALID" }, + { "name": "bias", "default": true }, + { "name": "regularize", "default": false }, + { "name": "ksize_h", "default": 1 }, + { "name": "ksize_w", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "dilation", "default": [ 1, 1, 1, 1 ] }, + { "name": "multiplier", "default": 1 }, + { "name": "pad_method", "default": "auto" }, + { "name": "pad", "default": [ 0, 0, 0, 0 ] }, + { "name": "pad_h", "default": 0 }, + { "name": "pad_w", "default": 0 }, + { "name": "pad_h_b", "default": 0 }, + { "name": "pad_w_r", "default": 
0 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "dequantize", + "category": "Layer", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "detectionevaluate", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "out0" }, + { "name": "out1" }, + { "name": "out2" }, + { "name": "out3" }, + { "name": "out4" } + ] + }, + { + "name": "detectionoutput", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" }, + { "name": "in2" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "digit_capsule", + "attributes": [ + { "name": "num_output", "default": 1 }, + { "name": "vec_len", "default": 1 }, + { "name": "iterations", "default": 3 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "weight" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "divide", + "attributes": [], + "inputs": [ + { "name": "dividend" }, + { "name": "divisor" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "dropout", + "category": "Dropout", + "attributes": [ + { "name": "ratio", "default": 0.5 }, + { "name": "scale_train", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "dtype_converter", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "eltwise", + "attributes": [ + { "name": "operation", "default": "SUM" }, + { "name": "coeff", "default": "" } + ], + "inputs": [], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "elu", + "category": "Activation", + "attributes": [ + { "name": "alpha", "default": 0.1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "embedding_lookup", + "category": "Embedding", + "attributes": [ + { "name": "partition_strategy", "default": "mod" } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "embedding_params" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "equal", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "exp", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "expand_broadcast", + "attributes": [ + { "name": "shape", "default": [] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "expanddims", + "attributes": [ + { "name": "dim", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "flatten", + "attributes": [ + { "name": "axis", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "floor", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "floor_div", + "attributes": [], + "inputs": [ + { "name": 
"in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "fullconnect", + "category": "Layer", + "attributes": [ + { "name": "weights", "default": 1 }, + { "name": "bias", "default": true }, + { "name": "regularize", "default": false }, + { "name": "axis", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "fullconnect_op", + "category": "Layer", + "attributes": [ + { "name": "weights", "default": 1 }, + { "name": "bias", "default": true }, + { "name": "axis", "default": 1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" } + ], + "constants": [ + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "gather", + "category": "Transform", + "attributes": [ + { "name": "axis", "default": 0 } + ], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "gathernd", + "category": "Layer", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "generator_input_layer", + "category": "Data", + "attributes": [ + { "name": "database", "default": "" }, + { "name": "shapes", "default": [] }, + { "name": "sparse_tensors", "default": [] }, + { "name": "data_types", "default": [] } + ], + "inputs": [], + "constants": [], + "outputs": [] + }, + { + "name": "greater", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "greater_equal", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "group_conv1d", + "category": "Layer", + "attributes": [ + { "name": "weights", "default": 1 }, + { "name": "padding", "default": "VALID" }, + { "name": "bias", "default": true }, + { "name": "group_number", "default": 2 }, + { "name": "ksize", "default": 1 }, + { "name": "stride", "default": 1 }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "dilation", "default": [ 1, 1, 1 ] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "gru", + "category": "Layer", + "attributes": [ + { "name": "num_units", "default": 1 }, + { "name": "time_major", "default": true }, + { "name": "bias", "default": true }, + { "name": "activation", "default": "tanh" }, + { "name": "recurrent_activation", "default": "sigmoid" }, + { "name": "return_sequences", "default": true }, + { "name": "direction", "default": "forward" }, + { "name": "linear_before_reset", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "in_hstat" } + ], + "constants": [ + { "name": "gates_kernel" }, + { "name": "gates_bias" }, + { "name": "candidate_kernel" }, + { "name": "candidate_bias" } + ], + "outputs": [ + { "name": "output" }, + { "name": "out_hstat" } + ] + }, + { + "name": "gru_cell", + "category": "Layer", + "attributes": [ + { "name": "num_units", "default": 1 }, + { "name": "activation", "default": "tanh" }, + { "name": "recurrent_activation", "default": "sigmoid" }, + { "name": "linear_before_reset", "default": 0 }, + { "name": "cudnn_implementation", "default": false } + ], + 
"inputs": [ + { "name": "data" }, + { "name": "hstat" }, + { "name": "cond_reset" }, + { "name": "cond_update" }, + { "name": "cond_candidate" } + ], + "constants": [ + { "name": "gates_kernel" }, + { "name": "gates_bias" }, + { "name": "candidate_kernel" }, + { "name": "candidate_bias" } + ], + "outputs": [ + { "name": "data" }, + { "name": "hstat" } + ] + }, + { + "name": "gru_keras", + "category": "Layer", + "attributes": [ + { "name": "units", "default": 1 }, + { "name": "activation", "default": "tanh" }, + { "name": "recurrent_activation", "default": "hard_sigmoid" }, + { "name": "use_bias", "default": true }, + { "name": "return_sequences", "default": false }, + { "name": "return_state", "default": false }, + { "name": "go_backwards", "default": false }, + { "name": "stateful", "default": false }, + { "name": "reset_after", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "kernel" }, + { "name": "recurrent_kernel" }, + { "name": "bias" } + ], + "outputs": [] + }, + { + "name": "h5_input_layer", + "attributes": [ + { "name": "database", "default": "" }, + { "name": "shapes", "default": [] }, + { "name": "sparse_tensors", "default": [] }, + { "name": "data_types", "default": [] } + ], + "inputs": [], + "constants": [], + "outputs": [] + }, + { + "name": "hard_swish", + "category": "Activation", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "image_resize", + "category": "Layer", + "attributes": [ + { "name": "type", "default": "bilinear" }, + { "name": "new_size", "default": [] }, + { "name": "align_corners", "default": false }, + { "name": "half_pixel", "default": false }, + { "name": "size_factors", "default": null } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "image_transform", + "category": "Layer", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "input", + "category": "Data", + "attributes": [ + { "name": "size", "default": "" }, + { "name": "channels", "default": 1 }, + { "name": "shape", "default": [] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "instancenormalize", + "category": "Normalization", + "attributes": [ + { "name": "eps", "default": 0.0001 }, + { "name": "axis", "default": [] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "bias" }, + { "name": "scale" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "keras_rnn_lstm", + "category": "Layer", + "attributes": [ + { "name": "cell", "default": null }, + { "name": "go_backwards", "default": false }, + { "name": "return_sequences", "default": false }, + { "name": "return_state", "default": false }, + { "name": "stateful", "default": false }, + { "name": "unroll", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "kernel" }, + { "name": "recurrent_kernel" }, + { "name": "bias" } + ], + "outputs": [] + }, + { + "name": "l2normalize", + "category": "Normalization", + "attributes": [ + { "name": "l2n_dim", "default": null }, + { "name": "eps", "default": 1e-12 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "l2normalizescale", + "category": "Normalization", + 
"attributes": [ + { "name": "l2n_dim", "default": null }, + { "name": "eps", "default": 1e-12 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "scale" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "l2pooling", + "category": "Pool", + "attributes": [ + { "name": "padding", "default": "VALID" }, + { "name": "type", "default": "MAX" }, + { "name": "global_pooling", "default": false }, + { "name": "ksize_h", "default": 1 }, + { "name": "ksize_w", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_h", "default": 0 }, + { "name": "pad_w", "default": 0 }, + { "name": "round_type", "default": "ceil" }, + { "name": "pad_method", "default": "auto" }, + { "name": "pad", "default": [ 0, 0, 0, 0 ] }, + { "name": "pad_h_b", "default": 0 }, + { "name": "pad_w_r", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "layernormalize", + "category": "Normalization", + "attributes": [ + { "name": "eps", "default": 0.0001 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "bias" }, + { "name": "scale" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "leakyrelu", + "category": "Activation", + "attributes": [ + { "name": "leaky_ratio", "default": 0.1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "less", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "less_equal", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "lmdb_input_layer", + "attributes": [ + { "name": "database", "default": "" }, + { "name": "shapes", "default": [] }, + { "name": "sparse_tensors", "default": [] }, + { "name": "data_types", "default": [] } + ], + "inputs": [], + "constants": [], + "outputs": [ + { "name": "out0" }, + { "name": "out1" } + ] + }, + { + "name": "localresponsenormalization", + "category": "Normalization", + "attributes": [ + { "name": "local_size", "default": 1 }, + { "name": "bias", "default": 2 }, + { "name": "alpha", "default": 0.0001 }, + { "name": "beta", "default": 0.75 }, + { "name": "type", "default": "NORM_ACROSS_CHANNELS" } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "localresponsenormalization_tf", + "category": "Normalization", + "attributes": [ + { "name": "local_size", "default": 1 }, + { "name": "bias", "default": 2 }, + { "name": "alpha", "default": 0.0001 }, + { "name": "beta", "default": 0.75 }, + { "name": "type", "default": "NORM_ACROSS_CHANNELS" } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "log", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "log_softmax", + "category": "Activation", + "attributes": [ + { "name": "sf_axis", "default": -1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "logical_and", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": 
"output" } + ] + }, + { + "name": "logical_or", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "lstm", + "category": "Layer", + "attributes": [ + { "name": "weights", "default": 1 }, + { "name": "time_major", "default": true }, + { "name": "forget_bias", "default": 1 }, + { "name": "activation", "default": "tanh" }, + { "name": "use_cifg", "default": false }, + { "name": "use_peepholes", "default": false }, + { "name": "num_proj", "default": null }, + { "name": "cell_clip", "default": 0 }, + { "name": "proj_clip", "default": 0 }, + { "name": "recurrent_activation", "default": "sigmoid" }, + { "name": "return_sequences", "default": true } + ], + "inputs": [ + { "name": "data" }, + { "name": "hstat" }, + { "name": "cstat" } + ], + "constants": [ + { "name": "lstm_cell_kernel" }, + { "name": "lstm_cell_bias" }, + { "name": "weight_proj" }, + { "name": "bias_proj" } + ], + "outputs": [ + { "name": "data" }, + { "name": "hstat" }, + { "name": "cstat" } + ] + }, + { + "name": "lstm_keras", + "category": "Layer", + "attributes": [ + { "name": "units", "default": 1 }, + { "name": "activation", "default": "tanh" }, + { "name": "recurrent_activation", "default": "hard_sigmoid" }, + { "name": "use_bias", "default": true }, + { "name": "return_sequences", "default": false }, + { "name": "return_state", "default": false }, + { "name": "go_backwards", "default": false }, + { "name": "stateful", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "kernel" }, + { "name": "recurrent_kernel" }, + { "name": "bias" } + ], + "outputs": [] + }, + { + "name": "lstmunit", + "category": "Layer", + "attributes": [ + { "name": "weights", "default": 1 }, + { "name": "num_proj", "default": null }, + { "name": "forget_bias", "default": 1 }, + { "name": "cell_clip", "default": 0 }, + { "name": "proj_clip", "default": 0 }, + { "name": "activation", "default": "tanh" }, + { "name": "use_layer_norm_lstm", "default": false }, + { "name": "use_cifg", "default": false } + ], + "inputs": [ + { "name": "data" }, + { "name": "hstat" }, + { "name": "cstat" } + ], + "constants": [ + { "name": "wi" }, + { "name": "wf" }, + { "name": "wc" }, + { "name": "wo" }, + { "name": "hi" }, + { "name": "hf" }, + { "name": "hc" }, + { "name": "ho" }, + { "name": "bi" }, + { "name": "bf" }, + { "name": "bc" }, + { "name": "bo" }, + { "name": "wp" }, + { "name": "bp" }, + { "name": "ln_i" }, + { "name": "ln_f" }, + { "name": "ln_c" }, + { "name": "ln_o" } + ], + "outputs": [ + { "name": "data" }, + { "name": "hstat" }, + { "name": "cstat" } + ] + }, + { + "name": "margin_loss_layer", + "attributes": [ + { "name": "margin", "default": 0.4 }, + { "name": "downweight", "default": 0.5 } + ], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "mat_inverse", + "attributes": [ + { "name": "adjoint", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "matmul", + "attributes": [ + { "name": "transpose_a", "default": false }, + { "name": "transpose_b", "default": false } + ], + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "minimum", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + 
"outputs": [ + { "name": "output" } + ] + }, + { + "name": "minimum_with_clip", + "attributes": [ + { "name": "clip", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "mish", + "category": "Activation", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "moments", + "attributes": [ + { "name": "axis_list", "default": [] }, + { "name": "keep_dims", "default": true } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "mean" }, + { "name": "variance" } + ] + }, + { + "name": "multiply", + "attributes": [ + { "name": "axis", "default": 1 }, + { "name": "bias", "default": true } + ], + "inputs": [], + "constants": [ + { "name": "scale" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "nce_loss", + "attributes": [ + { "name": "num_sampled", "default": 1 }, + { "name": "num_classes", "default": 1 } + ], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [ + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "neg", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "noop", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "noop_multi_out", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [] + }, + { + "name": "norm_with_channel_mean", + "category": "Normalization", + "attributes": [ + { "name": "mean", "default": [] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "norm_with_min_max", + "category": "Normalization", + "attributes": [ + { "name": "min_value", "default": 0 }, + { "name": "max_value", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "norm_with_scale", + "category": "Normalization", + "attributes": [ + { "name": "scale", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "not_equal", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "npy_input_layer", + "attributes": [ + { "name": "database", "default": "" }, + { "name": "shapes", "default": [] }, + { "name": "sparse_tensors", "default": [] }, + { "name": "data_types", "default": [] } + ], + "inputs": [], + "constants": [], + "outputs": [] + }, + { + "name": "output", + "category": "Data", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "pad", + "category": "Tensor", + "attributes": [ + { "name": "padding_value", "default": [] }, + { "name": "padding_mode", "default": "CONSTANT" }, + { "name": "padding_const", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "permute", + "category": "Shape", + "attributes": [ + { "name": "perm", "default": [ 0 ] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { 
"name": "output" } + ] + }, + { + "name": "pool3d", + "category": "Pool", + "attributes": [ + { "name": "type", "default": "MAX" }, + { "name": "global_pooling", "default": false }, + { "name": "ksize_d", "default": 1 }, + { "name": "ksize_h", "default": 1 }, + { "name": "ksize_w", "default": 1 }, + { "name": "stride_d", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "round_type", "default": "ceil" }, + { "name": "pad_method", "default": "padding_const" }, + { "name": "pad", "default": [ 0, 0, 0, 0, 0, 0 ] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "pooling", + "category": "Pool", + "attributes": [ + { "name": "padding", "default": "VALID" }, + { "name": "type", "default": "MAX" }, + { "name": "global_pooling", "default": false }, + { "name": "ksize_h", "default": 1 }, + { "name": "ksize_w", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_h", "default": 0 }, + { "name": "pad_w", "default": 0 }, + { "name": "round_type", "default": "ceil" }, + { "name": "pad_method", "default": "auto" }, + { "name": "pad", "default": [ 0, 0, 0, 0 ] }, + { "name": "pad_h_b", "default": 0 }, + { "name": "pad_w_r", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "poolwithargmax", + "category": "Pool", + "attributes": [ + { "name": "padding", "default": "VALID" }, + { "name": "type", "default": "MAX" }, + { "name": "global_pooling", "default": false }, + { "name": "ksize_h", "default": 1 }, + { "name": "ksize_w", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_h", "default": 0 }, + { "name": "pad_w", "default": 0 }, + { "name": "round_type", "default": "ceil" }, + { "name": "pad_method", "default": "auto" }, + { "name": "pad", "default": [ 0, 0, 0, 0 ] }, + { "name": "pad_h_b", "default": 0 }, + { "name": "pad_w_r", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "out0" }, + { "name": "out1" } + ] + }, + { + "name": "postprocess", + "attributes": [ + { "name": "perm", "default": [ 0, 1, 2, 3 ] }, + { "name": "dim_num", "default": 4 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "pow", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "prelu", + "category": "Activation", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "a" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "preprocess", + "attributes": [ + { "name": "type", "default": "VSI_NN_OP_PRE_PROCESS_RGB" }, + { "name": "left", "default": 0 }, + { "name": "top", "default": 0 }, + { "name": "width", "default": 244 }, + { "name": "height", "default": 224 }, + { "name": "mean", "default": [ 0, 0, 0 ] }, + { "name": "scale", "default": 1 }, + { "name": "perm", "default": [ 0, 1, 2, 3 ] }, + { "name": "in_dim_num", "default": 4 }, + { "name": "out_dim_num", "default": 4 }, + { "name": "out_size", "default": [ 224, 224, 3, 1 ] }, + { "name": "reverse_channel", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + 
"name": "primary_capsule", + "attributes": [ + { "name": "num_output", "default": 1 }, + { "name": "vec_len", "default": 1 }, + { "name": "strides", "default": [ 1, 1 ] }, + { "name": "ksize", "default": [ 1, 1 ] }, + { "name": "padding", "default": "SAME" } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "weight" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "priorbox", + "attributes": [ + { "name": "mini_size", "default": "" }, + { "name": "max_size", "default": "" }, + { "name": "aspect_ratio", "default": "" }, + { "name": "flip", "default": "" }, + { "name": "clip", "default": "" }, + { "name": "variance", "default": "0.1" }, + { "name": "image_size", "default": 0 }, + { "name": "image_h", "default": 0 }, + { "name": "image_w", "default": 0 }, + { "name": "step", "default": 0 }, + { "name": "step_h", "default": 0 }, + { "name": "step_w", "default": 0 }, + { "name": "offset", "default": 0.5 } + ], + "inputs": [ + { "name": "data" }, + { "name": "shape" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "proposal", + "attributes": [ + { "name": "feat_stride", "default": 16 }, + { "name": "anchor_scales", "default": "8 16 32" }, + { "name": "anchor_ratios", "default": "0.5 1 2" }, + { "name": "anchor_base_size", "default": 16 }, + { "name": "pre_nms_top_n", "default": 6000 }, + { "name": "post_nms_top_n", "default": 300 }, + { "name": "nms_thresh", "default": 0.7 }, + { "name": "min_size", "default": 16 }, + { "name": "im_info", "default": "800 600 1 1" }, + { "name": "has_bg", "default": true }, + { "name": "dynamic", "default": false } + ], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "out0" }, + { "name": "out1" } + ] + }, + { + "name": "quantize", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "real_div", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "reconstruction_loss", + "attributes": [ + { "name": "balance_factor", "default": 0.0005 } + ], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "recurrent", + "category": "Layer", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "reducemax", + "attributes": [ + { "name": "axis_list", "default": null }, + { "name": "keep_dims", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "reducemean", + "attributes": [ + { "name": "axis_list", "default": null }, + { "name": "keep_dims", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "reducemin", + "attributes": [ + { "name": "axis_list", "default": null }, + { "name": "keep_dims", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "reducesum", + "attributes": [ + { "name": "axis_list", "default": [] }, + { "name": "keep_dims", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "region", + 
"attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "relu", + "category": "Activation", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "relu_keras", + "category": "Activation", + "attributes": [ + { "name": "alpha", "default": 0 }, + { "name": "max_value", "default": "inf" }, + { "name": "threshold", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "relun", + "category": "Activation", + "attributes": [ + { "name": "relu_clamp_top", "default": "inf" }, + { "name": "relu_clamp_bottom", "default": "0" } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "reorg", + "attributes": [ + { "name": "stride", "default": 2 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "reshape", + "category": "Shape", + "attributes": [ + { "name": "shape", "default": [] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "resizebilinear_image", + "attributes": [ + { "name": "new_size", "default": [] }, + { "name": "align_corners", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "resizenearest_image", + "attributes": [ + { "name": "new_size", "default": [] }, + { "name": "align_corners", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "reverse", + "attributes": [ + { "name": "axis", "default": [] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "reverse_sequence", + "attributes": [ + { "name": "seq_axis", "default": 1 }, + { "name": "batch_axis", "default": 2 } + ], + "inputs": [ + { "name": "input" }, + { "name": "seq_lengths" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "roipooling", + "category": "Pool", + "attributes": [ + { "name": "pooled_h", "default": 6 }, + { "name": "pooled_w", "default": 6 }, + { "name": "spatial_scale", "default": 0.0625 }, + { "name": "sampling_ratio", "default": 0 } + ], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "route_train", + "attributes": [], + "inputs": [], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "rsqrt", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "scatternd", + "attributes": [ + { "name": "shape", "default": [] } + ], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "shuffle", + "attributes": [ + { "name": "group_number", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "sigmoid", + "category": "Activation", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "signalframe", + 
"attributes": [ + { "name": "frame_length", "default": 0 }, + { "name": "frame_step", "default": 0 }, + { "name": "pad_end", "default": false }, + { "name": "pad_value", "default": 0 }, + { "name": "axis", "default": -1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "simplernn_keras", + "category": "Layer", + "attributes": [ + { "name": "units", "default": 1 }, + { "name": "activation", "default": "tanh" }, + { "name": "use_bias", "default": true }, + { "name": "return_sequences", "default": false }, + { "name": "return_state", "default": false }, + { "name": "go_backwards", "default": false }, + { "name": "stateful", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "kernel" }, + { "name": "recurrent_kernel" }, + { "name": "bias" } + ], + "outputs": [] + }, + { + "name": "sin", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "slice", + "category": "Tensor", + "attributes": [ + { "name": "begin", "default": [] }, + { "name": "size", "default": [] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "softmax", + "category": "Activation", + "attributes": [ + { "name": "sf_axis", "default": -1 }, + { "name": "beta", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "softmax_with_logits_loss_layer", + "category": "Activation", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "softrelu", + "category": "Activation", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "space2batch", + "attributes": [ + { "name": "block_shape", "default": [ 2, 2 ] }, + { "name": "block_paddings", "default": [ [ 0, 0 ], [ 0, 0 ] ] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "space2depth", + "attributes": [ + { "name": "block_size", "default": [] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "split", + "category": "Tensor", + "attributes": [ + { "name": "dim", "default": 1 }, + { "name": "slices", "default": "" }, + { "name": "slices_tf", "default": "" }, + { "name": "unstack", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [] + }, + { + "name": "sqlite_input_layer", + "attributes": [ + { "name": "database", "default": "" }, + { "name": "shapes", "default": [] }, + { "name": "sparse_tensors", "default": [] }, + { "name": "data_types", "default": [] } + ], + "inputs": [], + "constants": [], + "outputs": [ + { "name": "out0" }, + { "name": "out1" }, + { "name": "out2" } + ] + }, + { + "name": "sqrt", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "square", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "squashing", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + 
"name": "squeeze", + "attributes": [ + { "name": "axis_list", "default": null }, + { "name": "name", "default": null } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "stack", + "attributes": [ + { "name": "axis", "default": 0 } + ], + "inputs": [], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "stack_concat", + "attributes": [ + { "name": "shape", "default": [ 1, 32, 256 ] }, + { "name": "axis", "default": 1 } + ], + "inputs": [ + { "name": "in0" }, + { "name": "in1" }, + { "name": "in2" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "stridedslice", + "category": "Tensor", + "attributes": [ + { "name": "slice_begin_mask", "default": 0 }, + { "name": "slice_end_mask", "default": 0 }, + { "name": "slice_ellipsis_mask", "default": 0 }, + { "name": "slice_new_axis_mask", "default": 0 }, + { "name": "slice_shrink_axis_mask", "default": 0 }, + { "name": "slice_begin", "default": [ 0, 0, 0, 0 ] }, + { "name": "slice_end", "default": [ -1, -1, -1, -1 ] }, + { "name": "slice_strides", "default": [ 1, 1, 1, 1 ] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "subgraph", + "attributes": [ + { "name": "sg_argv", "default": "" }, + { "name": "sg_func", "default": "" }, + { "name": "sg_out_shapes", "default": "" }, + { "name": "sg_graph_buffer", "default": "" }, + { "name": "sg_input_nodes", "default": "" }, + { "name": "sg_output_nodes", "default": "" } + ], + "inputs": [], + "constants": [], + "outputs": [] + }, + { + "name": "subtract", + "attributes": [], + "inputs": [ + { "name": "minuend" }, + { "name": "subtrahend" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "svdf", + "attributes": [ + { "name": "rank", "default": 1 }, + { "name": "num_units", "default": 1 }, + { "name": "spectrogram_length", "default": 1 } + ], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [ + { "name": "weights_feature" }, + { "name": "weights_time" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "swish", + "category": "Activation", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "tanh", + "category": "Activation", + "attributes": [ + { "name": "hyperbolic_tan_scale_a", "default": 1 }, + { "name": "hyperbolic_tan_scale_b", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "text_input_layer", + "attributes": [ + { "name": "database", "default": "" }, + { "name": "shapes", "default": [] }, + { "name": "sparse_tensors", "default": [] }, + { "name": "data_types", "default": [] } + ], + "inputs": [], + "constants": [], + "outputs": [] + }, + { + "name": "tile", + "attributes": [ + { "name": "multiples", "default": [] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "topk", + "attributes": [ + { "name": "topk", "default": 1 } + ], + "inputs": [ + { "name": "in0" }, + { "name": "in1" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "topk_score", + "attributes": [ + { "name": "topk", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "out0" }, + 
{ "name": "out1" } + ] + }, + { + "name": "unstack", + "attributes": [ + { "name": "axis", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [] + }, + { + "name": "upsampling", + "category": "Layer", + "attributes": [ + { "name": "factor", "default": 2 } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "variable", + "category": "Data", + "attributes": [ + { "name": "shape", "default": [ 1 ] } + ], + "inputs": [ + { "name": "input" } + ], + "constants": [ + { "name": "data" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "where", + "attributes": [], + "inputs": [ + { "name": "in0" }, + { "name": "in1" }, + { "name": "in2" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "word2vec_input", + "attributes": [ + { "name": "database", "default": "" }, + { "name": "shapes", "default": [] }, + { "name": "sparse_tensors", "default": [] }, + { "name": "data_types", "default": [] }, + { "name": "dictionary", "default": "" }, + { "name": "model", "default": "skip-gram" }, + { "name": "num_skips", "default": 2 }, + { "name": "skip_window", "default": 1 } + ], + "inputs": [], + "constants": [], + "outputs": [ + { "name": "out0" }, + { "name": "out1" } + ] + }, + { + "name": "yolo", + "attributes": [], + "inputs": [ + { "name": "input" } + ], + "constants": [], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "yoloprocess", + "attributes": [], + "inputs": [], + "constants": [], + "outputs": [ + { "name": "output" } + ] + } +] \ No newline at end of file diff --git a/acuity.js b/acuity.js new file mode 100644 index 00000000000..67b15d44e3e --- /dev/null +++ b/acuity.js @@ -0,0 +1,551 @@ + +const acuity = {}; + +acuity.ModelFactory = class { + + match(context) { + const extension = context.identifier.split('.').pop().toLowerCase(); + if (extension === 'json') { + const obj = context.peek('json'); + if (obj && obj.MetaData && obj.Layers) { + return obj; + } + } + return null; + } + + async open(context, target) { + const metadata = await context.metadata('acuity-metadata.json'); + return new acuity.Model(metadata, target); + } +}; + +acuity.Model = class { + + constructor(metadata, model, data, quantization) { + this.name = model.MetaData.Name; + this.format = `Acuity v${model.MetaData.AcuityVersion}`; + this.runtime = model.MetaData.Platform; + this.graphs = [ new acuity.Graph(metadata, model, data, quantization) ]; + } +}; + +acuity.Graph = class { + + constructor(metadata, model) { + this.nodes = []; + this.inputs = []; + this.outputs = []; + const values = new Map(); + const value = (name) => { + if (!values.has(name)) { + values.set(name, { name: name, shape: null }); + } + return values.get(name); + }; + for (const [name, layer] of Object.entries(model.Layers)) { + layer.inputs = layer.inputs.map((input) => { + return value(input); + }); + layer.outputs = layer.outputs.map((port) => { + const output = value(`@${name}:${port}`); + let shape = null; + if (layer.op.toLowerCase() == 'input' || + layer.op.toLowerCase() == 'variable') { + if (Object.prototype.hasOwnProperty.call(layer.parameters, 'shape') && layer.parameters.shape.length > 0) { + shape = layer.parameters.shape; + } else if (Object.prototype.hasOwnProperty.call(layer.parameters, 'size') && Object.prototype.hasOwnProperty.call(layer.parameters, 'channels')) { + const sizes = layer.parameters.size.split(' '); + shape = [0, parseInt(sizes[0]), 
parseInt(sizes[1]), layer.parameters.channels]; + } + if (shape && shape.length === 4 && shape[0] === 0) { + shape[0] = 1; + } + } + output.shape = shape; + return output; + }); + } + acuity.Inference.infer(model.Layers); + for (const [name, obj] of values) { + const type = new acuity.TensorType(null, new acuity.TensorShape(obj.shape)); + const value = new acuity.Value(name, type, null, null); + values.set(name, value); + } + for (const [name, layer] of Object.entries(model.Layers)) { + switch (layer.op.toLowerCase()) { + case 'input': { + const value = values.get(layer.outputs[0].name); + const argument = new acuity.Argument(name, [ value ]); + this.inputs.push(argument); + break; + } + case 'output': { + const value = values.get(layer.inputs[0].name); + const argument = new acuity.Argument(name, [ value ]); + this.outputs.push(argument); + break; + } + default: { + const node = new acuity.Node(metadata, name, layer, values); + this.nodes.push(node); + break; + } + } + } + } +}; + +acuity.Node = class { + + constructor(metadata, name, layer, values) { + const op = layer.op; + this.name = name; + this.type = metadata.type(op) || { name: op }; + this.inputs = []; + this.outputs = []; + this.attributes = []; + if (this.type) { + if (layer.parameters) { + for (const [name, value] of Object.entries(layer.parameters)) { + const meta = metadata.attribute(op, name); + const type = meta && meta.type ? meta.type : null; + const visible = meta && meta.default !== undefined && meta.default === value ? false : true; + const attribute = new acuity.Argument(name, value, type, visible); + this.attributes.push(attribute); + } + } + } + for (let i = 0; i < layer.inputs.length; i++) { + const input = layer.inputs[i]; + const value = values.get(input.name); + const name = this.type && this.type.inputs && i < this.type.inputs.length ? this.type.inputs[i].name : `input${i}`; + const argument = new acuity.Argument(name, [ value ]); + this.inputs.push(argument); + } + + if (this.type && this.type.constants) { + for (const constant of this.type.constants) { + // const name = "@" + this.name + ":" + constant.name; + const type = new acuity.TensorType(null, new acuity.TensorShape(null)); + const value = new acuity.Value('', type, null, new acuity.Tensor(type)); + const argument = new acuity.Argument(constant.name, [ value ]); + this.inputs.push(argument); + } + } + + for (let i = 0; i < layer.outputs.length; i++) { + const output = layer.outputs[i]; + const value = values.get(output.name); + const name = this.type && this.type.outputs && i < this.type.outputs.length ? 
this.type.outputs[i].name : `output${i}`; + const argument = new acuity.Argument(name, [ value ]); + this.outputs.push(argument); + } + } +}; + +acuity.Argument = class { + + constructor(name, value, type, visible) { + this.name = name; + this.value = value; + if (type) { + this.type = type; + } + if (visible === false) { + this.visible = false; + } + } +}; + +acuity.Value = class { + + constructor(name, type, quantization, initializer) { + if (typeof name !== 'string') { + throw new acuity.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type || null; + this.quantization = quantization || null; + this.initializer = initializer || null; + } +}; + +acuity.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType || '?'; + this.shape = shape; + } + + toString() { + return (this.dataType || '?') + this.shape.toString(); + } +}; + +acuity.TensorShape = class { + + constructor(dimensions) { + this.dimensions = Array.isArray(dimensions) && dimensions.length == 1 && dimensions[0] == 0 ? [] : dimensions; + } + + toString() { + if (!Array.isArray(this.dimensions) || this.dimensions.length == 0 || (this.dimensions.length == 1 && this.dimensions[0] == 0)) { + return ''; + } + return `[${this.dimensions.map((dimension) => dimension ? dimension.toString() : '?').join(',')}]`; + } +}; + +acuity.Tensor = class { + + constructor(type) { + this.type = type; + this.Category = 'Constant'; + } +}; + +acuity.Inference = class { + + static infer(layers) { + const outputs = new Map(); + const outputLayers = []; + for (const [, layer] of Object.entries(layers)) { + if (layer.op.toLowerCase() == 'output') { + outputLayers.push(layer); + } + for (const output of layer.outputs) { + outputs.set(output.name, layer); + } + } + const broadcasts = new Set([ + 'add', 'equal', 'floor_mod', 'floor_div', 'greater', 'greater_equal', 'less', 'less_equal', + 'logical_and', 'logical_or', 'minimum', 'multiply', 'not_equal', 'pow', 'real_div', + 'squared_difference', 'subtract' + ]); + const passthroughs = new Set([ + 'LocalResponseNormalization', 'a_times_b_plus_c', 'abs', 'batchnorm_single', 'batchnormalize', + 'cast', 'clipbyvalue', 'dequantize', 'dtype_converter', 'elu', 'exp', 'floor', + 'groupnormalize', 'hard_sigmoid', 'hard_swish', 'instancenormalize', 'l2normalize', 'l2normalizescale', + 'layernormalize', 'leakyrelu', 'log', 'log_softmax', 'mish', 'neg', 'norm_with_channel_mean', + 'norm_with_min_max', 'norm_with_scale', 'pow', 'prelu', 'quantize', 'relu', 'relu_keras', + 'relun', 'reverse', 'round', 'rsqrt', 'sigmoid', 'sin', 'softmax', 'softrelu', 'sqrt', 'square', 'tanh' + ]); + const reduces = new Set([ + 'reduceany', 'reducemax', 'reducemean', 'reducemin', 'reduceprod', 'reducesum' + ]);
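+ // Shape-inference rules keyed by operator name: each handler maps the input shapes and layer parameters to a list of output shapes.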
longer[i] : shorter[i]; + } + return [ longer ]; + }); + operators.set('concat', (inputs, params) => { + const outputShape = inputs[0].slice(); + outputShape[params.dim] = 0; + for (const shape of inputs) { + outputShape[params.dim] += shape[params.dim]; + } + return [ outputShape ]; + }); + operators.set('conv1d', (inputs, params) => { + if (params.padding == 'VALID') { + const out_h = ~~((inputs[0][1] + params.stride - params.ksize) / params.stride); + return [ [ inputs[0][0], out_h, params.weights ] ]; + } else if (params.padding == 'SAME') { + const out_h = ~~((inputs[0][1] + params.stride - 1) / params.stride); + return [ [ inputs[0][0], out_h, params.weights ] ]; + } + return null; + }); + operators.set('convolution', (inputs, params) => { + if (params.padding == 'VALID') { + const out_h = ~~((inputs[0][1] + params.stride_h + params.pad[0] + params.pad[1] - params.ksize_h) / params.stride_h); + const out_w = ~~((inputs[0][2] + params.stride_w + params.pad[2] + params.pad[3] - params.ksize_w) / params.stride_w); + return [ [ inputs[0][0], out_h, out_w, params.weights ] ]; + } else if (params.padding == 'SAME') { + const out_h = ~~((inputs[0][1] + params.stride_h - 1) / params.stride_h); + const out_w = ~~((inputs[0][2] + params.stride_w - 1) / params.stride_w); + return [ [ inputs[0][0], out_h, out_w, params.weights ] ]; + } + return null; + }); + operators.set('deconvolution', (inputs, params) => { + return [ params.output_shape.map((item, index) => item == 0 ? inputs[0][index] : item) ]; + }); + operators.set('fullconnect', (inputs, params) => { + return [ inputs[0].slice(0, params.axis).concat([params.weights]) ]; + }); + operators.set('gather', (inputs, params) => { + const prefix = inputs[1].slice(); + const suffix = inputs[0].slice(params.axis + 1); + return [ prefix.concat(suffix) ]; + }); + operators.set('lstm', (inputs, params) => { + const [input] = inputs; + const [a, b] = input; + let batch = a; + const output = params.num_proj != null ? params.num_proj : params.weights; + if (params.time_major) { + batch = b; + } + const newShape = params.return_sequences ? 
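+            // return_sequences keeps the full sequence shape; otherwise only the last
+            // step [batch, output] is emitted (state shapes follow in the return below)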
[ a, b, output ] : [ batch, output ]; + return [ newShape, [batch, output], [batch, params.weights] ]; + }); + operators.set('matmul', ([a, b], params) => { + let newShape = a.slice(0, -2); + if (params.transpose_a) { + newShape = newShape.concat(a.slice(-1)); + } else { + newShape = newShape.concat(a.slice(-2, -1)); + } + if (params.transpose_b) { + newShape = newShape.concat(b.slice(-2, -1)); + } else { + newShape = newShape.concat(b.slice(-1)); + } + return [ newShape ]; + }); + operators.set('pad', (inputs, params) => { + return [ inputs[0].map((item, index) => item + params.padding_value[index][0] + params.padding_value[index][1]) ]; + }); + operators.set('permute', (inputs, params) => { + return [ inputs[0].map((_, index) => inputs[0][params.perm[index]]) ]; + }); + operators.set('pooling', (inputs, params) => { + if (params.padding == 'VALID') { + const out_h = ~~((inputs[0][1] + params.stride_h - params.ksize_h) / params.stride_h); + const out_w = ~~((inputs[0][2] + params.stride_w - params.ksize_w) / params.stride_w); + return [ [inputs[0][0], out_h, out_w, inputs[0][3]] ]; + } else if (params.padding == 'SAME') { + const out_h = ~~((inputs[0][1] + params.stride_h - 1) / params.stride_h); + const out_w = ~~((inputs[0][2] + params.stride_w - 1) / params.stride_w); + return [ [inputs[0][0], out_h, out_w, inputs[0][3]] ]; + } + return null; + }); + operators.set('reduce', (inputs, params) => { + const newShape = inputs[0].slice(); + if (params.keep_dims) { + // keep each reduced axis (negative values count from the end) as size 1 + for (const axis of params.axis_list) { + newShape[axis < 0 ? newShape.length + axis : axis] = 1; + } + } else { + const axis_list = params.axis_list.map((item) => { + return item < 0 ? newShape.length + item : item; + }); + axis_list.sort((a, b) => { + return b - a; + }); + for (const item of axis_list) { + newShape.splice(item, 1); + } + if (!newShape.length) { + newShape.splice(0, 0, 0); + } + } + return [ newShape ]; + }); + operators.set('repeat', (inputs, params) => { + const newShape = inputs[0].slice(); + newShape[params.axis] = params.maxlen; + return [ newShape ]; + }); + operators.set('reshape', (inputs, params) => { + const negativeIndices = []; + let shape = params.shape; + if (typeof params.shape === 'string') { + shape = params.shape.split(/\s+/).map((item) => { + return parseInt(item); + }); + } + const newShape = shape.map((item, index) => { + if (item == 0) { + return inputs[0][index]; + } + if (item == -1) { + negativeIndices.push(index); + return 1; + } + return item; + }); + if (negativeIndices.length > 0) { + newShape[negativeIndices[0]] = inputs[0].reduce((a, c) => a * c) / newShape.reduce((a, c) => a * c); + } + return [ newShape ]; + }); + operators.set('sequence_mask', (inputs, params) => { + return [ inputs[0].slice().concat([params.maxlen]) ]; + }); + operators.set('slice', (inputs, params) => { + return [ params.size.map((item, index) => item == -1 ? 
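+            // a size of -1 selects the full remaining extent of that input axis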
inputs[0][index] : item) ]; + }); + operators.set('squeeze', (inputs, params) => { + const newShape = inputs[0].slice(); + const axis_list = [...new Set(params.axis_list)].sort((a, b) => b - a); + for (const item of axis_list) { + newShape.splice(item, 1); + } + return [ newShape ]; + }); + operators.set('space2depth', (inputs, params) => { + const h = inputs[0][1] / params.block_size[0]; + const w = inputs[0][2] / params.block_size[1]; + // channel count grows by the full block area (block_h * block_w) + const c = inputs[0][3] * params.block_size[0] * params.block_size[1]; + return [ [inputs[0][0], h, w, c] ]; + }); + operators.set('split', (inputs, params) => { + const sizes = []; + const slices = params.slices.slice(); + slices.splice(0, 0, 0); + slices.push(inputs[0][params.dim]); + slices.reduce((a, b) => { + sizes.push(b - a); + return b; + }); + return sizes.map((item) => { + const shape = inputs[0].slice(); + shape[params.dim] = item; + return shape; + }); + }); + operators.set('stack', (inputs, params) => { + const newShape = inputs[0].slice(); + if (newShape.length == 1 && newShape[0] == 0) { + newShape[0] = 1; + } else { + newShape.splice(params.axis, 0, inputs.length); + } + return [ newShape ]; + }); + operators.set('stridedslice', (inputs, params) => { + const input_shape = inputs[0].slice(); + const begin = params.slice_begin.slice(); + const end = params.slice_end.slice(); + if (params.slice_begin_mask > 0) { + for (let i = 0; i < begin.length; i++) { + if ((params.slice_begin_mask >>> i) & 0x1) { + begin[i] = -1; + } + } + } + if (params.slice_end_mask > 0) { + for (let i = 0; i < end.length; i++) { + if ((params.slice_end_mask >>> i) & 0x1) { + end[i] = -1; + } + } + } + for (let i = 0; i < begin.length; i++) { + if (begin[i] == -1) { + begin[i] = 0; + } + } + if (inputs[0].length == end.length) { + for (let i = 0; i < end.length; i++) { + if (end[i] == -1 || end[i] > input_shape[i]) { + end[i] = input_shape[i]; + } + } + } else if (inputs[0].length < end.length) { + if (params.slice_new_axis_mask) { + const len = (params.slice_new_axis_mask >>> 0).toString(2).length; + for (let i = 0; i < len; i++) { + if ((params.slice_new_axis_mask >>> i) & 0x1) { + input_shape.splice(i, 0, 1); + } + } + for (let i = 0; i < end.length; i++) { + if (end[i] == -1) { + end[i] = input_shape[i]; + } + } + } + } + let newShape = []; + for (let i = 0; i < begin.length; i++) { + newShape = newShape.concat([(end[i] - begin[i]) / params.slice_strides[i]]); + } + if (params.slice_shrink_axis_mask) { + const len = (params.slice_shrink_axis_mask >>> 0).toString(2).length; + for (let i = 0; i < len; i++) { + if ((params.slice_shrink_axis_mask >>> i) & 0x1) { + newShape.splice(i, 1); + } + } + } + if (params.slice_new_axis_mask) { + const len = (params.slice_new_axis_mask >>> 0).toString(2).length; + for (let i = 0; i < len; i++) { + if ((params.slice_new_axis_mask >>> i) & 0x1) { + if (inputs[0].length == begin.length) { + newShape.splice(i, 0, 1); + } else if (inputs[0].length < begin.length) { + newShape[i] = 1; + } + } + } + } + return [ newShape ]; + }); + const infer = (output) => { + if (outputs.has(output.name)) { + let ready = true; + const layer = outputs.get(output.name); + for (const input of layer.inputs) { + if (input.shape === null) { + infer(input); + if (input.shape === null) { + ready = false; + break; + } + } + } + if (ready) { + let callback = null; + if (operators.has(layer.op)) { + callback = operators.get(layer.op); + } else if (passthroughs.has(layer.op)) { + callback = (inputs) => [ inputs[0].slice() ]; + } else if (broadcasts.has(layer.op)) { 
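+                    // elementwise binary ops share the NumPy-style broadcast rule registered above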
+ callback = operators.get('broadcast'); + } else if (reduces.has(layer.op)) { + callback = operators.get('reduce'); + } else { + callback = () => []; + } + const parameters = layer.parameters; + const inputs = layer.inputs.map((input) => input.shape); + const outputs = callback(inputs, parameters); + for (let i = 0; i < outputs.length; i++) { + if (i < layer.outputs.length) { + layer.outputs[i].shape = outputs[i]; + } + } + } + } + }; + for (const layer of outputLayers) { + for (const output of layer.outputs) { + infer(output); + } + } + } +}; + +acuity.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Acuity model.'; + } +}; + +export const ModelFactory = acuity.ModelFactory; diff --git a/armnn-metadata.json b/armnn-metadata.json new file mode 100644 index 00000000000..1370de24b12 --- /dev/null +++ b/armnn-metadata.json @@ -0,0 +1,422 @@ +[ + { + "name": "ActivationLayer", + "category": "Activation", + "attributes": [ + { "name": "activationFunction", "type": "ActivationFunction" }, + { "name": "a", "type": "float32" }, + { "name": "b", "type": "float32" } + ] + }, + { + "name": "AdditionLayer", + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "BatchNormalizationLayer", + "category": "Normalization", + "attributes": [ + { "name": "eps", "type": "float32" }, + { "name": "dataLayout", "type": "DataLayout" } + ], + "inputs": [ + { "name": "input" }, + { "name": "mean" }, + { "name": "variance" }, + { "name": "beta" }, + { "name": "gamma" } + ] + }, + { + "name": "BatchToSpaceNdLayer", + "category": "Layer", + "attributes": [ + { "name": "blockShape", "type": "string" }, + { "name": "crops", "type": "string" }, + { "name": "dataLayout", "type": "DataLayout" } + ] + }, + { + "name": "ConcatLayer", + "category": "Tensor", + "attributes": [ + { "name": "concatAxis", "type": "uint32" }, + { "name": "numViews", "type": "uint32" }, + { "name": "numDimensions", "type": "uint32" } + ] + }, + { + "name": "ConstantLayer", + "category": "Constant", + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "Convolution2dLayer", + "category": "Layer", + "attributes": [ + { "name": "padTop", "type": "uint32" }, + { "name": "padRight", "type": "uint32" }, + { "name": "padBottom", "type": "uint32" }, + { "name": "padLeft", "type": "uint32" }, + { "name": "strideX", "type": "uint32" }, + { "name": "strideY", "type": "uint32" }, + { "name": "dilationX", "type": "uint32" }, + { "name": "dilationY", "type": "uint32" }, + { "name": "dataLayout", "type": "DataLayout" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "DepthwiseConvolution2dLayer", + "category": "Layer", + "attributes": [ + { "name": "padTop", "type": "uint32" }, + { "name": "padRight", "type": "uint32" }, + { "name": "padBottom", "type": "uint32" }, + { "name": "padLeft", "type": "uint32" }, + { "name": "strideX", "type": "uint32" }, + { "name": "strideY", "type": "uint32" }, + { "name": "dilationX", "type": "uint32" }, + { "name": "dilationY", "type": "uint32" }, + { "name": "dataLayout", "type": "DataLayout" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "DequantizeLayer" + }, + { + "name": "DetectionPostProcessLayer", + "category": "Custom", + "attributes": [ + { "name": "maxDetections", "type": "uint32" }, + { "name": "maxClassesPerDetection", "type": "uint32" }, + { "name": "detectionsPerClass", 
"type": "uint32" }, + { "name": "nmsScoreThreshold", "type": "float32" }, + { "name": "numIouThreshold", "type": "float32" }, + { "name": "numClasses", "type": "uint32" }, + { "name": "useRegularNms", "type": "boolean" }, + { "name": "scaleX", "type": "float32" }, + { "name": "scaleY", "type": "float32" }, + { "name": "scaleW", "type": "float32" }, + { "name": "scaleH", "type": "float32" } + ] + }, + { + "name": "DivisionLayer", + "category": "Layer" + }, + { + "name": "EqualLayer", + "category": "Layer" + }, + { + "name": "FloorLayer", + "category": "Layer" + }, + { + "name": "FullyConnectedLayer", + "category": "Layer", + "attributes": [ + { "name": "transposeWeightsMatrix", "type": "boolean" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "GatherLayer", + "category": "Tensor" + }, + { + "name": "GreaterLayer", + "category": "Layer", + "attributes": [] + }, + { + "name": "InputLayer" + }, + { + "name": "L2NormalizationLayer", + "category": "Normalization", + "attributes": [ + { "name": "eps", "type": "float32" }, + { "name": "dataLayout", "type": "DataLayout" } + ] + }, + { + "name": "LstmLayer", + "category": "Layer", + "attributes": [ + { "name": "activationFunc", "type": "uint32" }, + { "name": "clippingThresCell", "type": "float32" }, + { "name": "clippingThresProj", "type": "float32" }, + { "name": "cifgEnabled", "type": "boolean" }, + { "name": "peepholeEnabled", "type": "boolean" }, + { "name": "projectionEnabled", "type": "boolean" }, + { "name": "layerNormEnabled", "type": "boolean" } + ], + "inputs": [ + { "name": "input" }, + { "name": "inputToForgetWeights1" }, + { "name": "inputToCellWeights1" }, + { "name": "inputToOutputWeights1" }, + { "name": "recurrentToForgetWeights1" }, + { "name": "recurrentToCellWeights1" }, + { "name": "recurrentToOutputWeights1" }, + { "name": "forgetGateBias1" }, + { "name": "cellBias1" }, + { "name": "outputGateBias1" }, + { "name": "inputToInputWeights1" }, + { "name": "recurrentToInputWeights1" }, + { "name": "cellToInputWeights1" }, + { "name": "inputGateBias1" }, + { "name": "projectionWeights1" }, + { "name": "projectionBias1" }, + { "name": "cellToForgetWeights1" }, + { "name": "cellToOutputWeights1" }, + { "name": "inputLayerNormWeights1" }, + { "name": "forgetLayerNormWeights1" }, + { "name": "cellLayerNormWeights1" }, + { "name": "outputLayerNormWeights1" } + ] + }, + { + "name": "MaximumLayer", + "category": "Layer" + }, + { + "name": "MeanLayer", + "attributes": [ + { "name": "axis", "type": "uint32" }, + { "name": "keepDims", "type": "boolean" } + ] + }, + { + "name": "MergeLayer", + "category": "Layer" + }, + { + "name": "MergerLayer", + "category": "Tensor" + }, + { + "name": "MinimumLayer", + "category": "Layer" + }, + { + "name": "MultiplicationLayer", + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "NormalizationLayer", + "category": "Normalization", + "attributes": [ + { "name": "normChannelType", "type": "NormalizationAlgorithmChannel" }, + { "name": "normMethodType", "type": "NormalizationAlgorithmMethod" }, + { "name": "normSize", "type": "uint32" }, + { "name": "alpha", "type": "float32" }, + { "name": "beta", "type": "float32" }, + { "name": "dataLayout", "type": "DataLayout" } + ] + }, + { + "name": "OutputLayer", + "category": "Tensor" + }, + { + "name": "PadLayer", + "category": "Layer", + "attributes": [ + { "name": "padList", "type": "uint32" }, + { "name": "padValue", "type": "float32" } 
+ ] + }, + { + "name": "PermuteLayer", + "category": "Shape", + "attributes": [ + { "name": "dimMappings", "type": "string" } + ] + }, + { + "name": "Pooling2dLayer", + "category": "Pool", + "attributes": [ + { "name": "poolType", "type": "PoolingAlgorithm" }, + { "name": "padTop", "type": "uint32" }, + { "name": "padRight", "type": "uint32" }, + { "name": "padBottom", "type": "uint32" }, + { "name": "padLeft", "type": "uint32" }, + { "name": "poolWidth", "type": "uint32" }, + { "name": "poolHeight", "type": "uint32" }, + { "name": "strideX", "type": "uint32" }, + { "name": "strideY", "type": "uint32" }, + { "name": "outputShapeRounding", "type": "OutputShapeRounding" }, + { "name": "paddingMethod", "type": "PaddingMethod" }, + { "name": "dataLayout", "type": "DataLayout" } + ] + }, + { + "name": "PreluLayer", + "category": "Layer" + }, + { + "name": "QuantizedLstmLayer", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "inputToInputWeights1" }, + { "name": "inputToForgetWeights1" }, + { "name": "inputToCellWeights1" }, + { "name": "inputToOutputWeights1" }, + { "name": "recurrentToInputWeights1" }, + { "name": "recurrentToForgetWeights1" }, + { "name": "recurrentToCellWeights1" }, + { "name": "recurrentToOutputWeights1" }, + { "name": "inputGateBias1" }, + { "name": "forgetGateBias1" }, + { "name": "cellBias1" }, + { "name": "outputGateBias1" } + ] + }, + { + "name": "QuantizeLayer" + }, + { + "name": "ReshapeLayer", + "category": "Shape", + "attributes": [ + { "name": "targetShape", "type": "uint32[]" } + ] + }, + { + "name": "ResizeBilinearLayer", + "category": "Layer", + "attributes": [ + { "name": "targetWidth", "type": "uint32" }, + { "name": "targetHeight", "type": "uint32" }, + { "name": "dataLayout", "type": "DataLayout" } + ] + }, + { + "name": "ResizeLayer", + "category": "Layer", + "attributes": [ + { "name": "targetWidth", "type": "uint32" }, + { "name": "targetHeight", "type": "uint32" }, + { "name": "method", "type": "ResizeMethod" }, + { "name": "dataLayout", "type": "DataLayout" } + ] + }, + { + "name": "RsqrtLayer", + "category": "Layer" + }, + { + "name": "SoftmaxLayer", + "category": "Activation", + "attributes": [ + { "name": "beta", "type": "float32" } + ] + }, + { + "name": "SpaceToBatchNdLayer", + "category": "Layer", + "attributes": [ + { "name": "blockShape", "type": "string" }, + { "name": "padList", "type": "string" }, + { "name": "dataLayout", "type": "DataLayout" } + ] + }, + { + "name": "SpaceToDepthLayer", + "category": "Layer", + "attributes": [ + { "name": "blockSize", "type": "uint32" }, + { "name": "dataLayout", "type": "DataLayout" } + ] + }, + { + "name": "SplitterLayer", + "category": "Tensor", + "attributes": [ + { "name": "concatAxis", "type": "uint32" }, + { "name": "numViews", "type": "uint32" }, + { "name": "numDimensions", "type": "uint32" } + ] + }, + { + "name": "StackLayer", + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "uint32" }, + { "name": "numInputs", "type": "uint32" }, + { "name": "inputShape", "type": "uint32" } + ] + }, + { + "name": "StridedSliceLayer", + "category": "Tensor", + "attributes": [ + { "name": "begin", "type": "int32" }, + { "name": "end", "type": "int32" }, + { "name": "stride", "type": "int32" }, + { "name": "beginMask", "type": "int32" }, + { "name": "endMask", "type": "int32" }, + { "name": "shrinkAxisMask", "type": "int32" }, + { "name": "ellipsisMask", "type": "int32" }, + { "name": "newAxisMask", "type": "int32" }, + { "name": "dataLayout", "type": "DataLayout" } + ] 
+ }, + { + "name": "SubtractionLayer" + }, + { + "name": "SwitchLayer", + "category": "Layer" + }, + { + "name": "TransposeConvolution2dLayer", + "category": "Layer", + "attributes": [ + { "name": "padTop", "type": "uint32" }, + { "name": "padRight", "type": "uint32" }, + { "name": "padBottom", "type": "uint32" }, + { "name": "padLeft", "type": "uint32" }, + { "name": "strideX", "type": "uint32" }, + { "name": "strideY", "type": "uint32" }, + { "name": "dataLayout", "type": "DataLayout" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + } +] \ No newline at end of file diff --git a/armnn-schema.js b/armnn-schema.js new file mode 100644 index 00000000000..fe7da30d97a --- /dev/null +++ b/armnn-schema.js @@ -0,0 +1,2504 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const $root = flatbuffers.get('armnn'); + +$root.armnnSerializer = $root.armnnSerializer || {}; + +$root.armnnSerializer.ActivationFunction = { + Sigmoid: 0, + TanH: 1, + Linear: 2, + ReLu: 3, + BoundedReLu: 4, + SoftReLu: 5, + LeakyReLu: 6, + Abs: 7, + Sqrt: 8, + Square: 9, + Elu: 10, + HardSwish: 11 +}; + +$root.armnnSerializer.ArgMinMaxFunction = { + Min: 0, + Max: 1 +}; + +$root.armnnSerializer.DataType = { + Float16: 0, + Float32: 1, + QuantisedAsymm8: 2, + Signed32: 3, + Boolean: 4, + QuantisedSymm16: 5, + QAsymmU8: 6, + QSymmS16: 7, + QAsymmS8: 8, + QSymmS8: 9 +}; + +$root.armnnSerializer.DataLayout = { + NHWC: 0, + NCHW: 1 +}; + +$root.armnnSerializer.ResizeMethod = { + NearestNeighbor: 0, + Bilinear: 1 +}; + +$root.armnnSerializer.TensorInfo = class TensorInfo { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.TensorInfo(); + $.dimensions = reader.typedArray(position, 4, Uint32Array); + $.dataType = reader.int8_(position, 6, 0); + $.quantizationScale = reader.float32_(position, 8, 1); + $.quantizationOffset = reader.int32_(position, 10, 0); + $.quantizationScales = reader.typedArray(position, 12, Float32Array); + $.quantizationDim = reader.uint32_(position, 14, 0); + $.dimensionality = reader.uint32_(position, 16, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.TensorInfo(); + $.dimensions = reader.typedArray(json.dimensions, Uint32Array); + $.dataType = $root.armnnSerializer.DataType[json.dataType]; + $.quantizationScale = reader.value(json.quantizationScale, 1); + $.quantizationOffset = reader.value(json.quantizationOffset, 0); + $.quantizationScales = reader.typedArray(json.quantizationScales, Float32Array); + $.quantizationDim = reader.value(json.quantizationDim, 0); + $.dimensionality = reader.value(json.dimensionality, 1); + return $; + } +}; + +$root.armnnSerializer.Connection = class Connection { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.Connection(); + $.sourceLayerIndex = reader.uint32(position + 0); + $.outputSlotIndex = reader.uint32(position + 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.Connection(); + $.sourceLayerIndex = json.sourceLayerIndex; + $.outputSlotIndex = json.outputSlotIndex; + return $; + } +}; + +$root.armnnSerializer.ByteData = class ByteData { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ByteData(); + $.data = reader.typedArray(position, 4, Int8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ByteData(); + $.data = reader.typedArray(json.data, Int8Array); + return $; + } +}; + 
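+// ByteData, ShortData, IntData and LongData are the ConstTensorData union members
+// of the Arm NN flatbuffers schema: 'decode' reads the vector stored at vtable
+// offset 4 of a binary table, while 'decodeText' reads the same field from parsed
+// JSON. A minimal usage sketch ('reader' and 'position' here are illustrative
+// assumptions, not part of this diff):
+//
+//   const data = $root.armnnSerializer.ByteData.decode(reader, position);
+//   data.data; // Int8Array view over the serialized byte vector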
+$root.armnnSerializer.ShortData = class ShortData { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ShortData(); + $.data = reader.typedArray(position, 4, Int16Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ShortData(); + $.data = reader.typedArray(json.data, Int16Array); + return $; + } +}; + +$root.armnnSerializer.IntData = class IntData { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.IntData(); + $.data = reader.typedArray(position, 4, Int32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.IntData(); + $.data = reader.typedArray(json.data, Int32Array); + return $; + } +}; + +$root.armnnSerializer.LongData = class LongData { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.LongData(); + $.data = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.LongData(); + $.data = reader.array(json.data); + return $; + } +}; + +$root.armnnSerializer.ConstTensorData = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.armnnSerializer.ByteData.decode(reader, position); + case 2: return $root.armnnSerializer.ShortData.decode(reader, position); + case 3: return $root.armnnSerializer.IntData.decode(reader, position); + case 4: return $root.armnnSerializer.LongData.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'ByteData': return $root.armnnSerializer.ByteData.decodeText(reader, json); + case 'ShortData': return $root.armnnSerializer.ShortData.decodeText(reader, json); + case 'IntData': return $root.armnnSerializer.IntData.decodeText(reader, json); + case 'LongData': return $root.armnnSerializer.LongData.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.armnnSerializer.ConstTensor = class ConstTensor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ConstTensor(); + $.info = reader.table(position, 4, $root.armnnSerializer.TensorInfo.decode); + $.data = reader.union(position, 6, $root.armnnSerializer.ConstTensorData.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ConstTensor(); + $.info = reader.object(json.info, $root.armnnSerializer.TensorInfo.decodeText); + $.data = $root.armnnSerializer.ConstTensorData.decodeText(reader, json.data, json.data_type); + return $; + } +}; + +$root.armnnSerializer.InputSlot = class InputSlot { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.InputSlot(); + $.index = reader.uint32_(position, 4, 0); + $.connection = reader.struct(position, 6, $root.armnnSerializer.Connection.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.InputSlot(); + $.index = reader.value(json.index, 0); + $.connection = reader.object(json.connection, $root.armnnSerializer.Connection.decodeText); + return $; + } +}; + +$root.armnnSerializer.OutputSlot = class OutputSlot { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.OutputSlot(); + $.index = reader.uint32_(position, 4, 0); + $.tensorInfo = reader.table(position, 6, $root.armnnSerializer.TensorInfo.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.OutputSlot(); + $.index = reader.value(json.index, 0); + 
$.tensorInfo = reader.object(json.tensorInfo, $root.armnnSerializer.TensorInfo.decodeText); + return $; + } +}; + +$root.armnnSerializer.LayerType = { + Addition: 0, + Input: 1, + Multiplication: 2, + Output: 3, + Pooling2d: 4, + Reshape: 5, + Softmax: 6, + Convolution2d: 7, + DepthwiseConvolution2d: 8, + Activation: 9, + Permute: 10, + FullyConnected: 11, + Constant: 12, + SpaceToBatchNd: 13, + BatchToSpaceNd: 14, + Division: 15, + Minimum: 16, + Equal: 17, + Maximum: 18, + Normalization: 19, + Pad: 20, + Rsqrt: 21, + Floor: 22, + BatchNormalization: 23, + Greater: 24, + ResizeBilinear: 25, + Subtraction: 26, + StridedSlice: 27, + Gather: 28, + Mean: 29, + Merger: 30, + L2Normalization: 31, + Splitter: 32, + DetectionPostProcess: 33, + Lstm: 34, + Quantize: 35, + Dequantize: 36, + Merge: 37, + Switch: 38, + Concat: 39, + SpaceToDepth: 40, + Prelu: 41, + TransposeConvolution2d: 42, + Resize: 43, + Stack: 44, + QuantizedLstm: 45, + Abs: 46, + ArgMinMax: 47, + Slice: 48, + DepthToSpace: 49, + InstanceNormalization: 50, + LogSoftmax: 51, + Comparison: 52, + StandIn: 53, + ElementwiseUnary: 54, + Transpose: 55, + QLstm: 56, + Fill: 57, + Rank: 58 +}; + +$root.armnnSerializer.LayerBase = class LayerBase { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.LayerBase(); + $.index = reader.uint32_(position, 4, 0); + $.layerName = reader.string_(position, 6, null); + $.layerType = reader.uint32_(position, 8, 0); + $.inputSlots = reader.tableArray(position, 10, $root.armnnSerializer.InputSlot.decode); + $.outputSlots = reader.tableArray(position, 12, $root.armnnSerializer.OutputSlot.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.LayerBase(); + $.index = reader.value(json.index, 0); + $.layerName = reader.value(json.layerName, null); + $.layerType = $root.armnnSerializer.LayerType[json.layerType]; + $.inputSlots = reader.objectArray(json.inputSlots, $root.armnnSerializer.InputSlot.decodeText); + $.outputSlots = reader.objectArray(json.outputSlots, $root.armnnSerializer.OutputSlot.decodeText); + return $; + } +}; + +$root.armnnSerializer.BindableLayerBase = class BindableLayerBase { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.BindableLayerBase(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.layerBindingId = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.BindableLayerBase(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.layerBindingId = reader.value(json.layerBindingId, 0); + return $; + } +}; + +$root.armnnSerializer.AbsLayer = class AbsLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.AbsLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.AbsLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.ActivationLayer = class ActivationLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ActivationLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.ActivationDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new 
$root.armnnSerializer.ActivationLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ActivationDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.ActivationDescriptor = class ActivationDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ActivationDescriptor(); + $.activationFunction = reader.int8_(position, 4, 0); + $.a = reader.float32_(position, 6, 0); + $.b = reader.float32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ActivationDescriptor(); + $.activationFunction = $root.armnnSerializer.ActivationFunction[json.activationFunction]; + $.a = reader.value(json.a, 0); + $.b = reader.value(json.b, 0); + return $; + } +}; + +$root.armnnSerializer.AdditionLayer = class AdditionLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.AdditionLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.AdditionLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.ArgMinMaxLayer = class ArgMinMaxLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ArgMinMaxLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.ArgMinMaxDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ArgMinMaxLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ArgMinMaxDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.ArgMinMaxDescriptor = class ArgMinMaxDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ArgMinMaxDescriptor(); + $.argMinMaxFunction = reader.int8_(position, 4, 0); + $.axis = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ArgMinMaxDescriptor(); + $.argMinMaxFunction = $root.armnnSerializer.ArgMinMaxFunction[json.argMinMaxFunction]; + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.armnnSerializer.ComparisonOperation = { + Equal: 0, + Greater: 1, + GreaterOrEqual: 2, + Less: 3, + LessOrEqual: 4, + NotEqual: 5 +}; + +$root.armnnSerializer.ComparisonDescriptor = class ComparisonDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ComparisonDescriptor(); + $.operation = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ComparisonDescriptor(); + $.operation = $root.armnnSerializer.ComparisonOperation[json.operation]; + return $; + } +}; + +$root.armnnSerializer.ComparisonLayer = class ComparisonLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ComparisonLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.ComparisonDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ComparisonLayer(); + $.base = reader.object(json.base, 
$root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ComparisonDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.ConstantLayer = class ConstantLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ConstantLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.input = reader.table(position, 6, $root.armnnSerializer.ConstTensor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ConstantLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.input = reader.object(json.input, $root.armnnSerializer.ConstTensor.decodeText); + return $; + } +}; + +$root.armnnSerializer.Convolution2dLayer = class Convolution2dLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.Convolution2dLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.Convolution2dDescriptor.decode); + $.weights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode); + $.biases = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.Convolution2dLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.Convolution2dDescriptor.decodeText); + $.weights = reader.object(json.weights, $root.armnnSerializer.ConstTensor.decodeText); + $.biases = reader.object(json.biases, $root.armnnSerializer.ConstTensor.decodeText); + return $; + } +}; + +$root.armnnSerializer.Convolution2dDescriptor = class Convolution2dDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.Convolution2dDescriptor(); + $.padLeft = reader.uint32_(position, 4, 0); + $.padRight = reader.uint32_(position, 6, 0); + $.padTop = reader.uint32_(position, 8, 0); + $.padBottom = reader.uint32_(position, 10, 0); + $.strideX = reader.uint32_(position, 12, 0); + $.strideY = reader.uint32_(position, 14, 0); + $.dilationX = reader.uint32_(position, 16, 1); + $.dilationY = reader.uint32_(position, 18, 1); + $.biasEnabled = reader.bool_(position, 20, false); + $.dataLayout = reader.int8_(position, 22, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.Convolution2dDescriptor(); + $.padLeft = reader.value(json.padLeft, 0); + $.padRight = reader.value(json.padRight, 0); + $.padTop = reader.value(json.padTop, 0); + $.padBottom = reader.value(json.padBottom, 0); + $.strideX = reader.value(json.strideX, 0); + $.strideY = reader.value(json.strideY, 0); + $.dilationX = reader.value(json.dilationX, 1); + $.dilationY = reader.value(json.dilationY, 1); + $.biasEnabled = reader.value(json.biasEnabled, false); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.DepthToSpaceLayer = class DepthToSpaceLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.DepthToSpaceLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.DepthToSpaceDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.DepthToSpaceLayer(); + 
$.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.DepthToSpaceDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.DepthToSpaceDescriptor = class DepthToSpaceDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.DepthToSpaceDescriptor(); + $.blockSize = reader.uint32_(position, 4, 0); + $.dataLayout = reader.int8_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.DepthToSpaceDescriptor(); + $.blockSize = reader.value(json.blockSize, 0); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.DivisionLayer = class DivisionLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.DivisionLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.DivisionLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.UnaryOperation = { + Abs: 0, + Rsqrt: 1, + Sqrt: 2, + Exp: 3, + Neg: 4 +}; + +$root.armnnSerializer.ElementwiseUnaryDescriptor = class ElementwiseUnaryDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ElementwiseUnaryDescriptor(); + $.operation = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ElementwiseUnaryDescriptor(); + $.operation = $root.armnnSerializer.UnaryOperation[json.operation]; + return $; + } +}; + +$root.armnnSerializer.ElementwiseUnaryLayer = class ElementwiseUnaryLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ElementwiseUnaryLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.ElementwiseUnaryDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ElementwiseUnaryLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ElementwiseUnaryDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.EqualLayer = class EqualLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.EqualLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.EqualLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.FillLayer = class FillLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.FillLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.FillDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.FillLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.FillDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.FillDescriptor = class FillDescriptor { + + static 
decode(reader, position) { + const $ = new $root.armnnSerializer.FillDescriptor(); + $.value = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.FillDescriptor(); + $.value = reader.value(json.value, 0); + return $; + } +}; + +$root.armnnSerializer.FloorLayer = class FloorLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.FloorLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.FloorLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.FullyConnectedLayer = class FullyConnectedLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.FullyConnectedLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.FullyConnectedDescriptor.decode); + $.weights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode); + $.biases = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.FullyConnectedLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.FullyConnectedDescriptor.decodeText); + $.weights = reader.object(json.weights, $root.armnnSerializer.ConstTensor.decodeText); + $.biases = reader.object(json.biases, $root.armnnSerializer.ConstTensor.decodeText); + return $; + } +}; + +$root.armnnSerializer.FullyConnectedDescriptor = class FullyConnectedDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.FullyConnectedDescriptor(); + $.biasEnabled = reader.bool_(position, 4, false); + $.transposeWeightsMatrix = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.FullyConnectedDescriptor(); + $.biasEnabled = reader.value(json.biasEnabled, false); + $.transposeWeightsMatrix = reader.value(json.transposeWeightsMatrix, false); + return $; + } +}; + +$root.armnnSerializer.GatherLayer = class GatherLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.GatherLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.GatherDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.GatherLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.GatherDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.GatherDescriptor = class GatherDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.GatherDescriptor(); + $.axis = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.GatherDescriptor(); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.armnnSerializer.GreaterLayer = class GreaterLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.GreaterLayer(); + $.base = reader.table(position, 4, 
$root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.GreaterLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.InputLayer = class InputLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.InputLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.BindableLayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.InputLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.BindableLayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.InstanceNormalizationLayer = class InstanceNormalizationLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.InstanceNormalizationLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.InstanceNormalizationDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.InstanceNormalizationLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.InstanceNormalizationDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.InstanceNormalizationDescriptor = class InstanceNormalizationDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.InstanceNormalizationDescriptor(); + $.gamma = reader.float32_(position, 4, 0); + $.beta = reader.float32_(position, 6, 0); + $.eps = reader.float32_(position, 8, 0); + $.dataLayout = reader.int8_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.InstanceNormalizationDescriptor(); + $.gamma = reader.value(json.gamma, 0); + $.beta = reader.value(json.beta, 0); + $.eps = reader.value(json.eps, 0); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.LogSoftmaxLayer = class LogSoftmaxLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.LogSoftmaxLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.LogSoftmaxDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.LogSoftmaxLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.LogSoftmaxDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.LogSoftmaxDescriptor = class LogSoftmaxDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.LogSoftmaxDescriptor(); + $.beta = reader.float32_(position, 4, 1); + $.axis = reader.int32_(position, 6, -1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.LogSoftmaxDescriptor(); + $.beta = reader.value(json.beta, 1); + $.axis = reader.value(json.axis, -1); + return $; + } +}; + +$root.armnnSerializer.L2NormalizationLayer = class L2NormalizationLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.L2NormalizationLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = 
reader.table(position, 6, $root.armnnSerializer.L2NormalizationDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.L2NormalizationLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.L2NormalizationDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.L2NormalizationDescriptor = class L2NormalizationDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.L2NormalizationDescriptor(); + $.dataLayout = reader.int8_(position, 4, 1); + $.eps = reader.float32_(position, 6, 1e-12); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.L2NormalizationDescriptor(); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + $.eps = reader.value(json.eps, 1e-12); + return $; + } +}; + +$root.armnnSerializer.MinimumLayer = class MinimumLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.MinimumLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.MinimumLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.MaximumLayer = class MaximumLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.MaximumLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.MaximumLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.MultiplicationLayer = class MultiplicationLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.MultiplicationLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.MultiplicationLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.Pooling2dLayer = class Pooling2dLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.Pooling2dLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.Pooling2dDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.Pooling2dLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.Pooling2dDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.PoolingAlgorithm = { + Max: 0, + Average: 1, + L2: 2 +}; + +$root.armnnSerializer.OutputShapeRounding = { + Floor: 0, + Ceiling: 1 +}; + +$root.armnnSerializer.PaddingMethod = { + IgnoreValue: 0, + Exclude: 1 +}; + +$root.armnnSerializer.Pooling2dDescriptor = class Pooling2dDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.Pooling2dDescriptor(); + $.poolType = reader.int8_(position, 4, 0); + $.padLeft = reader.uint32_(position, 6, 0); + $.padRight = reader.uint32_(position, 8, 0); + $.padTop = reader.uint32_(position, 10, 0); + $.padBottom = 
reader.uint32_(position, 12, 0); + $.poolWidth = reader.uint32_(position, 14, 0); + $.poolHeight = reader.uint32_(position, 16, 0); + $.strideX = reader.uint32_(position, 18, 0); + $.strideY = reader.uint32_(position, 20, 0); + $.outputShapeRounding = reader.int8_(position, 22, 0); + $.paddingMethod = reader.int8_(position, 24, 0); + $.dataLayout = reader.int8_(position, 26, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.Pooling2dDescriptor(); + $.poolType = $root.armnnSerializer.PoolingAlgorithm[json.poolType]; + $.padLeft = reader.value(json.padLeft, 0); + $.padRight = reader.value(json.padRight, 0); + $.padTop = reader.value(json.padTop, 0); + $.padBottom = reader.value(json.padBottom, 0); + $.poolWidth = reader.value(json.poolWidth, 0); + $.poolHeight = reader.value(json.poolHeight, 0); + $.strideX = reader.value(json.strideX, 0); + $.strideY = reader.value(json.strideY, 0); + $.outputShapeRounding = $root.armnnSerializer.OutputShapeRounding[json.outputShapeRounding]; + $.paddingMethod = $root.armnnSerializer.PaddingMethod[json.paddingMethod]; + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.QuantizeLayer = class QuantizeLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.QuantizeLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.QuantizeLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.SoftmaxLayer = class SoftmaxLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SoftmaxLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.SoftmaxDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SoftmaxLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.SoftmaxDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.SoftmaxDescriptor = class SoftmaxDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SoftmaxDescriptor(); + $.beta = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SoftmaxDescriptor(); + $.beta = reader.value(json.beta, 0); + return $; + } +}; + +$root.armnnSerializer.DepthwiseConvolution2dLayer = class DepthwiseConvolution2dLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.DepthwiseConvolution2dLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.DepthwiseConvolution2dDescriptor.decode); + $.weights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode); + $.biases = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.DepthwiseConvolution2dLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.DepthwiseConvolution2dDescriptor.decodeText); + $.weights = 
reader.object(json.weights, $root.armnnSerializer.ConstTensor.decodeText); + $.biases = reader.object(json.biases, $root.armnnSerializer.ConstTensor.decodeText); + return $; + } +}; + +$root.armnnSerializer.DepthwiseConvolution2dDescriptor = class DepthwiseConvolution2dDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.DepthwiseConvolution2dDescriptor(); + $.padLeft = reader.uint32_(position, 4, 0); + $.padRight = reader.uint32_(position, 6, 0); + $.padTop = reader.uint32_(position, 8, 0); + $.padBottom = reader.uint32_(position, 10, 0); + $.strideX = reader.uint32_(position, 12, 0); + $.strideY = reader.uint32_(position, 14, 0); + $.dilationX = reader.uint32_(position, 16, 1); + $.dilationY = reader.uint32_(position, 18, 1); + $.biasEnabled = reader.bool_(position, 20, false); + $.dataLayout = reader.int8_(position, 22, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.DepthwiseConvolution2dDescriptor(); + $.padLeft = reader.value(json.padLeft, 0); + $.padRight = reader.value(json.padRight, 0); + $.padTop = reader.value(json.padTop, 0); + $.padBottom = reader.value(json.padBottom, 0); + $.strideX = reader.value(json.strideX, 0); + $.strideY = reader.value(json.strideY, 0); + $.dilationX = reader.value(json.dilationX, 1); + $.dilationY = reader.value(json.dilationY, 1); + $.biasEnabled = reader.value(json.biasEnabled, false); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.OutputLayer = class OutputLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.OutputLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.BindableLayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.OutputLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.BindableLayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.ReshapeLayer = class ReshapeLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ReshapeLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.ReshapeDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ReshapeLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ReshapeDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.ReshapeDescriptor = class ReshapeDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ReshapeDescriptor(); + $.targetShape = reader.typedArray(position, 4, Uint32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ReshapeDescriptor(); + $.targetShape = reader.typedArray(json.targetShape, Uint32Array); + return $; + } +}; + +$root.armnnSerializer.PermuteLayer = class PermuteLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.PermuteLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.PermuteDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.PermuteLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + 
$.descriptor = reader.object(json.descriptor, $root.armnnSerializer.PermuteDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.PermuteDescriptor = class PermuteDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.PermuteDescriptor(); + $.dimMappings = reader.typedArray(position, 4, Uint32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.PermuteDescriptor(); + $.dimMappings = reader.typedArray(json.dimMappings, Uint32Array); + return $; + } +}; + +$root.armnnSerializer.SpaceToBatchNdLayer = class SpaceToBatchNdLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SpaceToBatchNdLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.SpaceToBatchNdDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SpaceToBatchNdLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.SpaceToBatchNdDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.SpaceToBatchNdDescriptor = class SpaceToBatchNdDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SpaceToBatchNdDescriptor(); + $.blockShape = reader.typedArray(position, 4, Uint32Array); + $.padList = reader.typedArray(position, 6, Uint32Array); + $.dataLayout = reader.int8_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SpaceToBatchNdDescriptor(); + $.blockShape = reader.typedArray(json.blockShape, Uint32Array); + $.padList = reader.typedArray(json.padList, Uint32Array); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.SpaceToDepthLayer = class SpaceToDepthLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SpaceToDepthLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.SpaceToDepthDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SpaceToDepthLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.SpaceToDepthDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.SpaceToDepthDescriptor = class SpaceToDepthDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SpaceToDepthDescriptor(); + $.blockSize = reader.uint32_(position, 4, 0); + $.dataLayout = reader.int8_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SpaceToDepthDescriptor(); + $.blockSize = reader.value(json.blockSize, 0); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.SubtractionLayer = class SubtractionLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SubtractionLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SubtractionLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + 
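+            // Like AdditionLayer, MultiplicationLayer and the other binary
+            // element-wise layers, SubtractionLayer has no descriptor: the
+            // shared LayerBase (slot 4) is its only field.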
return $; + } +}; + +$root.armnnSerializer.BatchToSpaceNdLayer = class BatchToSpaceNdLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.BatchToSpaceNdLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.BatchToSpaceNdDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.BatchToSpaceNdLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.BatchToSpaceNdDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.BatchToSpaceNdDescriptor = class BatchToSpaceNdDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.BatchToSpaceNdDescriptor(); + $.blockShape = reader.typedArray(position, 4, Uint32Array); + $.crops = reader.typedArray(position, 6, Uint32Array); + $.dataLayout = reader.int8_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.BatchToSpaceNdDescriptor(); + $.blockShape = reader.typedArray(json.blockShape, Uint32Array); + $.crops = reader.typedArray(json.crops, Uint32Array); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.NormalizationAlgorithmChannel = { + Across: 0, + Within: 1 +}; + +$root.armnnSerializer.NormalizationAlgorithmMethod = { + LocalBrightness: 0, + LocalContrast: 1 +}; + +$root.armnnSerializer.NormalizationLayer = class NormalizationLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.NormalizationLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.NormalizationDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.NormalizationLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.NormalizationDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.NormalizationDescriptor = class NormalizationDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.NormalizationDescriptor(); + $.normChannelType = reader.int8_(position, 4, 0); + $.normMethodType = reader.int8_(position, 6, 0); + $.normSize = reader.uint32_(position, 8, 0); + $.alpha = reader.float32_(position, 10, 0); + $.beta = reader.float32_(position, 12, 0); + $.k = reader.float32_(position, 14, 0); + $.dataLayout = reader.int8_(position, 16, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.NormalizationDescriptor(); + $.normChannelType = $root.armnnSerializer.NormalizationAlgorithmChannel[json.normChannelType]; + $.normMethodType = $root.armnnSerializer.NormalizationAlgorithmMethod[json.normMethodType]; + $.normSize = reader.value(json.normSize, 0); + $.alpha = reader.value(json.alpha, 0); + $.beta = reader.value(json.beta, 0); + $.k = reader.value(json.k, 0); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.MeanLayer = class MeanLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.MeanLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = 
reader.table(position, 6, $root.armnnSerializer.MeanDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.MeanLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.MeanDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.MeanDescriptor = class MeanDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.MeanDescriptor(); + $.axis = reader.typedArray(position, 4, Uint32Array); + $.keepDims = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.MeanDescriptor(); + $.axis = reader.typedArray(json.axis, Uint32Array); + $.keepDims = reader.value(json.keepDims, false); + return $; + } +}; + +$root.armnnSerializer.PadLayer = class PadLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.PadLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.PadDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.PadLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.PadDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.PadDescriptor = class PadDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.PadDescriptor(); + $.padList = reader.typedArray(position, 4, Uint32Array); + $.padValue = reader.float32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.PadDescriptor(); + $.padList = reader.typedArray(json.padList, Uint32Array); + $.padValue = reader.value(json.padValue, 0); + return $; + } +}; + +$root.armnnSerializer.RsqrtLayer = class RsqrtLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.RsqrtLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.RsqrtLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.BatchNormalizationLayer = class BatchNormalizationLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.BatchNormalizationLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.BatchNormalizationDescriptor.decode); + $.mean = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode); + $.variance = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode); + $.beta = reader.table(position, 12, $root.armnnSerializer.ConstTensor.decode); + $.gamma = reader.table(position, 14, $root.armnnSerializer.ConstTensor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.BatchNormalizationLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.BatchNormalizationDescriptor.decodeText); + $.mean = reader.object(json.mean, $root.armnnSerializer.ConstTensor.decodeText); + $.variance = reader.object(json.variance, 
$root.armnnSerializer.ConstTensor.decodeText); + $.beta = reader.object(json.beta, $root.armnnSerializer.ConstTensor.decodeText); + $.gamma = reader.object(json.gamma, $root.armnnSerializer.ConstTensor.decodeText); + return $; + } +}; + +$root.armnnSerializer.BatchNormalizationDescriptor = class BatchNormalizationDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.BatchNormalizationDescriptor(); + $.eps = reader.float32_(position, 4, 0); + $.dataLayout = reader.int8_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.BatchNormalizationDescriptor(); + $.eps = reader.value(json.eps, 0); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.ResizeBilinearLayer = class ResizeBilinearLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ResizeBilinearLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.ResizeBilinearDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ResizeBilinearLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ResizeBilinearDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.ResizeBilinearDescriptor = class ResizeBilinearDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ResizeBilinearDescriptor(); + $.targetWidth = reader.uint32_(position, 4, 0); + $.targetHeight = reader.uint32_(position, 6, 0); + $.dataLayout = reader.int8_(position, 8, 0); + $.alignCorners = reader.bool_(position, 10, false); + $.halfPixelCenters = reader.bool_(position, 12, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ResizeBilinearDescriptor(); + $.targetWidth = reader.value(json.targetWidth, 0); + $.targetHeight = reader.value(json.targetHeight, 0); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + $.alignCorners = reader.value(json.alignCorners, false); + $.halfPixelCenters = reader.value(json.halfPixelCenters, false); + return $; + } +}; + +$root.armnnSerializer.SliceLayer = class SliceLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SliceLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.SliceDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SliceLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.SliceDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.SliceDescriptor = class SliceDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SliceDescriptor(); + $.begin = reader.typedArray(position, 4, Uint32Array); + $.size = reader.typedArray(position, 6, Uint32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SliceDescriptor(); + $.begin = reader.typedArray(json.begin, Uint32Array); + $.size = reader.typedArray(json.size, Uint32Array); + return $; + } +}; + +$root.armnnSerializer.StridedSliceLayer = class StridedSliceLayer { + + 
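+    // The StridedSliceDescriptor decoded below follows TensorFlow-style
+    // strided-slice semantics: begin/end/stride are per-dimension vectors,
+    // while beginMask, endMask, shrinkAxisMask, ellipsisMask and newAxisMask
+    // are int32 bitfields with one bit per dimension.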
static decode(reader, position) { + const $ = new $root.armnnSerializer.StridedSliceLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.StridedSliceDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.StridedSliceLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.StridedSliceDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.StridedSliceDescriptor = class StridedSliceDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.StridedSliceDescriptor(); + $.begin = reader.typedArray(position, 4, Int32Array); + $.end = reader.typedArray(position, 6, Int32Array); + $.stride = reader.typedArray(position, 8, Int32Array); + $.beginMask = reader.int32_(position, 10, 0); + $.endMask = reader.int32_(position, 12, 0); + $.shrinkAxisMask = reader.int32_(position, 14, 0); + $.ellipsisMask = reader.int32_(position, 16, 0); + $.newAxisMask = reader.int32_(position, 18, 0); + $.dataLayout = reader.int8_(position, 20, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.StridedSliceDescriptor(); + $.begin = reader.typedArray(json.begin, Int32Array); + $.end = reader.typedArray(json.end, Int32Array); + $.stride = reader.typedArray(json.stride, Int32Array); + $.beginMask = reader.value(json.beginMask, 0); + $.endMask = reader.value(json.endMask, 0); + $.shrinkAxisMask = reader.value(json.shrinkAxisMask, 0); + $.ellipsisMask = reader.value(json.ellipsisMask, 0); + $.newAxisMask = reader.value(json.newAxisMask, 0); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.ConcatLayer = class ConcatLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ConcatLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.OriginsDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ConcatLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.OriginsDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.MergerLayer = class MergerLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.MergerLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.OriginsDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.MergerLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.OriginsDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.UintVector = class UintVector { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.UintVector(); + $.data = reader.typedArray(position, 4, Uint32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.UintVector(); + $.data = reader.typedArray(json.data, Uint32Array); + return $; + } +}; + +$root.armnnSerializer.OriginsDescriptor = 
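+// OriginsDescriptor is shared by ConcatLayer and the older MergerLayer above:
+// concatAxis plus one UintVector of per-view origins per input. ViewsDescriptor
+// below pairs it with per-view sizes for SplitterLayer.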
class OriginsDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.OriginsDescriptor(); + $.concatAxis = reader.uint32_(position, 4, 0); + $.numViews = reader.uint32_(position, 6, 0); + $.numDimensions = reader.uint32_(position, 8, 0); + $.viewOrigins = reader.tableArray(position, 10, $root.armnnSerializer.UintVector.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.OriginsDescriptor(); + $.concatAxis = reader.value(json.concatAxis, 0); + $.numViews = reader.value(json.numViews, 0); + $.numDimensions = reader.value(json.numDimensions, 0); + $.viewOrigins = reader.objectArray(json.viewOrigins, $root.armnnSerializer.UintVector.decodeText); + return $; + } +}; + +$root.armnnSerializer.ViewsDescriptor = class ViewsDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ViewsDescriptor(); + $.origins = reader.table(position, 4, $root.armnnSerializer.OriginsDescriptor.decode); + $.viewSizes = reader.tableArray(position, 6, $root.armnnSerializer.UintVector.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ViewsDescriptor(); + $.origins = reader.object(json.origins, $root.armnnSerializer.OriginsDescriptor.decodeText); + $.viewSizes = reader.objectArray(json.viewSizes, $root.armnnSerializer.UintVector.decodeText); + return $; + } +}; + +$root.armnnSerializer.SplitterLayer = class SplitterLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SplitterLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.ViewsDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SplitterLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ViewsDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.DetectionPostProcessLayer = class DetectionPostProcessLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.DetectionPostProcessLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.DetectionPostProcessDescriptor.decode); + $.anchors = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.DetectionPostProcessLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.DetectionPostProcessDescriptor.decodeText); + $.anchors = reader.object(json.anchors, $root.armnnSerializer.ConstTensor.decodeText); + return $; + } +}; + +$root.armnnSerializer.DetectionPostProcessDescriptor = class DetectionPostProcessDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.DetectionPostProcessDescriptor(); + $.maxDetections = reader.uint32_(position, 4, 0); + $.maxClassesPerDetection = reader.uint32_(position, 6, 0); + $.detectionsPerClass = reader.uint32_(position, 8, 0); + $.nmsScoreThreshold = reader.float32_(position, 10, 0); + $.nmsIouThreshold = reader.float32_(position, 12, 0); + $.numClasses = reader.uint32_(position, 14, 0); + $.useRegularNms = reader.bool_(position, 16, false); + $.scaleX = 
reader.float32_(position, 18, 0); + $.scaleY = reader.float32_(position, 20, 0); + $.scaleW = reader.float32_(position, 22, 0); + $.scaleH = reader.float32_(position, 24, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.DetectionPostProcessDescriptor(); + $.maxDetections = reader.value(json.maxDetections, 0); + $.maxClassesPerDetection = reader.value(json.maxClassesPerDetection, 0); + $.detectionsPerClass = reader.value(json.detectionsPerClass, 0); + $.nmsScoreThreshold = reader.value(json.nmsScoreThreshold, 0); + $.nmsIouThreshold = reader.value(json.nmsIouThreshold, 0); + $.numClasses = reader.value(json.numClasses, 0); + $.useRegularNms = reader.value(json.useRegularNms, false); + $.scaleX = reader.value(json.scaleX, 0); + $.scaleY = reader.value(json.scaleY, 0); + $.scaleW = reader.value(json.scaleW, 0); + $.scaleH = reader.value(json.scaleH, 0); + return $; + } +}; + +$root.armnnSerializer.LstmInputParams = class LstmInputParams { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.LstmInputParams(); + $.inputToForgetWeights = reader.table(position, 4, $root.armnnSerializer.ConstTensor.decode); + $.inputToCellWeights = reader.table(position, 6, $root.armnnSerializer.ConstTensor.decode); + $.inputToOutputWeights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToForgetWeights = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToCellWeights = reader.table(position, 12, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToOutputWeights = reader.table(position, 14, $root.armnnSerializer.ConstTensor.decode); + $.forgetGateBias = reader.table(position, 16, $root.armnnSerializer.ConstTensor.decode); + $.cellBias = reader.table(position, 18, $root.armnnSerializer.ConstTensor.decode); + $.outputGateBias = reader.table(position, 20, $root.armnnSerializer.ConstTensor.decode); + $.inputToInputWeights = reader.table(position, 22, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToInputWeights = reader.table(position, 24, $root.armnnSerializer.ConstTensor.decode); + $.cellToInputWeights = reader.table(position, 26, $root.armnnSerializer.ConstTensor.decode); + $.inputGateBias = reader.table(position, 28, $root.armnnSerializer.ConstTensor.decode); + $.projectionWeights = reader.table(position, 30, $root.armnnSerializer.ConstTensor.decode); + $.projectionBias = reader.table(position, 32, $root.armnnSerializer.ConstTensor.decode); + $.cellToForgetWeights = reader.table(position, 34, $root.armnnSerializer.ConstTensor.decode); + $.cellToOutputWeights = reader.table(position, 36, $root.armnnSerializer.ConstTensor.decode); + $.inputLayerNormWeights = reader.table(position, 38, $root.armnnSerializer.ConstTensor.decode); + $.forgetLayerNormWeights = reader.table(position, 40, $root.armnnSerializer.ConstTensor.decode); + $.cellLayerNormWeights = reader.table(position, 42, $root.armnnSerializer.ConstTensor.decode); + $.outputLayerNormWeights = reader.table(position, 44, $root.armnnSerializer.ConstTensor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.LstmInputParams(); + $.inputToForgetWeights = reader.object(json.inputToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputToCellWeights = reader.object(json.inputToCellWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputToOutputWeights = reader.object(json.inputToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText); + 
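+            // The nine tensors up to outputGateBias are the always-present
+            // LSTM core; everything from inputToInputWeights onward is
+            // optional, gated by the LstmDescriptor flags below (the
+            // input-gate tensors when cifgEnabled is false, cellTo* when
+            // peepholeEnabled, projection* when projectionEnabled, and the
+            // *LayerNormWeights when layerNormEnabled).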
$.recurrentToForgetWeights = reader.object(json.recurrentToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.recurrentToCellWeights = reader.object(json.recurrentToCellWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.recurrentToOutputWeights = reader.object(json.recurrentToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.forgetGateBias = reader.object(json.forgetGateBias, $root.armnnSerializer.ConstTensor.decodeText); + $.cellBias = reader.object(json.cellBias, $root.armnnSerializer.ConstTensor.decodeText); + $.outputGateBias = reader.object(json.outputGateBias, $root.armnnSerializer.ConstTensor.decodeText); + $.inputToInputWeights = reader.object(json.inputToInputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.recurrentToInputWeights = reader.object(json.recurrentToInputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.cellToInputWeights = reader.object(json.cellToInputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputGateBias = reader.object(json.inputGateBias, $root.armnnSerializer.ConstTensor.decodeText); + $.projectionWeights = reader.object(json.projectionWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.projectionBias = reader.object(json.projectionBias, $root.armnnSerializer.ConstTensor.decodeText); + $.cellToForgetWeights = reader.object(json.cellToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.cellToOutputWeights = reader.object(json.cellToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputLayerNormWeights = reader.object(json.inputLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.forgetLayerNormWeights = reader.object(json.forgetLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.cellLayerNormWeights = reader.object(json.cellLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.outputLayerNormWeights = reader.object(json.outputLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText); + return $; + } +}; + +$root.armnnSerializer.LstmDescriptor = class LstmDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.LstmDescriptor(); + $.activationFunc = reader.uint32_(position, 4, 0); + $.clippingThresCell = reader.float32_(position, 6, 0); + $.clippingThresProj = reader.float32_(position, 8, 0); + $.cifgEnabled = reader.bool_(position, 10, true); + $.peepholeEnabled = reader.bool_(position, 12, false); + $.projectionEnabled = reader.bool_(position, 14, false); + $.layerNormEnabled = reader.bool_(position, 16, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.LstmDescriptor(); + $.activationFunc = reader.value(json.activationFunc, 0); + $.clippingThresCell = reader.value(json.clippingThresCell, 0); + $.clippingThresProj = reader.value(json.clippingThresProj, 0); + $.cifgEnabled = reader.value(json.cifgEnabled, true); + $.peepholeEnabled = reader.value(json.peepholeEnabled, false); + $.projectionEnabled = reader.value(json.projectionEnabled, false); + $.layerNormEnabled = reader.value(json.layerNormEnabled, false); + return $; + } +}; + +$root.armnnSerializer.LstmLayer = class LstmLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.LstmLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.LstmDescriptor.decode); + $.inputParams = reader.table(position, 8, 
$root.armnnSerializer.LstmInputParams.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.LstmLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.LstmDescriptor.decodeText); + $.inputParams = reader.object(json.inputParams, $root.armnnSerializer.LstmInputParams.decodeText); + return $; + } +}; + +$root.armnnSerializer.QLstmInputParams = class QLstmInputParams { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.QLstmInputParams(); + $.inputToForgetWeights = reader.table(position, 4, $root.armnnSerializer.ConstTensor.decode); + $.inputToCellWeights = reader.table(position, 6, $root.armnnSerializer.ConstTensor.decode); + $.inputToOutputWeights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToForgetWeights = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToCellWeights = reader.table(position, 12, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToOutputWeights = reader.table(position, 14, $root.armnnSerializer.ConstTensor.decode); + $.forgetGateBias = reader.table(position, 16, $root.armnnSerializer.ConstTensor.decode); + $.cellBias = reader.table(position, 18, $root.armnnSerializer.ConstTensor.decode); + $.outputGateBias = reader.table(position, 20, $root.armnnSerializer.ConstTensor.decode); + $.inputToInputWeights = reader.table(position, 22, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToInputWeights = reader.table(position, 24, $root.armnnSerializer.ConstTensor.decode); + $.inputGateBias = reader.table(position, 26, $root.armnnSerializer.ConstTensor.decode); + $.projectionWeights = reader.table(position, 28, $root.armnnSerializer.ConstTensor.decode); + $.projectionBias = reader.table(position, 30, $root.armnnSerializer.ConstTensor.decode); + $.cellToInputWeights = reader.table(position, 32, $root.armnnSerializer.ConstTensor.decode); + $.cellToForgetWeights = reader.table(position, 34, $root.armnnSerializer.ConstTensor.decode); + $.cellToOutputWeights = reader.table(position, 36, $root.armnnSerializer.ConstTensor.decode); + $.inputLayerNormWeights = reader.table(position, 38, $root.armnnSerializer.ConstTensor.decode); + $.forgetLayerNormWeights = reader.table(position, 40, $root.armnnSerializer.ConstTensor.decode); + $.cellLayerNormWeights = reader.table(position, 42, $root.armnnSerializer.ConstTensor.decode); + $.outputLayerNormWeights = reader.table(position, 44, $root.armnnSerializer.ConstTensor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.QLstmInputParams(); + $.inputToForgetWeights = reader.object(json.inputToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputToCellWeights = reader.object(json.inputToCellWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputToOutputWeights = reader.object(json.inputToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.recurrentToForgetWeights = reader.object(json.recurrentToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.recurrentToCellWeights = reader.object(json.recurrentToCellWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.recurrentToOutputWeights = reader.object(json.recurrentToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.forgetGateBias = reader.object(json.forgetGateBias, $root.armnnSerializer.ConstTensor.decodeText); + 
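+            // Note the slot order differs from LstmInputParams: here the
+            // projection tensors (slots 28/30) precede the peephole cellTo*
+            // weights (slots 32-36), and clipping moves into QLstmDescriptor
+            // (cellClip/projectionClip) below.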
$.cellBias = reader.object(json.cellBias, $root.armnnSerializer.ConstTensor.decodeText); + $.outputGateBias = reader.object(json.outputGateBias, $root.armnnSerializer.ConstTensor.decodeText); + $.inputToInputWeights = reader.object(json.inputToInputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.recurrentToInputWeights = reader.object(json.recurrentToInputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputGateBias = reader.object(json.inputGateBias, $root.armnnSerializer.ConstTensor.decodeText); + $.projectionWeights = reader.object(json.projectionWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.projectionBias = reader.object(json.projectionBias, $root.armnnSerializer.ConstTensor.decodeText); + $.cellToInputWeights = reader.object(json.cellToInputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.cellToForgetWeights = reader.object(json.cellToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.cellToOutputWeights = reader.object(json.cellToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputLayerNormWeights = reader.object(json.inputLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.forgetLayerNormWeights = reader.object(json.forgetLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.cellLayerNormWeights = reader.object(json.cellLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.outputLayerNormWeights = reader.object(json.outputLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText); + return $; + } +}; + +$root.armnnSerializer.QLstmDescriptor = class QLstmDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.QLstmDescriptor(); + $.cifgEnabled = reader.bool_(position, 4, true); + $.peepholeEnabled = reader.bool_(position, 6, false); + $.projectionEnabled = reader.bool_(position, 8, false); + $.layerNormEnabled = reader.bool_(position, 10, false); + $.cellClip = reader.float32_(position, 12, 0); + $.projectionClip = reader.float32_(position, 14, 0); + $.inputIntermediateScale = reader.float32_(position, 16, 0); + $.forgetIntermediateScale = reader.float32_(position, 18, 0); + $.cellIntermediateScale = reader.float32_(position, 20, 0); + $.outputIntermediateScale = reader.float32_(position, 22, 0); + $.hiddenStateZeroPoint = reader.int32_(position, 24, 0); + $.hiddenStateScale = reader.float32_(position, 26, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.QLstmDescriptor(); + $.cifgEnabled = reader.value(json.cifgEnabled, true); + $.peepholeEnabled = reader.value(json.peepholeEnabled, false); + $.projectionEnabled = reader.value(json.projectionEnabled, false); + $.layerNormEnabled = reader.value(json.layerNormEnabled, false); + $.cellClip = reader.value(json.cellClip, 0); + $.projectionClip = reader.value(json.projectionClip, 0); + $.inputIntermediateScale = reader.value(json.inputIntermediateScale, 0); + $.forgetIntermediateScale = reader.value(json.forgetIntermediateScale, 0); + $.cellIntermediateScale = reader.value(json.cellIntermediateScale, 0); + $.outputIntermediateScale = reader.value(json.outputIntermediateScale, 0); + $.hiddenStateZeroPoint = reader.value(json.hiddenStateZeroPoint, 0); + $.hiddenStateScale = reader.value(json.hiddenStateScale, 0); + return $; + } +}; + +$root.armnnSerializer.QLstmLayer = class QLstmLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.QLstmLayer(); + $.base = reader.table(position, 4, 
$root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.QLstmDescriptor.decode); + $.inputParams = reader.table(position, 8, $root.armnnSerializer.QLstmInputParams.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.QLstmLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.QLstmDescriptor.decodeText); + $.inputParams = reader.object(json.inputParams, $root.armnnSerializer.QLstmInputParams.decodeText); + return $; + } +}; + +$root.armnnSerializer.QuantizedLstmInputParams = class QuantizedLstmInputParams { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.QuantizedLstmInputParams(); + $.inputToInputWeights = reader.table(position, 4, $root.armnnSerializer.ConstTensor.decode); + $.inputToForgetWeights = reader.table(position, 6, $root.armnnSerializer.ConstTensor.decode); + $.inputToCellWeights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode); + $.inputToOutputWeights = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToInputWeights = reader.table(position, 12, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToForgetWeights = reader.table(position, 14, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToCellWeights = reader.table(position, 16, $root.armnnSerializer.ConstTensor.decode); + $.recurrentToOutputWeights = reader.table(position, 18, $root.armnnSerializer.ConstTensor.decode); + $.inputGateBias = reader.table(position, 20, $root.armnnSerializer.ConstTensor.decode); + $.forgetGateBias = reader.table(position, 22, $root.armnnSerializer.ConstTensor.decode); + $.cellBias = reader.table(position, 24, $root.armnnSerializer.ConstTensor.decode); + $.outputGateBias = reader.table(position, 26, $root.armnnSerializer.ConstTensor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.QuantizedLstmInputParams(); + $.inputToInputWeights = reader.object(json.inputToInputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputToForgetWeights = reader.object(json.inputToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputToCellWeights = reader.object(json.inputToCellWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputToOutputWeights = reader.object(json.inputToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.recurrentToInputWeights = reader.object(json.recurrentToInputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.recurrentToForgetWeights = reader.object(json.recurrentToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.recurrentToCellWeights = reader.object(json.recurrentToCellWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.recurrentToOutputWeights = reader.object(json.recurrentToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText); + $.inputGateBias = reader.object(json.inputGateBias, $root.armnnSerializer.ConstTensor.decodeText); + $.forgetGateBias = reader.object(json.forgetGateBias, $root.armnnSerializer.ConstTensor.decodeText); + $.cellBias = reader.object(json.cellBias, $root.armnnSerializer.ConstTensor.decodeText); + $.outputGateBias = reader.object(json.outputGateBias, $root.armnnSerializer.ConstTensor.decodeText); + return $; + } +}; + +$root.armnnSerializer.QuantizedLstmLayer = class QuantizedLstmLayer { + + static decode(reader, position) { + 
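+        // QuantizedLstmLayer has no descriptor table: unlike LstmLayer and
+        // QLstmLayer it is fully specified by its base (slot 4) and the
+        // fixed set of twelve ConstTensor input params (slot 6).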
const $ = new $root.armnnSerializer.QuantizedLstmLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.inputParams = reader.table(position, 6, $root.armnnSerializer.QuantizedLstmInputParams.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.QuantizedLstmLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.inputParams = reader.object(json.inputParams, $root.armnnSerializer.QuantizedLstmInputParams.decodeText); + return $; + } +}; + +$root.armnnSerializer.DequantizeLayer = class DequantizeLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.DequantizeLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.DequantizeLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.MergeLayer = class MergeLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.MergeLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.MergeLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.SwitchLayer = class SwitchLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SwitchLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SwitchLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.PreluLayer = class PreluLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.PreluLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.PreluLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.TransposeConvolution2dLayer = class TransposeConvolution2dLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.TransposeConvolution2dLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.TransposeConvolution2dDescriptor.decode); + $.weights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode); + $.biases = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.TransposeConvolution2dLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.TransposeConvolution2dDescriptor.decodeText); + $.weights = reader.object(json.weights, $root.armnnSerializer.ConstTensor.decodeText); + $.biases = reader.object(json.biases, $root.armnnSerializer.ConstTensor.decodeText); + return $; + } +}; + +$root.armnnSerializer.TransposeConvolution2dDescriptor = class TransposeConvolution2dDescriptor { + + static decode(reader, position) { + const $ = new 
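+        // (As with DepthwiseConvolution2dDescriptor, dataLayout below
+        // defaults to 1 rather than the 0 used by most descriptors in this
+        // schema.)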
$root.armnnSerializer.TransposeConvolution2dDescriptor(); + $.padLeft = reader.uint32_(position, 4, 0); + $.padRight = reader.uint32_(position, 6, 0); + $.padTop = reader.uint32_(position, 8, 0); + $.padBottom = reader.uint32_(position, 10, 0); + $.strideX = reader.uint32_(position, 12, 0); + $.strideY = reader.uint32_(position, 14, 0); + $.biasEnabled = reader.bool_(position, 16, false); + $.dataLayout = reader.int8_(position, 18, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.TransposeConvolution2dDescriptor(); + $.padLeft = reader.value(json.padLeft, 0); + $.padRight = reader.value(json.padRight, 0); + $.padTop = reader.value(json.padTop, 0); + $.padBottom = reader.value(json.padBottom, 0); + $.strideX = reader.value(json.strideX, 0); + $.strideY = reader.value(json.strideY, 0); + $.biasEnabled = reader.value(json.biasEnabled, false); + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + return $; + } +}; + +$root.armnnSerializer.TransposeLayer = class TransposeLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.TransposeLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.TransposeDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.TransposeLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.TransposeDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.TransposeDescriptor = class TransposeDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.TransposeDescriptor(); + $.dimMappings = reader.typedArray(position, 4, Uint32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.TransposeDescriptor(); + $.dimMappings = reader.typedArray(json.dimMappings, Uint32Array); + return $; + } +}; + +$root.armnnSerializer.ResizeLayer = class ResizeLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ResizeLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.ResizeDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ResizeLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ResizeDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.ResizeDescriptor = class ResizeDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.ResizeDescriptor(); + $.targetHeight = reader.uint32_(position, 4, 0); + $.targetWidth = reader.uint32_(position, 6, 0); + $.method = reader.int8_(position, 8, 0); + $.dataLayout = reader.int8_(position, 10, 0); + $.alignCorners = reader.bool_(position, 12, false); + $.halfPixelCenters = reader.bool_(position, 14, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.ResizeDescriptor(); + $.targetHeight = reader.value(json.targetHeight, 0); + $.targetWidth = reader.value(json.targetWidth, 0); + $.method = $root.armnnSerializer.ResizeMethod[json.method]; + $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout]; + $.alignCorners = 
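+            // ResizeDescriptor generalizes the older ResizeBilinearDescriptor
+            // above: the ResizeMethod enum selects the interpolation instead
+            // of baking bilinear into the layer type.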
reader.value(json.alignCorners, false); + $.halfPixelCenters = reader.value(json.halfPixelCenters, false); + return $; + } +}; + +$root.armnnSerializer.StackLayer = class StackLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.StackLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.StackDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.StackLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.StackDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.StackDescriptor = class StackDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.StackDescriptor(); + $.axis = reader.uint32_(position, 4, 0); + $.numInputs = reader.uint32_(position, 6, 0); + $.inputShape = reader.typedArray(position, 8, Uint32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.StackDescriptor(); + $.axis = reader.value(json.axis, 0); + $.numInputs = reader.value(json.numInputs, 0); + $.inputShape = reader.typedArray(json.inputShape, Uint32Array); + return $; + } +}; + +$root.armnnSerializer.StandInDescriptor = class StandInDescriptor { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.StandInDescriptor(); + $.numInputs = reader.uint32_(position, 4, 0); + $.numOutputs = reader.uint32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.StandInDescriptor(); + $.numInputs = reader.value(json.numInputs, 0); + $.numOutputs = reader.value(json.numOutputs, 0); + return $; + } +}; + +$root.armnnSerializer.StandInLayer = class StandInLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.StandInLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + $.descriptor = reader.table(position, 6, $root.armnnSerializer.StandInDescriptor.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.StandInLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.StandInDescriptor.decodeText); + return $; + } +}; + +$root.armnnSerializer.RankLayer = class RankLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.RankLayer(); + $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.RankLayer(); + $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText); + return $; + } +}; + +$root.armnnSerializer.Layer = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.armnnSerializer.ActivationLayer.decode(reader, position); + case 2: return $root.armnnSerializer.AdditionLayer.decode(reader, position); + case 3: return $root.armnnSerializer.BatchToSpaceNdLayer.decode(reader, position); + case 4: return $root.armnnSerializer.BatchNormalizationLayer.decode(reader, position); + case 5: return $root.armnnSerializer.ConstantLayer.decode(reader, position); + case 6: return $root.armnnSerializer.Convolution2dLayer.decode(reader, position); + case 7: return 
$root.armnnSerializer.DepthwiseConvolution2dLayer.decode(reader, position); + case 8: return $root.armnnSerializer.FullyConnectedLayer.decode(reader, position); + case 9: return $root.armnnSerializer.InputLayer.decode(reader, position); + case 10: return $root.armnnSerializer.MultiplicationLayer.decode(reader, position); + case 11: return $root.armnnSerializer.OutputLayer.decode(reader, position); + case 12: return $root.armnnSerializer.PermuteLayer.decode(reader, position); + case 13: return $root.armnnSerializer.Pooling2dLayer.decode(reader, position); + case 14: return $root.armnnSerializer.ReshapeLayer.decode(reader, position); + case 15: return $root.armnnSerializer.SoftmaxLayer.decode(reader, position); + case 16: return $root.armnnSerializer.SpaceToBatchNdLayer.decode(reader, position); + case 17: return $root.armnnSerializer.DivisionLayer.decode(reader, position); + case 18: return $root.armnnSerializer.MinimumLayer.decode(reader, position); + case 19: return $root.armnnSerializer.EqualLayer.decode(reader, position); + case 20: return $root.armnnSerializer.MaximumLayer.decode(reader, position); + case 21: return $root.armnnSerializer.NormalizationLayer.decode(reader, position); + case 22: return $root.armnnSerializer.PadLayer.decode(reader, position); + case 23: return $root.armnnSerializer.RsqrtLayer.decode(reader, position); + case 24: return $root.armnnSerializer.FloorLayer.decode(reader, position); + case 25: return $root.armnnSerializer.GreaterLayer.decode(reader, position); + case 26: return $root.armnnSerializer.ResizeBilinearLayer.decode(reader, position); + case 27: return $root.armnnSerializer.SubtractionLayer.decode(reader, position); + case 28: return $root.armnnSerializer.StridedSliceLayer.decode(reader, position); + case 29: return $root.armnnSerializer.GatherLayer.decode(reader, position); + case 30: return $root.armnnSerializer.MeanLayer.decode(reader, position); + case 31: return $root.armnnSerializer.MergerLayer.decode(reader, position); + case 32: return $root.armnnSerializer.L2NormalizationLayer.decode(reader, position); + case 33: return $root.armnnSerializer.SplitterLayer.decode(reader, position); + case 34: return $root.armnnSerializer.DetectionPostProcessLayer.decode(reader, position); + case 35: return $root.armnnSerializer.LstmLayer.decode(reader, position); + case 36: return $root.armnnSerializer.QuantizedLstmLayer.decode(reader, position); + case 37: return $root.armnnSerializer.QuantizeLayer.decode(reader, position); + case 38: return $root.armnnSerializer.DequantizeLayer.decode(reader, position); + case 39: return $root.armnnSerializer.MergeLayer.decode(reader, position); + case 40: return $root.armnnSerializer.SwitchLayer.decode(reader, position); + case 41: return $root.armnnSerializer.ConcatLayer.decode(reader, position); + case 42: return $root.armnnSerializer.SpaceToDepthLayer.decode(reader, position); + case 43: return $root.armnnSerializer.PreluLayer.decode(reader, position); + case 44: return $root.armnnSerializer.TransposeConvolution2dLayer.decode(reader, position); + case 45: return $root.armnnSerializer.ResizeLayer.decode(reader, position); + case 46: return $root.armnnSerializer.StackLayer.decode(reader, position); + case 47: return $root.armnnSerializer.AbsLayer.decode(reader, position); + case 48: return $root.armnnSerializer.ArgMinMaxLayer.decode(reader, position); + case 49: return $root.armnnSerializer.SliceLayer.decode(reader, position); + case 50: return $root.armnnSerializer.DepthToSpaceLayer.decode(reader, position); + case 51: 
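+            // Union dispatch: these numeric tags follow the declaration order
+            // of the Layer union in the FlatBuffers schema; tag 0 is the
+            // implicit NONE member, so unknown tags fall through to the
+            // default and decode as undefined.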
return $root.armnnSerializer.InstanceNormalizationLayer.decode(reader, position); + case 52: return $root.armnnSerializer.LogSoftmaxLayer.decode(reader, position); + case 53: return $root.armnnSerializer.ComparisonLayer.decode(reader, position); + case 54: return $root.armnnSerializer.StandInLayer.decode(reader, position); + case 55: return $root.armnnSerializer.ElementwiseUnaryLayer.decode(reader, position); + case 56: return $root.armnnSerializer.TransposeLayer.decode(reader, position); + case 57: return $root.armnnSerializer.QLstmLayer.decode(reader, position); + case 58: return $root.armnnSerializer.FillLayer.decode(reader, position); + case 59: return $root.armnnSerializer.RankLayer.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'ActivationLayer': return $root.armnnSerializer.ActivationLayer.decodeText(reader, json); + case 'AdditionLayer': return $root.armnnSerializer.AdditionLayer.decodeText(reader, json); + case 'BatchToSpaceNdLayer': return $root.armnnSerializer.BatchToSpaceNdLayer.decodeText(reader, json); + case 'BatchNormalizationLayer': return $root.armnnSerializer.BatchNormalizationLayer.decodeText(reader, json); + case 'ConstantLayer': return $root.armnnSerializer.ConstantLayer.decodeText(reader, json); + case 'Convolution2dLayer': return $root.armnnSerializer.Convolution2dLayer.decodeText(reader, json); + case 'DepthwiseConvolution2dLayer': return $root.armnnSerializer.DepthwiseConvolution2dLayer.decodeText(reader, json); + case 'FullyConnectedLayer': return $root.armnnSerializer.FullyConnectedLayer.decodeText(reader, json); + case 'InputLayer': return $root.armnnSerializer.InputLayer.decodeText(reader, json); + case 'MultiplicationLayer': return $root.armnnSerializer.MultiplicationLayer.decodeText(reader, json); + case 'OutputLayer': return $root.armnnSerializer.OutputLayer.decodeText(reader, json); + case 'PermuteLayer': return $root.armnnSerializer.PermuteLayer.decodeText(reader, json); + case 'Pooling2dLayer': return $root.armnnSerializer.Pooling2dLayer.decodeText(reader, json); + case 'ReshapeLayer': return $root.armnnSerializer.ReshapeLayer.decodeText(reader, json); + case 'SoftmaxLayer': return $root.armnnSerializer.SoftmaxLayer.decodeText(reader, json); + case 'SpaceToBatchNdLayer': return $root.armnnSerializer.SpaceToBatchNdLayer.decodeText(reader, json); + case 'DivisionLayer': return $root.armnnSerializer.DivisionLayer.decodeText(reader, json); + case 'MinimumLayer': return $root.armnnSerializer.MinimumLayer.decodeText(reader, json); + case 'EqualLayer': return $root.armnnSerializer.EqualLayer.decodeText(reader, json); + case 'MaximumLayer': return $root.armnnSerializer.MaximumLayer.decodeText(reader, json); + case 'NormalizationLayer': return $root.armnnSerializer.NormalizationLayer.decodeText(reader, json); + case 'PadLayer': return $root.armnnSerializer.PadLayer.decodeText(reader, json); + case 'RsqrtLayer': return $root.armnnSerializer.RsqrtLayer.decodeText(reader, json); + case 'FloorLayer': return $root.armnnSerializer.FloorLayer.decodeText(reader, json); + case 'GreaterLayer': return $root.armnnSerializer.GreaterLayer.decodeText(reader, json); + case 'ResizeBilinearLayer': return $root.armnnSerializer.ResizeBilinearLayer.decodeText(reader, json); + case 'SubtractionLayer': return $root.armnnSerializer.SubtractionLayer.decodeText(reader, json); + case 'StridedSliceLayer': return $root.armnnSerializer.StridedSliceLayer.decodeText(reader, json); + case 'GatherLayer': return 
$root.armnnSerializer.GatherLayer.decodeText(reader, json); + case 'MeanLayer': return $root.armnnSerializer.MeanLayer.decodeText(reader, json); + case 'MergerLayer': return $root.armnnSerializer.MergerLayer.decodeText(reader, json); + case 'L2NormalizationLayer': return $root.armnnSerializer.L2NormalizationLayer.decodeText(reader, json); + case 'SplitterLayer': return $root.armnnSerializer.SplitterLayer.decodeText(reader, json); + case 'DetectionPostProcessLayer': return $root.armnnSerializer.DetectionPostProcessLayer.decodeText(reader, json); + case 'LstmLayer': return $root.armnnSerializer.LstmLayer.decodeText(reader, json); + case 'QuantizedLstmLayer': return $root.armnnSerializer.QuantizedLstmLayer.decodeText(reader, json); + case 'QuantizeLayer': return $root.armnnSerializer.QuantizeLayer.decodeText(reader, json); + case 'DequantizeLayer': return $root.armnnSerializer.DequantizeLayer.decodeText(reader, json); + case 'MergeLayer': return $root.armnnSerializer.MergeLayer.decodeText(reader, json); + case 'SwitchLayer': return $root.armnnSerializer.SwitchLayer.decodeText(reader, json); + case 'ConcatLayer': return $root.armnnSerializer.ConcatLayer.decodeText(reader, json); + case 'SpaceToDepthLayer': return $root.armnnSerializer.SpaceToDepthLayer.decodeText(reader, json); + case 'PreluLayer': return $root.armnnSerializer.PreluLayer.decodeText(reader, json); + case 'TransposeConvolution2dLayer': return $root.armnnSerializer.TransposeConvolution2dLayer.decodeText(reader, json); + case 'ResizeLayer': return $root.armnnSerializer.ResizeLayer.decodeText(reader, json); + case 'StackLayer': return $root.armnnSerializer.StackLayer.decodeText(reader, json); + case 'AbsLayer': return $root.armnnSerializer.AbsLayer.decodeText(reader, json); + case 'ArgMinMaxLayer': return $root.armnnSerializer.ArgMinMaxLayer.decodeText(reader, json); + case 'SliceLayer': return $root.armnnSerializer.SliceLayer.decodeText(reader, json); + case 'DepthToSpaceLayer': return $root.armnnSerializer.DepthToSpaceLayer.decodeText(reader, json); + case 'InstanceNormalizationLayer': return $root.armnnSerializer.InstanceNormalizationLayer.decodeText(reader, json); + case 'LogSoftmaxLayer': return $root.armnnSerializer.LogSoftmaxLayer.decodeText(reader, json); + case 'ComparisonLayer': return $root.armnnSerializer.ComparisonLayer.decodeText(reader, json); + case 'StandInLayer': return $root.armnnSerializer.StandInLayer.decodeText(reader, json); + case 'ElementwiseUnaryLayer': return $root.armnnSerializer.ElementwiseUnaryLayer.decodeText(reader, json); + case 'TransposeLayer': return $root.armnnSerializer.TransposeLayer.decodeText(reader, json); + case 'QLstmLayer': return $root.armnnSerializer.QLstmLayer.decodeText(reader, json); + case 'FillLayer': return $root.armnnSerializer.FillLayer.decodeText(reader, json); + case 'RankLayer': return $root.armnnSerializer.RankLayer.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.armnnSerializer.AnyLayer = class AnyLayer { + + static decode(reader, position) { + const $ = new $root.armnnSerializer.AnyLayer(); + $.layer = reader.union(position, 4, $root.armnnSerializer.Layer.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.AnyLayer(); + $.layer = $root.armnnSerializer.Layer.decodeText(reader, json.layer, json.layer_type); + return $; + } +}; + +$root.armnnSerializer.FeatureCompatibilityVersions = class FeatureCompatibilityVersions { + + static decode(reader, position) { + const $ = new 
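+        // (Assumption: bindingIdsScheme versions how input/output binding ids
+        // are assigned, so readers can reject graphs using a scheme they do
+        // not understand.)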
$root.armnnSerializer.FeatureCompatibilityVersions(); + $.bindingIdsScheme = reader.uint32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.FeatureCompatibilityVersions(); + $.bindingIdsScheme = reader.value(json.bindingIdsScheme, 0); + return $; + } +}; + +$root.armnnSerializer.SerializedGraph = class SerializedGraph { + + static identifier(reader) { + return reader.identifier === 'ARMN'; + } + + static create(reader) { + return $root.armnnSerializer.SerializedGraph.decode(reader, reader.root); + } + + static createText(reader) { + return $root.armnnSerializer.SerializedGraph.decodeText(reader, reader.root); + } + + static decode(reader, position) { + const $ = new $root.armnnSerializer.SerializedGraph(); + $.layers = reader.tableArray(position, 4, $root.armnnSerializer.AnyLayer.decode); + $.inputIds = reader.typedArray(position, 6, Int32Array); + $.outputIds = reader.typedArray(position, 8, Int32Array); + $.featureVersions = reader.table(position, 10, $root.armnnSerializer.FeatureCompatibilityVersions.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.armnnSerializer.SerializedGraph(); + $.layers = reader.objectArray(json.layers, $root.armnnSerializer.AnyLayer.decodeText); + $.inputIds = reader.typedArray(json.inputIds, Int32Array); + $.outputIds = reader.typedArray(json.outputIds, Int32Array); + $.featureVersions = reader.object(json.featureVersions, $root.armnnSerializer.FeatureCompatibilityVersions.decodeText); + return $; + } +}; diff --git a/armnn.js b/armnn.js new file mode 100644 index 00000000000..e4da7025000 --- /dev/null +++ b/armnn.js @@ -0,0 +1,320 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const armnn = {}; + +armnn.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + const stream = context.stream; + if (stream && extension === 'armnn') { + return { name: 'armnn.flatbuffers', value: stream }; + } + if (extension === 'json') { + const obj = context.peek('json'); + if (obj && obj.layers && obj.inputIds && obj.outputIds) { + return { name: 'armnn.flatbuffers.json', value: obj }; + } + } + return undefined; + } + + async open(context, target) { + await context.require('./armnn-schema'); + armnn.schema = flatbuffers.get('armnn').armnnSerializer; + let model = null; + switch (target.name) { + case 'armnn.flatbuffers': { + try { + const stream = target.value; + const reader = flatbuffers.BinaryReader.open(stream); + model = armnn.schema.SerializedGraph.create(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new armnn.Error(`File format is not armnn.SerializedGraph (${message.replace(/\.$/, '')}).`); + } + break; + } + case 'armnn.flatbuffers.json': { + try { + const obj = target.value; + const reader = flatbuffers.TextReader.open(obj); + model = armnn.schema.SerializedGraph.createText(reader); + } catch (error) { + const message = error && error.message ? 
error.message : error.toString();
+                    throw new armnn.Error(`File text format is not armnn.SerializedGraph (${message.replace(/\.$/, '')}).`);
+                }
+                break;
+            }
+            default: {
+                throw new armnn.Error(`Unsupported Arm NN format '${target.name}'.`);
+            }
+        }
+        const metadata = await context.metadata('armnn-metadata.json');
+        return new armnn.Model(metadata, model);
+    }
+};
+
+armnn.Model = class {
+
+    constructor(metadata, model) {
+        this.format = 'Arm NN';
+        this.graphs = [ new armnn.Graph(metadata, model) ];
+    }
+};
+
+armnn.Graph = class {
+
+    constructor(metadata, graph) {
+        this.name = '';
+        this.nodes = [];
+        this.inputs = [];
+        this.outputs = [];
+        // Count how often each output slot is consumed so single-use constants
+        // can be folded into their consumer as initializers.
+        const counts = new Map();
+        for (const layer of graph.layers) {
+            const base = armnn.Node.getBase(layer);
+            for (const slot of base.inputSlots) {
+                const name = `${slot.connection.sourceLayerIndex}:${slot.connection.outputSlotIndex}`;
+                counts.set(name, counts.has(name) ? counts.get(name) + 1 : 1);
+            }
+        }
+        const values = new Map();
+        const value = (layerIndex, slotIndex, tensor) => {
+            const name = `${layerIndex}:${slotIndex}`;
+            if (!values.has(name)) {
+                const layer = graph.layers[layerIndex];
+                const base = layerIndex < graph.layers.length ? armnn.Node.getBase(layer) : null;
+                const tensorInfo = base && slotIndex < base.outputSlots.length ? base.outputSlots[slotIndex].tensorInfo : null;
+                values.set(name, new armnn.Value(name, tensorInfo, tensor));
+            }
+            return values.get(name);
+        };
+        const layers = graph.layers.filter((layer) => {
+            const base = armnn.Node.getBase(layer);
+            if (base.layerType === armnn.schema.LayerType.Constant && base.outputSlots.length === 1 && layer.layer.input) {
+                /* eslint-disable prefer-destructuring */
+                const slot = base.outputSlots[0];
+                /* eslint-enable prefer-destructuring */
+                const name = `${base.index}:${slot.index}`;
+                if (counts.get(name) === 1) {
+                    const tensor = new armnn.Tensor(layer.layer.input, 'Constant');
+                    value(base.index, slot.index, tensor);
+                    return false;
+                }
+            }
+            return true;
+        });
+        for (const layer of layers) {
+            const base = armnn.Node.getBase(layer);
+            for (const slot of base.inputSlots) {
+                value(slot.connection.sourceLayerIndex, slot.connection.outputSlotIndex);
+            }
+        }
+        for (const layer of layers) {
+            const base = armnn.Node.getBase(layer);
+            switch (base.layerType) {
+                case armnn.schema.LayerType.Input: {
+                    const name = base ? base.layerName : '';
+                    for (const slot of base.outputSlots) {
+                        const argument = new armnn.Argument(name, [ value(base.index, slot.index) ]);
+                        this.inputs.push(argument);
+                    }
+                    break;
+                }
+                case armnn.schema.LayerType.Output: {
+                    const name = base ? base.layerName : '';
+                    for (const slot of base.inputSlots) {
+                        const argument = new armnn.Argument(name, [ value(slot.connection.sourceLayerIndex, slot.connection.outputSlotIndex) ]);
+                        this.outputs.push(argument);
+                    }
+                    break;
+                }
+                default:
+                    this.nodes.push(new armnn.Node(metadata, layer, value));
+                    break;
+            }
+        }
+    }
+};
+
+armnn.Node = class {
+
+    constructor(metadata, layer, value) {
+        const type = layer.layer.constructor.name;
+        this.type = Object.assign({}, metadata.type(type) || { name: type });
+        this.type.name = this.type.name.replace(/Layer$/, '');
+        this.name = '';
+        this.outputs = [];
+        this.inputs = [];
+        this.attributes = [];
+        const inputSchemas = (this.type && this.type.inputs) ? [...this.type.inputs] : [ { name: 'input' } ];
+        const outputSchemas = (this.type && this.type.outputs) ?
[...this.type.outputs] : [ { name: 'output' } ]; + const base = armnn.Node.getBase(layer); + if (base) { + this.name = base.layerName; + const inputs = [...base.inputSlots]; + while (inputs.length > 0) { + const inputSchema = inputSchemas.length > 0 ? inputSchemas.shift() : { name: '?' }; + const count = inputSchema.list ? inputs.length : 1; + const argument = new armnn.Argument(inputSchema.name, inputs.splice(0, count).map((inputSlot) => { + return value(inputSlot.connection.sourceLayerIndex, inputSlot.connection.outputSlotIndex); + })); + this.inputs.push(argument); + } + const outputs = [...base.outputSlots]; + while (outputs.length > 0) { + const outputSchema = outputSchemas.length > 0 ? outputSchemas.shift() : { name: '?' }; + const count = outputSchema.list ? outputs.length : 1; + this.outputs.push(new armnn.Argument(outputSchema.name, outputs.splice(0, count).map((outputSlot) => { + return value(base.index, outputSlot.index); + }))); + } + } + if (layer.layer) { + if (layer.layer.descriptor && this.type.attributes) { + for (const [name, value] of Object.entries(layer.layer.descriptor)) { + const attribute = new armnn.Attribute(metadata.attribute(type, name), name, value); + this.attributes.push(attribute); + } + } + for (const [name, tensor] of Object.entries(layer.layer).filter(([, value]) => value instanceof armnn.schema.ConstTensor)) { + const value = new armnn.Value('', tensor.info, new armnn.Tensor(tensor)); + const argument = new armnn.Argument(name, [ value ]); + this.inputs.push(argument); + } + } + } + + static getBase(layer) { + return layer.layer.base.base ? layer.layer.base.base : layer.layer.base; + } + + static makeKey(layer_id, index) { + return `${layer_id}_${index}`; + } +}; + +armnn.Attribute = class { + + constructor(metadata, name, value) { + this.name = name; + this.type = metadata ? metadata.type : null; + this.value = ArrayBuffer.isView(value) ? 
Array.from(value) : value; + if (armnn.schema[this.type]) { + this.value = armnn.Utility.enum(this.type, this.value); + } + } +}; + +armnn.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +armnn.Value = class { + + constructor(name, tensorInfo, initializer) { + if (typeof name !== 'string') { + throw new armnn.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = new armnn.TensorType(tensorInfo); + this.initializer = initializer; + if (tensorInfo.quantizationScale !== 0 || + tensorInfo.quantizationOffset !== 0 || + tensorInfo.quantizationScales.length > 0 || + tensorInfo.quantizationDim !== 0) { + this.quantization = { + type: 'linear', + dimension: tensorInfo.quantizationDim, + scale: [ tensorInfo.quantizationScale ], + offset: [ tensorInfo.quantizationOffset ] + }; + } + } +}; + +armnn.Tensor = class { + + constructor(tensor, category) { + this.type = new armnn.TensorType(tensor.info); + this.category = category || ''; + const data = tensor.data.data.slice(0); + this.values = new Uint8Array(data.buffer, data.byteOffset, data.byteLength); + } +}; + +armnn.TensorType = class { + + constructor(tensorInfo) { + const dataType = tensorInfo.dataType; + switch (dataType) { + case 0: this.dataType = 'float16'; break; + case 1: this.dataType = 'float32'; break; + case 2: this.dataType = 'quint8'; break; // QuantisedAsymm8 + case 3: this.dataType = 'int32'; break; + case 4: this.dataType = 'boolean'; break; + case 5: this.dataType = 'qint16'; break; // QuantisedSymm16 + case 6: this.dataType = 'quint8'; break; // QAsymmU8 + case 7: this.dataType = 'qint16'; break; // QSymmS16 + case 8: this.dataType = 'qint8'; break; // QAsymmS8 + case 9: this.dataType = 'qint8'; break; // QSymmS8 + default: + throw new armnn.Error(`Unsupported data type '${JSON.stringify(dataType)}'.`); + } + this.shape = new armnn.TensorShape(tensorInfo.dimensions); + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +armnn.TensorShape = class { + + constructor(dimensions) { + this.dimensions = Array.from(dimensions); + } + + toString() { + if (!this.dimensions || this.dimensions.length == 0) { + return ''; + } + return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } +}; + +armnn.Utility = class { + + static enum(name, value) { + const type = name && armnn.schema ? 
armnn.schema[name] : undefined; + if (type) { + armnn.Utility._enums = armnn.Utility._enums || new Map(); + if (!armnn.Utility._enums.has(name)) { + const entries = new Map(Object.entries(type).map(([key, value]) => [ value, key ])); + armnn.Utility._enums.set(name, entries); + } + const entries = armnn.Utility._enums.get(name); + if (entries.has(value)) { + return entries.get(value); + } + } + return value; + } +}; + +armnn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Arm NN model.'; + } +}; + +export const ModelFactory = armnn.ModelFactory; diff --git a/barracuda.js b/barracuda.js new file mode 100755 index 00000000000..7cebabcd888 --- /dev/null +++ b/barracuda.js @@ -0,0 +1,425 @@ + +// Experimental + +import * as base from './base.js'; + +const barracuda = {}; + +barracuda.ModelFactory = class { + + match(context) { + const stream = context.stream; + if (stream && stream.length > 12) { + const buffer = stream.peek(12); + if (buffer[0] <= 0x20 && buffer.subarray(1, 8).every((value) => value == 0x00)) { + return 'barracuda'; + } + } + return null; + } + + async open(context) { + const metadata = barracuda.Metadata.open(); + const model = new barracuda.NNModel(context.stream.peek()); + return new barracuda.Model(metadata, model); + } +}; + +barracuda.Model = class { + + constructor(metadata, model) { + const version = model.version.toString(); + this.format = `Barracuda v${version}`; + this.graphs = [ new barracuda.Graph(metadata, model) ]; + } +}; + +barracuda.Graph = class { + + constructor(metadata, model) { + this.name = ''; + this.inputs = []; + this.outputs = []; + this.nodes = []; + const values = new Map(); + values.map = (name, type, tensor) => { + if (!values.has(name)) { + type = tensor ? tensor.type : type; + values.set(name, new barracuda.Value(name, type, tensor)); + } else if (type || tensor) { + throw new barracuda.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + const layers = []; + for (const layer of model.layers) { + if (layer.type !== 255 || layer.inputs.length > 0) { + layers.push(layer); + } else { + for (const tensor of layer.tensors) { + values.map(tensor.name, null, new barracuda.Tensor(tensor)); + } + } + } + for (const input of model.inputs) { + const shape = new barracuda.TensorShape(input.shape); + const type = new barracuda.TensorType(4, shape); + const argument = new barracuda.Argument(input.name, [ values.map(input.name, type) ]); + this.inputs.push(argument); + } + for (const output of model.outputs) { + const argument = new barracuda.Argument(output, [ values.map(output) ]); + this.outputs.push(argument); + } + for (const layer of layers) { + const node = new barracuda.Node(metadata, layer, null, values); + this.nodes.push(node); + } + } +}; + +barracuda.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +barracuda.Value = class { + + constructor(name, type, initializer) { + this.name = name; + this.type = type || null; + this.initializer = initializer || null; + } +}; + + +barracuda.Node = class { + + constructor(metadata, layer, type, values) { + this.name = layer.name || ''; + this.type = type ? 
type : metadata.type(layer.type);
+        this.inputs = [];
+        this.outputs = [];
+        this.attributes = [];
+        const inputs = Array.prototype.slice.call(this.type.inputs || [ { name: 'input' } ]);
+        if (this.type.inputs && this.type.inputs.length === 1 && this.type.inputs[0].name === 'inputs') {
+            const argument = new barracuda.Argument('inputs', layer.inputs.map((input) => values.map(input)));
+            this.inputs.push(argument);
+        } else if (layer.inputs) {
+            for (let i = 0; i < layer.inputs.length; i++) {
+                const input = layer.inputs[i];
+                const name = inputs.length > 0 ? inputs.shift().name : i.toString();
+                const argument = new barracuda.Argument(name, [ values.map(input) ]);
+                this.inputs.push(argument);
+            }
+        }
+        if (layer.tensors) {
+            for (let i = 0; i < layer.tensors.length; i++) {
+                const tensor = layer.tensors[i];
+                const initializer = new barracuda.Tensor(tensor);
+                const name = inputs.length > 0 ? inputs.shift().name : i.toString();
+                const argument = new barracuda.Argument(name, [ values.map(tensor.name, initializer.type, initializer) ]);
+                this.inputs.push(argument);
+            }
+        }
+        if (layer.inputs !== undefined) {
+            const argument = new barracuda.Argument('output', [ values.map(this.name) ]);
+            this.outputs.push(argument);
+        }
+        if (layer.activation !== undefined && (layer.type === 50 || layer.activation !== 0)) {
+            const type = barracuda.Activation[layer.activation];
+            if (!type) {
+                throw new barracuda.Error(`Unsupported activation '${layer.activation}'.`);
+            }
+            const node = new barracuda.Node(metadata, {}, { name: type, category: 'Activation' }, values);
+            this.chain = [ node ];
+        }
+        // Record an attribute only when it differs from its default value.
+        const attribute = (name, type, value, defaultValue) => {
+            if (value === undefined) {
+                return;
+            }
+            if (Array.isArray(defaultValue) && Array.isArray(value) && value.length === defaultValue.length && value.every((v, i) => v === defaultValue[i])) {
+                return;
+            }
+            if (typeof defaultValue === 'function' && defaultValue(value)) {
+                return;
+            }
+            if (defaultValue === value) {
+                return;
+            }
+            const attribute = new barracuda.Attribute(name, type, value);
+            this.attributes.push(attribute);
+        };
+        attribute('strides', 'int32[]', layer.strides, []);
+        attribute('pads', 'int32[]', layer.pads, (value) => Array.isArray(value) && (value.every((v) => v === 0) || value.every((v) => v === -1)));
+        attribute('size', 'int32[]', layer.pool_size, []);
+        attribute('alpha', 'float32', layer.alpha, 1);
+        attribute('beta', 'float32', layer.beta, 0);
+        attribute('axis', 'int32', layer.axis, -1);
+    }
+};
+
+barracuda.Attribute = class {
+
+    constructor(name, type, value) {
+        this.name = name;
+        this.type = type;
+        this.value = value;
+    }
+};
+
+barracuda.Tensor = class {
+
+    constructor(tensor) {
+        this.type = new barracuda.TensorType(tensor.itemsize, new barracuda.TensorShape(tensor.shape));
+        this.values = tensor.data;
+    }
+};
+
+barracuda.TensorType = class {
+
+    constructor(itemsize, shape) {
+        switch (itemsize) {
+            case 4: this.dataType = 'float32'; break;
+            default: throw new barracuda.Error(`Unsupported data type size '${itemsize}'.`);
+        }
+        this.shape = shape;
+    }
+
+    toString() {
+        return this.dataType + this.shape.toString();
+    }
+};
+
+barracuda.TensorShape = class {
+
+    constructor(dimensions) {
+        this.dimensions = dimensions;
+    }
+
+    toString() {
+        return this.dimensions ? (`[${this.dimensions.map((dimension) => dimension ?
dimension.toString() : '?').join(',')}]`) : ''; + } +}; + +barracuda.NNModel = class { + + constructor(buffer) { + // https://github.com/Unity-Technologies/barracuda-release/blob/release/1.3.2/Barracuda/Runtime/Core/Model.cs + const reader = new barracuda.BinaryReader(buffer); + this.version = reader.int32(); + reader.int32(); + this.inputs = new Array(reader.int32()); + for (let i = 0; i < this.inputs.length; i++) { + this.inputs[i] = { + name: reader.string(), + shape: reader.shape() + }; + } + this.outputs = reader.strings(); + this.memories = new Array(reader.int32()); + for (let i = 0; i < this.memories.length; i++) { + this.memories[i] = { + shape: reader.shape(), + in: reader.string(), + out: reader.string() + }; + } + this.layers = new Array(reader.int32()); + for (let i = 0; i < this.layers.length; i++) { + const layer = {}; + layer.name = reader.string(); + layer.type = reader.int32(); + layer.activation = reader.int32(); + reader.int32(); + reader.int32(); + layer.pads = reader.int32s(); + layer.strides = reader.int32s(); + layer.pool_size = reader.int32s(); + layer.axis = reader.int32(); + layer.alpha = reader.float32(); + layer.beta = reader.float32(); + reader.int32(); + layer.inputs = reader.strings(); + layer.tensors = []; + const tensorsLength = reader.int32(); + for (let j = 0; j < tensorsLength; j++) { + layer.tensors.push({ + name: reader.string(), + shape: reader.shape(), + offset: reader.int64(), + itemsize: reader.int32(), + length: reader.int32() + }); + } + this.layers[i] = layer; + } + const position = reader.position; + for (const layer of this.layers) { + for (const tensor of layer.tensors) { + reader.seek(position + (tensor.offset * tensor.itemsize)); + tensor.data = reader.read(tensor.length * tensor.itemsize); + } + } + } +}; + +barracuda.Activation = { + 0: "Linear", 1: "Relu", 2: "Softmax", 3: "Tanh", 4: "Sigmoid", 5: "Elu", 6: "Relu6", 7: "LeakyRelu", 8: "Selu", 9: "Swish", + 10: "LogSoftmax", 11: "Softplus", 12: "Softsign", 13: "PRelu", + 20: "Hardmax", 21: "HardSigmoid", + 100: "Abs", 101: "Neg", 102: "Ceil", 103: "Clip", 104: "Floor", 105: "Round", + 110: "Reciprocal", 111: "Sqrt", 113: "Exp", 114: "Log", + 200: "Acos", 201: "Acosh", 202: "Asin", 203: "Asinh", 204: "Atan", 205: "Atanh", 206: "Cos", 207: "Cosh", 208: "Sin", 209: "Sinh", 210: "Tan" +}; + +barracuda.BinaryReader = class extends base.BinaryReader { + + int32s() { + const values = new Array(this.int32()); + for (let i = 0; i < values.length; i++) { + values[i] = this.int32(); + } + return values; + } + + string() { + let content = ''; + const size = this.int32(); + let position = this._position; + this.skip(size); + for (let i = 0; i < size; i++) { + content += String.fromCharCode(this._buffer[position++]); + } + return content; + } + + strings() { + const values = []; + const length = this.int32(); + for (let i = 0; i < length; i++) { + values.push(this.string()); + } + return values; + } + + shape() { + return this.int32s(); + } +}; + +barracuda.Metadata = class { + + static open() { + barracuda.Metadata._metadata = barracuda.Metadata._metadata || new barracuda.Metadata(); + return barracuda.Metadata._metadata; + } + + constructor() { + this._types = new Map(); + const register = (id, name, category, inputs) => { + this._types.set(id, { name: name, category: category, inputs: (inputs || []).map((input) => { + return { name: input }; + }) }); + }; + register(0, 'Nop', ''); + register(1, 'Dense', 'Layer', [ 'input', 'kernel', 'bias' ]); + register(2, 'MatMul', '', [ 'input', 'kernel', 'bias' 
]); + register(20, 'Conv2D', 'Layer', [ 'input', 'kernel', 'bias' ]); + register(21, 'DepthwiseConv2D', 'Layer', [ 'input', 'kernel', 'bias' ]); + register(22, 'Conv2DTrans', 'Layer', [ 'input', 'kernel', 'bias' ]); + register(23, 'Upsample2D', 'Data'); + register(25, 'MaxPool2D', 'Pool'); + register(26, 'AvgPool2D', 'Pool'); + register(27, 'GlobalMaxPool2D', 'Pool'); + register(28, 'GlobalAvgPool2D', 'Pool'); + register(29, 'Border2D', ''); + register(30, 'Conv3D', 'Layer'); + register(32, 'Conv3DTrans', 'Layer'); + register(33, 'Upsample3D', 'Data'); + register(35, 'MaxPool3D', 'Pool'); + register(36, 'AvgPool3D', 'Pool'); + register(37, 'GlobalMaxPool3D', 'Pool'); + register(38, 'GlobalAvgPool3D', 'Pool'); + register(39, 'Border3D', ''); + register(50, 'Activation', '', [ 'input' ]); + register(51, 'ScaleBias', 'Normalization', [ 'input', 'scale', 'bias' ]); + register(52, 'Normalization', 'Normalization'); + register(53, 'LRN', 'Normalization'); + register(60, 'Dropout', 'Dropout'); + register(64, 'RandomNormal', ''); + register(65, 'RandomUniform', ''); + register(66, 'Multinomial', ''); + register(67, 'OneHot', ''); + register(68, 'TopKIndices', ''); + register(69, 'TopKValues', ''); + register(100, 'Add', '', [ 'inputs' ]); + register(101, 'Sub', '', [ 'inputs' ]); + register(102, 'Mul', '', [ 'inputs' ]); + register(103, 'RealDiv', '', [ 'inputs' ]); + register(104, 'Pow', '', [ 'inputs' ]); + register(110, 'Minimum', '', [ 'inputs' ]); + register(111, 'Maximum', '', [ 'inputs' ]); + register(112, 'Mean', '', [ 'inputs' ]); + register(120, 'ReduceL1', '', [ 'inputs' ]); + register(121, 'ReduceL2', '', [ 'inputs' ]); + register(122, 'ReduceLogSum', '', [ 'inputs' ]); + register(123, 'ReduceLogSumExp', '', [ 'inputs' ]); + register(124, 'ReduceMax', '', [ 'inputs' ]); + register(125, 'ReduceMean', '', [ 'inputs' ]); + register(126, 'ReduceMin', '', [ 'inputs' ]); + register(127, 'ReduceProd', '', [ 'inputs' ]); + register(128, 'ReduceSum', '', [ 'inputs' ]); + register(129, 'ReduceSumSquare', '', [ 'inputs' ]); + register(140, 'Greater', ''); + register(141, 'GreaterEqual', ''); + register(142, 'Less', ''); + register(143, 'LessEqual', ''); + register(144, 'Equal', ''); + register(145, 'LogicalOr', ''); + register(146, 'LogicalAnd', ''); + register(147, 'LogicalNot', ''); + register(148, 'LogicalXor', ''); + register(160, 'Pad2DReflect', ''); + register(161, 'Pad2DSymmetric', ''); + register(162, 'Pad2DEdge', ''); + register(200, 'Flatten', 'Shape'); + register(201, 'Reshape', 'Shape'); + register(202, 'Transpose', ''); + register(203, 'Squeeze', ''); + register(204, 'Unsqueeze', ''); + register(205, 'Gather', ''); + register(206, 'DepthToSpace', ''); + register(207, 'SpaceToDepth', ''); + register(208, 'Expand', ''); + register(209, 'Resample2D', ''); + register(210, 'Concat', 'Tensor', [ 'inputs' ]); + register(211, 'StridedSlice', 'Shape'); + register(212, 'Tile', ''); + register(213, 'Shape', ''); + register(214, 'NonMaxSuppression', ''); + register(215, 'LSTM', ''); + register(255, 'Load', ''); + } + + type(name) { + if (!this._types.has(name)) { + this._types.set(name, { name: name.toString() }); + } + return this._types.get(name); + } +}; + +barracuda.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Barracuda model.'; + } +}; + +export const ModelFactory = barracuda.ModelFactory; + diff --git a/base.js b/base.js new file mode 100644 index 00000000000..3e845adf5a8 --- /dev/null +++ b/base.js @@ -0,0 +1,1188 @@ + +const base 
= {};
+
+base.Int64 = class Int64 {
+
+    constructor(low, high) {
+        this.low = low | 0;
+        this.high = high | 0;
+    }
+
+    static create(value) {
+        if (isNaN(value)) {
+            return base.Int64.zero;
+        }
+        if (value <= -9223372036854776000) {
+            return base.Int64.min;
+        }
+        if (value + 1 >= 9223372036854776000) {
+            return base.Int64.max;
+        }
+        if (value < 0) {
+            return base.Int64.create(-value).negate();
+        }
+        return new base.Int64((value % 4294967296) | 0, (value / 4294967296));
+    }
+
+    get isZero() {
+        return this.low === 0 && this.high === 0;
+    }
+
+    get isNegative() {
+        return this.high < 0;
+    }
+
+    negate() {
+        if (this.equals(base.Int64.min)) {
+            return base.Int64.min;
+        }
+        return this.not().add(base.Int64.one);
+    }
+
+    not() {
+        return new base.Int64(~this.low, ~this.high);
+    }
+
+    isOdd() {
+        // Parity of a two's-complement value is carried by the low word.
+        return (this.low & 1) === 1;
+    }
+
+    equals(other) {
+        if (!(other instanceof base.Int64) && (this.high >>> 31) === 1 && (other.high >>> 31) === 1) {
+            return false;
+        }
+        return this.high === other.high && this.low === other.low;
+    }
+
+    compare(other) {
+        if (this.equals(other)) {
+            return 0;
+        }
+        const thisNeg = this.isNegative;
+        const otherNeg = other.isNegative;
+        if (thisNeg && !otherNeg) {
+            return -1;
+        }
+        if (!thisNeg && otherNeg) {
+            return 1;
+        }
+        return this.subtract(other).isNegative ? -1 : 1;
+    }
+
+    add(other) {
+        return base.Utility.add(this, other, false);
+    }
+
+    subtract(other) {
+        return base.Utility.subtract(this, other, false);
+    }
+
+    multiply(other) {
+        return base.Utility.multiply(this, other, false);
+    }
+
+    divide(other) {
+        return base.Utility.divide(this, other, false);
+    }
+
+    toInteger() {
+        return this.low;
+    }
+
+    toNumber() {
+        if (this.high === 0) {
+            return this.low >>> 0;
+        }
+        if (this.high === -1) {
+            return this.low;
+        }
+        return (this.high * 4294967296) + (this.low >>> 0);
+    }
+
+    toString(radix) {
+        const r = radix || 10;
+        if (r < 2 || 36 < r) {
+            throw new RangeError('radix');
+        }
+        if (this.isZero) {
+            return '0';
+        }
+        if (this.high < 0) {
+            if (this.equals(base.Int64.min)) {
+                // Divide first so the negation below cannot overflow.
+                const divisor = new base.Int64(r, 0);
+                const div = this.divide(divisor);
+                const remainder = div.multiply(divisor).subtract(this);
+                return div.toString(r) + (remainder.low >>> 0).toString(r);
+            }
+            return `-${this.negate().toString(r)}`;
+        }
+        if (this.high === 0) {
+            return this.low.toString(r);
+        }
+        return base.Utility.text(this, false, r);
+    }
+};
+
+base.Int64.min = new base.Int64(0, -2147483648);
+base.Int64.zero = new base.Int64(0, 0);
+base.Int64.one = new base.Int64(1, 0);
+base.Int64.negativeOne = new base.Int64(-1, 0);
+base.Int64.power24 = new base.Int64(1 << 24, 0);
+base.Int64.max = new base.Int64(0, 2147483647);
+
+base.Uint64 = class Uint64 {
+
+    constructor(low, high) {
+        this.low = low | 0;
+        this.high = high | 0;
+    }
+
+    static create(value) {
+        if (isNaN(value) || value < 0) {
+            return base.Uint64.zero;
+        }
+        if (value >= 18446744073709552000) {
+            return base.Uint64.max;
+        }
+        return new base.Uint64((value % 4294967296) | 0, (value / 4294967296));
+    }
+
+    get isZero() {
+        return this.low === 0 && this.high === 0;
+    }
+
+    get isNegative() {
+        return false;
+    }
+
+    negate() {
+        return this.not().add(base.Int64.one);
+    }
+
+    not() {
+        return new base.Uint64(~this.low, ~this.high);
+    }
+
+    equals(other) {
+        if (!(other instanceof base.Uint64) && (this.high >>> 31) === 1 && (other.high >>> 31) === 1) {
+            return false;
+        }
+        return this.high === other.high && this.low === other.low;
+    }
+
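+    // base.Utility.multiply below calls isOdd(), which this class does not
+    // otherwise define; minimal parity check on the two's-complement low word.
+    isOdd() {
+        return (this.low & 1) === 1;
+    }
+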
compare(other) { + if (this.equals(other)) { + return 0; + } + const thisNeg = this.isNegative; + const otherNeg = other.isNegative; + if (thisNeg && !otherNeg) { + return -1; + } + if (!thisNeg && otherNeg) { + return 1; + } + return (other.high >>> 0) > (this.high >>> 0) || (other.high === this.high && (other.low >>> 0) > (this.low >>> 0)) ? -1 : 1; + } + + add(other) { + return base.Utility.add(this, other, true); + } + + subtract(other) { + return base.Utility.subtract(this, other, true); + } + + multiply(other) { + return base.Utility.multiply(this, other, true); + } + + divide(other) { + return base.Utility.divide(this, other, true); + } + + toInteger() { + return this.low >>> 0; + } + + toNumber() { + if (this.high === 0) { + return this.low >>> 0; + } + return ((this.high >>> 0) * 4294967296) + (this.low >>> 0); + } + + toString(radix) { + const r = radix || 10; + if (r < 2 || 36 < r) { + throw new RangeError('radix'); + } + if (this.isZero) { + return '0'; + } + if (this.high === 0) { + return this.low.toString(radix); + } + return base.Utility.text(this, true, r); + } +}; + +base.Utility = class { + + static add(a, b, unsigned) { + const a48 = a.high >>> 16; + const a32 = a.high & 0xFFFF; + const a16 = a.low >>> 16; + const a00 = a.low & 0xFFFF; + const b48 = b.high >>> 16; + const b32 = b.high & 0xFFFF; + const b16 = b.low >>> 16; + const b00 = b.low & 0xFFFF; + let c48 = 0; + let c32 = 0; + let c16 = 0; + let c00 = 0; + c00 += a00 + b00; + c16 += c00 >>> 16; + c00 &= 0xFFFF; + c16 += a16 + b16; + c32 += c16 >>> 16; + c16 &= 0xFFFF; + c32 += a32 + b32; + c48 += c32 >>> 16; + c32 &= 0xFFFF; + c48 += a48 + b48; + c48 &= 0xFFFF; + return base.Utility._create((c16 << 16) | c00, (c48 << 16) | c32, unsigned); + } + + static subtract(a, b, unsigned) { + return base.Utility.add(a, b.negate(), unsigned); + } + + static multiply(a, b, unsigned) { + if (a.isZero) { + return base.Int64.zero; + } + if (b.isZero) { + return base.Int64.zero; + } + if (a.equals(base.Int64.min)) { + return b.isOdd() ? base.Int64.min : base.Int64.zero; + } + if (b.equals(base.Int64.min)) { + return a.isOdd() ? base.Int64.min : base.Int64.zero; + } + if (a.isNegative) { + if (b.isNegative) { + return a.negate().multiply(b.negate()); + } + return a.negate().multiply(b).negate(); + } else if (b.isNegative) { + return a.multiply(b.negate()).negate(); + } + if (a.compare(base.Int64.power24) < 0 && b.compare(base.Int64.power24) < 0) { + return unsigned ? base.Uint64.create(a.toNumber() * b.toNumber()) : base.Int64.create(a.toNumber() * b.toNumber()); + } + const a48 = a.high >>> 16; + const a32 = a.high & 0xFFFF; + const a16 = a.low >>> 16; + const a00 = a.low & 0xFFFF; + const b48 = b.high >>> 16; + const b32 = b.high & 0xFFFF; + const b16 = b.low >>> 16; + const b00 = b.low & 0xFFFF; + let c48 = 0; + let c32 = 0; + let c16 = 0; + let c00 = 0; + c00 += a00 * b00; + c16 += c00 >>> 16; + c00 &= 0xFFFF; + c16 += a16 * b00; + c32 += c16 >>> 16; + c16 &= 0xFFFF; + c16 += a00 * b16; + c32 += c16 >>> 16; + c16 &= 0xFFFF; + c32 += a32 * b00; + c48 += c32 >>> 16; + c32 &= 0xFFFF; + c32 += a16 * b16; + c48 += c32 >>> 16; + c32 &= 0xFFFF; + c32 += a00 * b32; + c48 += c32 >>> 16; + c32 &= 0xFFFF; + c48 += a48 * b00 + a32 * b16 + a16 * b32 + a00 * b48; + c48 &= 0xFFFF; + return base.Utility._create((c16 << 16) | c00, (c48 << 16) | c32, unsigned); + } + + static divide(a, b, unsigned) { + if (b.isZero) { + throw new Error('Division by zero.'); + } + if (a.isZero) { + return unsigned ? 
base.Uint64.zero : base.Int64.zero;
+        }
+        let approx;
+        let remainder;
+        let result;
+        if (!unsigned) {
+            if (a.equals(base.Int64.min)) {
+                if (b.equals(base.Int64.one) || b.equals(base.Int64.negativeOne)) {
+                    return base.Int64.min;
+                } else if (b.equals(base.Int64.min)) {
+                    return base.Int64.one;
+                }
+                const half = base.Utility._shiftRight(a, unsigned, 1);
+                const halfDivide = half.divide(b);
+                approx = base.Utility._shiftLeft(halfDivide, halfDivide instanceof base.Uint64, 1);
+                if (approx.equals(base.Int64.zero)) {
+                    return b.isNegative ? base.Int64.one : base.Int64.negativeOne;
+                }
+                remainder = a.subtract(b.multiply(approx));
+                result = approx.add(remainder.divide(b));
+                return result;
+            } else if (b.equals(base.Int64.min)) {
+                return base.Int64.zero;
+            }
+            if (a.isNegative) {
+                if (b.isNegative) {
+                    return a.negate().divide(b.negate());
+                }
+                return a.negate().divide(b).negate();
+            } else if (b.isNegative) {
+                return a.divide(b.negate()).negate();
+            }
+            result = base.Int64.zero;
+        } else {
+            if (!(b instanceof base.Uint64)) {
+                b = new base.Uint64(b.low, b.high);
+            }
+            if (b.compare(a) > 0) {
+                return base.Int64.zero;
+            }
+            if (b.compare(base.Utility._shiftRight(a, unsigned, 1)) > 0) {
+                return base.Uint64.one;
+            }
+            result = base.Uint64.zero;
+        }
+        // Long division: repeatedly subtract a float-approximated multiple of b.
+        remainder = a;
+        while (remainder.compare(b) >= 0) {
+            let approx = Math.max(1, Math.floor(remainder.toNumber() / b.toNumber()));
+            const log2 = Math.ceil(Math.log(approx) / Math.LN2);
+            const delta = (log2 <= 48) ? 1 : Math.pow(2, log2 - 48);
+            let approxResult = base.Int64.create(approx);
+            let approxRemainder = approxResult.multiply(b);
+            while (approxRemainder.isNegative || approxRemainder.compare(remainder) > 0) {
+                approx -= delta;
+                approxResult = unsigned ? base.Uint64.create(approx) : base.Int64.create(approx);
+                approxRemainder = approxResult.multiply(b);
+            }
+            if (approxResult.isZero) {
+                approxResult = base.Int64.one;
+            }
+            result = result.add(approxResult);
+            remainder = remainder.subtract(approxRemainder);
+        }
+        return result;
+    }
+
+    static text(value, unsigned, radix) {
+        const power = unsigned ? base.Uint64.create(Math.pow(radix, 6)) : base.Int64.create(Math.pow(radix, 6));
+        let remainder = value;
+        let result = '';
+        for (;;) {
+            const remainderDiv = remainder.divide(power);
+            const intval = remainder.subtract(remainderDiv.multiply(power)).toInteger() >>> 0;
+            let digits = intval.toString(radix);
+            remainder = remainderDiv;
+            if (remainder.low === 0 && remainder.high === 0) {
+                return digits + result;
+            }
+            while (digits.length < 6) {
+                digits = `0${digits}`;
+            }
+            result = `${digits}${result}`;
+        }
+    }
+
+    static _shiftLeft(value, unsigned, shift) {
+        return base.Utility._create(value.low << shift, (value.high << shift) | (value.low >>> (32 - shift)), unsigned);
+    }
+
+    static _shiftRight(value, unsigned, shift) {
+        return base.Utility._create((value.low >>> shift) | (value.high << (32 - shift)), value.high >> shift, unsigned);
+    }
+
+    static _create(low, high, unsigned) {
+        return unsigned ?
new base.Uint64(low, high) : new base.Int64(low, high);
+    }
+};
+
+base.Uint64.zero = new base.Uint64(0, 0);
+base.Uint64.one = new base.Uint64(1, 0);
+base.Uint64.max = new base.Uint64(-1, -1);
+
+base.Complex64 = class Complex {
+
+    constructor(real, imaginary) {
+        this.real = real;
+        this.imaginary = imaginary;
+    }
+
+    static create(real, imaginary) {
+        return new base.Complex64(real, imaginary);
+    }
+
+    toString(/* radix */) {
+        return `${this.real} + ${this.imaginary}i`;
+    }
+};
+
+base.Complex128 = class Complex {
+
+    constructor(real, imaginary) {
+        this.real = real;
+        this.imaginary = imaginary;
+    }
+
+    static create(real, imaginary) {
+        return new base.Complex128(real, imaginary);
+    }
+
+    toString(/* radix */) {
+        return `${this.real} + ${this.imaginary}i`;
+    }
+};
+
+if (!DataView.prototype.getFloat16) {
+    DataView.prototype.getFloat16 = function(byteOffset, littleEndian) {
+        const value = this.getUint16(byteOffset, littleEndian);
+        const e = (value & 0x7C00) >> 10;
+        let f = value & 0x03FF;
+        if (e == 0) {
+            f = 0.00006103515625 * (f / 1024);
+        } else if (e == 0x1F) {
+            f = f ? NaN : Infinity;
+        } else {
+            f = DataView.__float16_pow[e] * (1 + (f / 1024));
+        }
+        return value & 0x8000 ? -f : f;
+    };
+    DataView.__float16_pow = {
+        1: 1/16384, 2: 1/8192, 3: 1/4096, 4: 1/2048, 5: 1/1024, 6: 1/512, 7: 1/256, 8: 1/128,
+        9: 1/64, 10: 1/32, 11: 1/16, 12: 1/8, 13: 1/4, 14: 1/2, 15: 1, 16: 2,
+        17: 4, 18: 8, 19: 16, 20: 32, 21: 64, 22: 128, 23: 256, 24: 512,
+        25: 1024, 26: 2048, 27: 4096, 28: 8192, 29: 16384, 30: 32768, 31: 65536
+    };
+}
+
+if (!DataView.prototype.setFloat16) {
+    DataView.prototype.setFloat16 = function(byteOffset, value, littleEndian) {
+        DataView.__float16_float[0] = value;
+        [value] = DataView.__float16_int;
+        const s = (value >>> 16) & 0x8000;
+        const e = (value >>> 23) & 0xff;
+        const f = value & 0x7fffff;
+        const v = s | DataView.__float16_base[e] | (f >> DataView.__float16_shift[e]);
+        this.setUint16(byteOffset, v, littleEndian);
+    };
+    DataView.__float16_float = new Float32Array(1);
+    DataView.__float16_int = new Uint32Array(DataView.__float16_float.buffer, 0, DataView.__float16_float.length);
+    DataView.__float16_base = new Uint32Array(256);
+    DataView.__float16_shift = new Uint32Array(256);
+    for (let i = 0; i < 256; ++i) {
+        const e = i - 127;
+        if (e < -27) {
+            DataView.__float16_base[i] = 0x0000;
+            DataView.__float16_shift[i] = 24;
+        } else if (e < -14) {
+            DataView.__float16_base[i] = 0x0400 >> -e - 14;
+            DataView.__float16_shift[i] = -e - 1;
+        } else if (e <= 15) {
+            DataView.__float16_base[i] = e + 15 << 10;
+            DataView.__float16_shift[i] = 13;
+        } else if (e < 128) {
+            DataView.__float16_base[i] = 0x7c00;
+            DataView.__float16_shift[i] = 24;
+        } else {
+            DataView.__float16_base[i] = 0x7c00;
+            DataView.__float16_shift[i] = 13;
+        }
+    }
+}
+
+if (!DataView.prototype.getBfloat16) {
+    DataView.prototype.getBfloat16 = function(byteOffset, littleEndian) {
+        if (littleEndian) {
+            DataView.__bfloat16_get_uint16_le[1] = this.getUint16(byteOffset, littleEndian);
+            return DataView.__bfloat16_get_float32_le[0];
+        }
+        DataView.__bfloat16_get_uint16_be[0] = this.getUint16(byteOffset, littleEndian);
+        return DataView.__bfloat16_get_float32_be[0];
+    };
+    DataView.__bfloat16_get_float32_le = new Float32Array(1);
+    DataView.__bfloat16_get_float32_be = new Float32Array(1);
+    DataView.__bfloat16_get_uint16_le = new Uint16Array(DataView.__bfloat16_get_float32_le.buffer, DataView.__bfloat16_get_float32_le.byteOffset, 2);
+    DataView.__bfloat16_get_uint16_be = new
Uint16Array(DataView.__bfloat16_get_float32_be.buffer, DataView.__bfloat16_get_float32_be.byteOffset, 2);
+}
+
+DataView.__float8e4m3_float32 = new Float32Array(1);
+DataView.__float8e4m3_uint32 = new Uint32Array(DataView.__float8e4m3_float32.buffer, DataView.__float8e4m3_float32.byteOffset, 1);
+DataView.prototype.getFloat8e4m3 = function(byteOffset, fn, uz) {
+    const value = this.getUint8(byteOffset);
+    let exponent_bias = 7;
+    if (uz) {
+        exponent_bias = 8;
+        if (value == 0x80) {
+            return NaN;
+        }
+    } else if (value === 255) {
+        return -NaN;
+    } else if (value === 0x7f) {
+        return NaN;
+    }
+    let expo = (value & 0x78) >> 3;
+    let mant = value & 0x07;
+    const sign = value & 0x80;
+    let res = sign << 24;
+    if (expo == 0) {
+        if (mant > 0) {
+            // Subnormal: renormalize the mantissa into the float32 exponent.
+            expo = 0x7F - exponent_bias;
+            if ((mant & 0x4) === 0) {
+                mant &= 0x3;
+                mant <<= 1;
+                expo -= 1;
+            }
+            if ((mant & 0x4) === 0) {
+                mant &= 0x3;
+                mant <<= 1;
+                expo -= 1;
+            }
+            res |= (mant & 0x3) << 21;
+            res |= expo << 23;
+        }
+    } else {
+        res |= mant << 20;
+        expo += 0x7F - exponent_bias;
+        res |= expo << 23;
+    }
+    DataView.__float8e4m3_uint32[0] = res;
+    return DataView.__float8e4m3_float32[0];
+};
+
+DataView.__float8e5m2_float32 = new Float32Array(1);
+DataView.__float8e5m2_uint32 = new Uint32Array(DataView.__float8e5m2_float32.buffer, DataView.__float8e5m2_float32.byteOffset, 1);
+DataView.prototype.getFloat8e5m2 = function(byteOffset, fn, uz) {
+    const value = this.getUint8(byteOffset);
+    let exponent_bias = NaN;
+    if (fn && uz) {
+        if (value == 0x80) {
+            return NaN;
+        }
+        exponent_bias = 16;
+    } else if (!fn && !uz) {
+        if (value >= 253 && value <= 255) {
+            return -NaN;
+        }
+        if (value >= 126 && value <= 127) {
+            return NaN;
+        }
+        if (value === 252) {
+            return -Infinity;
+        }
+        if (value === 124) {
+            return Infinity;
+        }
+        exponent_bias = 15;
+    }
+    let expo = (value & 0x7C) >> 2;
+    let mant = value & 0x03;
+    let res = (value & 0x80) << 24;
+    if (expo == 0) {
+        if (mant > 0) {
+            expo = 0x7F - exponent_bias;
+            if ((mant & 0x2) === 0) {
+                mant &= 0x1;
+                mant <<= 1;
+                expo -= 1;
+            }
+            res |= (mant & 0x1) << 22;
+            res |= expo << 23;
+        }
+    } else {
+        res |= mant << 21;
+        expo += 0x7F - exponent_bias;
+        res |= expo << 23;
+    }
+    DataView.__float8e5m2_uint32[0] = res;
+    return DataView.__float8e5m2_float32[0];
+};
+
+DataView.prototype.getInt64 = DataView.prototype.getInt64 || function(byteOffset, littleEndian) {
+    return littleEndian ?
+        new base.Int64(this.getUint32(byteOffset, true), this.getUint32(byteOffset + 4, true)) :
+        new base.Int64(this.getUint32(byteOffset + 4, false), this.getUint32(byteOffset, false));
+};
+
+DataView.prototype.setInt64 = DataView.prototype.setInt64 || function(byteOffset, value, littleEndian) {
+    if (littleEndian) {
+        this.setUint32(byteOffset, value.low, true);
+        this.setUint32(byteOffset + 4, value.high, true);
+    } else {
+        this.setUint32(byteOffset + 4, value.low, false);
+        this.setUint32(byteOffset, value.high, false);
+    }
+};
+
+DataView.prototype.getIntBits = DataView.prototype.getIntBits || function(offset, bits, littleEndian) {
+    offset = offset * bits;
+    const position = Math.floor(offset / 8);
+    const remainder = offset % 8;
+    let value = (remainder + bits) <= 8 ?
+        littleEndian ? this.getUint8(position) >> remainder /* TODO */ : this.getUint8(position) >> (8 - remainder - bits) :
+        littleEndian ?
this.getUint16(position, true) >> remainder /* TODO */ : this.getUint16(position, false) >> (16 - remainder - bits);
+    value &= (1 << bits) - 1;
+    if (value & (1 << (bits - 1))) {
+        value -= 1 << bits;
+    }
+    return value;
+};
+
+DataView.prototype.getUint64 = DataView.prototype.getUint64 || function(byteOffset, littleEndian) {
+    return littleEndian ?
+        new base.Uint64(this.getUint32(byteOffset, true), this.getUint32(byteOffset + 4, true)) :
+        new base.Uint64(this.getUint32(byteOffset + 4, false), this.getUint32(byteOffset, false));
+};
+
+DataView.prototype.setUint64 = DataView.prototype.setUint64 || function(byteOffset, value, littleEndian) {
+    if (littleEndian) {
+        this.setUint32(byteOffset, value.low, true);
+        this.setUint32(byteOffset + 4, value.high, true);
+    } else {
+        this.setUint32(byteOffset + 4, value.low, false);
+        this.setUint32(byteOffset, value.high, false);
+    }
+};
+
+DataView.prototype.getUintBits = DataView.prototype.getUintBits || function(offset, bits, littleEndian) {
+    offset = offset * bits;
+    const position = Math.floor(offset / 8);
+    const remainder = offset % 8;
+    const value = (remainder + bits) <= 8 ?
+        littleEndian ? this.getUint8(position) >> remainder /* TODO */ : this.getUint8(position) >> (8 - remainder - bits) :
+        littleEndian ? this.getUint16(position, true) >> remainder /* TODO */ : this.getUint16(position, false) >> (16 - remainder - bits);
+    return value & ((1 << bits) - 1);
+};
+
+DataView.prototype.getComplex64 = DataView.prototype.getComplex64 || function(byteOffset, littleEndian) {
+    const real = littleEndian ? this.getFloat32(byteOffset, littleEndian) : this.getFloat32(byteOffset + 4, littleEndian);
+    const imaginary = littleEndian ? this.getFloat32(byteOffset + 4, littleEndian) : this.getFloat32(byteOffset, littleEndian);
+    return base.Complex64.create(real, imaginary);
+};
+
+DataView.prototype.setComplex64 = DataView.prototype.setComplex64 || function(byteOffset, value, littleEndian) {
+    if (littleEndian) {
+        this.setFloat32(byteOffset, value.real, littleEndian);
+        this.setFloat32(byteOffset + 4, value.imaginary, littleEndian);
+    } else {
+        this.setFloat32(byteOffset + 4, value.real, littleEndian);
+        this.setFloat32(byteOffset, value.imaginary, littleEndian);
+    }
+};
+
+DataView.prototype.getComplex128 = DataView.prototype.getComplex128 || function(byteOffset, littleEndian) {
+    const real = littleEndian ? this.getFloat64(byteOffset, littleEndian) : this.getFloat64(byteOffset + 8, littleEndian);
+    const imaginary = littleEndian ? this.getFloat64(byteOffset + 8, littleEndian) : this.getFloat64(byteOffset, littleEndian);
+    return base.Complex128.create(real, imaginary);
+};
+
+DataView.prototype.setComplex128 = DataView.prototype.setComplex128 || function(byteOffset, value, littleEndian) {
+    if (littleEndian) {
+        this.setFloat64(byteOffset, value.real, littleEndian);
+        this.setFloat64(byteOffset + 8, value.imaginary, littleEndian);
+    } else {
+        this.setFloat64(byteOffset + 8, value.real, littleEndian);
+        this.setFloat64(byteOffset, value.imaginary, littleEndian);
+    }
+};
+
+base.BinaryStream = class {
+
+    constructor(buffer) {
+        this._buffer = buffer;
+        this._length = buffer.length;
+        this._position = 0;
+    }
+
+    get position() {
+        return this._position;
+    }
+
+    get length() {
+        return this._length;
+    }
+
+    stream(length) {
+        const buffer = this.read(length);
+        return new base.BinaryStream(buffer.slice(0));
+    }
+
+    seek(position) {
+        this._position = position >= 0 ?
position : this._length + position; + if (this._position > this._buffer.length) { + throw new Error(`Expected ${this._position - this._buffer.length} more bytes. The file might be corrupted. Unexpected end of file.`); + } + } + + skip(offset) { + this._position += offset; + if (this._position > this._buffer.length) { + throw new Error(`Expected ${this._position - this._buffer.length} more bytes. The file might be corrupted. Unexpected end of file.`); + } + } + + peek(length) { + if (this._position === 0 && length === undefined) { + return this._buffer; + } + const position = this._position; + this.skip(length !== undefined ? length : this._length - this._position); + const end = this._position; + this.seek(position); + return this._buffer.subarray(position, end); + } + + read(length) { + if (this._position === 0 && length === undefined) { + this._position = this._length; + return this._buffer; + } + const position = this._position; + this.skip(length !== undefined ? length : this._length - this._position); + return this._buffer.subarray(position, this._position); + } + + byte() { + const position = this._position; + this.skip(1); + return this._buffer[position]; + } +}; + +base.BinaryReader = class { + + constructor(data, littleEndian) { + this._buffer = data instanceof Uint8Array ? data : data.peek(); + this._littleEndian = littleEndian !== false; + this._position = 0; + this._length = this._buffer.length; + this._view = new DataView(this._buffer.buffer, this._buffer.byteOffset, this._buffer.byteLength); + } + + get length() { + return this._length; + } + + get position() { + return this._position; + } + + seek(position) { + this._position = position >= 0 ? position : this._length + position; + if (this._position > this._length || this._position < 0) { + throw new Error(`Expected ${this._position - this._length} more bytes. The file might be corrupted. Unexpected end of file.`); + } + } + + skip(offset) { + this._position += offset; + if (this._position > this._length) { + throw new Error(`Expected ${this._position - this._length} more bytes. The file might be corrupted. Unexpected end of file.`); + } + } + + align(mod) { + if (this._position % mod != 0) { + this.skip(mod - (this._position % mod)); + } + } + + peek(length) { + if (this._position === 0 && length === undefined) { + return this._buffer; + } + const position = this._position; + this.skip(length !== undefined ? length : this._length - this._position); + const end = this._position; + this._position = position; + return this._buffer.slice(position, end); + } + + read(length) { + if (this._position === 0 && length === undefined) { + this._position = this._length; + return this._buffer; + } + const position = this._position; + this.skip(length !== undefined ? 
length : this._length - this._position); + return this._buffer.slice(position, this._position); + } + + byte() { + const position = this._position; + this.skip(1); + return this._buffer[position]; + } + + int8() { + const position = this._position; + this.skip(1); + return this._view.getInt8(position, this._littleEndian); + } + + int16() { + const position = this._position; + this.skip(2); + return this._view.getInt16(position, this._littleEndian); + } + + int32() { + const position = this._position; + this.skip(4); + return this._view.getInt32(position, this._littleEndian); + } + + int64() { + const position = this._position; + this.skip(8); + return this._view.getInt64(position, this._littleEndian).toNumber(); + } + + uint16() { + const position = this._position; + this.skip(2); + return this._view.getUint16(position, this._littleEndian); + } + + uint32() { + const position = this._position; + this.skip(4); + return this._view.getUint32(position, this._littleEndian); + } + + uint64() { + const position = this._position; + this.skip(8); + const low = this._view.getUint32(position, this._littleEndian); + const high = this._view.getUint32(position + 4, this._littleEndian); + if (high === 0) { + return low; + } + const value = (high * 4294967296) + low; + if (Number.isSafeInteger(value)) { + return value; + } + throw new Error("Unsigned 64-bit value exceeds safe integer."); + } + + float32() { + const position = this._position; + this.skip(4); + return this._view.getFloat32(position, this._littleEndian); + } + + float64() { + const position = this._position; + this.skip(8); + return this._view.getFloat64(position, this._littleEndian); + } + + string() { + const length = this.uint32(); + const position = this._position; + this.skip(length); + const data = this._buffer.subarray(position, this._position); + this._decoder = this._decoder || new TextDecoder('utf-8'); + return this._decoder.decode(data); + } + + boolean() { + return this.byte() !== 0 ? 
true : false; + } +}; + +base.StreamReader = class { + + constructor(stream, littleEndian) { + this._stream = stream; + this._littleEndian = littleEndian !== false; + this._buffer = new Uint8Array(8); + this._view = new DataView(this._buffer.buffer, this._buffer.byteOffset, this._buffer.byteLength); + } + + get position() { + return this._stream.position; + } + + get length() { + return this._stream.length; + } + + seek(position) { + this._stream.seek(position); + } + + skip(position) { + this._stream.skip(position); + } + + stream(length) { + return this._stream.stream(length); + } + + read(length) { + return this._stream.read(length); + } + + byte() { + return this._stream.byte(); + } + + int16() { + const buffer = this._stream.read(2); + this._buffer.set(buffer, 0); + return this._view.getInt16(0, this._littleEndian); + } + + int32() { + const buffer = this._stream.read(4); + this._buffer.set(buffer, 0); + return this._view.getInt32(0, this._littleEndian); + } + + uint16() { + const buffer = this._stream.read(2); + this._buffer.set(buffer, 0); + return this._view.getUint16(0, this._littleEndian); + } + + uint32() { + const buffer = this._stream.read(4); + this._buffer.set(buffer, 0); + return this._view.getUint32(0, this._littleEndian); + } + + uint64() { + const low = this.uint32(); + const high = this.uint32(); + if (high === 0) { + return low; + } + const value = (high * 4294967296) + low; + if (Number.isSafeInteger(value)) { + return value; + } + throw new Error("Unsigned 64-bit value exceeds safe integer."); + } + + float32() { + const buffer = this._stream.read(4); + this._buffer.set(buffer, 0); + return this._view.getFloat32(0, this._littleEndian); + } +}; + +base.Telemetry = class { + + constructor(window) { + this._window = window; + this._navigator = window.navigator; + this._config = new Map(); + this._metadata = {}; + this._schema = new Map([ + [ 'protocol_version', 'v' ], + [ 'tracking_id', 'tid' ], + [ 'hash_info', 'gtm' ], + [ '_page_id', '_p'], + [ 'client_id', 'cid' ], + [ 'language', 'ul' ], + [ 'screen_resolution', 'sr' ], + [ '_user_agent_architecture', 'uaa' ], + [ '_user_agent_bitness', 'uab' ], + [ '_user_agent_full_version_list', 'uafvl' ], + [ '_user_agent_mobile', 'uamb' ], + [ '_user_agent_model', 'uam' ], + [ '_user_agent_platform', 'uap' ], + [ '_user_agent_platform_version', 'uapv' ], + [ '_user_agent_wow64', 'uaw' ], + [ 'hit_count', '_s' ], + [ 'session_id', 'sid' ], + [ 'session_number', 'sct' ], + [ 'session_engaged', 'seg' ], + [ 'engagement_time_msec', '_et' ], + [ 'page_location', 'dl' ], + [ 'page_title', 'dt' ], + [ 'page_referrer', 'dr' ], + [ 'is_first_visit', '_fv' ], + [ 'is_external_event', '_ee' ], + [ 'is_new_to_site', '_nsi' ], + [ 'is_session_start', '_ss' ], + [ 'event_name', 'en' ] + ]); + } + + async start(measurement_id, client_id, session) { + this._session = session && typeof session === 'string' ? session.replace(/^GS1\.1\./, '').split('.') : null; + this._session = Array.isArray(this._session) && this._session.length >= 7 ? this._session : [ '0', '0', '0', '0', '0', '0', '0' ]; + this._session[0] = Date.now(); + this._session[1] = parseInt(this._session[1], 10) + 1; + this._engagement_time_msec = 0; + if (this._config.size > 0) { + throw new Error('Invalid session state.'); + } + this.set('protocol_version', 2); + this.set('tracking_id', measurement_id); + this.set('hash_info', '2oebu0'); + this.set('_page_id', Math.floor(Math.random() * 2147483648)); + client_id = client_id ? 
client_id.replace(/^(GA1\.\d\.)*/, '') : null; + if (client_id && client_id.indexOf('.') !== 1) { + this.set('client_id', client_id); + } else { + const random = String(Math.round(0x7FFFFFFF * Math.random())); + const time = Date.now(); + const value = [ random, Math.round(time / 1e3) ].join('.'); + this.set('client_id', value); + this._metadata.is_first_visit = 1; + this._metadata.is_new_to_site = 1; + } + this.set('language', ((this._navigator && (this._navigator.language || this._navigator.browserLanguage)) || '').toLowerCase()); + this.set('screen_resolution', `${window.screen ? window.screen.width : 0}x${window.screen ? window.screen.height : 0}`); + if (this._navigator && this._navigator.userAgentData && this._navigator.userAgentData.getHighEntropyValues) { + const values = await this._navigator.userAgentData.getHighEntropyValues([ 'platform', 'platformVersion', 'architecture', 'model', 'uaFullVersion', 'bitness', 'fullVersionList', 'wow64' ]); + if (values) { + this.set('_user_agent_architecture', values.architecture); + this.set('_user_agent_bitness', values.bitness); + this.set('_user_agent_full_version_list', Array.isArray(values.fullVersionList) ? values.fullVersionList.map((h) => `${encodeURIComponent(h.brand || '')};${encodeURIComponent(h.version || '')}`).join('|') : ''); + this.set('_user_agent_mobile', values.mobile ? 1 : 0); + this.set('_user_agent_model', values.model); + this.set('_user_agent_platform', values.platform); + this.set('_user_agent_platform_version', values.platformVersion); + this.set('_user_agent_wow64', values.wow64 ? 1 : 0); + } + } + this.set('hit_count', 1); + this.set('session_id', this._session[0]); + this.set('session_number', this._session[1]); + this.set('session_engaged', 0); + this._metadata.is_session_start = 1; + this._metadata.is_external_event = 1; + window.addEventListener('focus', () => this._update(true, undefined, undefined)); + window.addEventListener('blur', () => this._update(false, undefined, undefined)); + window.addEventListener('pageshow', () => this._update(undefined, true, undefined)); + window.addEventListener('pagehide', () => this._update(undefined, false, undefined)); + window.addEventListener('visibilitychange', () => this._update(undefined, undefined, window.document.visibilityState !== 'hidden')); + window.addEventListener('beforeunload', () => this._update() && this.send('user_engagement', {})); + } + + get session() { + return this._session ? this._session.join('.') : null; + } + + set(name, value) { + const key = this._schema.get(name); + if (value !== undefined && value !== null) { + this._config.set(key, value); + } else if (this._config.has(key)) { + this._config.delete(key); + } + this._cache = null; + } + + get(name) { + const key = this._schema.get(name); + return this._config.get(key); + } + + send(name, params) { + if (this._session) { + try { + params = Object.assign({ event_name: name }, this._metadata, /* { debug_mode: true },*/ params); + this._metadata = {}; + if (this._update()) { + params.engagement_time_msec = this._engagement_time_msec; + this._engagement_time_msec = 0; + } + const build = (entries) => entries.map(([name, value]) => `${name}=${encodeURIComponent(value)}`).join('&'); + this._cache = this._cache || build(Array.from(this._config)); + const key = (name, value) => this._schema.get(name) || ('number' === typeof value && !isNaN(value) ? 'epn.' 
: 'ep.') + name; + const body = build(Object.entries(params).map(([name, value]) => [ key(name, value), value ])); + const url = `https://analytics.google.com/g/collect?${this._cache}`; + this._navigator.sendBeacon(url, body); + this._session[2] = this.get('session_engaged') || '0'; + this.set('hit_count', this.get('hit_count') + 1); + } catch (e) { + // continue regardless of error + } + } + } + + _update(focused, page, visible) { + this._focused = focused === true || focused === false ? focused : this._focused; + this._page = page === true || page === false ? page : this._page; + this._visible = visible === true || visible === false ? visible : this._visible; + const time = Date.now(); + if (this._start_time) { + this._engagement_time_msec += (time - this._start_time); + this._start_time = 0; + } + if (this._focused !== false && this._page !== false && this._visible !== false) { + this._start_time = time; + } + return this._engagement_time_msec > 20; + } +}; + +base.Metadata = class { + + get extensions() { + return [ + 'onnx', 'tflite', 'pb', 'pt', 'pt2', 'pth', 'h5', 'pbtxt', 'prototxt', 'caffemodel', 'mlmodel', 'mlpackage', + 'model', 'json', 'xml', 'cfg', 'weights', 'bin', + 'ort', + 'dnn', 'cmf', + 'gguf', + 'hd5', 'hdf5', 'keras', + 'tfl', 'circle', 'lite', + 'mlnet', 'mar', 'maxviz', 'meta', 'nn', 'ngf', 'hn', 'har', + 'param', 'params', + 'paddle', 'pdiparams', 'pdmodel', 'pdopt', 'pdparams', 'nb', + 'pkl', 'joblib', 'safetensors', + 'ptl', 't7', + 'dlc', 'uff', 'armnn', + 'mnn', 'ms', 'ncnn', 'om', 'tm', 'mge', 'tmfile', 'tnnproto', 'xmodel', 'kmodel', 'rknn', + 'tar', 'zip' + ]; + } +}; + +if (typeof window !== 'undefined' && typeof window.Long != 'undefined') { + window.long = { Long: window.Long }; + window.Int64 = base.Int64; + window.Uint64 = base.Uint64; +} + +export const Int64 = base.Int64; +export const Uint64 = base.Uint64; +export const Complex64 = base.Complex64; +export const Complex128 = base.Complex128; +export const BinaryStream = base.BinaryStream; +export const BinaryReader = base.BinaryReader; +export const StreamReader = base.StreamReader; +export const Telemetry = base.Telemetry; +export const Metadata = base.Metadata; diff --git a/bigdl-metadata.json b/bigdl-metadata.json new file mode 100644 index 00000000000..d42b79c7e68 --- /dev/null +++ b/bigdl-metadata.json @@ -0,0 +1,95 @@ +[ + { + "name": "com.intel.analytics.bigdl.nn.Dropout", + "category": "Dropout" + }, + { + "name": "com.intel.analytics.bigdl.nn.InferReshape", + "category": "Shape" + }, + { + "name": "com.intel.analytics.bigdl.nn.JoinTable", + "category": "Tensor", + "inputs": [ + { "name": "inputs", "list": true } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "com.intel.analytics.bigdl.nn.Linear", + "category": "Layer", + "inputs": [ + { "name": "inputs" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "com.intel.analytics.bigdl.nn.NormalizeScale", + "category": "Normalization", + "inputs": [ + { "name": "inputs" }, + { "name": "w" } + ] + }, + { + "name": "com.intel.analytics.bigdl.nn.ReLU", + "category": "Activation" + }, + { + "name": "Scale", + "category": "Layer", + "inputs": [ + { "name": "inputs" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "SoftMax", + "category": "Activation" + }, + { + "name": "com.intel.analytics.bigdl.nn.SpatialAveragePooling", + "category": "Pool" + }, + { + "name": "com.intel.analytics.bigdl.nn.SpatialBatchNormalization", + "category": "Normalization" + }, + { + "name": 
"com.intel.analytics.bigdl.nn.quantized.SpatialConvolution", + "category": "Layer", + "inputs": [ + { "name": "inputs" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "com.intel.analytics.bigdl.nn.SpatialCrossMapLRN", + "category": "Normalization" + }, + { + "name": "com.intel.analytics.bigdl.nn.SpatialDilatedConvolution", + "category": "Layer", + "inputs": [ + { "name": "inputs" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "com.intel.analytics.bigdl.nn.SpatialMaxPooling", + "category": "Pool" + }, + { + "name": "com.intel.analytics.bigdl.nn.Transpose", + "category": "Shape" + }, + { + "name": "com.intel.analytics.bigdl.nn.View" + } +] \ No newline at end of file diff --git a/bigdl-proto.js b/bigdl-proto.js new file mode 100644 index 00000000000..c8410c7af60 --- /dev/null +++ b/bigdl-proto.js @@ -0,0 +1,641 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('bigdl'); + +$root.com = {}; + +$root.com.intel = {}; + +$root.com.intel.analytics = {}; + +$root.com.intel.analytics.bigdl = {}; + +$root.com.intel.analytics.bigdl.serialization = {}; + +$root.com.intel.analytics.bigdl.serialization.BigDLModule = class BigDLModule { + + constructor() { + this.subModules = []; + this.preModules = []; + this.nextModules = []; + this.attr = {}; + this.parameters = []; + this.inputScales = []; + this.outputScales = []; + this.weightScales = []; + } + + static decode(reader, length) { + const message = new $root.com.intel.analytics.bigdl.serialization.BigDLModule(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.subModules.push($root.com.intel.analytics.bigdl.serialization.BigDLModule.decode(reader, reader.uint32())); + break; + case 3: + message.weight = $root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32()); + break; + case 4: + message.bias = $root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32()); + break; + case 5: + message.preModules.push(reader.string()); + break; + case 6: + message.nextModules.push(reader.string()); + break; + case 7: + message.moduleType = reader.string(); + break; + case 8: + reader.entry(message.attr, () => reader.string(), () => $root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32())); + break; + case 9: + message.version = reader.string(); + break; + case 10: + message.train = reader.bool(); + break; + case 11: + message.namePostfix = reader.string(); + break; + case 12: + message.id = reader.int32(); + break; + case 13: + message.inputShape = $root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32()); + break; + case 14: + message.outputShape = $root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32()); + break; + case 15: + message.hasParameters = reader.bool(); + break; + case 16: + message.parameters.push($root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32())); + break; + case 17: + message.isMklInt8Enabled = reader.bool(); + break; + case 18: + message.inputDimMasks = reader.int32(); + break; + case 19: + message.inputScales.push($root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32())); + break; + case 20: + message.outputDimMasks = reader.int32(); + break; + case 21: + 
message.outputScales.push($root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32())); + break; + case 22: + message.weightDimMasks = reader.int32(); + break; + case 23: + message.weightScales.push($root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.name = ""; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.weight = null; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.bias = null; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.moduleType = ""; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.version = ""; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.train = false; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.namePostfix = ""; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.id = 0; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.inputShape = null; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.outputShape = null; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.hasParameters = false; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.isMklInt8Enabled = false; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.inputDimMasks = 0; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.outputDimMasks = 0; +$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.weightDimMasks = 0; + +$root.com.intel.analytics.bigdl.serialization.VarFormat = { + "EMPTY_FORMAT": 0, + "DEFAULT": 1, + "ONE_D": 2, + "IN_OUT": 3, + "OUT_IN": 4, + "IN_OUT_KW_KH": 5, + "OUT_IN_KW_KH": 6, + "GP_OUT_IN_KW_KH": 7, + "GP_IN_OUT_KW_KH": 8, + "OUT_IN_KT_KH_KW": 9 +}; + +$root.com.intel.analytics.bigdl.serialization.InitMethodType = { + "EMPTY_INITIALIZATION": 0, + "RANDOM_UNIFORM": 1, + "RANDOM_UNIFORM_PARAM": 2, + "RANDOM_NORMAL": 3, + "ZEROS": 4, + "ONES": 5, + "CONST": 6, + "XAVIER": 7, + "BILINEARFILLER": 8 +}; + +$root.com.intel.analytics.bigdl.serialization.RegularizerType = { + "L1L2Regularizer": 0, + "L1Regularizer": 1, + "L2Regularizer": 2 +}; + +$root.com.intel.analytics.bigdl.serialization.InputDataFormat = { + "NCHW": 0, + "NHWC": 1 +}; + +$root.com.intel.analytics.bigdl.serialization.TensorType = { + "DENSE": 0, + "QUANT": 1 +}; + +$root.com.intel.analytics.bigdl.serialization.InitMethod = class InitMethod { + + constructor() { + this.data = []; + } + + static decode(reader, length) { + const message = new $root.com.intel.analytics.bigdl.serialization.InitMethod(); + const end = length !== undefined ? 
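+ // A defined length marks a nested length-delimited message; otherwise decode to the end of the stream.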
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.methodType = reader.int32(); + break; + case 2: + message.data = reader.doubles(message.data, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.com.intel.analytics.bigdl.serialization.InitMethod.prototype.methodType = 0; + +$root.com.intel.analytics.bigdl.serialization.BigDLTensor = class BigDLTensor { + + constructor() { + this.size = []; + this.stride = []; + } + + static decode(reader, length) { + const message = new $root.com.intel.analytics.bigdl.serialization.BigDLTensor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.datatype = reader.int32(); + break; + case 2: + message.size = reader.array(message.size, () => reader.int32(), tag); + break; + case 3: + message.stride = reader.array(message.stride, () => reader.int32(), tag); + break; + case 4: + message.offset = reader.int32(); + break; + case 5: + message.dimension = reader.int32(); + break; + case 6: + message.nElements = reader.int32(); + break; + case 7: + message.isScalar = reader.bool(); + break; + case 8: + message.storage = $root.com.intel.analytics.bigdl.serialization.TensorStorage.decode(reader, reader.uint32()); + break; + case 9: + message.id = reader.int32(); + break; + case 10: + message.tensorType = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.datatype = 0; +$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.offset = 0; +$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.dimension = 0; +$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.nElements = 0; +$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.isScalar = false; +$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.storage = null; +$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.id = 0; +$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.tensorType = 0; + +$root.com.intel.analytics.bigdl.serialization.TensorStorage = class TensorStorage { + + constructor() { + this.float_data = []; + this.double_data = []; + this.bool_data = []; + this.string_data = []; + this.int_data = []; + this.long_data = []; + this.bytes_data = []; + } + + static decode(reader, length) { + const message = new $root.com.intel.analytics.bigdl.serialization.TensorStorage(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.datatype = reader.int32(); + break; + case 2: + message.float_data = reader.floats(message.float_data, tag); + break; + case 3: + message.double_data = reader.doubles(message.double_data, tag); + break; + case 4: + message.bool_data = reader.array(message.bool_data, () => reader.bool(), tag); + break; + case 5: + message.string_data.push(reader.string()); + break; + case 6: + message.int_data = reader.array(message.int_data, () => reader.int32(), tag); + break; + case 7: + message.long_data = reader.array(message.long_data, () => reader.int64(), tag); + break; + case 8: + message.bytes_data.push(reader.bytes()); + break; + case 9: + message.id = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.com.intel.analytics.bigdl.serialization.TensorStorage.prototype.datatype = 0; +$root.com.intel.analytics.bigdl.serialization.TensorStorage.prototype.id = 0; + +$root.com.intel.analytics.bigdl.serialization.Regularizer = class Regularizer { + + constructor() { + this.regularData = []; + } + + static decode(reader, length) { + const message = new $root.com.intel.analytics.bigdl.serialization.Regularizer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.regularizerType = reader.int32(); + break; + case 2: + message.regularData = reader.doubles(message.regularData, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.com.intel.analytics.bigdl.serialization.Regularizer.prototype.regularizerType = 0; + +$root.com.intel.analytics.bigdl.serialization.DataType = { + "INT32": 0, + "INT64": 1, + "FLOAT": 2, + "DOUBLE": 3, + "STRING": 4, + "BOOL": 5, + "CHAR": 6, + "SHORT": 7, + "BYTES": 8, + "REGULARIZER": 9, + "TENSOR": 10, + "VARIABLE_FORMAT": 11, + "INITMETHOD": 12, + "MODULE": 13, + "NAME_ATTR_LIST": 14, + "ARRAY_VALUE": 15, + "DATA_FORMAT": 16, + "CUSTOM": 17, + "SHAPE": 18 +}; + +$root.com.intel.analytics.bigdl.serialization.AttrValue = class AttrValue { + + constructor() { + } + + get value() { + $root.com.intel.analytics.bigdl.serialization.AttrValue.valueSet = $root.com.intel.analytics.bigdl.serialization.AttrValue.valueSet || new Set([ "int32Value", "int64Value", "floatValue", "doubleValue", "stringValue", "boolValue", "regularizerValue", "tensorValue", "variableFormatValue", "initMethodValue", "bigDLModuleValue", "nameAttrListValue", "arrayValue", "dataFormatValue", "customValue", "shape"]); + return Object.keys(this).find((key) => $root.com.intel.analytics.bigdl.serialization.AttrValue.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.com.intel.analytics.bigdl.serialization.AttrValue(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dataType = reader.int32(); + break; + case 2: + message.subType = reader.string(); + break; + case 3: + message.int32Value = reader.int32(); + break; + case 4: + message.int64Value = reader.int64(); + break; + case 5: + message.floatValue = reader.float(); + break; + case 6: + message.doubleValue = reader.double(); + break; + case 7: + message.stringValue = reader.string(); + break; + case 8: + message.boolValue = reader.bool(); + break; + case 9: + message.regularizerValue = $root.com.intel.analytics.bigdl.serialization.Regularizer.decode(reader, reader.uint32()); + break; + case 10: + message.tensorValue = $root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32()); + break; + case 11: + message.variableFormatValue = reader.int32(); + break; + case 12: + message.initMethodValue = $root.com.intel.analytics.bigdl.serialization.InitMethod.decode(reader, reader.uint32()); + break; + case 13: + message.bigDLModuleValue = $root.com.intel.analytics.bigdl.serialization.BigDLModule.decode(reader, reader.uint32()); + break; + case 14: + message.nameAttrListValue = $root.com.intel.analytics.bigdl.serialization.NameAttrList.decode(reader, reader.uint32()); + break; + case 15: + message.arrayValue = $root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue.decode(reader, reader.uint32()); + break; + case 16: + message.dataFormatValue = reader.int32(); + break; + case 17: + message.customValue = $root.google.protobuf.Any.decode(reader, reader.uint32()); + break; + case 18: + message.shape = $root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.com.intel.analytics.bigdl.serialization.AttrValue.prototype.dataType = 0; +$root.com.intel.analytics.bigdl.serialization.AttrValue.prototype.subType = ""; + +$root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue = class ArrayValue { + + constructor() { + this.i32 = []; + this.i64 = []; + this.flt = []; + this.dbl = []; + this.str = []; + this.boolean = []; + this.Regularizer = []; + this.tensor = []; + this.variableFormat = []; + this.initMethod = []; + this.bigDLModule = []; + this.nameAttrList = []; + this.dataFormat = []; + this.custom = []; + this.shape = []; + } + + static decode(reader, length) { + const message = new $root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = reader.int32(); + break; + case 2: + message.datatype = reader.int32(); + break; + case 3: + message.i32 = reader.array(message.i32, () => reader.int32(), tag); + break; + case 4: + message.i64 = reader.array(message.i64, () => reader.int64(), tag); + break; + case 5: + message.flt = reader.floats(message.flt, tag); + break; + case 6: + message.dbl = reader.doubles(message.dbl, tag); + break; + case 7: + message.str.push(reader.string()); + break; + case 8: + message.boolean = reader.array(message.boolean, () => reader.bool(), tag); + break; + case 9: + message.Regularizer.push($root.com.intel.analytics.bigdl.serialization.Regularizer.decode(reader, reader.uint32())); + break; + case 10: + message.tensor.push($root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32())); + break; + case 11: + message.variableFormat = reader.array(message.variableFormat, () => reader.int32(), tag); + break; + case 12: + message.initMethod.push($root.com.intel.analytics.bigdl.serialization.InitMethod.decode(reader, reader.uint32())); + break; + case 13: + message.bigDLModule.push($root.com.intel.analytics.bigdl.serialization.BigDLModule.decode(reader, reader.uint32())); + break; + case 14: + message.nameAttrList.push($root.com.intel.analytics.bigdl.serialization.NameAttrList.decode(reader, reader.uint32())); + break; + case 15: + message.dataFormat = reader.array(message.dataFormat, () => reader.int32(), tag); + break; + case 16: + message.custom.push($root.google.protobuf.Any.decode(reader, reader.uint32())); + break; + case 17: + message.shape.push($root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue.prototype.size = 0; +$root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue.prototype.datatype = 0; + +$root.com.intel.analytics.bigdl.serialization.NameAttrList = class NameAttrList { + + constructor() { + this.attr = {}; + } + + static decode(reader, length) { + const message = new $root.com.intel.analytics.bigdl.serialization.NameAttrList(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + reader.entry(message.attr, () => reader.string(), () => $root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.com.intel.analytics.bigdl.serialization.NameAttrList.prototype.name = ""; + +$root.com.intel.analytics.bigdl.serialization.Shape = class Shape { + + constructor() { + this.shapeValue = []; + this.shape = []; + } + + static decode(reader, length) { + const message = new $root.com.intel.analytics.bigdl.serialization.Shape(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shapeType = reader.int32(); + break; + case 2: + message.ssize = reader.int32(); + break; + case 3: + message.shapeValue = reader.array(message.shapeValue, () => reader.int32(), tag); + break; + case 4: + message.shape.push($root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.com.intel.analytics.bigdl.serialization.Shape.prototype.shapeType = 0; +$root.com.intel.analytics.bigdl.serialization.Shape.prototype.ssize = 0; + +$root.com.intel.analytics.bigdl.serialization.Shape.ShapeType = { + "SINGLE": 0, + "MULTI": 1 +}; + +$root.google = {}; + +$root.google.protobuf = {}; + +$root.google.protobuf.Any = class Any { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.google.protobuf.Any(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type_url = reader.string(); + break; + case 2: + message.value = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.google.protobuf.Any.prototype.type_url = ""; +$root.google.protobuf.Any.prototype.value = new Uint8Array([]); diff --git a/bigdl.js b/bigdl.js new file mode 100644 index 00000000000..e89fcdc5825 --- /dev/null +++ b/bigdl.js @@ -0,0 +1,307 @@ + +// Experimental + +import * as protobuf from './protobuf.js'; + +const bigdl = {}; + +bigdl.ModelFactory = class { + + match(context) { + const tags = context.tags('pb'); + if (tags.has(2) && tags.has(7) && tags.has(8) && tags.has(9) && tags.has(10) && tags.has(11) && tags.has(12)) { + return 'bigdl'; + } + return ''; + } + + async open(context) { + await context.require('./bigdl-proto'); + let module = null; + try { + // https://github.com/intel-analytics/BigDL/blob/master/spark/dl/src/main/resources/serialization/bigdl.proto + bigdl.proto = protobuf.get('bigdl').com.intel.analytics.bigdl.serialization; + const stream = context.stream; + const reader = protobuf.BinaryReader.open(stream); + module = bigdl.proto.BigDLModule.decode(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new bigdl.Error(`File format is not bigdl.BigDLModule (${message.replace(/\.$/, '')}).`); + } + const metadata = await context.metadata('bigdl-metadata.json'); + return new bigdl.Model(metadata, module); + } +}; + +bigdl.Model = class { + + constructor(metadata, module) { + const version = module && module.version ? module.version : ''; + this.format = `BigDL${version ? ` v${version}` : ''}`; + this.graphs = [ new bigdl.Graph(metadata, module) ]; + } +}; + +bigdl.Graph = class { + + constructor(metadata, module) { + this.type = module.moduleType; + this.inputs = []; + this.outputs = []; + this.nodes = []; + const tensors = module.attr && module.attr.global_storage && module.attr.global_storage.nameAttrListValue && module.attr.global_storage.nameAttrListValue.attr ? 
module.attr.global_storage.nameAttrListValue.attr : {}; + const values = new Map(); + values.map = (name) => { + if (!values.has(name)) { + values.set(name, new bigdl.Value(name)); + } + return values.get(name); + }; + const loadModule = (metadata, module, tensors) => { + switch (module.moduleType) { + case 'com.intel.analytics.bigdl.nn.StaticGraph': + case 'com.intel.analytics.bigdl.nn.Sequential': { + for (const submodule of module.subModules) { + loadModule(metadata, submodule, tensors); + } + break; + } + case 'com.intel.analytics.bigdl.nn.Input': { + const argument = new bigdl.Argument(module.name, [ values.map(module.name) ]); + this.inputs.push(argument); + break; + } + default: { + const node = new bigdl.Node(metadata, module, tensors, values); + this.nodes.push(node); + break; + } + } + }; + loadModule(metadata, module, tensors); + } +}; + +bigdl.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +bigdl.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new bigdl.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type ? type : initializer ? initializer.type : null; + this.initializer = initializer; + } +}; + +bigdl.Node = class { + + constructor(metadata, module, tensors, values) { + const type = module.moduleType; + this.name = module.name; + this.attributes = []; + this.inputs = []; + this.outputs = []; + this.inputs.push(new bigdl.Argument('input', module.preModules.map((id) => values.map(id)))); + this.type = metadata.type(type) || { name: type }; + const inputs = this.type && this.type.inputs ? this.type.inputs.slice() : []; + inputs.shift(); + if (module.weight) { + inputs.shift(); + this.inputs.push(new bigdl.Argument('weight', [ + new bigdl.Value('', null, new bigdl.Tensor(module.weight, tensors)) + ])); + } + if (module.bias) { + inputs.shift(); + this.inputs.push(new bigdl.Argument('bias', [ + new bigdl.Value('', null, new bigdl.Tensor(module.bias, tensors)) + ])); + } + if (module.parameters && module.parameters.length > 0) { + for (const parameter of module.parameters) { + const input = inputs.shift(); + const inputName = input ? 
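+ // Fall back to a positional name when the metadata schema does not declare this parameter input.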
input.name : this.inputs.length.toString(); + this.inputs.push(new bigdl.Argument(inputName, [ + new bigdl.Value('', null, new bigdl.Tensor(parameter, tensors)) + ])); + } + } + for (const [key, value] of Object.entries(module.attr)) { + if (key === 'module_numerics' || key === 'module_tags') { + continue; + } + if (value.dataType === bigdl.proto.DataType.TENSOR) { + if (value.value) { + this.inputs.push(new bigdl.Argument(key, [ new bigdl.Value('', null, new bigdl.Tensor(value.tensorValue, tensors)) ])); + } + continue; + } + if (value.dataType === bigdl.proto.DataType.REGULARIZER && value.value === undefined) { + continue; + } + if (value.dataType === bigdl.proto.DataType.ARRAY_VALUE && value.arrayValue.datatype === bigdl.proto.DataType.TENSOR) { + this.inputs.push(new bigdl.Argument(key, value.arrayValue.tensor.map((tensor) => new bigdl.Value('', null, new bigdl.Tensor(tensor, tensors))))); + continue; + } + this.attributes.push(new bigdl.Attribute(key, value)); + } + const output = this.name || this.type.name + module.namePostfix; + this.outputs.push(new bigdl.Argument('output', [ values.map(output) ])); + } +}; + +bigdl.Attribute = class { + + constructor(name, value) { + this.name = name; + switch (value.dataType) { + case bigdl.proto.DataType.INT32: { + this.type = 'int32'; + this.value = value.int32Value; + break; + } + case bigdl.proto.DataType.FLOAT: { + this.type = 'float32'; + this.value = value.floatValue; + break; + } + case bigdl.proto.DataType.DOUBLE: { + this.type = 'float64'; + this.value = value.doubleValue; + break; + } + case bigdl.proto.DataType.BOOL: { + this.type = 'boolean'; + this.value = value.boolValue; + break; + } + case bigdl.proto.DataType.REGULARIZER: { + this.value = value.value; + break; + } + case bigdl.proto.DataType.MODULE: { + this.value = value.bigDLModuleValue; + break; + } + case bigdl.proto.DataType.NAME_ATTR_LIST: { + this.value = value.nameAttrListValue; + break; + } + case bigdl.proto.DataType.ARRAY_VALUE: { + switch (value.arrayValue.datatype) { + case bigdl.proto.DataType.INT32: { + this.type = 'int32[]'; + this.value = value.arrayValue.i32; + break; + } + case bigdl.proto.DataType.FLOAT: { + this.type = 'float32[]'; + this.value = value.arrayValue.flt; + break; + } + case bigdl.proto.DataType.STRING: { + this.type = 'string[]'; + this.value = value.arrayValue.str; + break; + } + case bigdl.proto.DataType.TENSOR: { + this.type = 'tensor[]'; + this.value = value.arrayValue.tensor; + break; + } + default: { + throw new bigdl.Error(`Unsupported attribute array data type '${value.arrayValue.datatype}'.`); + } + } + break; + } + case bigdl.proto.DataType.DATA_FORMAT: { + switch (value.dataFormatValue) { + case 0: this.value = 'NCHW'; break; + case 1: this.value = 'NHWC'; break; + default: throw new bigdl.Error(`Unsupported data format '${value.dataFormatValue}'.`); + } + break; + } + default: { + throw new bigdl.Error(`Unsupported attribute data type '${value.dataType}'.`); + } + } + } +}; + +bigdl.Tensor = class { + + constructor(tensor /*, tensors */) { + this.type = new bigdl.TensorType(tensor.datatype, new bigdl.TensorShape(tensor.size)); + /* + if (tensor && tensor.id && tensors && tensors[tensor.id] && tensors[tensor.id].tensorValue && tensors[tensor.id].tensorValue.storage) { + const storage = tensors[tensor.id].tensorValue.storage; + switch (this.type.dataType) { + case 'float32': + if (storage.bytes_data && storage.bytes_data.length > 0) { + this.values = storage.bytes_data[0]; + this.encoding = '<'; + } + else if (storage.float_data && 
storage.float_data.length > 0) { + this.values = storage.float_data; + this.encoding = '|'; + } + break; + default: + break; + } + } + */ + } +}; + +bigdl.TensorType = class { + + constructor(dataType, shape) { + switch (dataType) { + case bigdl.proto.DataType.FLOAT: this.dataType = 'float32'; break; + case bigdl.proto.DataType.DOUBLE: this.dataType = 'float64'; break; + default: throw new bigdl.Error(`Unsupported tensor type '${dataType}'.`); + } + this.shape = shape; + } + + toString() { + return (this.dataType || '?') + this.shape.toString(); + } +}; + +bigdl.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + if (!dimensions.every((dimension) => Number.isInteger(dimension))) { + throw new bigdl.Error(`Invalid tensor shape '${JSON.stringify(dimensions)}'.`); + } + } + + toString() { + return this.dimensions ? (`[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`) : ''; + } +}; + +bigdl.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading BigDL model.'; + } +}; + +export const ModelFactory = bigdl.ModelFactory; + diff --git a/browser.js b/browser.js new file mode 100644 index 00000000000..7fd80d6e31f --- /dev/null +++ b/browser.js @@ -0,0 +1,834 @@ + +import * as base from './base.js'; + +const host = {}; + +host.BrowserHost = class { + + constructor() { + this._window = window; + this._navigator = window.navigator; + this._document = window.document; + this._telemetry = new base.Telemetry(this._window); + this._window.eval = () => { + throw new Error('window.eval() not supported.'); + }; + this._meta = {}; + for (const element of Array.from(this._document.getElementsByTagName('meta'))) { + if (element.name !== undefined && element.content !== undefined) { + this._meta[element.name] = this._meta[element.name] || []; + this._meta[element.name].push(element.content); + } + } + this._environment = { + name: this._document.title, + type: this._meta.type ? this._meta.type[0] : 'Browser', + version: this._meta.version ? this._meta.version[0] : null, + date: Array.isArray(this._meta.date) && this._meta.date.length > 0 && this._meta.date[0] ? new Date(`${this._meta.date[0].split(' ').join('T')}Z`) : new Date(), + packaged: this._meta.version && this._meta.version[0] !== '0.0.0', + platform: /(Mac|iPhone|iPod|iPad)/i.test(this._navigator.platform) ? 'darwin' : undefined, + agent: this._navigator.userAgent.toLowerCase().indexOf('safari') !== -1 && this._navigator.userAgent.toLowerCase().indexOf('chrome') === -1 ? 
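+ // Chrome's user agent also contains 'Safari', so detect Safari as 'safari' without 'chrome'.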
'safari' : '', + repository: this._element('logo-github').getAttribute('href'), + menu: true + }; + if (!/^\d\.\d\.\d$/.test(this.version)) { + throw new Error('Invalid version.'); + } + } + + get window() { + return this._window; + } + + get document() { + return this._document; + } + + get version() { + return this._environment.version; + } + + get type() { + return this._environment.type; + } + + async view(view) { + this._view = view; + const age = async () => { + const days = (new Date() - new Date(this._environment.date)) / (24 * 60 * 60 * 1000); + if (days > 180) { + this.document.body.classList.remove('spinner'); + this.window.exports.terminate('Please update to the newest version.', 'Download', () => { + const link = this._element('logo-github').href; + this.openURL(link); + }); + return new Promise(() => {}); + } + return Promise.resolve(); + }; + const consent = async () => { + if (this._getCookie('consent') || this._getCookie('_ga')) { + return; + } + let consent = true; + try { + const text = await this._request('https://ipinfo.io/json', { 'Content-Type': 'application/json' }, 'utf-8', null, 2000); + const json = JSON.parse(text); + const countries = ['AT', 'BE', 'BG', 'HR', 'CZ', 'CY', 'DK', 'EE', 'FI', 'FR', 'DE', 'EL', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT', 'NL', 'NO', 'PL', 'PT', 'SK', 'ES', 'SE', 'GB', 'UK', 'GR', 'EU', 'RO']; + if (json && json.country && countries.indexOf(json.country) === -1) { + consent = false; + } + } catch (error) { + // continue regardless of error + } + if (consent) { + this.document.body.classList.remove('spinner'); + await this._message('This app uses cookies to report errors and anonymous usage information.', 'Accept'); + } + this._setCookie('consent', Date.now().toString(), 30); + }; + const telemetry = async () => { + if (this._environment.packaged) { + this._window.addEventListener('error', (event) => { + const error = event instanceof ErrorEvent && event.error && event.error instanceof Error ? event.error : new Error(event && event.message ? event.message : JSON.stringify(event)); + this.exception(error, true); + }); + const measurement_id = '848W2NVWVH'; + const user = this._getCookie('_ga').replace(/^(GA1\.\d\.)*/, ''); + const session = this._getCookie(`_ga${measurement_id}`); + await this._telemetry.start(`G-${measurement_id}`, user, session); + this._telemetry.set('page_location', this._document.location && this._document.location.href ? this._document.location.href : null); + this._telemetry.set('page_title', this._document.title ? this._document.title : null); + this._telemetry.set('page_referrer', this._document.referrer ? 
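+ // Telemetry.set() deletes null values, so a missing referrer is simply omitted from the payload.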
this._document.referrer : null); + this._telemetry.send('page_view', { + app_name: this.type, + app_version: this.version, + }); + this._telemetry.send('scroll', { + percent_scrolled: 90, + app_name: this.type, + app_version: this.version + }); + this._setCookie('_ga', `GA1.2.${this._telemetry.get('client_id')}`, 1200); + this._setCookie(`_ga${measurement_id}`, `GS1.1.${this._telemetry.session}`, 1200); + } + }; + const capabilities = async () => { + const filter = (list) => { + return list.filter((capability) => { + const path = capability.split('.').reverse(); + let obj = this.window[path.pop()]; + while (obj && path.length > 0) { + obj = obj[path.pop()]; + } + return obj; + }); + }; + const capabilities = filter([ 'fetch', 'DataView.prototype.getBigInt64', 'Worker' ]); + this.event('browser_open', { + browser_capabilities: capabilities.map((capability) => capability.split('.').pop()).join(',') + }); + return Promise.resolve(); + }; + await age(); + await consent(); + await telemetry(); + await capabilities(); + } + + async start() { + const hash = this.window.location.hash ? this.window.location.hash.replace(/^#/, '') : ''; + const search = this.window.location.search; + const params = new URLSearchParams(search + (hash ? `&${hash}` : '')); + if (this._meta.file && this._meta.identifier) { + const [url] = this._meta.file; + if (this._view.accept(url)) { + this._openModel(this._url(url), null); + this._document.title = this._meta.identifier; + return; + } + } + const url = params.get('url'); + if (url) { + const identifier = params.get('identifier') || null; + const location = url + .replace(/^https:\/\/github\.com\/([\w-]*\/[\w-]*)\/blob\/([\w/\-_.]*)(\?raw=true)?$/, 'https://raw.githubusercontent.com/$1/$2') + .replace(/^https:\/\/github\.com\/([\w-]*\/[\w-]*)\/raw\/([\w/\-_.]*)$/, 'https://raw.githubusercontent.com/$1/$2') + .replace(/^https:\/\/huggingface.co\/(.*)\/blob\/(.*)$/, 'https://huggingface.co/$1/resolve/$2'); + if (this._view.accept(identifier || location)) { + const title = await this._openModel(location, identifier); + if (title) { + this.document.title = title; + return; + } + } + } + const gist = params.get('gist'); + if (gist) { + this._openGist(gist); + return; + } + const openFileButton = this._element('open-file-button'); + const openFileDialog = this._element('open-file-dialog'); + if (openFileButton && openFileDialog) { + openFileButton.addEventListener('click', () => { + this.execute('open'); + }); + const mobileSafari = this.environment('platform') === 'darwin' && navigator.maxTouchPoints && navigator.maxTouchPoints > 1; + if (!mobileSafari) { + const extensions = new base.Metadata().extensions.map((extension) => `.${extension}`); + openFileDialog.setAttribute('accept', extensions.join(', ')); + } + openFileDialog.addEventListener('change', (e) => { + if (e.target && e.target.files && e.target.files.length > 0) { + const files = Array.from(e.target.files); + const file = files.find((file) => this._view.accept(file.name, file.size)); + if (file) { + this._open(file, files); + } + } + }); + } + this.document.addEventListener('dragover', (e) => { + e.preventDefault(); + }); + this.document.addEventListener('drop', (e) => { + e.preventDefault(); + }); + this.document.body.addEventListener('drop', (e) => { + e.preventDefault(); + if (e.dataTransfer && e.dataTransfer.files && e.dataTransfer.files.length > 0) { + const files = Array.from(e.dataTransfer.files); + const file = files.find((file) => this._view.accept(file.name, file.size)); + if (file) { + 
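+ // Open the first recognized file, passing the full list so multi-file formats can resolve sibling files.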
this._open(file, files); + } + } + }); + this._view.show('welcome'); + } + + environment(name) { + return this._environment[name]; + } + + async error(message, detail /*, cancel */) { + alert((message == 'Error' ? '' : `${message} `) + detail); + return 0; + } + + confirm(message, detail) { + return confirm(`${message} ${detail}`); + } + + async require(id) { + return import(`${id}.js`); + } + + save(name, extension, defaultPath, callback) { + callback(`${defaultPath}.${extension}`); + } + + export(file, blob) { + const element = this.document.createElement('a'); + element.download = file; + element.href = URL.createObjectURL(blob); + this.document.body.appendChild(element); + element.click(); + this.document.body.removeChild(element); + } + + execute(name /*, value */) { + switch (name) { + case 'open': { + const openFileDialog = this._element('open-file-dialog'); + if (openFileDialog) { + openFileDialog.value = ''; + openFileDialog.click(); + } + break; + } + case 'report-issue': { + this.openURL(`${this.environment('repository')}/issues/new`); + break; + } + case 'about': { + this._view.about(); + break; + } + default: { + break; + } + } + } + + request(file, encoding, base) { + const url = base ? (`${base}/${file}`) : this._url(file); + return this._request(url, null, encoding); + } + + openURL(url) { + this.window.location = url; + } + + exception(error, fatal) { + if (this._telemetry && error) { + const name = error.name ? `${error.name}: ` : ''; + const message = error.message ? error.message : JSON.stringify(error); + let context = ''; + let stack = ''; + if (error.stack) { + const format = (file, line, column) => { + return `${file.split('\\').join('/').split('/').pop()}:${line}:${column}`; + }; + const match = error.stack.match(/\n {4}at (.*) \((.*):(\d*):(\d*)\)/); + if (match) { + stack = `${match[1]} (${format(match[2], match[3], match[4])})`; + } else { + const match = error.stack.match(/\n {4}at (.*):(\d*):(\d*)/); + if (match) { + stack = `(${format(match[1], match[2], match[3])})`; + } else { + const match = error.stack.match(/\n {4}at (.*)\((.*)\)/); + if (match) { + stack = `(${format(match[1], match[2], match[3])})`; + } else { + const match = error.stack.match(/\s*@\s*(.*):(.*):(.*)/); + if (match) { + stack = `(${format(match[1], match[2], match[3])})`; + } else { + const match = error.stack.match(/.*\n\s*(.*)\s*/); + if (match) { + [, stack] = match; + } + } + } + } + } + } + if (error.context) { + context = typeof error.context === 'string' ? error.context : JSON.stringify(error.context); + } + this._telemetry.send('exception', { + app_name: this.type, + app_version: this.version, + error_name: name, + error_message: message, + error_context: context, + error_stack: stack, + error_fatal: fatal ? 
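+ // Normalize to a strict boolean for the analytics payload.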
true : false + }); + } + } + + event(name, params) { + if (name && params) { + params.app_name = this.type; + params.app_version = this.version; + this._telemetry.send(name, params); + } + } + + _request(url, headers, encoding, callback, timeout) { + return new Promise((resolve, reject) => { + const request = new XMLHttpRequest(); + if (!encoding) { + request.responseType = 'arraybuffer'; + } + if (timeout) { + request.timeout = timeout; + } + const error = (status) => { + const err = new Error(`The web request failed with status code ${status} at '${url}'.`); + err.type = 'error'; + err.url = url; + return err; + }; + const progress = (value) => { + if (callback) { + callback(value); + } + }; + request.onload = () => { + progress(0); + if (request.status == 200) { + if (request.responseType == 'arraybuffer') { + const buffer = new Uint8Array(request.response); + const stream = new base.BinaryStream(buffer); + resolve(stream); + } else { + resolve(request.responseText); + } + } else { + reject(error(request.status)); + } + }; + request.onerror = (e) => { + progress(0); + const err = error(request.status); + err.type = e.type; + reject(err); + }; + request.ontimeout = () => { + progress(0); + request.abort(); + const err = new Error(`The web request timed out in '${url}'.`); + err.type = 'timeout'; + err.url = url; + reject(err); + }; + request.onprogress = (e) => { + if (e && e.lengthComputable) { + progress(e.loaded / e.total * 100); + } + }; + request.open('GET', url, true); + if (headers) { + for (const [name, value] of Object.entries(headers)) { + request.setRequestHeader(name, value); + } + } + request.send(); + }); + } + + _url(file) { + file = file.startsWith('./') ? file.substring(2) : file.startsWith('/') ? file.substring(1) : file; + const location = this.window.location; + const pathname = location.pathname.endsWith('/') ? + location.pathname : + `${location.pathname.split('/').slice(0, -1).join('/')}/`; + return `${location.protocol}//${location.host}${pathname}${file}`; + } + + async _openModel(url, identifier) { + url = url.startsWith('data:') ? url : `${url + ((/\?/).test(url) ? 
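+ // Append a cache-busting timestamp, reusing '&' when the URL already carries a query string.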
'&' : '?')}cb=${(new Date()).getTime()}`; + this._view.show('welcome spinner'); + let context = null; + try { + const progress = (value) => { + this._view.progress(value); + }; + let stream = await this._request(url, null, null, progress); + if (url.startsWith('https://raw.githubusercontent.com/') && stream.length < 150) { + const buffer = stream.peek(); + const content = Array.from(buffer).map((c) => String.fromCodePoint(c)).join(''); + if (content.split('\n')[0] === 'version https://git-lfs.github.com/spec/v1') { + url = url.replace('https://raw.githubusercontent.com/', 'https://media.githubusercontent.com/media/'); + stream = await this._request(url, null, null, progress); + } + } + context = new host.BrowserHost.Context(this, url, identifier, stream); + this._telemetry.set('session_engaged', 1); + } catch (error) { + await this.error('Model load request failed.', error.message); + this._view.show('welcome'); + return null; + } + try { + await this._view.open(context); + return identifier || context.identifier; + } catch (err) { + if (err) { + this._view.error(err, null, 'welcome'); + } + return null; + } + } + + async _open(file, files) { + this._view.show('welcome spinner'); + const context = new host.BrowserHost.BrowserFileContext(this, file, files); + try { + await context.open(); + this._telemetry.set('session_engaged', 1); + await this._view.open(context); + this._view.show(null); + this.document.title = files[0].name; + } catch (error) { + this._view.error(error, null, null); + } + } + + async _openGist(gist) { + this._view.show('welcome spinner'); + const url = `https://api.github.com/gists/${gist}`; + try { + const text = await this._request(url, { 'Content-Type': 'application/json' }, 'utf-8'); + const json = JSON.parse(text); + if (json.message) { + this.error('Error while loading Gist.', json.message); + return; + } + const file = Object.values(json.files).find((file) => this._view.accept(file.filename)); + if (!file) { + this.error('Error while loading Gist.', 'Gist does not contain a model file.'); + return; + } + const identifier = file.filename; + const encoder = new TextEncoder(); + const buffer = encoder.encode(file.content); + const stream = new base.BinaryStream(buffer); + const context = new host.BrowserHost.Context(this, '', identifier, stream); + this._telemetry.set('session_engaged', 1); + try { + await this._view.open(context); + this.document.title = identifier; + } catch (error) { + if (error) { + this._view.error(error, error.name, 'welcome'); + } + } + } catch (error) { + this._view.error(error, 'Model load request failed.', 'welcome'); + } + } + + _setCookie(name, value, days) { + this.document.cookie = `${name}=; Max-Age=0`; + const location = this.window.location; + const domain = location && location.hostname && location.hostname.indexOf('.') !== -1 ? 
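+ // Scope the cookie to the registrable domain (last two labels) so it is shared across subdomains.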
`;domain=.${location.hostname.split('.').slice(-2).join('.')}` : ''; + const date = new Date(); + date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000)); + this.document.cookie = `${name}=${value}${domain};path=/;expires=${date.toUTCString()}`; + } + + _getCookie(name) { + for (const cookie of this.document.cookie.split(';')) { + const entry = cookie.split('='); + if (entry[0].trim() === name) { + return entry[1].trim(); + } + } + return ''; + } + + get(name) { + try { + if (typeof this.window.localStorage !== 'undefined') { + const content = this.window.localStorage.getItem(name); + return JSON.parse(content); + } + } catch (error) { + // continue regardless of error + } + return undefined; + } + + set(name, value) { + try { + if (typeof this.window.localStorage !== 'undefined') { + this.window.localStorage.setItem(name, JSON.stringify(value)); + } + } catch (error) { + // continue regardless of error + } + } + + delete(name) { + try { + if (typeof this.window.localStorage !== 'undefined') { + this.window.localStorage.removeItem(name); + } + } catch (error) { + // continue regardless of error + } + } + + _element(id) { + return this.document.getElementById(id); + } + + _message(message, action) { + return new Promise((resolve) => { + this._element('message-text').innerText = message; + const button = this._element('message-button'); + if (action) { + button.style.removeProperty('display'); + button.innerText = action; + button.onclick = () => { + button.onclick = null; + this._document.body.classList.remove('message'); + resolve(0); + }; + button.focus(); + } else { + button.style.display = 'none'; + button.onclick = null; + } + this._document.body.classList.add('message'); + }); + } +}; + +host.BrowserHost.BrowserFileContext = class { + + constructor(host, file, blobs) { + this._host = host; + this._file = file; + this._blobs = {}; + for (const blob of blobs) { + this._blobs[blob.name] = blob; + } + } + + get identifier() { + return this._file.name; + } + + get stream() { + return this._stream; + } + + async request(file, encoding, basename) { + if (basename !== undefined) { + return this._host.request(file, encoding, basename); + } + const blob = this._blobs[file]; + if (!blob) { + throw new Error(`File not found '${file}'.`); + } + return new Promise((resolve, reject) => { + const reader = new FileReader(); + const size = 0x10000000; + let position = 0; + const chunks = []; + reader.onload = (e) => { + if (encoding) { + resolve(e.target.result); + } else { + const buffer = new Uint8Array(e.target.result); + if (position === 0 && buffer.length === blob.size) { + const stream = new base.BinaryStream(buffer); + resolve(stream); + } else { + chunks.push(buffer); + position += buffer.length; + if (position < blob.size) { + const slice = blob.slice(position, Math.min(position + size, blob.size)); + reader.readAsArrayBuffer(slice); + } else { + const stream = new host.BrowserHost.FileStream(chunks, size, 0, position); + resolve(stream); + } + } + } + }; + reader.onerror = (event) => { + event = event || this._host.window.event; + let message = ''; + const error = event.target.error; + switch (error.code) { + case error.NOT_FOUND_ERR: + message = `File not found '${file}'.`; + break; + case error.NOT_READABLE_ERR: + message = `File not readable '${file}'.`; + break; + case error.SECURITY_ERR: + message = `File access denied '${file}'.`; + break; + default: + message = error.message ? 
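+ // Prefer the browser-provided message; otherwise synthesize one from the FileReader error code.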
error.message : `File read '${error.code}' error '${file}'.`; + break; + } + reject(new Error(message)); + }; + if (encoding === 'utf-8') { + reader.readAsText(blob, encoding); + } else { + const slice = blob.slice(position, Math.min(position + size, blob.size)); + reader.readAsArrayBuffer(slice); + } + }); + } + + async require(id) { + return await this._host.require(id); + } + + exception(error, fatal) { + this._host.exception(error, fatal); + } + + async open() { + this._stream = await this.request(this._file.name, null); + } +}; + +host.BrowserHost.FileStream = class { + + constructor(chunks, size, start, length) { + this._chunks = chunks; + this._size = size; + this._start = start; + this._length = length; + this._position = 0; + } + + get position() { + return this._position; + } + + get length() { + return this._length; + } + + stream(length) { + const file = new host.BrowserHost.FileStream(this._chunks, this._size, this._start + this._position, length); + this.skip(length); + return file; + } + + seek(position) { + this._position = position >= 0 ? position : this._length + position; + } + + skip(offset) { + this._position += offset; + if (this._position > this._length) { + throw new Error(`Expected ${this._position - this._length} more bytes. The file might be corrupted. Unexpected end of file.`); + } + } + + peek(length) { + length = length !== undefined ? length : this._length - this._position; + if (length < 0x10000000) { + const position = this._fill(length); + this._position -= length; + return this._buffer.subarray(position, position + length); + } + const position = this._start + this._position; + this.skip(length); + this.seek(position); + const buffer = new Uint8Array(length); + this._read(buffer, position); + return buffer; + } + + read(length) { + length = length !== undefined ? length : this._length - this._position; + if (length < 0x10000000) { + const position = this._fill(length); + return this._buffer.subarray(position, position + length); + } + const position = this._start + this._position; + this.skip(length); + const buffer = new Uint8Array(length); + this._read(buffer, position); + return buffer; + } + + byte() { + const position = this._fill(1); + return this._buffer[position]; + } + + _fill(length) { + if (this._position + length > this._length) { + throw new Error(`Expected ${this._position + length - this._length} more bytes. The file might be corrupted. 
Unexpected end of file.`); + } + if (!this._buffer || this._position < this._offset || this._position + length > this._offset + this._buffer.length) { + this._offset = this._start + this._position; + this._buffer = new Uint8Array(Math.min(0x10000000, this._start + this._length - this._offset)); + this._read(this._buffer, this._offset); + } + const position = this._start + this._position - this._offset; + this._position += length; + return position; + } + + _read(buffer, offset) { + let index = Math.floor(offset / this._size); + offset = offset - (index * this._size); + const chunk = this._chunks[index++]; + let destination = Math.min(chunk.length - offset, buffer.length); + buffer.set(chunk.subarray(offset, offset + destination), 0); + while (destination < buffer.length) { + const chunk = this._chunks[index++]; + const size = Math.min(this._size, buffer.length - destination); + buffer.set(chunk.subarray(0, size), destination); + destination += size; + } + } +}; + +host.BrowserHost.Context = class { + + constructor(host, url, identifier, stream) { + this._host = host; + this._stream = stream; + if (identifier) { + this._identifier = identifier; + this._base = url; + if (this._base.endsWith('/')) { + this._base = this._base.substring(0, this._base.length - 1); + } + } else { + const parts = url.split('?')[0].split('/'); + this._identifier = parts.pop(); + this._base = parts.join('/'); + } + } + + get identifier() { + return this._identifier; + } + + get stream() { + return this._stream; + } + + request(file, encoding, base) { + return this._host.request(file, encoding, base === undefined ? this._base : base); + } + + require(id) { + return this._host.require(id); + } + + exception(error, fatal) { + this._host.exception(error, fatal); + } +}; + +if (!('scrollBehavior' in window.document.documentElement.style)) { + const __scrollTo__ = Element.prototype.scrollTo; + Element.prototype.scrollTo = function(options) { + if (options !== undefined) { + if (options === null || typeof options !== 'object' || options.behavior === undefined || arguments[0].behavior === 'auto' || options.behavior === 'instant') { + if (__scrollTo__) { + __scrollTo__.apply(this, arguments); + } + } else { + const now = () => window.performance && window.performance.now ? window.performance.now() : Date.now(); + const ease = (k) => 0.5 * (1 - Math.cos(Math.PI * k)); + const step = (context) => { + const value = ease(Math.min((now() - context.startTime) / 468, 1)); + const x = context.startX + (context.x - context.startX) * value; + const y = context.startY + (context.y - context.startY) * value; + context.element.scrollLeft = x; + context.element.scrollTop = y; + if (x !== context.x || y !== context.y) { + window.requestAnimationFrame(step.bind(window, context)); + } + }; + const context = { + element: this, + x: typeof options.left === 'undefined' ? this.scrollLeft : ~~options.left, + y: typeof options.top === 'undefined' ? 
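+ // '~~' truncates the requested coordinate to an integer, matching native scrollTo behavior.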
this.scrollTop : ~~options.top, + startX: this.scrollLeft, + startY: this.scrollTop, + startTime: now() + }; + step(context); + } + } + }; +} + +if (typeof window !== 'undefined' && window.exports) { + window.exports.browser = host; +} + +export const BrowserHost = host.BrowserHost; diff --git a/caffe-metadata.json b/caffe-metadata.json new file mode 100644 index 00000000000..3d7c1927eec --- /dev/null +++ b/caffe-metadata.json @@ -0,0 +1,462 @@ +[ + { + "name": "Accuracy", + "inputs": [ + { "name": "predictions" }, + { "name": "labels" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "AnnotatedData", + "category": "Data", + "outputs": [ + { "name": "data" } + ] + }, + { + "name": "BatchNorm", + "category": "Normalization", + "attributes": [ + { "name": "use_global_stats", "type": "boolean", "visible": false }, + { "name": "eps", "type": "float32", "default": 0.00001 } + ], + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" }, + { "name": "mean" }, + { "name": "variance" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "BN", + "category": "Normalization", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ColorConv", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Concat", + "category": "Tensor", + "inputs": [ + { "name": "inputs", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ContrastiveLossParameter", + "attributes": [ + { "name": "margin", "default": 1 }, + { "name": "legacy_version", "default": false } + ] + }, + { + "name": "Convolution", + "category": "Layer", + "attributes": [ + { "name": "bias_term", "visible": false }, + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": false }, + { "name": "num_output", "visible": false }, + { "name": "pad", "default": [ 0 ] }, + { "name": "kernel_size", "default": [] }, + { "name": "stride", "default": [ 1 ] }, + { "name": "dilation", "default": [] }, + { "name": "group", "default": 1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "filter" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ConvolutionDepthwise", + "category": "Layer", + "attributes": [ + { "name": "pad", "default": [ 0 ] }, + { "name": "kernel_size", "default": [] }, + { "name": "stride", "default": [ 1 ] }, + { "name": "bias_term", "visible": false }, + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": false }, + { "name": "num_output", "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "filter" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Crop", + "category": "Data", + "inputs": [ + { "name": "data" }, + { "name": "size" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Data", + "category": "Data", + "outputs": [ + { "name": "data" }, + { "name": "label" } + ] + }, + { + "name": "Deconvolution", + "category": "Layer", + "attributes": [ + { "name": "bias_term", "visible": false }, + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": false }, + { "name": "num_output", "visible": false }, + { "name": "pad", "default": [] }, + { "name": "kernel_size", "default": [] }, + { "name": "stride", "default": [] }, + { "name": "dilation", "default": [] } + ], + "inputs": [ + { "name": "input" }, + { "name": "filter" }, + { "name": "bias" } + 
], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "DepthwiseConvolution", + "category": "Layer", + "attributes": [ + { "name": "bias_term", "visible": false }, + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": false }, + { "name": "num_output", "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "filter" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Dropout", + "category": "Dropout", + "attributes": [ + { "name": "dropout_ratio", "default": 0.5 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "DummyData", + "category": "Data", + "outputs": [ + { "name": "data" } + ] + }, + { + "name": "Eltwise", + "attributes": [ + { "name": "operation", "type": "EltwiseParameter.EltwiseOp", "default": 1 }, + { "name": "coeff", "type": "float32[]", "default": [] }, + { "name": "stable_prod_grad", "type": "boolean", "default": true } + ], + "inputs": [ + { "name": "inputs", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "EuclideanLoss", + "inputs": [ + { "name": "predictions" }, + { "name": "targets" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Flatten", + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "HDF5Data", + "category": "Data", + "outputs": [ + { "name": "data" } + ] + }, + { + "name": "ImageData", + "category": "Data", + "outputs": [ + { "name": "data" }, + { "name": "label" } + ] + }, + { + "name": "InnerProduct", + "category": "Layer", + "attributes": [ + { "name": "bias_term", "visible": false }, + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": false }, + { "name": "num_output", "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LRN", + "category": "Normalization", + "attributes": [ + { "name": "local_size", "type": "uint32", "default": 5 }, + { "name": "alpha", "type": "float32", "default": 0.0001 }, + { "name": "beta", "type": "float32", "default": 0.75 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LSTM", + "category": "Layer", + "attributes": [ + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": false }, + { "name": "num_output", "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "h_0" }, + { "name": "c_0" } + ], + "outputs": [ + { "name": "output" }, + { "name": "h_T" }, + { "name": "c_T" } + ] + }, + { + "name": "Parameter", + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Permute", + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Pooling", + "category": "Pool", + "attributes": [ + { "name": "pool", "type": "PoolingParameter.PoolMethod", "default": 0 }, + { "name": "engine", "type": "PoolingParameter.Engine", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "PReLU", + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "slope" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Python" + }, + { + "name": "ReLU", + "category": "Activation", + "inputs": [ + { "name": "input" } + 
], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ReLU6", + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "inputs": [ + { "name": "data" } + ], + "outputs": [ + { "name": "reshaped" } + ] + }, + { + "name": "Scale", + "category": "Layer", + "attributes": [ + { "name": "filler", "visible": false }, + { "name": "bias_term", "visible": false }, + { "name": "bias_filler", "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "scale" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Sigmoid", + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Slice", + "category": "Tensor", + "attributes": [ + { "name": "axis", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "outputs", "option": "variadic" } + ] + }, + { + "name": "Softmax", + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SoftmaxLoss", + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "labels" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SoftmaxWithLoss", + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "labels" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Split", + "category": "Tensor", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "outputs", "option": "variadic" } + ] + }, + { + "name": "WindowData", + "category": "Data", + "outputs": [ + { "name": "data" }, + { "name": "label" } + ] + } +] \ No newline at end of file diff --git a/caffe-proto.js b/caffe-proto.js new file mode 100644 index 00000000000..095f504f714 --- /dev/null +++ b/caffe-proto.js @@ -0,0 +1,5357 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('caffe'); + +$root.caffe = {}; + +$root.caffe.BlobShape = class BlobShape { + + constructor() { + this.dim = []; + } + + static decode(reader, length) { + const message = new $root.caffe.BlobShape(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dim = reader.array(message.dim, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.BlobShape(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dim": + reader.array(message.dim, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.BlobProto = class BlobProto { + + constructor() { + this.data = []; + this.diff = []; + this.double_data = []; + this.double_diff = []; + } + + static decode(reader, length) { + const message = new $root.caffe.BlobProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 7: + message.shape = $root.caffe.BlobShape.decode(reader, reader.uint32()); + break; + case 5: + message.data = reader.floats(message.data, tag); + break; + case 6: + message.diff = reader.floats(message.diff, tag); + break; + case 8: + message.double_data = reader.doubles(message.double_data, tag); + break; + case 9: + message.double_diff = reader.doubles(message.double_diff, tag); + break; + case 1: + message.num = reader.int32(); + break; + case 2: + message.channels = reader.int32(); + break; + case 3: + message.height = reader.int32(); + break; + case 4: + message.width = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.BlobProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.caffe.BlobShape.decodeText(reader); + break; + case "data": + reader.array(message.data, () => reader.float()); + break; + case "diff": + reader.array(message.diff, () => reader.float()); + break; + case "double_data": + reader.array(message.double_data, () => reader.double()); + break; + case "double_diff": + reader.array(message.double_diff, () => reader.double()); + break; + case "num": + message.num = reader.int32(); + break; + case "channels": + message.channels = reader.int32(); + break; + case "height": + message.height = reader.int32(); + break; + case "width": + message.width = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.BlobProto.prototype.shape = null; +$root.caffe.BlobProto.prototype.num = 0; +$root.caffe.BlobProto.prototype.channels = 0; +$root.caffe.BlobProto.prototype.height = 0; +$root.caffe.BlobProto.prototype.width = 0; + +$root.caffe.BlobProtoVector = class BlobProtoVector { + + constructor() { + this.blobs = []; + } + + static decode(reader, length) { + const message = new $root.caffe.BlobProtoVector(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.BlobProtoVector(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "blobs": + message.blobs.push($root.caffe.BlobProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.Datum = class Datum { + + constructor() { + this.float_data = []; + } + + static decode(reader, length) { + const message = new $root.caffe.Datum(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.channels = reader.int32(); + break; + case 2: + message.height = reader.int32(); + break; + case 3: + message.width = reader.int32(); + break; + case 4: + message.data = reader.bytes(); + break; + case 5: + message.label = reader.int32(); + break; + case 6: + message.float_data = reader.floats(message.float_data, tag); + break; + case 7: + message.encoded = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.Datum(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "channels": + message.channels = reader.int32(); + break; + case "height": + message.height = reader.int32(); + break; + case "width": + message.width = reader.int32(); + break; + case "data": + message.data = reader.bytes(); + break; + case "label": + message.label = reader.int32(); + break; + case "float_data": + reader.array(message.float_data, () => reader.float()); + break; + case "encoded": + message.encoded = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.Datum.prototype.channels = 0; +$root.caffe.Datum.prototype.height = 0; +$root.caffe.Datum.prototype.width = 0; +$root.caffe.Datum.prototype.data = new Uint8Array([]); +$root.caffe.Datum.prototype.label = 0; +$root.caffe.Datum.prototype.encoded = false; + +$root.caffe.FillerParameter = class FillerParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.FillerParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.string(); + break; + case 2: + message.value = reader.float(); + break; + case 3: + message.min = reader.float(); + break; + case 4: + message.max = reader.float(); + break; + case 5: + message.mean = reader.float(); + break; + case 6: + message.std = reader.float(); + break; + case 7: + message.sparse = reader.int32(); + break; + case 8: + message.variance_norm = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.FillerParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.string(); + break; + case "value": + message.value = reader.float(); + break; + case "min": + message.min = reader.float(); + break; + case "max": + message.max = reader.float(); + break; + case "mean": + message.mean = reader.float(); + break; + case "std": + message.std = reader.float(); + break; + case "sparse": + message.sparse = reader.int32(); + break; + case "variance_norm": + message.variance_norm = reader.enum($root.caffe.FillerParameter.VarianceNorm); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.FillerParameter.prototype.type = "constant"; +$root.caffe.FillerParameter.prototype.value = 0; +$root.caffe.FillerParameter.prototype.min = 0; +$root.caffe.FillerParameter.prototype.max = 1; +$root.caffe.FillerParameter.prototype.mean = 0; +$root.caffe.FillerParameter.prototype.std = 1; +$root.caffe.FillerParameter.prototype.sparse = -1; +$root.caffe.FillerParameter.prototype.variance_norm = 0; + +$root.caffe.FillerParameter.VarianceNorm = { + "FAN_IN": 0, + "FAN_OUT": 1, + "AVERAGE": 2 +}; + +$root.caffe.NetParameter = class NetParameter { + + constructor() { + this.input = []; + this.input_shape = []; + this.input_dim = []; + this.layer = []; + this.layers = []; + } + + static decode(reader, length) { + const message = new $root.caffe.NetParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 3: + message.input.push(reader.string()); + break; + case 8: + message.input_shape.push($root.caffe.BlobShape.decode(reader, reader.uint32())); + break; + case 4: + message.input_dim = reader.array(message.input_dim, () => reader.int32(), tag); + break; + case 5: + message.force_backward = reader.bool(); + break; + case 6: + message.state = $root.caffe.NetState.decode(reader, reader.uint32()); + break; + case 7: + message.debug_info = reader.bool(); + break; + case 100: + message.layer.push($root.caffe.LayerParameter.decode(reader, reader.uint32())); + break; + case 2: + message.layers.push($root.caffe.V1LayerParameter.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.NetParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "input": + reader.array(message.input, () => reader.string()); + break; + case "input_shape": + message.input_shape.push($root.caffe.BlobShape.decodeText(reader)); + break; + case "input_dim": + reader.array(message.input_dim, () => reader.int32()); + break; + case "force_backward": + message.force_backward = reader.bool(); + break; + case "state": + message.state = $root.caffe.NetState.decodeText(reader); + break; + case "debug_info": + message.debug_info = reader.bool(); + break; + case "layer": + message.layer.push($root.caffe.LayerParameter.decodeText(reader)); + break; + case "layers": + message.layers.push($root.caffe.V1LayerParameter.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.NetParameter.prototype.name = ""; +$root.caffe.NetParameter.prototype.force_backward = false; +$root.caffe.NetParameter.prototype.state = null; +$root.caffe.NetParameter.prototype.debug_info = false; + +$root.caffe.SolverParameter = class SolverParameter { + + constructor() { + this.test_net = []; + this.test_net_param = []; + this.test_state = []; + this.test_iter = []; + this.stepvalue = []; + this.weights = []; + } + + static decode(reader, length) { + const message = new $root.caffe.SolverParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 24: + message.net = reader.string(); + break; + case 25: + message.net_param = $root.caffe.NetParameter.decode(reader, reader.uint32()); + break; + case 1: + message.train_net = reader.string(); + break; + case 2: + message.test_net.push(reader.string()); + break; + case 21: + message.train_net_param = $root.caffe.NetParameter.decode(reader, reader.uint32()); + break; + case 22: + message.test_net_param.push($root.caffe.NetParameter.decode(reader, reader.uint32())); + break; + case 26: + message.train_state = $root.caffe.NetState.decode(reader, reader.uint32()); + break; + case 27: + message.test_state.push($root.caffe.NetState.decode(reader, reader.uint32())); + break; + case 3: + message.test_iter = reader.array(message.test_iter, () => reader.int32(), tag); + break; + case 4: + message.test_interval = reader.int32(); + break; + case 19: + message.test_compute_loss = reader.bool(); + break; + case 32: + message.test_initialization = reader.bool(); + break; + case 5: + message.base_lr = reader.float(); + break; + case 6: + message.display = reader.int32(); + break; + case 33: + message.average_loss = reader.int32(); + break; + case 7: + message.max_iter = reader.int32(); + break; + case 36: + message.iter_size = reader.int32(); + break; + case 8: + message.lr_policy = reader.string(); + break; + case 9: + message.gamma = reader.float(); + break; + case 10: + message.power = reader.float(); + break; + case 11: + message.momentum = reader.float(); + break; + case 12: + message.weight_decay = reader.float(); + break; + case 29: + message.regularization_type = reader.string(); + break; + case 13: + message.stepsize = reader.int32(); + break; + case 34: + message.stepvalue = reader.array(message.stepvalue, () => reader.int32(), tag); + break; + case 35: + message.clip_gradients = reader.float(); + break; + case 14: + message.snapshot = reader.int32(); + break; + case 15: + message.snapshot_prefix = reader.string(); + break; + case 16: + message.snapshot_diff = reader.bool(); + break; + case 37: + message.snapshot_format = reader.int32(); + break; + case 17: + message.solver_mode = reader.int32(); + break; + case 18: + message.device_id = reader.int32(); + break; + case 20: + message.random_seed = reader.int64(); + break; + case 40: + message.type = reader.string(); + break; + case 31: + message.delta = reader.float(); + break; + case 39: + message.momentum2 = reader.float(); + break; + case 38: + message.rms_decay = reader.float(); + break; + case 23: + message.debug_info = reader.bool(); + break; + case 28: + message.snapshot_after_train = reader.bool(); + break; + case 30: + message.solver_type = reader.int32(); + break; + case 41: + message.layer_wise_reduce = reader.bool(); + break; + case 42: + message.weights.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.SolverParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "net": + message.net = reader.string(); + break; + case "net_param": + message.net_param = $root.caffe.NetParameter.decodeText(reader); + break; + case "train_net": + message.train_net = reader.string(); + break; + case "test_net": + reader.array(message.test_net, () => reader.string()); + break; + case "train_net_param": + message.train_net_param = 
$root.caffe.NetParameter.decodeText(reader); + break; + case "test_net_param": + message.test_net_param.push($root.caffe.NetParameter.decodeText(reader)); + break; + case "train_state": + message.train_state = $root.caffe.NetState.decodeText(reader); + break; + case "test_state": + message.test_state.push($root.caffe.NetState.decodeText(reader)); + break; + case "test_iter": + reader.array(message.test_iter, () => reader.int32()); + break; + case "test_interval": + message.test_interval = reader.int32(); + break; + case "test_compute_loss": + message.test_compute_loss = reader.bool(); + break; + case "test_initialization": + message.test_initialization = reader.bool(); + break; + case "base_lr": + message.base_lr = reader.float(); + break; + case "display": + message.display = reader.int32(); + break; + case "average_loss": + message.average_loss = reader.int32(); + break; + case "max_iter": + message.max_iter = reader.int32(); + break; + case "iter_size": + message.iter_size = reader.int32(); + break; + case "lr_policy": + message.lr_policy = reader.string(); + break; + case "gamma": + message.gamma = reader.float(); + break; + case "power": + message.power = reader.float(); + break; + case "momentum": + message.momentum = reader.float(); + break; + case "weight_decay": + message.weight_decay = reader.float(); + break; + case "regularization_type": + message.regularization_type = reader.string(); + break; + case "stepsize": + message.stepsize = reader.int32(); + break; + case "stepvalue": + reader.array(message.stepvalue, () => reader.int32()); + break; + case "clip_gradients": + message.clip_gradients = reader.float(); + break; + case "snapshot": + message.snapshot = reader.int32(); + break; + case "snapshot_prefix": + message.snapshot_prefix = reader.string(); + break; + case "snapshot_diff": + message.snapshot_diff = reader.bool(); + break; + case "snapshot_format": + message.snapshot_format = reader.enum($root.caffe.SolverParameter.SnapshotFormat); + break; + case "solver_mode": + message.solver_mode = reader.enum($root.caffe.SolverParameter.SolverMode); + break; + case "device_id": + message.device_id = reader.int32(); + break; + case "random_seed": + message.random_seed = reader.int64(); + break; + case "type": + message.type = reader.string(); + break; + case "delta": + message.delta = reader.float(); + break; + case "momentum2": + message.momentum2 = reader.float(); + break; + case "rms_decay": + message.rms_decay = reader.float(); + break; + case "debug_info": + message.debug_info = reader.bool(); + break; + case "snapshot_after_train": + message.snapshot_after_train = reader.bool(); + break; + case "solver_type": + message.solver_type = reader.enum($root.caffe.SolverParameter.SolverType); + break; + case "layer_wise_reduce": + message.layer_wise_reduce = reader.bool(); + break; + case "weights": + reader.array(message.weights, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.SolverParameter.prototype.net = ""; +$root.caffe.SolverParameter.prototype.net_param = null; +$root.caffe.SolverParameter.prototype.train_net = ""; +$root.caffe.SolverParameter.prototype.train_net_param = null; +$root.caffe.SolverParameter.prototype.train_state = null; +$root.caffe.SolverParameter.prototype.test_interval = 0; +$root.caffe.SolverParameter.prototype.test_compute_loss = false; +$root.caffe.SolverParameter.prototype.test_initialization = true; +$root.caffe.SolverParameter.prototype.base_lr = 0; 
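+// The prototype assignments in this block are the proto2 field defaults from
+// caffe.proto (e.g. `test_initialization` defaults to true and
+// `regularization_type` to "L2"). decode()/decodeText() only assign fields
+// that are present in the input, so an absent field reads through to the
+// shared prototype value. A minimal sketch of the effect:
+//
+//   const solver = new $root.caffe.SolverParameter();
+//   solver.momentum2; // 0.999 until a decoder overwrites it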
+$root.caffe.SolverParameter.prototype.display = 0; +$root.caffe.SolverParameter.prototype.average_loss = 1; +$root.caffe.SolverParameter.prototype.max_iter = 0; +$root.caffe.SolverParameter.prototype.iter_size = 1; +$root.caffe.SolverParameter.prototype.lr_policy = ""; +$root.caffe.SolverParameter.prototype.gamma = 0; +$root.caffe.SolverParameter.prototype.power = 0; +$root.caffe.SolverParameter.prototype.momentum = 0; +$root.caffe.SolverParameter.prototype.weight_decay = 0; +$root.caffe.SolverParameter.prototype.regularization_type = "L2"; +$root.caffe.SolverParameter.prototype.stepsize = 0; +$root.caffe.SolverParameter.prototype.clip_gradients = -1; +$root.caffe.SolverParameter.prototype.snapshot = 0; +$root.caffe.SolverParameter.prototype.snapshot_prefix = ""; +$root.caffe.SolverParameter.prototype.snapshot_diff = false; +$root.caffe.SolverParameter.prototype.snapshot_format = 1; +$root.caffe.SolverParameter.prototype.solver_mode = 1; +$root.caffe.SolverParameter.prototype.device_id = 0; +$root.caffe.SolverParameter.prototype.random_seed = protobuf.Int64.create(-1); +$root.caffe.SolverParameter.prototype.type = "SGD"; +$root.caffe.SolverParameter.prototype.delta = 1e-8; +$root.caffe.SolverParameter.prototype.momentum2 = 0.999; +$root.caffe.SolverParameter.prototype.rms_decay = 0.99; +$root.caffe.SolverParameter.prototype.debug_info = false; +$root.caffe.SolverParameter.prototype.snapshot_after_train = true; +$root.caffe.SolverParameter.prototype.solver_type = 0; +$root.caffe.SolverParameter.prototype.layer_wise_reduce = true; + +$root.caffe.SolverParameter.SnapshotFormat = { + "HDF5": 0, + "BINARYPROTO": 1 +}; + +$root.caffe.SolverParameter.SolverMode = { + "CPU": 0, + "GPU": 1 +}; + +$root.caffe.SolverParameter.SolverType = { + "SGD": 0, + "NESTEROV": 1, + "ADAGRAD": 2, + "RMSPROP": 3, + "ADADELTA": 4, + "ADAM": 5 +}; + +$root.caffe.SolverState = class SolverState { + + constructor() { + this.history = []; + } + + static decode(reader, length) { + const message = new $root.caffe.SolverState(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.iter = reader.int32(); + break; + case 2: + message.learned_net = reader.string(); + break; + case 3: + message.history.push($root.caffe.BlobProto.decode(reader, reader.uint32())); + break; + case 4: + message.current_step = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.SolverState(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "iter": + message.iter = reader.int32(); + break; + case "learned_net": + message.learned_net = reader.string(); + break; + case "history": + message.history.push($root.caffe.BlobProto.decodeText(reader)); + break; + case "current_step": + message.current_step = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.SolverState.prototype.iter = 0; +$root.caffe.SolverState.prototype.learned_net = ""; +$root.caffe.SolverState.prototype.current_step = 0; + +$root.caffe.Phase = { + "TRAIN": 0, + "TEST": 1 +}; + +$root.caffe.NetState = class NetState { + + constructor() { + this.stage = []; + } + + static decode(reader, length) { + const message = new $root.caffe.NetState(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.phase = reader.int32(); + break; + case 2: + message.level = reader.int32(); + break; + case 3: + message.stage.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.NetState(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "phase": + message.phase = reader.enum($root.caffe.Phase); + break; + case "level": + message.level = reader.int32(); + break; + case "stage": + reader.array(message.stage, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.NetState.prototype.phase = 1; +$root.caffe.NetState.prototype.level = 0; + +$root.caffe.NetStateRule = class NetStateRule { + + constructor() { + this.stage = []; + this.not_stage = []; + } + + static decode(reader, length) { + const message = new $root.caffe.NetStateRule(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.phase = reader.int32(); + break; + case 2: + message.min_level = reader.int32(); + break; + case 3: + message.max_level = reader.int32(); + break; + case 4: + message.stage.push(reader.string()); + break; + case 5: + message.not_stage.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.NetStateRule(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "phase": + message.phase = reader.enum($root.caffe.Phase); + break; + case "min_level": + message.min_level = reader.int32(); + break; + case "max_level": + message.max_level = reader.int32(); + break; + case "stage": + reader.array(message.stage, () => reader.string()); + break; + case "not_stage": + reader.array(message.not_stage, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.NetStateRule.prototype.phase = 0; +$root.caffe.NetStateRule.prototype.min_level = 0; +$root.caffe.NetStateRule.prototype.max_level = 0; + +$root.caffe.ParamSpec = class ParamSpec { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ParamSpec(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.share_mode = reader.int32(); + break; + case 3: + message.lr_mult = reader.float(); + break; + case 4: + message.decay_mult = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ParamSpec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "share_mode": + message.share_mode = reader.enum($root.caffe.ParamSpec.DimCheckMode); + break; + case "lr_mult": + message.lr_mult = reader.float(); + break; + case "decay_mult": + message.decay_mult = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ParamSpec.prototype.name = ""; +$root.caffe.ParamSpec.prototype.share_mode = 0; +$root.caffe.ParamSpec.prototype.lr_mult = 1; +$root.caffe.ParamSpec.prototype.decay_mult = 1; + +$root.caffe.ParamSpec.DimCheckMode = { + "STRICT": 0, + "PERMISSIVE": 1 +}; + +$root.caffe.LayerParameter = class LayerParameter { + + constructor() { + this.bottom = []; + this.top = []; + this.loss_weight = []; + this.param = []; + this.blobs = []; + this.propagate_down = []; + this.include = []; + this.exclude = []; + } + + static decode(reader, length) { + const message = new $root.caffe.LayerParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.bottom.push(reader.string()); + break; + case 4: + message.top.push(reader.string()); + break; + case 10: + message.phase = reader.int32(); + break; + case 5: + message.loss_weight = reader.floats(message.loss_weight, tag); + break; + case 6: + message.param.push($root.caffe.ParamSpec.decode(reader, reader.uint32())); + break; + case 7: + message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32())); + break; + case 11: + message.propagate_down = reader.array(message.propagate_down, () => reader.bool(), tag); + break; + case 8: + message.include.push($root.caffe.NetStateRule.decode(reader, reader.uint32())); + break; + case 9: + message.exclude.push($root.caffe.NetStateRule.decode(reader, reader.uint32())); + break; + case 100: + message.transform_param = $root.caffe.TransformationParameter.decode(reader, reader.uint32()); + break; + case 101: + message.loss_param = $root.caffe.LossParameter.decode(reader, reader.uint32()); + break; + case 102: + message.accuracy_param = $root.caffe.AccuracyParameter.decode(reader, reader.uint32()); + break; + case 103: + message.argmax_param = $root.caffe.ArgMaxParameter.decode(reader, reader.uint32()); + break; + case 139: + message.batch_norm_param = $root.caffe.BatchNormParameter.decode(reader, reader.uint32()); + break; + case 141: + message.bias_param = $root.caffe.BiasParameter.decode(reader, reader.uint32()); + break; + case 148: + message.clip_param = $root.caffe.ClipParameter.decode(reader, reader.uint32()); + break; + case 104: + message.concat_param = $root.caffe.ConcatParameter.decode(reader, reader.uint32()); + break; + case 105: + message.contrastive_loss_param = 
$root.caffe.ContrastiveLossParameter.decode(reader, reader.uint32()); + break; + case 106: + message.convolution_param = $root.caffe.ConvolutionParameter.decode(reader, reader.uint32()); + break; + case 144: + message.crop_param = $root.caffe.CropParameter.decode(reader, reader.uint32()); + break; + case 107: + message.data_param = $root.caffe.DataParameter.decode(reader, reader.uint32()); + break; + case 108: + message.dropout_param = $root.caffe.DropoutParameter.decode(reader, reader.uint32()); + break; + case 109: + message.dummy_data_param = $root.caffe.DummyDataParameter.decode(reader, reader.uint32()); + break; + case 110: + message.eltwise_param = $root.caffe.EltwiseParameter.decode(reader, reader.uint32()); + break; + case 140: + message.elu_param = $root.caffe.ELUParameter.decode(reader, reader.uint32()); + break; + case 137: + message.embed_param = $root.caffe.EmbedParameter.decode(reader, reader.uint32()); + break; + case 111: + message.exp_param = $root.caffe.ExpParameter.decode(reader, reader.uint32()); + break; + case 135: + message.flatten_param = $root.caffe.FlattenParameter.decode(reader, reader.uint32()); + break; + case 112: + message.hdf5_data_param = $root.caffe.HDF5DataParameter.decode(reader, reader.uint32()); + break; + case 113: + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decode(reader, reader.uint32()); + break; + case 114: + message.hinge_loss_param = $root.caffe.HingeLossParameter.decode(reader, reader.uint32()); + break; + case 115: + message.image_data_param = $root.caffe.ImageDataParameter.decode(reader, reader.uint32()); + break; + case 116: + message.infogain_loss_param = $root.caffe.InfogainLossParameter.decode(reader, reader.uint32()); + break; + case 117: + message.inner_product_param = $root.caffe.InnerProductParameter.decode(reader, reader.uint32()); + break; + case 143: + message.input_param = $root.caffe.InputParameter.decode(reader, reader.uint32()); + break; + case 134: + message.log_param = $root.caffe.LogParameter.decode(reader, reader.uint32()); + break; + case 118: + message.lrn_param = $root.caffe.LRNParameter.decode(reader, reader.uint32()); + break; + case 119: + message.memory_data_param = $root.caffe.MemoryDataParameter.decode(reader, reader.uint32()); + break; + case 120: + message.mvn_param = $root.caffe.MVNParameter.decode(reader, reader.uint32()); + break; + case 145: + message.parameter_param = $root.caffe.ParameterParameter.decode(reader, reader.uint32()); + break; + case 121: + message.pooling_param = $root.caffe.PoolingParameter.decode(reader, reader.uint32()); + break; + case 122: + message.power_param = $root.caffe.PowerParameter.decode(reader, reader.uint32()); + break; + case 131: + message.prelu_param = $root.caffe.PReLUParameter.decode(reader, reader.uint32()); + break; + case 130: + message.python_param = $root.caffe.PythonParameter.decode(reader, reader.uint32()); + break; + case 146: + message.recurrent_param = $root.caffe.RecurrentParameter.decode(reader, reader.uint32()); + break; + case 136: + message.reduction_param = $root.caffe.ReductionParameter.decode(reader, reader.uint32()); + break; + case 123: + message.relu_param = $root.caffe.ReLUParameter.decode(reader, reader.uint32()); + break; + case 133: + message.reshape_param = $root.caffe.ReshapeParameter.decode(reader, reader.uint32()); + break; + case 142: + message.scale_param = $root.caffe.ScaleParameter.decode(reader, reader.uint32()); + break; + case 124: + message.sigmoid_param = $root.caffe.SigmoidParameter.decode(reader, reader.uint32()); + 
break; + case 125: + message.softmax_param = $root.caffe.SoftmaxParameter.decode(reader, reader.uint32()); + break; + case 132: + message.spp_param = $root.caffe.SPPParameter.decode(reader, reader.uint32()); + break; + case 126: + message.slice_param = $root.caffe.SliceParameter.decode(reader, reader.uint32()); + break; + case 147: + message.swish_param = $root.caffe.SwishParameter.decode(reader, reader.uint32()); + break; + case 127: + message.tanh_param = $root.caffe.TanHParameter.decode(reader, reader.uint32()); + break; + case 128: + message.threshold_param = $root.caffe.ThresholdParameter.decode(reader, reader.uint32()); + break; + case 138: + message.tile_param = $root.caffe.TileParameter.decode(reader, reader.uint32()); + break; + case 129: + message.window_data_param = $root.caffe.WindowDataParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.LayerParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "bottom": + reader.array(message.bottom, () => reader.string()); + break; + case "top": + reader.array(message.top, () => reader.string()); + break; + case "phase": + message.phase = reader.enum($root.caffe.Phase); + break; + case "loss_weight": + reader.array(message.loss_weight, () => reader.float()); + break; + case "param": + message.param.push($root.caffe.ParamSpec.decodeText(reader)); + break; + case "blobs": + message.blobs.push($root.caffe.BlobProto.decodeText(reader)); + break; + case "propagate_down": + reader.array(message.propagate_down, () => reader.bool()); + break; + case "include": + message.include.push($root.caffe.NetStateRule.decodeText(reader)); + break; + case "exclude": + message.exclude.push($root.caffe.NetStateRule.decodeText(reader)); + break; + case "transform_param": + message.transform_param = $root.caffe.TransformationParameter.decodeText(reader); + break; + case "loss_param": + message.loss_param = $root.caffe.LossParameter.decodeText(reader); + break; + case "accuracy_param": + message.accuracy_param = $root.caffe.AccuracyParameter.decodeText(reader); + break; + case "argmax_param": + message.argmax_param = $root.caffe.ArgMaxParameter.decodeText(reader); + break; + case "batch_norm_param": + message.batch_norm_param = $root.caffe.BatchNormParameter.decodeText(reader); + break; + case "bias_param": + message.bias_param = $root.caffe.BiasParameter.decodeText(reader); + break; + case "clip_param": + message.clip_param = $root.caffe.ClipParameter.decodeText(reader); + break; + case "concat_param": + message.concat_param = $root.caffe.ConcatParameter.decodeText(reader); + break; + case "contrastive_loss_param": + message.contrastive_loss_param = $root.caffe.ContrastiveLossParameter.decodeText(reader); + break; + case "convolution_param": + message.convolution_param = $root.caffe.ConvolutionParameter.decodeText(reader); + break; + case "crop_param": + message.crop_param = $root.caffe.CropParameter.decodeText(reader); + break; + case "data_param": + message.data_param = $root.caffe.DataParameter.decodeText(reader); + break; + case "dropout_param": + message.dropout_param = $root.caffe.DropoutParameter.decodeText(reader); + break; + case "dummy_data_param": + message.dummy_data_param = $root.caffe.DummyDataParameter.decodeText(reader); + break; + case 
"eltwise_param": + message.eltwise_param = $root.caffe.EltwiseParameter.decodeText(reader); + break; + case "elu_param": + message.elu_param = $root.caffe.ELUParameter.decodeText(reader); + break; + case "embed_param": + message.embed_param = $root.caffe.EmbedParameter.decodeText(reader); + break; + case "exp_param": + message.exp_param = $root.caffe.ExpParameter.decodeText(reader); + break; + case "flatten_param": + message.flatten_param = $root.caffe.FlattenParameter.decodeText(reader); + break; + case "hdf5_data_param": + message.hdf5_data_param = $root.caffe.HDF5DataParameter.decodeText(reader); + break; + case "hdf5_output_param": + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decodeText(reader); + break; + case "hinge_loss_param": + message.hinge_loss_param = $root.caffe.HingeLossParameter.decodeText(reader); + break; + case "image_data_param": + message.image_data_param = $root.caffe.ImageDataParameter.decodeText(reader); + break; + case "infogain_loss_param": + message.infogain_loss_param = $root.caffe.InfogainLossParameter.decodeText(reader); + break; + case "inner_product_param": + message.inner_product_param = $root.caffe.InnerProductParameter.decodeText(reader); + break; + case "input_param": + message.input_param = $root.caffe.InputParameter.decodeText(reader); + break; + case "log_param": + message.log_param = $root.caffe.LogParameter.decodeText(reader); + break; + case "lrn_param": + message.lrn_param = $root.caffe.LRNParameter.decodeText(reader); + break; + case "memory_data_param": + message.memory_data_param = $root.caffe.MemoryDataParameter.decodeText(reader); + break; + case "mvn_param": + message.mvn_param = $root.caffe.MVNParameter.decodeText(reader); + break; + case "parameter_param": + message.parameter_param = $root.caffe.ParameterParameter.decodeText(reader); + break; + case "pooling_param": + message.pooling_param = $root.caffe.PoolingParameter.decodeText(reader); + break; + case "power_param": + message.power_param = $root.caffe.PowerParameter.decodeText(reader); + break; + case "prelu_param": + message.prelu_param = $root.caffe.PReLUParameter.decodeText(reader); + break; + case "python_param": + message.python_param = $root.caffe.PythonParameter.decodeText(reader); + break; + case "recurrent_param": + message.recurrent_param = $root.caffe.RecurrentParameter.decodeText(reader); + break; + case "reduction_param": + message.reduction_param = $root.caffe.ReductionParameter.decodeText(reader); + break; + case "relu_param": + message.relu_param = $root.caffe.ReLUParameter.decodeText(reader); + break; + case "reshape_param": + message.reshape_param = $root.caffe.ReshapeParameter.decodeText(reader); + break; + case "scale_param": + message.scale_param = $root.caffe.ScaleParameter.decodeText(reader); + break; + case "sigmoid_param": + message.sigmoid_param = $root.caffe.SigmoidParameter.decodeText(reader); + break; + case "softmax_param": + message.softmax_param = $root.caffe.SoftmaxParameter.decodeText(reader); + break; + case "spp_param": + message.spp_param = $root.caffe.SPPParameter.decodeText(reader); + break; + case "slice_param": + message.slice_param = $root.caffe.SliceParameter.decodeText(reader); + break; + case "swish_param": + message.swish_param = $root.caffe.SwishParameter.decodeText(reader); + break; + case "tanh_param": + message.tanh_param = $root.caffe.TanHParameter.decodeText(reader); + break; + case "threshold_param": + message.threshold_param = $root.caffe.ThresholdParameter.decodeText(reader); + break; + case "tile_param": + 
message.tile_param = $root.caffe.TileParameter.decodeText(reader); + break; + case "window_data_param": + message.window_data_param = $root.caffe.WindowDataParameter.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.LayerParameter.prototype.name = ""; +$root.caffe.LayerParameter.prototype.type = ""; +$root.caffe.LayerParameter.prototype.phase = 0; +$root.caffe.LayerParameter.prototype.transform_param = null; +$root.caffe.LayerParameter.prototype.loss_param = null; +$root.caffe.LayerParameter.prototype.accuracy_param = null; +$root.caffe.LayerParameter.prototype.argmax_param = null; +$root.caffe.LayerParameter.prototype.batch_norm_param = null; +$root.caffe.LayerParameter.prototype.bias_param = null; +$root.caffe.LayerParameter.prototype.clip_param = null; +$root.caffe.LayerParameter.prototype.concat_param = null; +$root.caffe.LayerParameter.prototype.contrastive_loss_param = null; +$root.caffe.LayerParameter.prototype.convolution_param = null; +$root.caffe.LayerParameter.prototype.crop_param = null; +$root.caffe.LayerParameter.prototype.data_param = null; +$root.caffe.LayerParameter.prototype.dropout_param = null; +$root.caffe.LayerParameter.prototype.dummy_data_param = null; +$root.caffe.LayerParameter.prototype.eltwise_param = null; +$root.caffe.LayerParameter.prototype.elu_param = null; +$root.caffe.LayerParameter.prototype.embed_param = null; +$root.caffe.LayerParameter.prototype.exp_param = null; +$root.caffe.LayerParameter.prototype.flatten_param = null; +$root.caffe.LayerParameter.prototype.hdf5_data_param = null; +$root.caffe.LayerParameter.prototype.hdf5_output_param = null; +$root.caffe.LayerParameter.prototype.hinge_loss_param = null; +$root.caffe.LayerParameter.prototype.image_data_param = null; +$root.caffe.LayerParameter.prototype.infogain_loss_param = null; +$root.caffe.LayerParameter.prototype.inner_product_param = null; +$root.caffe.LayerParameter.prototype.input_param = null; +$root.caffe.LayerParameter.prototype.log_param = null; +$root.caffe.LayerParameter.prototype.lrn_param = null; +$root.caffe.LayerParameter.prototype.memory_data_param = null; +$root.caffe.LayerParameter.prototype.mvn_param = null; +$root.caffe.LayerParameter.prototype.parameter_param = null; +$root.caffe.LayerParameter.prototype.pooling_param = null; +$root.caffe.LayerParameter.prototype.power_param = null; +$root.caffe.LayerParameter.prototype.prelu_param = null; +$root.caffe.LayerParameter.prototype.python_param = null; +$root.caffe.LayerParameter.prototype.recurrent_param = null; +$root.caffe.LayerParameter.prototype.reduction_param = null; +$root.caffe.LayerParameter.prototype.relu_param = null; +$root.caffe.LayerParameter.prototype.reshape_param = null; +$root.caffe.LayerParameter.prototype.scale_param = null; +$root.caffe.LayerParameter.prototype.sigmoid_param = null; +$root.caffe.LayerParameter.prototype.softmax_param = null; +$root.caffe.LayerParameter.prototype.spp_param = null; +$root.caffe.LayerParameter.prototype.slice_param = null; +$root.caffe.LayerParameter.prototype.swish_param = null; +$root.caffe.LayerParameter.prototype.tanh_param = null; +$root.caffe.LayerParameter.prototype.threshold_param = null; +$root.caffe.LayerParameter.prototype.tile_param = null; +$root.caffe.LayerParameter.prototype.window_data_param = null; + +$root.caffe.TransformationParameter = class TransformationParameter { + + constructor() { + this.mean_value = []; + } + + static decode(reader, length) { + const message = new 
$root.caffe.TransformationParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.scale = reader.float(); + break; + case 2: + message.mirror = reader.bool(); + break; + case 3: + message.crop_size = reader.uint32(); + break; + case 4: + message.mean_file = reader.string(); + break; + case 5: + message.mean_value = reader.floats(message.mean_value, tag); + break; + case 6: + message.force_color = reader.bool(); + break; + case 7: + message.force_gray = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.TransformationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "scale": + message.scale = reader.float(); + break; + case "mirror": + message.mirror = reader.bool(); + break; + case "crop_size": + message.crop_size = reader.uint32(); + break; + case "mean_file": + message.mean_file = reader.string(); + break; + case "mean_value": + reader.array(message.mean_value, () => reader.float()); + break; + case "force_color": + message.force_color = reader.bool(); + break; + case "force_gray": + message.force_gray = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.TransformationParameter.prototype.scale = 1; +$root.caffe.TransformationParameter.prototype.mirror = false; +$root.caffe.TransformationParameter.prototype.crop_size = 0; +$root.caffe.TransformationParameter.prototype.mean_file = ""; +$root.caffe.TransformationParameter.prototype.force_color = false; +$root.caffe.TransformationParameter.prototype.force_gray = false; + +$root.caffe.LossParameter = class LossParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.LossParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.ignore_label = reader.int32(); + break; + case 3: + message.normalization = reader.int32(); + break; + case 2: + message.normalize = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.LossParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "ignore_label": + message.ignore_label = reader.int32(); + break; + case "normalization": + message.normalization = reader.enum($root.caffe.LossParameter.NormalizationMode); + break; + case "normalize": + message.normalize = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.LossParameter.prototype.ignore_label = 0; +$root.caffe.LossParameter.prototype.normalization = 1; +$root.caffe.LossParameter.prototype.normalize = false; + +$root.caffe.LossParameter.NormalizationMode = { + "FULL": 0, + "VALID": 1, + "BATCH_SIZE": 2, + "NONE": 3 +}; + +$root.caffe.AccuracyParameter = class AccuracyParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.AccuracyParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.top_k = reader.uint32(); + break; + case 2: + message.axis = reader.int32(); + break; + case 3: + message.ignore_label = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.AccuracyParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "top_k": + message.top_k = reader.uint32(); + break; + case "axis": + message.axis = reader.int32(); + break; + case "ignore_label": + message.ignore_label = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.AccuracyParameter.prototype.top_k = 1; +$root.caffe.AccuracyParameter.prototype.axis = 1; +$root.caffe.AccuracyParameter.prototype.ignore_label = 0; + +$root.caffe.ArgMaxParameter = class ArgMaxParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ArgMaxParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.out_max_val = reader.bool(); + break; + case 2: + message.top_k = reader.uint32(); + break; + case 3: + message.axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ArgMaxParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "out_max_val": + message.out_max_val = reader.bool(); + break; + case "top_k": + message.top_k = reader.uint32(); + break; + case "axis": + message.axis = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ArgMaxParameter.prototype.out_max_val = false; +$root.caffe.ArgMaxParameter.prototype.top_k = 1; +$root.caffe.ArgMaxParameter.prototype.axis = 0; + +$root.caffe.ClipParameter = class ClipParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ClipParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.min = reader.float(); + break; + case 2: + message.max = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ClipParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "min": + message.min = reader.float(); + break; + case "max": + message.max = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ClipParameter.prototype.min = 0; +$root.caffe.ClipParameter.prototype.max = 0; + +$root.caffe.ConcatParameter = class ConcatParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ConcatParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.axis = reader.int32(); + break; + case 1: + message.concat_dim = reader.uint32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ConcatParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "concat_dim": + message.concat_dim = reader.uint32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ConcatParameter.prototype.axis = 1; +$root.caffe.ConcatParameter.prototype.concat_dim = 1; + +$root.caffe.BatchNormParameter = class BatchNormParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.BatchNormParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.use_global_stats = reader.bool(); + break; + case 2: + message.moving_average_fraction = reader.float(); + break; + case 3: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.BatchNormParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "use_global_stats": + message.use_global_stats = reader.bool(); + break; + case "moving_average_fraction": + message.moving_average_fraction = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.BatchNormParameter.prototype.use_global_stats = false; +$root.caffe.BatchNormParameter.prototype.moving_average_fraction = 0.999; +$root.caffe.BatchNormParameter.prototype.eps = 0.00001; + +$root.caffe.BiasParameter = class BiasParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.BiasParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int32(); + break; + case 2: + message.num_axes = reader.int32(); + break; + case 3: + message.filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.BiasParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "num_axes": + message.num_axes = reader.int32(); + break; + case "filler": + message.filler = $root.caffe.FillerParameter.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.BiasParameter.prototype.axis = 1; +$root.caffe.BiasParameter.prototype.num_axes = 1; +$root.caffe.BiasParameter.prototype.filler = null; + +$root.caffe.ContrastiveLossParameter = class ContrastiveLossParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ContrastiveLossParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.margin = reader.float(); + break; + case 2: + message.legacy_version = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ContrastiveLossParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "margin": + message.margin = reader.float(); + break; + case "legacy_version": + message.legacy_version = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ContrastiveLossParameter.prototype.margin = 1; +$root.caffe.ContrastiveLossParameter.prototype.legacy_version = false; + +$root.caffe.ConvolutionParameter = class ConvolutionParameter { + + constructor() { + this.pad = []; + this.kernel_size = []; + this.stride = []; + this.dilation = []; + } + + static decode(reader, length) { + const message = new $root.caffe.ConvolutionParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_output = reader.uint32(); + break; + case 2: + message.bias_term = reader.bool(); + break; + case 3: + message.pad = reader.array(message.pad, () => reader.uint32(), tag); + break; + case 4: + message.kernel_size = reader.array(message.kernel_size, () => reader.uint32(), tag); + break; + case 6: + message.stride = reader.array(message.stride, () => reader.uint32(), tag); + break; + case 18: + message.dilation = reader.array(message.dilation, () => reader.uint32(), tag); + break; + case 9: + message.pad_h = reader.uint32(); + break; + case 10: + message.pad_w = reader.uint32(); + break; + case 11: + message.kernel_h = reader.uint32(); + break; + case 12: + message.kernel_w = reader.uint32(); + break; + case 13: + message.stride_h = reader.uint32(); + break; + case 14: + message.stride_w = reader.uint32(); + break; + case 5: + message.group = reader.uint32(); + break; + case 7: + message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 8: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 15: + message.engine = reader.int32(); + break; + case 16: + message.axis = reader.int32(); + break; + case 17: + message.force_nd_im2col = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ConvolutionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "num_output": + message.num_output = reader.uint32(); + break; + case "bias_term": + message.bias_term = reader.bool(); + break; + case "pad": + reader.array(message.pad, () => reader.uint32()); + break; + case "kernel_size": + reader.array(message.kernel_size, () => reader.uint32()); + break; + case "stride": + reader.array(message.stride, () => reader.uint32()); + break; + case "dilation": + reader.array(message.dilation, () => reader.uint32()); + break; + case "pad_h": + message.pad_h = reader.uint32(); + break; + case "pad_w": + message.pad_w = reader.uint32(); + break; + case "kernel_h": + message.kernel_h = reader.uint32(); + break; + case "kernel_w": + message.kernel_w = reader.uint32(); + break; + case "stride_h": + message.stride_h = reader.uint32(); + break; + case "stride_w": + message.stride_w = reader.uint32(); + break; + case "group": + message.group = reader.uint32(); + break; + case "weight_filler": + message.weight_filler = $root.caffe.FillerParameter.decodeText(reader); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader); + break; + case "engine": + message.engine = reader.enum($root.caffe.ConvolutionParameter.Engine); + break; + case "axis": + message.axis = reader.int32(); + break; + case "force_nd_im2col": + message.force_nd_im2col = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ConvolutionParameter.prototype.num_output = 0; +$root.caffe.ConvolutionParameter.prototype.bias_term = true; +$root.caffe.ConvolutionParameter.prototype.pad_h = 0; +$root.caffe.ConvolutionParameter.prototype.pad_w = 0; +$root.caffe.ConvolutionParameter.prototype.kernel_h = 0; +$root.caffe.ConvolutionParameter.prototype.kernel_w = 0; +$root.caffe.ConvolutionParameter.prototype.stride_h = 0; 
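+// Unlike the scalar defaults in this block, the repeated fields (pad,
+// kernel_size, stride, dilation) are created per instance in the
+// constructor: arrays are mutable, so a shared prototype array would leak
+// decoded elements between messages, while immutable scalars are safe to
+// share on the prototype.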
+$root.caffe.ConvolutionParameter.prototype.stride_w = 0; +$root.caffe.ConvolutionParameter.prototype.group = 1; +$root.caffe.ConvolutionParameter.prototype.weight_filler = null; +$root.caffe.ConvolutionParameter.prototype.bias_filler = null; +$root.caffe.ConvolutionParameter.prototype.engine = 0; +$root.caffe.ConvolutionParameter.prototype.axis = 1; +$root.caffe.ConvolutionParameter.prototype.force_nd_im2col = false; + +$root.caffe.ConvolutionParameter.Engine = { + "DEFAULT": 0, + "CAFFE": 1, + "CUDNN": 2 +}; + +$root.caffe.CropParameter = class CropParameter { + + constructor() { + this.offset = []; + } + + static decode(reader, length) { + const message = new $root.caffe.CropParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int32(); + break; + case 2: + message.offset = reader.array(message.offset, () => reader.uint32(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.CropParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "offset": + reader.array(message.offset, () => reader.uint32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.CropParameter.prototype.axis = 2; + +$root.caffe.DataParameter = class DataParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.DataParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source = reader.string(); + break; + case 4: + message.batch_size = reader.uint32(); + break; + case 7: + message.rand_skip = reader.uint32(); + break; + case 8: + message.backend = reader.int32(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.mean_file = reader.string(); + break; + case 5: + message.crop_size = reader.uint32(); + break; + case 6: + message.mirror = reader.bool(); + break; + case 9: + message.force_encoded_color = reader.bool(); + break; + case 10: + message.prefetch = reader.uint32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.DataParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "source": + message.source = reader.string(); + break; + case "batch_size": + message.batch_size = reader.uint32(); + break; + case "rand_skip": + message.rand_skip = reader.uint32(); + break; + case "backend": + message.backend = reader.enum($root.caffe.DataParameter.DB); + break; + case "scale": + message.scale = reader.float(); + break; + case "mean_file": + message.mean_file = reader.string(); + break; + case "crop_size": + message.crop_size = reader.uint32(); + break; + case "mirror": + message.mirror = reader.bool(); + break; + case "force_encoded_color": + message.force_encoded_color = reader.bool(); + break; + case "prefetch": + message.prefetch = reader.uint32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.DataParameter.prototype.source = ""; 
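+// Editor's note (added comment): enum-typed fields such as DataParameter's
+// "backend" are read as plain varints via reader.int32() in the binary path,
+// while decodeText resolves the symbolic prototxt token through reader.enum()
+// using generated lookup tables such as $root.caffe.DataParameter.DB below;
+// both paths store the same integer value on the message.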
+$root.caffe.DataParameter.prototype.batch_size = 0; +$root.caffe.DataParameter.prototype.rand_skip = 0; +$root.caffe.DataParameter.prototype.backend = 0; +$root.caffe.DataParameter.prototype.scale = 1; +$root.caffe.DataParameter.prototype.mean_file = ""; +$root.caffe.DataParameter.prototype.crop_size = 0; +$root.caffe.DataParameter.prototype.mirror = false; +$root.caffe.DataParameter.prototype.force_encoded_color = false; +$root.caffe.DataParameter.prototype.prefetch = 4; + +$root.caffe.DataParameter.DB = { + "LEVELDB": 0, + "LMDB": 1 +}; + +$root.caffe.DropoutParameter = class DropoutParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.DropoutParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dropout_ratio = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.DropoutParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dropout_ratio": + message.dropout_ratio = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.DropoutParameter.prototype.dropout_ratio = 0.5; + +$root.caffe.DummyDataParameter = class DummyDataParameter { + + constructor() { + this.data_filler = []; + this.shape = []; + this.num = []; + this.channels = []; + this.height = []; + this.width = []; + } + + static decode(reader, length) { + const message = new $root.caffe.DummyDataParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.data_filler.push($root.caffe.FillerParameter.decode(reader, reader.uint32())); + break; + case 6: + message.shape.push($root.caffe.BlobShape.decode(reader, reader.uint32())); + break; + case 2: + message.num = reader.array(message.num, () => reader.uint32(), tag); + break; + case 3: + message.channels = reader.array(message.channels, () => reader.uint32(), tag); + break; + case 4: + message.height = reader.array(message.height, () => reader.uint32(), tag); + break; + case 5: + message.width = reader.array(message.width, () => reader.uint32(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.DummyDataParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "data_filler": + message.data_filler.push($root.caffe.FillerParameter.decodeText(reader)); + break; + case "shape": + message.shape.push($root.caffe.BlobShape.decodeText(reader)); + break; + case "num": + reader.array(message.num, () => reader.uint32()); + break; + case "channels": + reader.array(message.channels, () => reader.uint32()); + break; + case "height": + reader.array(message.height, () => reader.uint32()); + break; + case "width": + reader.array(message.width, () => reader.uint32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.EltwiseParameter = class EltwiseParameter { + + constructor() { + this.coeff = []; + } + + static decode(reader, length) { + const message = new $root.caffe.EltwiseParameter(); + const end = length !== 
undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operation = reader.int32(); + break; + case 2: + message.coeff = reader.floats(message.coeff, tag); + break; + case 3: + message.stable_prod_grad = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.EltwiseParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "operation": + message.operation = reader.enum($root.caffe.EltwiseParameter.EltwiseOp); + break; + case "coeff": + reader.array(message.coeff, () => reader.float()); + break; + case "stable_prod_grad": + message.stable_prod_grad = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.EltwiseParameter.prototype.operation = 1; +$root.caffe.EltwiseParameter.prototype.stable_prod_grad = true; + +$root.caffe.EltwiseParameter.EltwiseOp = { + "PROD": 0, + "SUM": 1, + "MAX": 2 +}; + +$root.caffe.ELUParameter = class ELUParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ELUParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ELUParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ELUParameter.prototype.alpha = 1; + +$root.caffe.EmbedParameter = class EmbedParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.EmbedParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_output = reader.uint32(); + break; + case 2: + message.input_dim = reader.uint32(); + break; + case 3: + message.bias_term = reader.bool(); + break; + case 4: + message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 5: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.EmbedParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "num_output": + message.num_output = reader.uint32(); + break; + case "input_dim": + message.input_dim = reader.uint32(); + break; + case "bias_term": + message.bias_term = reader.bool(); + break; + case "weight_filler": + message.weight_filler = $root.caffe.FillerParameter.decodeText(reader); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.EmbedParameter.prototype.num_output = 0; +$root.caffe.EmbedParameter.prototype.input_dim = 0; +$root.caffe.EmbedParameter.prototype.bias_term = true; +$root.caffe.EmbedParameter.prototype.weight_filler = null; +$root.caffe.EmbedParameter.prototype.bias_filler = null; + +$root.caffe.ExpParameter = class ExpParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ExpParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base = reader.float(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.shift = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ExpParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base": + message.base = reader.float(); + break; + case "scale": + message.scale = reader.float(); + break; + case "shift": + message.shift = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ExpParameter.prototype.base = -1; +$root.caffe.ExpParameter.prototype.scale = 1; +$root.caffe.ExpParameter.prototype.shift = 0; + +$root.caffe.FlattenParameter = class FlattenParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.FlattenParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int32(); + break; + case 2: + message.end_axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.FlattenParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "end_axis": + message.end_axis = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.FlattenParameter.prototype.axis = 1; +$root.caffe.FlattenParameter.prototype.end_axis = -1; + +$root.caffe.HDF5DataParameter = class HDF5DataParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.HDF5DataParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source = reader.string(); + break; + case 2: + message.batch_size = reader.uint32(); + break; + case 3: + message.shuffle = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.HDF5DataParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "source": + message.source = reader.string(); + break; + case "batch_size": + message.batch_size = reader.uint32(); + break; + case "shuffle": + message.shuffle = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.HDF5DataParameter.prototype.source = ""; +$root.caffe.HDF5DataParameter.prototype.batch_size = 0; +$root.caffe.HDF5DataParameter.prototype.shuffle = false; + +$root.caffe.HDF5OutputParameter = class HDF5OutputParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.HDF5OutputParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.file_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.HDF5OutputParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "file_name": + message.file_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.HDF5OutputParameter.prototype.file_name = ""; + +$root.caffe.HingeLossParameter = class HingeLossParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.HingeLossParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.norm = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.HingeLossParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "norm": + message.norm = reader.enum($root.caffe.HingeLossParameter.Norm); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.HingeLossParameter.prototype.norm = 1; + +$root.caffe.HingeLossParameter.Norm = { + "L1": 1, + "L2": 2 +}; + +$root.caffe.ImageDataParameter = class ImageDataParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ImageDataParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source = reader.string(); + break; + case 4: + message.batch_size = reader.uint32(); + break; + case 7: + message.rand_skip = reader.uint32(); + break; + case 8: + message.shuffle = reader.bool(); + break; + case 9: + message.new_height = reader.uint32(); + break; + case 10: + message.new_width = reader.uint32(); + break; + case 11: + message.is_color = reader.bool(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.mean_file = reader.string(); + break; + case 5: + message.crop_size = reader.uint32(); + break; + case 6: + message.mirror = reader.bool(); + break; + case 12: + message.root_folder = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ImageDataParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "source": + message.source = reader.string(); + break; + case "batch_size": + message.batch_size = reader.uint32(); + break; + case "rand_skip": + message.rand_skip = reader.uint32(); + break; + case "shuffle": + message.shuffle = reader.bool(); + break; + case "new_height": + message.new_height = reader.uint32(); + break; + case "new_width": + message.new_width = reader.uint32(); + break; + case "is_color": + message.is_color = reader.bool(); + break; + case "scale": + message.scale = reader.float(); + break; + case "mean_file": + message.mean_file = reader.string(); + break; + case "crop_size": + message.crop_size = reader.uint32(); + break; + case "mirror": + message.mirror = reader.bool(); + break; + case "root_folder": + message.root_folder = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ImageDataParameter.prototype.source = ""; +$root.caffe.ImageDataParameter.prototype.batch_size = 1; +$root.caffe.ImageDataParameter.prototype.rand_skip = 0; +$root.caffe.ImageDataParameter.prototype.shuffle = false; +$root.caffe.ImageDataParameter.prototype.new_height = 0; +$root.caffe.ImageDataParameter.prototype.new_width = 0; +$root.caffe.ImageDataParameter.prototype.is_color = true; +$root.caffe.ImageDataParameter.prototype.scale = 1; +$root.caffe.ImageDataParameter.prototype.mean_file = ""; +$root.caffe.ImageDataParameter.prototype.crop_size = 0; +$root.caffe.ImageDataParameter.prototype.mirror = false; 
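+// Editor's note (added comment): defaults for optional scalar fields are
+// declared on the prototype rather than assigned in the constructor, so a
+// decoded message only owns the fields that actually appeared on the wire;
+// reading e.g. message.mirror on a message that never set it falls back
+// through the prototype chain to the caffe.proto default declared here.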
+$root.caffe.ImageDataParameter.prototype.root_folder = ""; + +$root.caffe.InfogainLossParameter = class InfogainLossParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.InfogainLossParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source = reader.string(); + break; + case 2: + message.axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.InfogainLossParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "source": + message.source = reader.string(); + break; + case "axis": + message.axis = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.InfogainLossParameter.prototype.source = ""; +$root.caffe.InfogainLossParameter.prototype.axis = 1; + +$root.caffe.InnerProductParameter = class InnerProductParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.InnerProductParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_output = reader.uint32(); + break; + case 2: + message.bias_term = reader.bool(); + break; + case 3: + message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 4: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 5: + message.axis = reader.int32(); + break; + case 6: + message.transpose = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.InnerProductParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "num_output": + message.num_output = reader.uint32(); + break; + case "bias_term": + message.bias_term = reader.bool(); + break; + case "weight_filler": + message.weight_filler = $root.caffe.FillerParameter.decodeText(reader); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader); + break; + case "axis": + message.axis = reader.int32(); + break; + case "transpose": + message.transpose = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.InnerProductParameter.prototype.num_output = 0; +$root.caffe.InnerProductParameter.prototype.bias_term = true; +$root.caffe.InnerProductParameter.prototype.weight_filler = null; +$root.caffe.InnerProductParameter.prototype.bias_filler = null; +$root.caffe.InnerProductParameter.prototype.axis = 1; +$root.caffe.InnerProductParameter.prototype.transpose = false; + +$root.caffe.InputParameter = class InputParameter { + + constructor() { + this.shape = []; + } + + static decode(reader, length) { + const message = new $root.caffe.InputParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape.push($root.caffe.BlobShape.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.InputParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape.push($root.caffe.BlobShape.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.LogParameter = class LogParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.LogParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base = reader.float(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.shift = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.LogParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base": + message.base = reader.float(); + break; + case "scale": + message.scale = reader.float(); + break; + case "shift": + message.shift = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.LogParameter.prototype.base = -1; +$root.caffe.LogParameter.prototype.scale = 1; +$root.caffe.LogParameter.prototype.shift = 0; + +$root.caffe.LRNParameter = class LRNParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.LRNParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.local_size = reader.uint32(); + break; + case 2: + message.alpha = reader.float(); + break; + case 3: + message.beta = reader.float(); + break; + case 4: + message.norm_region = reader.int32(); + break; + case 5: + message.k = reader.float(); + break; + case 6: + message.engine = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.LRNParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "local_size": + message.local_size = reader.uint32(); + break; + case "alpha": + message.alpha = reader.float(); + break; + case "beta": + message.beta = reader.float(); + break; + case "norm_region": + message.norm_region = reader.enum($root.caffe.LRNParameter.NormRegion); + break; + case "k": + message.k = reader.float(); + break; + case "engine": + message.engine = reader.enum($root.caffe.LRNParameter.Engine); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.LRNParameter.prototype.local_size = 5; +$root.caffe.LRNParameter.prototype.alpha = 1; +$root.caffe.LRNParameter.prototype.beta = 0.75; +$root.caffe.LRNParameter.prototype.norm_region = 0; +$root.caffe.LRNParameter.prototype.k = 1; +$root.caffe.LRNParameter.prototype.engine = 0; + +$root.caffe.LRNParameter.NormRegion = { + "ACROSS_CHANNELS": 0, + "WITHIN_CHANNEL": 1 +}; + +$root.caffe.LRNParameter.Engine = { + "DEFAULT": 0, + "CAFFE": 1, + "CUDNN": 2 +}; + +$root.caffe.MemoryDataParameter = class MemoryDataParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.MemoryDataParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.batch_size = reader.uint32(); + break; + case 2: + message.channels = reader.uint32(); + break; + case 3: + message.height = reader.uint32(); + break; + case 4: + message.width = reader.uint32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.MemoryDataParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "batch_size": + message.batch_size = reader.uint32(); + break; + case "channels": + message.channels = reader.uint32(); + break; + case "height": + message.height = reader.uint32(); + break; + case "width": + message.width = reader.uint32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.MemoryDataParameter.prototype.batch_size = 0; +$root.caffe.MemoryDataParameter.prototype.channels = 0; +$root.caffe.MemoryDataParameter.prototype.height = 0; +$root.caffe.MemoryDataParameter.prototype.width = 0; + +$root.caffe.MVNParameter = class MVNParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.MVNParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.normalize_variance = reader.bool(); + break; + case 2: + message.across_channels = reader.bool(); + break; + case 3: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.MVNParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "normalize_variance": + message.normalize_variance = reader.bool(); + break; + case "across_channels": + message.across_channels = reader.bool(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.MVNParameter.prototype.normalize_variance = true; +$root.caffe.MVNParameter.prototype.across_channels = false; +$root.caffe.MVNParameter.prototype.eps = 1e-9; + +$root.caffe.ParameterParameter = class ParameterParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ParameterParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.caffe.BlobShape.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ParameterParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.caffe.BlobShape.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ParameterParameter.prototype.shape = null; + +$root.caffe.PoolingParameter = class PoolingParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.PoolingParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pool = reader.int32(); + break; + case 4: + message.pad = reader.uint32(); + break; + case 9: + message.pad_h = reader.uint32(); + break; + case 10: + message.pad_w = reader.uint32(); + break; + case 2: + message.kernel_size = reader.uint32(); + break; + case 5: + message.kernel_h = reader.uint32(); + break; + case 6: + message.kernel_w = reader.uint32(); + break; + case 3: + message.stride = reader.uint32(); + break; + case 7: + message.stride_h = reader.uint32(); + break; + case 8: + message.stride_w = reader.uint32(); + break; + case 11: + message.engine = reader.int32(); + break; + case 12: + message.global_pooling = reader.bool(); + break; + case 13: + message.round_mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.PoolingParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "pool": + message.pool = reader.enum($root.caffe.PoolingParameter.PoolMethod); + break; + case "pad": + message.pad = reader.uint32(); + break; + case "pad_h": + message.pad_h = reader.uint32(); + break; + case "pad_w": + message.pad_w = reader.uint32(); + break; + case "kernel_size": + message.kernel_size = reader.uint32(); + break; + case "kernel_h": + message.kernel_h = reader.uint32(); + break; + case "kernel_w": + message.kernel_w = reader.uint32(); + break; + case "stride": + message.stride = reader.uint32(); + break; + case "stride_h": + message.stride_h = reader.uint32(); + break; + case "stride_w": + message.stride_w = reader.uint32(); + break; + case "engine": + message.engine = reader.enum($root.caffe.PoolingParameter.Engine); + break; + case "global_pooling": + message.global_pooling = reader.bool(); + break; + case "round_mode": + message.round_mode = reader.enum($root.caffe.PoolingParameter.RoundMode); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.PoolingParameter.prototype.pool = 0; +$root.caffe.PoolingParameter.prototype.pad = 0; +$root.caffe.PoolingParameter.prototype.pad_h = 0; +$root.caffe.PoolingParameter.prototype.pad_w = 0; +$root.caffe.PoolingParameter.prototype.kernel_size = 0; +$root.caffe.PoolingParameter.prototype.kernel_h = 0; +$root.caffe.PoolingParameter.prototype.kernel_w = 0; +$root.caffe.PoolingParameter.prototype.stride = 1; +$root.caffe.PoolingParameter.prototype.stride_h = 0; +$root.caffe.PoolingParameter.prototype.stride_w = 0; +$root.caffe.PoolingParameter.prototype.engine = 0; +$root.caffe.PoolingParameter.prototype.global_pooling = false; +$root.caffe.PoolingParameter.prototype.round_mode = 0; + +$root.caffe.PoolingParameter.PoolMethod = { + "MAX": 0, + "AVE": 1, + "STOCHASTIC": 2 +}; + +$root.caffe.PoolingParameter.Engine = { + "DEFAULT": 0, + "CAFFE": 1, + "CUDNN": 2 +}; + +$root.caffe.PoolingParameter.RoundMode = { + "CEIL": 0, + "FLOOR": 1 +}; + +$root.caffe.PowerParameter = class PowerParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.PowerParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.power = reader.float(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.shift = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.PowerParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "power": + message.power = reader.float(); + break; + case "scale": + message.scale = reader.float(); + break; + case "shift": + message.shift = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.PowerParameter.prototype.power = 1; +$root.caffe.PowerParameter.prototype.scale = 1; +$root.caffe.PowerParameter.prototype.shift = 0; + +$root.caffe.PythonParameter = class PythonParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.PythonParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.module = reader.string(); + break; + case 2: + message.layer = reader.string(); + break; + case 3: + message.param_str = reader.string(); + break; + case 4: + message.share_in_parallel = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.PythonParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "module": + message.module = reader.string(); + break; + case "layer": + message.layer = reader.string(); + break; + case "param_str": + message.param_str = reader.string(); + break; + case "share_in_parallel": + message.share_in_parallel = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.PythonParameter.prototype.module = ""; +$root.caffe.PythonParameter.prototype.layer = ""; +$root.caffe.PythonParameter.prototype.param_str = ""; +$root.caffe.PythonParameter.prototype.share_in_parallel = false; + +$root.caffe.RecurrentParameter = class RecurrentParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.RecurrentParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_output = reader.uint32(); + break; + case 2: + message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 3: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 4: + message.debug_info = reader.bool(); + break; + case 5: + message.expose_hidden = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.RecurrentParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "num_output": + message.num_output = reader.uint32(); + break; + case "weight_filler": + message.weight_filler = $root.caffe.FillerParameter.decodeText(reader); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader); + break; + case "debug_info": + message.debug_info = reader.bool(); + break; + case "expose_hidden": + message.expose_hidden = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.RecurrentParameter.prototype.num_output = 0; +$root.caffe.RecurrentParameter.prototype.weight_filler = null; +$root.caffe.RecurrentParameter.prototype.bias_filler = null; +$root.caffe.RecurrentParameter.prototype.debug_info = false; +$root.caffe.RecurrentParameter.prototype.expose_hidden = false; + +$root.caffe.ReductionParameter = class ReductionParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ReductionParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operation = reader.int32(); + break; + case 2: + message.axis = reader.int32(); + break; + case 3: + message.coeff = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ReductionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "operation": + message.operation = reader.enum($root.caffe.ReductionParameter.ReductionOp); + break; + case "axis": + message.axis = reader.int32(); + break; + case "coeff": + message.coeff = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ReductionParameter.prototype.operation = 1; +$root.caffe.ReductionParameter.prototype.axis = 0; +$root.caffe.ReductionParameter.prototype.coeff = 1; + +$root.caffe.ReductionParameter.ReductionOp = { + "SUM": 1, + "ASUM": 2, + "SUMSQ": 3, + "MEAN": 4 +}; + +$root.caffe.ReLUParameter = class ReLUParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ReLUParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.negative_slope = reader.float(); + break; + case 2: + message.engine = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ReLUParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "negative_slope": + message.negative_slope = reader.float(); + break; + case "engine": + message.engine = reader.enum($root.caffe.ReLUParameter.Engine); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ReLUParameter.prototype.negative_slope = 0; +$root.caffe.ReLUParameter.prototype.engine = 0; + +$root.caffe.ReLUParameter.Engine = { + "DEFAULT": 0, + "CAFFE": 1, + "CUDNN": 2 +}; + +$root.caffe.ReshapeParameter = class ReshapeParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ReshapeParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.caffe.BlobShape.decode(reader, reader.uint32()); + break; + case 2: + message.axis = reader.int32(); + break; + case 3: + message.num_axes = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ReshapeParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.caffe.BlobShape.decodeText(reader); + break; + case "axis": + message.axis = reader.int32(); + break; + case "num_axes": + message.num_axes = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ReshapeParameter.prototype.shape = null; +$root.caffe.ReshapeParameter.prototype.axis = 0; +$root.caffe.ReshapeParameter.prototype.num_axes = -1; + +$root.caffe.ScaleParameter = class ScaleParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ScaleParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int32(); + break; + case 2: + message.num_axes = reader.int32(); + break; + case 3: + message.filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 4: + message.bias_term = reader.bool(); + break; + case 5: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ScaleParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "num_axes": + message.num_axes = reader.int32(); + break; + case "filler": + message.filler = $root.caffe.FillerParameter.decodeText(reader); + break; + case "bias_term": + message.bias_term = reader.bool(); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ScaleParameter.prototype.axis = 1; +$root.caffe.ScaleParameter.prototype.num_axes = 1; +$root.caffe.ScaleParameter.prototype.filler = null; +$root.caffe.ScaleParameter.prototype.bias_term = false; +$root.caffe.ScaleParameter.prototype.bias_filler = null; + +$root.caffe.SigmoidParameter = class SigmoidParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.SigmoidParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engine = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.SigmoidParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "engine": + message.engine = reader.enum($root.caffe.SigmoidParameter.Engine); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.SigmoidParameter.prototype.engine = 0; + +$root.caffe.SigmoidParameter.Engine = { + "DEFAULT": 0, + "CAFFE": 1, + "CUDNN": 2 +}; + +$root.caffe.SliceParameter = class SliceParameter { + + constructor() { + this.slice_point = []; + } + + static decode(reader, length) { + const message = new $root.caffe.SliceParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 3: + message.axis = reader.int32(); + break; + case 2: + message.slice_point = reader.array(message.slice_point, () => reader.uint32(), tag); + break; + case 1: + message.slice_dim = reader.uint32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.SliceParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "slice_point": + reader.array(message.slice_point, () => reader.uint32()); + break; + case "slice_dim": + message.slice_dim = reader.uint32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.SliceParameter.prototype.axis = 1; +$root.caffe.SliceParameter.prototype.slice_dim = 1; + +$root.caffe.SoftmaxParameter = class SoftmaxParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.SoftmaxParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engine = reader.int32(); + break; + case 2: + message.axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.SoftmaxParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "engine": + message.engine = reader.enum($root.caffe.SoftmaxParameter.Engine); + break; + case "axis": + message.axis = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.SoftmaxParameter.prototype.engine = 0; +$root.caffe.SoftmaxParameter.prototype.axis = 1; + +$root.caffe.SoftmaxParameter.Engine = { + "DEFAULT": 0, + "CAFFE": 1, + "CUDNN": 2 +}; + +$root.caffe.SwishParameter = class SwishParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.SwishParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.beta = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.SwishParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "beta": + message.beta = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.SwishParameter.prototype.beta = 1; + +$root.caffe.TanHParameter = class TanHParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.TanHParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engine = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.TanHParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "engine": + message.engine = reader.enum($root.caffe.TanHParameter.Engine); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.TanHParameter.prototype.engine = 0; + +$root.caffe.TanHParameter.Engine = { + "DEFAULT": 0, + "CAFFE": 1, + "CUDNN": 2 +}; + +$root.caffe.TileParameter = class TileParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.TileParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int32(); + break; + case 2: + message.tiles = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.TileParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "tiles": + message.tiles = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.TileParameter.prototype.axis = 1; +$root.caffe.TileParameter.prototype.tiles = 0; + +$root.caffe.ThresholdParameter = class ThresholdParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.ThresholdParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.threshold = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.ThresholdParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "threshold": + message.threshold = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.ThresholdParameter.prototype.threshold = 0; + +$root.caffe.WindowDataParameter = class WindowDataParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.WindowDataParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source = reader.string(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.mean_file = reader.string(); + break; + case 4: + message.batch_size = reader.uint32(); + break; + case 5: + message.crop_size = reader.uint32(); + break; + case 6: + message.mirror = reader.bool(); + break; + case 7: + message.fg_threshold = reader.float(); + break; + case 8: + message.bg_threshold = reader.float(); + break; + case 9: + message.fg_fraction = reader.float(); + break; + case 10: + message.context_pad = reader.uint32(); + break; + case 11: + message.crop_mode = reader.string(); + break; + case 12: + message.cache_images = reader.bool(); + break; + case 13: + message.root_folder = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.WindowDataParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "source": + message.source = reader.string(); + break; + case "scale": + message.scale = reader.float(); + break; + case "mean_file": + message.mean_file = reader.string(); + break; + case "batch_size": + message.batch_size = reader.uint32(); + break; + case "crop_size": + message.crop_size = reader.uint32(); + break; + case "mirror": + message.mirror = reader.bool(); + break; + case "fg_threshold": + message.fg_threshold = reader.float(); + break; + case "bg_threshold": + message.bg_threshold = reader.float(); + break; + case "fg_fraction": + message.fg_fraction = reader.float(); + break; + case "context_pad": + message.context_pad = reader.uint32(); + break; + case "crop_mode": + message.crop_mode = reader.string(); + break; + case "cache_images": + message.cache_images = reader.bool(); + break; + case "root_folder": + message.root_folder = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.WindowDataParameter.prototype.source = ""; +$root.caffe.WindowDataParameter.prototype.scale = 1; +$root.caffe.WindowDataParameter.prototype.mean_file = ""; +$root.caffe.WindowDataParameter.prototype.batch_size = 0; +$root.caffe.WindowDataParameter.prototype.crop_size = 0; +$root.caffe.WindowDataParameter.prototype.mirror = false; +$root.caffe.WindowDataParameter.prototype.fg_threshold = 0.5; +$root.caffe.WindowDataParameter.prototype.bg_threshold = 0.5; +$root.caffe.WindowDataParameter.prototype.fg_fraction = 0.25; +$root.caffe.WindowDataParameter.prototype.context_pad = 0; +$root.caffe.WindowDataParameter.prototype.crop_mode = "warp"; +$root.caffe.WindowDataParameter.prototype.cache_images = false; +$root.caffe.WindowDataParameter.prototype.root_folder = ""; + +$root.caffe.SPPParameter = class SPPParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.SPPParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pyramid_height = reader.uint32(); + break; + case 2: + message.pool = reader.int32(); + break; + case 6: + message.engine = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.SPPParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "pyramid_height": + message.pyramid_height = reader.uint32(); + break; + case "pool": + message.pool = reader.enum($root.caffe.SPPParameter.PoolMethod); + break; + case "engine": + message.engine = reader.enum($root.caffe.SPPParameter.Engine); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.SPPParameter.prototype.pyramid_height = 0; +$root.caffe.SPPParameter.prototype.pool = 0; +$root.caffe.SPPParameter.prototype.engine = 0; + +$root.caffe.SPPParameter.PoolMethod = { + "MAX": 0, + "AVE": 1, + "STOCHASTIC": 2 +}; + +$root.caffe.SPPParameter.Engine = { + "DEFAULT": 0, + "CAFFE": 1, + "CUDNN": 2 +}; + +$root.caffe.V1LayerParameter = class V1LayerParameter { + + constructor() { + this.bottom = []; + this.top = []; + this.include = []; + this.exclude = []; + this.blobs = []; + this.param = []; + this.blob_share_mode = []; + this.blobs_lr = []; + this.weight_decay = []; + this.loss_weight = []; + } + + static decode(reader, length) { + const message = new $root.caffe.V1LayerParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.bottom.push(reader.string()); + break; + case 3: + message.top.push(reader.string()); + break; + case 4: + message.name = reader.string(); + break; + case 32: + message.include.push($root.caffe.NetStateRule.decode(reader, reader.uint32())); + break; + case 33: + message.exclude.push($root.caffe.NetStateRule.decode(reader, reader.uint32())); + break; + case 5: + message.type = reader.int32(); + break; + case 6: + message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32())); + break; + case 1001: + message.param.push(reader.string()); + break; + case 1002: + message.blob_share_mode = reader.array(message.blob_share_mode, () => reader.int32(), tag); + break; + case 7: + message.blobs_lr = reader.floats(message.blobs_lr, tag); + break; + case 8: + message.weight_decay = reader.floats(message.weight_decay, tag); + break; + case 35: + message.loss_weight = reader.floats(message.loss_weight, tag); + break; + case 27: + message.accuracy_param = $root.caffe.AccuracyParameter.decode(reader, reader.uint32()); + break; + case 23: + message.argmax_param = $root.caffe.ArgMaxParameter.decode(reader, reader.uint32()); + break; + case 9: + message.concat_param = $root.caffe.ConcatParameter.decode(reader, reader.uint32()); + break; + case 40: + message.contrastive_loss_param = $root.caffe.ContrastiveLossParameter.decode(reader, reader.uint32()); + break; + case 10: + message.convolution_param = $root.caffe.ConvolutionParameter.decode(reader, reader.uint32()); + break; + case 11: + message.data_param = $root.caffe.DataParameter.decode(reader, reader.uint32()); + break; + case 12: + message.dropout_param = $root.caffe.DropoutParameter.decode(reader, reader.uint32()); + break; + case 26: + message.dummy_data_param = 
$root.caffe.DummyDataParameter.decode(reader, reader.uint32()); + break; + case 24: + message.eltwise_param = $root.caffe.EltwiseParameter.decode(reader, reader.uint32()); + break; + case 41: + message.exp_param = $root.caffe.ExpParameter.decode(reader, reader.uint32()); + break; + case 13: + message.hdf5_data_param = $root.caffe.HDF5DataParameter.decode(reader, reader.uint32()); + break; + case 14: + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decode(reader, reader.uint32()); + break; + case 29: + message.hinge_loss_param = $root.caffe.HingeLossParameter.decode(reader, reader.uint32()); + break; + case 15: + message.image_data_param = $root.caffe.ImageDataParameter.decode(reader, reader.uint32()); + break; + case 16: + message.infogain_loss_param = $root.caffe.InfogainLossParameter.decode(reader, reader.uint32()); + break; + case 17: + message.inner_product_param = $root.caffe.InnerProductParameter.decode(reader, reader.uint32()); + break; + case 18: + message.lrn_param = $root.caffe.LRNParameter.decode(reader, reader.uint32()); + break; + case 22: + message.memory_data_param = $root.caffe.MemoryDataParameter.decode(reader, reader.uint32()); + break; + case 34: + message.mvn_param = $root.caffe.MVNParameter.decode(reader, reader.uint32()); + break; + case 19: + message.pooling_param = $root.caffe.PoolingParameter.decode(reader, reader.uint32()); + break; + case 21: + message.power_param = $root.caffe.PowerParameter.decode(reader, reader.uint32()); + break; + case 30: + message.relu_param = $root.caffe.ReLUParameter.decode(reader, reader.uint32()); + break; + case 38: + message.sigmoid_param = $root.caffe.SigmoidParameter.decode(reader, reader.uint32()); + break; + case 39: + message.softmax_param = $root.caffe.SoftmaxParameter.decode(reader, reader.uint32()); + break; + case 31: + message.slice_param = $root.caffe.SliceParameter.decode(reader, reader.uint32()); + break; + case 37: + message.tanh_param = $root.caffe.TanHParameter.decode(reader, reader.uint32()); + break; + case 25: + message.threshold_param = $root.caffe.ThresholdParameter.decode(reader, reader.uint32()); + break; + case 20: + message.window_data_param = $root.caffe.WindowDataParameter.decode(reader, reader.uint32()); + break; + case 36: + message.transform_param = $root.caffe.TransformationParameter.decode(reader, reader.uint32()); + break; + case 42: + message.loss_param = $root.caffe.LossParameter.decode(reader, reader.uint32()); + break; + case 1: + message.layer = $root.caffe.V0LayerParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.V1LayerParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "bottom": + reader.array(message.bottom, () => reader.string()); + break; + case "top": + reader.array(message.top, () => reader.string()); + break; + case "name": + message.name = reader.string(); + break; + case "include": + message.include.push($root.caffe.NetStateRule.decodeText(reader)); + break; + case "exclude": + message.exclude.push($root.caffe.NetStateRule.decodeText(reader)); + break; + case "type": + message.type = reader.enum($root.caffe.V1LayerParameter.LayerType); + break; + case "blobs": + message.blobs.push($root.caffe.BlobProto.decodeText(reader)); + break; + case "param": + reader.array(message.param, () => reader.string()); + break; + case "blob_share_mode": + reader.array(message.blob_share_mode, 
() => reader.enum($root.caffe.V1LayerParameter.DimCheckMode)); + break; + case "blobs_lr": + reader.array(message.blobs_lr, () => reader.float()); + break; + case "weight_decay": + reader.array(message.weight_decay, () => reader.float()); + break; + case "loss_weight": + reader.array(message.loss_weight, () => reader.float()); + break; + case "accuracy_param": + message.accuracy_param = $root.caffe.AccuracyParameter.decodeText(reader); + break; + case "argmax_param": + message.argmax_param = $root.caffe.ArgMaxParameter.decodeText(reader); + break; + case "concat_param": + message.concat_param = $root.caffe.ConcatParameter.decodeText(reader); + break; + case "contrastive_loss_param": + message.contrastive_loss_param = $root.caffe.ContrastiveLossParameter.decodeText(reader); + break; + case "convolution_param": + message.convolution_param = $root.caffe.ConvolutionParameter.decodeText(reader); + break; + case "data_param": + message.data_param = $root.caffe.DataParameter.decodeText(reader); + break; + case "dropout_param": + message.dropout_param = $root.caffe.DropoutParameter.decodeText(reader); + break; + case "dummy_data_param": + message.dummy_data_param = $root.caffe.DummyDataParameter.decodeText(reader); + break; + case "eltwise_param": + message.eltwise_param = $root.caffe.EltwiseParameter.decodeText(reader); + break; + case "exp_param": + message.exp_param = $root.caffe.ExpParameter.decodeText(reader); + break; + case "hdf5_data_param": + message.hdf5_data_param = $root.caffe.HDF5DataParameter.decodeText(reader); + break; + case "hdf5_output_param": + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decodeText(reader); + break; + case "hinge_loss_param": + message.hinge_loss_param = $root.caffe.HingeLossParameter.decodeText(reader); + break; + case "image_data_param": + message.image_data_param = $root.caffe.ImageDataParameter.decodeText(reader); + break; + case "infogain_loss_param": + message.infogain_loss_param = $root.caffe.InfogainLossParameter.decodeText(reader); + break; + case "inner_product_param": + message.inner_product_param = $root.caffe.InnerProductParameter.decodeText(reader); + break; + case "lrn_param": + message.lrn_param = $root.caffe.LRNParameter.decodeText(reader); + break; + case "memory_data_param": + message.memory_data_param = $root.caffe.MemoryDataParameter.decodeText(reader); + break; + case "mvn_param": + message.mvn_param = $root.caffe.MVNParameter.decodeText(reader); + break; + case "pooling_param": + message.pooling_param = $root.caffe.PoolingParameter.decodeText(reader); + break; + case "power_param": + message.power_param = $root.caffe.PowerParameter.decodeText(reader); + break; + case "relu_param": + message.relu_param = $root.caffe.ReLUParameter.decodeText(reader); + break; + case "sigmoid_param": + message.sigmoid_param = $root.caffe.SigmoidParameter.decodeText(reader); + break; + case "softmax_param": + message.softmax_param = $root.caffe.SoftmaxParameter.decodeText(reader); + break; + case "slice_param": + message.slice_param = $root.caffe.SliceParameter.decodeText(reader); + break; + case "tanh_param": + message.tanh_param = $root.caffe.TanHParameter.decodeText(reader); + break; + case "threshold_param": + message.threshold_param = $root.caffe.ThresholdParameter.decodeText(reader); + break; + case "window_data_param": + message.window_data_param = $root.caffe.WindowDataParameter.decodeText(reader); + break; + case "transform_param": + message.transform_param = $root.caffe.TransformationParameter.decodeText(reader); + break; + case 
"loss_param": + message.loss_param = $root.caffe.LossParameter.decodeText(reader); + break; + case "layer": + message.layer = $root.caffe.V0LayerParameter.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.V1LayerParameter.prototype.name = ""; +$root.caffe.V1LayerParameter.prototype.type = 0; +$root.caffe.V1LayerParameter.prototype.accuracy_param = null; +$root.caffe.V1LayerParameter.prototype.argmax_param = null; +$root.caffe.V1LayerParameter.prototype.concat_param = null; +$root.caffe.V1LayerParameter.prototype.contrastive_loss_param = null; +$root.caffe.V1LayerParameter.prototype.convolution_param = null; +$root.caffe.V1LayerParameter.prototype.data_param = null; +$root.caffe.V1LayerParameter.prototype.dropout_param = null; +$root.caffe.V1LayerParameter.prototype.dummy_data_param = null; +$root.caffe.V1LayerParameter.prototype.eltwise_param = null; +$root.caffe.V1LayerParameter.prototype.exp_param = null; +$root.caffe.V1LayerParameter.prototype.hdf5_data_param = null; +$root.caffe.V1LayerParameter.prototype.hdf5_output_param = null; +$root.caffe.V1LayerParameter.prototype.hinge_loss_param = null; +$root.caffe.V1LayerParameter.prototype.image_data_param = null; +$root.caffe.V1LayerParameter.prototype.infogain_loss_param = null; +$root.caffe.V1LayerParameter.prototype.inner_product_param = null; +$root.caffe.V1LayerParameter.prototype.lrn_param = null; +$root.caffe.V1LayerParameter.prototype.memory_data_param = null; +$root.caffe.V1LayerParameter.prototype.mvn_param = null; +$root.caffe.V1LayerParameter.prototype.pooling_param = null; +$root.caffe.V1LayerParameter.prototype.power_param = null; +$root.caffe.V1LayerParameter.prototype.relu_param = null; +$root.caffe.V1LayerParameter.prototype.sigmoid_param = null; +$root.caffe.V1LayerParameter.prototype.softmax_param = null; +$root.caffe.V1LayerParameter.prototype.slice_param = null; +$root.caffe.V1LayerParameter.prototype.tanh_param = null; +$root.caffe.V1LayerParameter.prototype.threshold_param = null; +$root.caffe.V1LayerParameter.prototype.window_data_param = null; +$root.caffe.V1LayerParameter.prototype.transform_param = null; +$root.caffe.V1LayerParameter.prototype.loss_param = null; +$root.caffe.V1LayerParameter.prototype.layer = null; + +$root.caffe.V1LayerParameter.LayerType = { + "NONE": 0, + "ABSVAL": 35, + "ACCURACY": 1, + "ARGMAX": 30, + "BNLL": 2, + "CONCAT": 3, + "CONTRASTIVE_LOSS": 37, + "CONVOLUTION": 4, + "DATA": 5, + "DECONVOLUTION": 39, + "DROPOUT": 6, + "DUMMY_DATA": 32, + "EUCLIDEAN_LOSS": 7, + "ELTWISE": 25, + "EXP": 38, + "FLATTEN": 8, + "HDF5_DATA": 9, + "HDF5_OUTPUT": 10, + "HINGE_LOSS": 28, + "IM2COL": 11, + "IMAGE_DATA": 12, + "INFOGAIN_LOSS": 13, + "INNER_PRODUCT": 14, + "LRN": 15, + "MEMORY_DATA": 29, + "MULTINOMIAL_LOGISTIC_LOSS": 16, + "MVN": 34, + "POOLING": 17, + "POWER": 26, + "RELU": 18, + "SIGMOID": 19, + "SIGMOID_CROSS_ENTROPY_LOSS": 27, + "SILENCE": 36, + "SOFTMAX": 20, + "SOFTMAX_LOSS": 21, + "SPLIT": 22, + "SLICE": 33, + "TANH": 23, + "WINDOW_DATA": 24, + "THRESHOLD": 31 +}; + +$root.caffe.V1LayerParameter.DimCheckMode = { + "STRICT": 0, + "PERMISSIVE": 1 +}; + +$root.caffe.V0LayerParameter = class V0LayerParameter { + + constructor() { + this.blobs = []; + this.blobs_lr = []; + this.weight_decay = []; + } + + static decode(reader, length) { + const message = new $root.caffe.V0LayerParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.num_output = reader.uint32(); + break; + case 4: + message.biasterm = reader.bool(); + break; + case 5: + message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 6: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 7: + message.pad = reader.uint32(); + break; + case 8: + message.kernelsize = reader.uint32(); + break; + case 9: + message.group = reader.uint32(); + break; + case 10: + message.stride = reader.uint32(); + break; + case 11: + message.pool = reader.int32(); + break; + case 12: + message.dropout_ratio = reader.float(); + break; + case 13: + message.local_size = reader.uint32(); + break; + case 14: + message.alpha = reader.float(); + break; + case 15: + message.beta = reader.float(); + break; + case 22: + message.k = reader.float(); + break; + case 16: + message.source = reader.string(); + break; + case 17: + message.scale = reader.float(); + break; + case 18: + message.meanfile = reader.string(); + break; + case 19: + message.batchsize = reader.uint32(); + break; + case 20: + message.cropsize = reader.uint32(); + break; + case 21: + message.mirror = reader.bool(); + break; + case 50: + message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32())); + break; + case 51: + message.blobs_lr = reader.floats(message.blobs_lr, tag); + break; + case 52: + message.weight_decay = reader.floats(message.weight_decay, tag); + break; + case 53: + message.rand_skip = reader.uint32(); + break; + case 54: + message.det_fg_threshold = reader.float(); + break; + case 55: + message.det_bg_threshold = reader.float(); + break; + case 56: + message.det_fg_fraction = reader.float(); + break; + case 58: + message.det_context_pad = reader.uint32(); + break; + case 59: + message.det_crop_mode = reader.string(); + break; + case 60: + message.new_num = reader.int32(); + break; + case 61: + message.new_channels = reader.int32(); + break; + case 62: + message.new_height = reader.int32(); + break; + case 63: + message.new_width = reader.int32(); + break; + case 64: + message.shuffle_images = reader.bool(); + break; + case 65: + message.concat_dim = reader.uint32(); + break; + case 1001: + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.V0LayerParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "num_output": + message.num_output = reader.uint32(); + break; + case "biasterm": + message.biasterm = reader.bool(); + break; + case "weight_filler": + message.weight_filler = $root.caffe.FillerParameter.decodeText(reader); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader); + break; + case "pad": + message.pad = reader.uint32(); + break; + case "kernelsize": + message.kernelsize = reader.uint32(); + break; + case "group": + message.group = reader.uint32(); + break; + case "stride": + message.stride = reader.uint32(); + break; + case "pool": + 
message.pool = reader.enum($root.caffe.V0LayerParameter.PoolMethod); + break; + case "dropout_ratio": + message.dropout_ratio = reader.float(); + break; + case "local_size": + message.local_size = reader.uint32(); + break; + case "alpha": + message.alpha = reader.float(); + break; + case "beta": + message.beta = reader.float(); + break; + case "k": + message.k = reader.float(); + break; + case "source": + message.source = reader.string(); + break; + case "scale": + message.scale = reader.float(); + break; + case "meanfile": + message.meanfile = reader.string(); + break; + case "batchsize": + message.batchsize = reader.uint32(); + break; + case "cropsize": + message.cropsize = reader.uint32(); + break; + case "mirror": + message.mirror = reader.bool(); + break; + case "blobs": + message.blobs.push($root.caffe.BlobProto.decodeText(reader)); + break; + case "blobs_lr": + reader.array(message.blobs_lr, () => reader.float()); + break; + case "weight_decay": + reader.array(message.weight_decay, () => reader.float()); + break; + case "rand_skip": + message.rand_skip = reader.uint32(); + break; + case "det_fg_threshold": + message.det_fg_threshold = reader.float(); + break; + case "det_bg_threshold": + message.det_bg_threshold = reader.float(); + break; + case "det_fg_fraction": + message.det_fg_fraction = reader.float(); + break; + case "det_context_pad": + message.det_context_pad = reader.uint32(); + break; + case "det_crop_mode": + message.det_crop_mode = reader.string(); + break; + case "new_num": + message.new_num = reader.int32(); + break; + case "new_channels": + message.new_channels = reader.int32(); + break; + case "new_height": + message.new_height = reader.int32(); + break; + case "new_width": + message.new_width = reader.int32(); + break; + case "shuffle_images": + message.shuffle_images = reader.bool(); + break; + case "concat_dim": + message.concat_dim = reader.uint32(); + break; + case "hdf5_output_param": + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.V0LayerParameter.prototype.name = ""; +$root.caffe.V0LayerParameter.prototype.type = ""; +$root.caffe.V0LayerParameter.prototype.num_output = 0; +$root.caffe.V0LayerParameter.prototype.biasterm = true; +$root.caffe.V0LayerParameter.prototype.weight_filler = null; +$root.caffe.V0LayerParameter.prototype.bias_filler = null; +$root.caffe.V0LayerParameter.prototype.pad = 0; +$root.caffe.V0LayerParameter.prototype.kernelsize = 0; +$root.caffe.V0LayerParameter.prototype.group = 1; +$root.caffe.V0LayerParameter.prototype.stride = 1; +$root.caffe.V0LayerParameter.prototype.pool = 0; +$root.caffe.V0LayerParameter.prototype.dropout_ratio = 0.5; +$root.caffe.V0LayerParameter.prototype.local_size = 5; +$root.caffe.V0LayerParameter.prototype.alpha = 1; +$root.caffe.V0LayerParameter.prototype.beta = 0.75; +$root.caffe.V0LayerParameter.prototype.k = 1; +$root.caffe.V0LayerParameter.prototype.source = ""; +$root.caffe.V0LayerParameter.prototype.scale = 1; +$root.caffe.V0LayerParameter.prototype.meanfile = ""; +$root.caffe.V0LayerParameter.prototype.batchsize = 0; +$root.caffe.V0LayerParameter.prototype.cropsize = 0; +$root.caffe.V0LayerParameter.prototype.mirror = false; +$root.caffe.V0LayerParameter.prototype.rand_skip = 0; +$root.caffe.V0LayerParameter.prototype.det_fg_threshold = 0.5; +$root.caffe.V0LayerParameter.prototype.det_bg_threshold = 0.5; +$root.caffe.V0LayerParameter.prototype.det_fg_fraction = 
0.25; +$root.caffe.V0LayerParameter.prototype.det_context_pad = 0; +$root.caffe.V0LayerParameter.prototype.det_crop_mode = "warp"; +$root.caffe.V0LayerParameter.prototype.new_num = 0; +$root.caffe.V0LayerParameter.prototype.new_channels = 0; +$root.caffe.V0LayerParameter.prototype.new_height = 0; +$root.caffe.V0LayerParameter.prototype.new_width = 0; +$root.caffe.V0LayerParameter.prototype.shuffle_images = false; +$root.caffe.V0LayerParameter.prototype.concat_dim = 1; +$root.caffe.V0LayerParameter.prototype.hdf5_output_param = null; + +$root.caffe.V0LayerParameter.PoolMethod = { + "MAX": 0, + "AVE": 1, + "STOCHASTIC": 2 +}; + +$root.caffe.PReLUParameter = class PReLUParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe.PReLUParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 2: + message.channel_shared = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe.PReLUParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "filler": + message.filler = $root.caffe.FillerParameter.decodeText(reader); + break; + case "channel_shared": + message.channel_shared = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe.PReLUParameter.prototype.filler = null; +$root.caffe.PReLUParameter.prototype.channel_shared = false; diff --git a/caffe.js b/caffe.js new file mode 100644 index 00000000000..cc308d15cf3 --- /dev/null +++ b/caffe.js @@ -0,0 +1,719 @@ + +import * as protobuf from './protobuf.js'; + +const caffe = {}; + +caffe.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension == 'caffemodel') { + return 'caffe.pb'; + } + if (identifier == 'saved_model.pbtxt' || identifier == 'saved_model.prototxt' || + identifier.endsWith('predict_net.pbtxt') || identifier.endsWith('predict_net.prototxt') || + identifier.endsWith('init_net.pbtxt') || identifier.endsWith('init_net.prototxt')) { + return undefined; + } + const tags = context.tags('pbtxt'); + if (tags.has('layer') || tags.has('layers')) { + return 'caffe.pbtxt'; + } + if (tags.has('net') || tags.has('train_net') || tags.has('net_param')) { + return 'caffe.pbtxt.solver'; + } + return undefined; + } + + async open(context, target) { + await context.require('./caffe-proto'); + caffe.proto = protobuf.get('caffe').caffe; + const openModel = async (context, netParameter) => { + const metadata = await context.metadata('caffe-metadata.json'); + return new caffe.Model(metadata, netParameter); + }; + const openNetParameterText = (context, identifier, buffer) => { + let netParameter = null; + try { + const reader = protobuf.TextReader.open(buffer); + reader.field = function(tag, message) { + const type = message.constructor.name; + if (tag.endsWith('_param') && (type == 'LayerParameter' || type == 'V1LayerParameter' || type == 'V0LayerParameter')) { + message[tag] = caffe.ModelFactory._decodeText(reader); + return; + } else if (message.constructor.name.endsWith('Parameter') || message.constructor.name === 'ParamSpec') { + if (message[tag]) { + if 
(!Array.isArray(message[tag])) { + message[tag] = [ message[tag] ]; + } + message[tag].push(this.read()); + } else { + message[tag] = this.read(); + } + return; + } + throw new Error(`Unknown field '${tag}' ${this.location()}`); + }; + reader.enum = function(type) { + const token = this.token(); + this.next(); + this.semicolon(); + if (!Object.prototype.hasOwnProperty.call(type, token)) { + const value = Number.parseInt(token, 10); + if (!Number.isNaN(token - value)) { + return value; + } + return token; + } + return type[token]; + }; + if (/MobileNetSSD_train_template.prototxt/.exec(identifier)) { + reader.integer = function() { + const token = this.token(); + const value = Number.parseInt(token, 10); + this.next(); + this.semicolon(); + if (Number.isNaN(token - value)) { + return token; + } + return value; + }; + } + netParameter = caffe.proto.NetParameter.decodeText(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new caffe.Error(`File text format is not caffe.NetParameter (${message.replace(/\.$/, '')}).`); + } + return openModel(context, netParameter); + }; + switch (target) { + case 'caffe.pbtxt.solver': { + const stream = context.stream; + const reader = protobuf.TextReader.open(stream); + reader.field = function(tag, message) { + if (message instanceof caffe.proto.SolverParameter) { + message[tag] = this.read(); + return; + } + throw new Error(`Unknown field '${tag}'${this.location()}`); + }; + const solver = caffe.proto.SolverParameter.decodeText(reader); + if (solver.net_param) { + return openModel(context, solver.net_param); + } + let name = solver.net || solver.train_net; + name = name.split('/').pop(); + try { + const content = await context.fetch(name); + const buffer = content.stream.peek(); + return openNetParameterText(context, name, buffer); + } catch (error) { + const message = error.message ? error.message : error.toString(); + throw new caffe.Error(`Failed to load '${name}' (${message.replace(/\.$/, '')}).`); + } + } + case 'caffe.pbtxt': { + return openNetParameterText(context, context.identifier, context.stream.peek()); + } + case 'caffe.pb': { + let netParameter = null; + try { + const stream = context.stream; + const reader = protobuf.BinaryReader.open(stream); + netParameter = caffe.proto.NetParameter.decode(reader); + } catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new caffe.Error(`File format is not caffe.NetParameter (${message.replace(/\.$/, '')}).`); + } + return openModel(context, netParameter); + } + default: { + throw new caffe.Error(`Unsupported Caffe format '${target}'.`); + } + } + } + + static _decodeText(reader) { + const message = {}; + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + const value = reader.read(); + if (!message[tag]) { + message[tag] = value; + } else { + if (!Array.isArray(message[tag])) { + message[tag] = [ message[tag] ]; + } + message[tag].push(value); + } + } + return message; + } +}; + +caffe.Model = class { + + constructor(metadata, net) { + + this._name = net.name; + + if (net.layers && net.layers.length > 0) { + if (net.layers.every((layer) => Object.prototype.hasOwnProperty.call(layer, 'layer'))) { + this._version = 0; + net.layer = net.layers; + } else { + this._version = 1; + net.layer = net.layers; + } + } else if (net.layer && net.layer.length > 0) { + this._version = 2; + } + + const phases = new Set(); + for (const layer of net.layer) { + for (const include of layer.include) { + if (include.phase !== undefined) { + phases.add(include.phase); + } + } + } + if (phases.size === 0) { + phases.add(-1); + } + + this._graphs = []; + for (const phase of phases) { + const graph = new caffe.Graph(metadata, phase, net, this._version); + this._graphs.push(graph); + } + } + + get format() { + return `Caffe${this._version ? ` v${this._version}` : ''}`; + } + + get graphs() { + return this._graphs; + } +}; + +caffe.Graph = class { + + constructor(metadata, phase, net, version) { + switch (phase) { + case 0: this._phase = 'TRAIN'; break; + case 1: this._phase = 'TEST'; break; + case -1: this._phase = ''; break; + default: this._phase = phase.toString(); break; + } + this._nodes = []; + this._inputs = []; + this._outputs = []; + for (const layer of net.layer) { + layer.input = layer.bottom.slice(0); + layer.output = layer.top.slice(0); + layer.chain = []; + } + const layers = []; + for (const layer of net.layer) { + if (phase === -1 || layer.include.every((include) => include.phase === phase)) { + layers.push(layer); + } + } + const scopes = new Map(); + let index = 0; + for (const layer of layers) { + layer.input = layer.input.map((input) => scopes.has(input) ? scopes.get(input) : input); + layer.output = layer.output.map((output) => { + const value = scopes.has(output) ? 
`${output}\n${index}` : output; + scopes.set(output, value); + return value; + }); + index++; + } + // Graph Inputs + const usedOutputs = new Set(); + for (const layer of layers) { + for (const output of layer.output) { + usedOutputs.add(output); + } + } + const unusedInputs = []; + for (const layer of layers) { + for (const input of layer.input) { + if (!usedOutputs.has(input)) { + unusedInputs.push(input); + } + } + } + const values = new Map(); + const value = (name, type) => { + if (!values.has(name)) { + values.set(name, new caffe.Value(name, type)); + } else if (type) { + throw new caffe.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + const nodes = []; + let lastLayer = null; + let lastTop = null; + while (layers.length > 0) { + let layer = layers.shift(); + if (layer.output.length == 1 && layer.input.length == 1 && + layer.output[0].split('\n').shift() == layer.input[0].split('\n').shift() && + lastLayer && + lastTop == layer.output[0].split('\n').shift()) { + lastLayer.chain = lastLayer.chain || []; + lastLayer.chain.push(layer); + } else { + if (layer.type == 'Input' || layer.type == 'Data') { + if (layer.input.length == 0 && layer.output.length == 1 && + layer.input_param && layer.input_param.shape && + layer.input_param.shape.length == 1 && layer.input_param.shape[0].dim) { + const shape = new caffe.TensorShape(layer.input_param.shape[0].dim.map((dim) => dim.toNumber())); + const type = new caffe.TensorType(null, shape); + this._inputs.push(new caffe.Argument(layer.output[0], [ value(layer.output[0], type) ])); + layer = null; + } + } + if (layer) { + nodes.push(layer); + lastLayer = null; + lastTop = null; + if (layer.output.length == 1) { + lastLayer = layer; + lastTop = layer.output[0].split('\n').shift(); + } + } + } + } + if (net.input) { + for (let i = 0; i < net.input.length; i++) { + const input = net.input[i]; + if (this._inputs.some((item) => item.name === input)) { + continue; + } + let inputType = null; + if (net.input_shape && i < net.input_shape.length) { + const blobShape = net.input_shape[i]; + if (blobShape && blobShape.dim) { + const shape = new caffe.TensorShape(blobShape.dim.map((dim) => dim.toNumber())); + inputType = new caffe.TensorType(null, shape); + } + } + const dim = i * 4; + if (!inputType && net.input_dim && net.input_dim.length >= dim) { + const shape = new caffe.TensorShape(net.input_dim.slice(dim, dim + 4)); + inputType = new caffe.TensorType(null, shape); + } + this._inputs.push(new caffe.Argument(input, [ value(input, inputType, null) ])); + } + } + + for (const layer of nodes) { + const node = new caffe.Node(metadata, layer, version, value); + if (layer.chain && layer.chain.length > 0) { + for (const chain of layer.chain) { + node.chain.push(new caffe.Node(metadata, chain, version, value)); + } + } + this._nodes.push(node); + } + + if (this._inputs.length === 0 && unusedInputs.length === 1) { + this._inputs.push(new caffe.Argument(unusedInputs[0], [ value(unusedInputs[0], null) ])); + } + } + + get name() { + return this._phase; + } + + get type() { + return ''; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +caffe.Argument = class { + + constructor(name, value) { + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +caffe.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new 
caffe.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +caffe.Node = class { + + constructor(metadata, layer, version, value) { + this._chain = []; + this._attributes = []; + let type; + switch (version) { + case 0: { + this._name = layer.layer.name; + type = layer.layer.type; + break; + } + case 1: { + this._name = layer.name; + type = caffe.Utility.layerType(layer.type); + break; + } + case 2: { + this._name = layer.name; + type = layer.type; + break; + } + default: { + throw new caffe.Error(`Unsupported Caffe version '${version}'.`); + } + } + this._type = metadata.type(type) || { name: type }; + + let initializers = []; + switch (version) { + case 0: { + for (const name of Object.keys(layer.layer)) { + if (name != 'type' && name != 'name' && name != 'blobs' && name != 'blobs_lr') { + const value = layer.layer[name]; + const attribute = new caffe.Attribute(metadata.attribute(type, name), name, value); + this._attributes.push(attribute); + } + } + initializers = layer.layer.blobs.map((blob) => new caffe.Tensor(blob)); + break; + } + case 1: + case 2: { + for (const layer_kind of Object.keys(layer)) { + if (layer_kind.endsWith('_param') || layer_kind == 'transform_param') { + const param = layer[layer_kind]; + if (type == 'Deconvolution') { + type = 'Convolution'; + } + const prototype = Object.getPrototypeOf(param); + for (const name of Object.keys(param)) { + const defaultValue = prototype[name]; + const value = param[name]; + const attribute = new caffe.Attribute(metadata.attribute(type, name), name, value, defaultValue); + this._attributes.push(attribute); + } + } + } + if (layer.include && layer.include.length > 0) { + const attribute = new caffe.Attribute(metadata.attribute(type, 'include'), 'include', layer.include); + this._attributes.push(attribute); + } + if (layer.exclude && layer.exclude.length > 0) { + const attribute = new caffe.Attribute(metadata.attribute(type, 'exclude'), 'exclude', layer.exclude); + this._attributes.push(attribute); + } + if (type == 'Data' && layer.input_param && layer.input_param.shape) { + const attribute = new caffe.Attribute(metadata.attribute(type, 'shape'), 'shape', layer.input_param.shape); + this._attributes.push(attribute); + } + initializers = layer.blobs.map((blob) => new caffe.Tensor(blob)); + break; + } + default: { + throw new caffe.Error(`Unsupported Caffe version '${version}'.`); + } + } + this._inputs = []; + const inputs = layer.input.concat(initializers); + let inputIndex = 0; + if (this._type && this._type.inputs) { + for (const inputDef of this._type.inputs) { + if (inputIndex < inputs.length || inputDef.option != 'optional') { + const count = inputDef.option == 'variadic' ? inputs.length - inputIndex : 1; + const values = inputs.slice(inputIndex, inputIndex + count).filter((input) => input !== '' || inputDef.option != 'optional').map((input) => { + return input instanceof caffe.Tensor ? new caffe.Value('', input.type, input) : value(input, null, null); + }); + const argument = new caffe.Argument(inputDef.name, values); + this._inputs.push(argument); + inputIndex += count; + } + } + } + this._inputs.push(...inputs.slice(inputIndex).map((input) => { + return new caffe.Argument(inputIndex.toString(), [ + input instanceof caffe.Tensor ?
new caffe.Value('', input.type, input) : value(input, null, null) + ]); + })); + + this._outputs = []; + const outputs = layer.output; + let outputIndex = 0; + if (this._type && this._type.outputs) { + for (const outputDef of this._type.outputs) { + if (outputIndex < outputs.length) { + const count = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1; + const values = outputs.slice(outputIndex, outputIndex + count).map((output) => value(output, null, null)); + const argument = new caffe.Argument(outputDef.name, values); + this._outputs.push(argument); + outputIndex += count; + } + } + } + this._outputs.push(...outputs.slice(outputIndex).map((output, index) => { + return new caffe.Argument((outputIndex + index).toString(), [ value(output, null, null) ]); + })); + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } + + get chain() { + return this._chain; + } +}; + +caffe.Attribute = class { + + constructor(metadata, name, value, defaultValue) { + this._name = name; + this._value = value; + if (metadata && metadata.type) { + this._type = metadata.type; + } + if (value instanceof caffe.proto.BlobShape) { + this._value = new caffe.TensorShape(value.dim.map((dim) => dim.toNumber())); + this._type = 'shape'; + } + if (metadata && metadata.visible === false) { + this._visible = false; + } + if (metadata && Object.prototype.hasOwnProperty.call(metadata, 'default')) { + defaultValue = metadata.default; + } + if (defaultValue !== undefined) { + if (this._value == defaultValue) { + this._visible = false; + } else if (Array.isArray(this._value) && Array.isArray(defaultValue)) { + if (this._value.length == defaultValue.length && + this._value.every((item, index) => { + return item == defaultValue[index]; + })) { + this._visible = false; + } + } + } + if (this._type) { + this._value = caffe.Utility.enum(this._type, this._value); + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? 
false : true; + } +}; + +caffe.Tensor = class { + + constructor(blob) { + let shape = []; + if (Object.prototype.hasOwnProperty.call(blob, 'num') && + Object.prototype.hasOwnProperty.call(blob, 'channels') && + Object.prototype.hasOwnProperty.call(blob, 'width') && + Object.prototype.hasOwnProperty.call(blob, 'height')) { + if (blob.num != 1) { + shape.push(blob.num); + } + if (blob.channels != 1) { + shape.push(blob.channels); + } + if (blob.height != 1) { + shape.push(blob.height); + } + if (blob.width != 1) { + shape.push(blob.width); + } + } else if (Object.prototype.hasOwnProperty.call(blob, 'shape')) { + shape = blob.shape.dim.map((dim) => dim.toNumber()); + } + + let dataType = '?'; + if (blob.data.length > 0) { + dataType = 'float32'; + this._values = blob.data; + } else if (blob.double_data.length > 0) { + dataType = 'float64'; + this._values = blob.double_data; + } + + this._type = new caffe.TensorType(dataType, new caffe.TensorShape(shape)); + } + + get category() { + return 'Blob'; + } + + get type() { + return this._type; + } + + get encoding() { + return '|'; + } + + get values() { + return this._values; + } +}; + +caffe.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return (this.dataType || '?') + this._shape.toString(); + } +}; + +caffe.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + return this._dimensions ? (`[${this._dimensions.map((dimension) => dimension.toString()).join(',')}]`) : ''; + } +}; + +caffe.Utility = class { + + static layerType(type) { + type = type || 0; + if (!caffe.Utility._layerTypeMap) { + caffe.Utility._layerTypeMap = new Map(); + const known = { 'BNLL': 'BNLL', 'HDF5': 'HDF5', 'LRN': 'LRN', 'RELU': 'ReLU', 'TANH': 'TanH', 'ARGMAX': 'ArgMax', 'MVN': 'MVN', 'ABSVAL': 'AbsVal' }; + for (const key of Object.keys(caffe.proto.V1LayerParameter.LayerType)) { + const value = caffe.proto.V1LayerParameter.LayerType[key]; + caffe.Utility._layerTypeMap.set(value, key.split('_').map((item) => known[item] || item.substring(0, 1) + item.substring(1).toLowerCase()).join('')); + } + } + return caffe.Utility._layerTypeMap.has(type) ? 
caffe.Utility._layerTypeMap.get(type) : type.toString(); + } + + static enum(name, value) { + let type = caffe.proto; + const parts = name.split('.'); + while (type && parts.length > 0) { + type = type[parts.shift()]; + } + if (type) { + caffe.Utility._enumKeyMap = caffe.Utility._enumKeyMap || new Map(); + if (!caffe.Utility._enumKeyMap.has(name)) { + const map = new Map(Object.entries(type).map(([name, value]) => [ value, name ])); + caffe.Utility._enumKeyMap.set(name, map); + } + const map = caffe.Utility._enumKeyMap.get(name); + if (map.has(value)) { + return map.get(value); + } + } + return value; + } +}; + +caffe.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Caffe model.'; + } +}; + +export const ModelFactory = caffe.ModelFactory; diff --git a/caffe2-metadata.json b/caffe2-metadata.json new file mode 100644 index 00000000000..666813eaf7e --- /dev/null +++ b/caffe2-metadata.json @@ -0,0 +1,17836 @@ +[ + { + "name": "Abs", + "description": "\nCalculates the absolute value of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/abs_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Abs\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [ 0.3005476 1.551666 -1.3591481 0.39191285 -0.21866608]\nY: [0.3005476 1.551666 1.3591481 0.39191285 0.21866608]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Absolute value of input element-wise.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "AbsGradient", + "support_level": "default" + }, + { + "name": "Accumulate", + "description": "\nAccumulate operator accumulates the input tensor to the output tensor. If the\noutput tensor already has the right size, we add to it; otherwise, we first\ninitialize the output tensor to all zeros, and then do accumulation. Any\nfurther calls to the operator, given that no one else fiddles with the output\nin the interim, will do simple accumulations.\nAccumulation is done using Axpby operation as shown:\n Y = 1*X + gamma*Y\nwhere X is the input tensor, Y is the output tensor and gamma is the multiplier\nargument.\n", + "attributes": [ + { + "description": "(float, default 1.0) Accumulation multiplier", + "name": "gamma", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The input tensor that has to be accumulated to the output tensor. If the output size is not the same as input size, the output tensor is first reshaped and initialized to zero, and only then, accumulation is done.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Accumulated output tensor", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "AccumulateHistogram", + "description": "\nThis operator calculate thes histogram of values in input tensor.\nThere're 2 outputs, one for histogram of current input tensor, and another\nfor histogram of the all input tensors accumulated through history.\nThe output would contain num_buckets + 2 values. index[1 ... num_buckets]\nfor values in [lower_bound, upper_bound) interval. And the rest 2 for values\nsmaller than lower_bound or greater than upper_bound respectively.\n", + "attributes": [ + { + "description": "the lower bound value", + "name": "lower_bound", + "option": "optional" + }, + { + "description": "the upper bound value", + "name": "upper_bound", + "option": "optional" + }, + { + "description": "number of buckets to use in [lower_bound, upper_bound)", + "name": "num_buckets", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output histogram of the current tensor.", + "name": "CurHist" + }, + { + "description": "Accumulated histogram of the history tensor.", + "name": "AccHist" + } + ], + "support_level": "default" + }, + { + "name": "Accuracy", + "description": "\nAccuracy takes two inputs- predictions and labels, and returns a float\naccuracy value for the batch. Predictions are expected in the form of 2-D tensor\ncontaining a batch of scores for various classes, and labels are expected in the\n form of 1-D tensor containing true label indices of samples in the batch. If\nthe score for the label index in the predictions is the highest among all\nclasses, it is considered a correct prediction.\n", + "attributes": [ + { + "description": "Count as correct by comparing the true label to the top k scoring classes (default 1: only compare to the top scoring class i.e. 
argmax)", + "name": "top_k", + "option": "optional" + } + ], + "inputs": [ + { + "description": "2-D tensor (Tensor) of size (num_batches x num_classes) containing scores", + "name": "predictions" + }, + { + "description": "1-D tensor (Tensor) of size (num_batches) having the indices of true labels", + "name": "labels" + } + ], + "outputs": [ + { + "description": "1-D tensor (Tensor) of size 1 containing accuracy", + "name": "accuracy" + } + ], + "support_level": "default" + }, + { + "name": "Acos", + "description": "\nCalculates the arccosine of the given input tensor, element-wise.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The arccosine of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "AcosGradient", + "support_level": "default" + }, + { + "name": "Adadelta", + "description": "\n\nComputes the AdaDelta update (https://arxiv.org/abs/1212.5701) for an input\ngradient and accumulated history of squared gradients. Concretely, given\ninputs (param, moment, moment_delta, grad, learning_rate), computes:\n\n new_moment = moment * decay + square(grad) * (1 - decay)\n new_grad = sqrt(moment_delta + epsilon) / sqrt(new_moment + epsilon) * grad\n new_param = param + learning_rate * new_grad\n new_moment_delta = moment_delta * decay + square(new_grad) * (1 - decay)\n\nand returns (new_param, new_moment, new_moment_delta).\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + }, + { + "description": "Default 0.95, the squared gradient sum is decayed by this factor.", + "name": "decay", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Average of squared gradients", + "name": "moment" + }, + { + "description": "Average of squared parameter updates", + "name": "moment_delta" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "Learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated average squared gradient", + "name": "output_moment" + }, + { + "description": "Updated average of squared parameter updates", + "name": "output_moment_delta" + } + ], + "support_level": "default" + }, + { + "name": "Adagrad", + "description": "\n\nComputes the AdaGrad update for an input gradient and accumulated\nhistory. Concretely, given inputs (param, grad, moment, learning_rate),\ncomputes\n\n new_moment = moment + square(grad)\n effective_lr = learning_rate / (sqrt(new_moment) + epsilon)\n update = learning_rate * grad / (sqrt(new_moment) + epsilon)\n new_param = param + update\nand returns (new_param, new_moment).\n\nOptionally returns effective_lr and update as well.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + }, + { + "description": "Default 1. 
If it is in (0, 1), the gradient square sum is decayed by this factor.", + "name": "decay", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + }, + { + "description": "(optional) Effective learning rate", + "name": "output_effective_lr" + }, + { + "description": "(optional) Actual update that is applied.", + "name": "output_update" + } + ], + "support_level": "default" + }, + { + "name": "Adam", + "description": "\n\nComputes the Adam update (https://arxiv.org/abs/1412.6980) for an\ninput gradient and momentum parameters. Concretely, given inputs\n(param, m1, m2, grad, lr, iters),\n\n t = iters + 1\n correction_multiplier = sqrt(1 - power(beta2, t)) /\n (1 - power(beta1, t))\n m1_o = (beta1 * m1) + (1 - beta1) * grad\n m2_o = (beta2 * m2) + (1 - beta2) * np.square(grad)\n grad_o = correction_multiplier * m1_o / \\\n (sqrt(m2_o) + epsilon)\n param_o = param + lr * grad_o\n\nand returns (param_o, m1_o, m2_o, grad_o), in which grad_o is an optional output\n\n", + "attributes": [ + { + "description": "Default 0.9", + "name": "beta1", + "option": "optional" + }, + { + "description": "Default 0.999", + "name": "beta2", + "option": "optional" + }, + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "First moment history", + "name": "moment_1" + }, + { + "description": "Second moment history", + "name": "moment_2" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "iteration number", + "name": "iter" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated first moment", + "name": "output_moment_1" + }, + { + "description": "Updated second moment", + "name": "output_moment_2" + }, + { + "description": "Optional Effective gradient", + "name": "output_grad" + } + ], + "support_level": "default" + }, + { + "name": "Add", + "description": "\nPerforms element-wise binary addition (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
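A second broadcast illustration may help here (a sketch only, written in the same Python workspace style as the worked example below; the shapes follow the rules listed above and the values are made up):\n\n```\nop = core.CreateOperator(\n    \"Add\",\n    [\"A\", \"B\"],\n    [\"C\"],\n    broadcast=1,\n    axis=1\n)\n\n# A has shape (2, 3, 4, 5); B has shape (3, 4) and lines up with axes 1 and 2 of A.\nworkspace.FeedBlob(\"A\", np.ones((2, 3, 4, 5)).astype(np.float32))\nworkspace.FeedBlob(\"B\", np.ones((3, 4)).astype(np.float32))\nworkspace.RunOperatorOnce(op)\n# C has the same shape as A and every element equals 2.0.\n```\n\n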
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Add\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[1,2],[3,4]]))\nworkspace.FeedBlob(\"B\", np.array([[5,6],[7,8]]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[1 2]\n [3 4]]\nB:\n[[5 6]\n [7 8]]\nC:\n[[ 6 8]\n [10 12]]\n\n```\n\n
\n\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions and type as A.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "AddGradient", + "support_level": "default" + }, + { + "name": "AddPadding", + "description": "\nGiven a partitioned tensor $T$, where the partitions are\ndefined as ranges on its outer-most (slowest varying) dimension $N$,\nreturn a tensor $T<(N + 2 * padding\\_width), D_1, ..., D_n>$ with paddings\nadded to the start and end of each range.\n\nOptionally, different paddings can be provided for beginning and end.\nPaddings provided must be a tensor $T$. If no padding is\nprovided, add zero padding. If no lengths vector is provided, add padding\nonly once, at the start and end of data.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sequence_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AddPadding\",\n [\"X\", \"lengths\"],\n [\"Y\", \"lengths_out\"],\n padding_width=1\n\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,2,2).astype(np.float32)))\nworkspace.FeedBlob(\"lengths\", np.array([3]).astype(np.int32))\n\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"lengths_out:\", workspace.FetchBlob(\"lengths_out\"))\n```\n\n**Result**\n\n```\nX: [[[0.2531572 0.4588472 ]\n [0.45140603 0.61161053]]\n\n [[0.92500854 0.8045306 ]\n [0.03356671 0.30233648]]\n\n [[0.4660227 0.6287745 ]\n [0.79372746 0.08609265]]]\nY: [[[0. 0. ]\n [0. 0. ]]\n\n [[0.2531572 0.4588472 ]\n [0.45140603 0.61161053]]\n\n [[0.92500854 0.8045306 ]\n [0.03356671 0.30233648]]\n\n [[0.4660227 0.6287745 ]\n [0.79372746 0.08609265]]\n\n [[0. 0. ]\n [0. 0. ]]]\nlengths_out: [5]\n```\n\n
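In this run, `lengths_out` is `[5]` because the single input range of length 3 gains `padding_width` rows at both its start and its end (3 + 2 * 1 = 5).\n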
\n\n", + "attributes": [ + { + "description": "Number of copies of padding to add around each range.", + "name": "padding_width", + "option": "optional", + "type": "int64" + }, + { + "description": "[OPTIONAL] Specifies a different end-padding width. If this is not set, will use same as `padding_width`.", + "name": "end_padding_width", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor)* Input data ($T$).", + "name": "data_in" + }, + { + "description": "*(type: Tensor``)* Number of elements in each range. sum(lengths) = N.", + "name": "lengths" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Padding data for range start ($T$).", + "name": "start_padding" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Padding for range end. If not provided, `start_padding` is used ($T$).", + "name": "end_padding" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Padded data tensor ($T$).", + "name": "data_out" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Lengths for each padded range.", + "name": "lengths_out" + } + ], + "support_level": "default" + }, + { + "name": "AffineChannel", + "category": "Normalization", + "description": "\nApplies a separate affine transformation to each channel of the input. Useful\nfor replacing spatial batch norm with its equivalent fixed transformation.\n", + "inputs": [ + { + "description": "Feature map input with order NCHW or NHWC.", + "name": "X" + }, + { + "description": "1D input of shape (C); the c-th element is the scale factor of the affine transformation for the c-th channel of the input.", + "name": "scale" + }, + { + "description": "1D input of shape (C); the c-th element is the bias of the affine transformation for the c-th channel of the input.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output with the same order of Input.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "AffineChannelGradient", + "support_level": "default" + }, + { + "name": "Alias", + "description": "\nMakes the output and the input share the same underlying storage.\n\nWARNING: in general, in caffe2's operator interface different tensors should\nhave different underlying storage, which is the assumption made by\ncomponents such as the dependency engine and memory optimization. Thus, in\nnormal situations you should not use the AliasOp, especially in a normal\nforward-backward pass.\n\nThe Alias op is provided so one can achieve true asynchrony, such as\nHogwild, in a graph. 
But make sure you understand all the implications\nsimilar to multi-thread computation before you use it explicitly.\n", + "inputs": [ + { + "description": "Input tensor whose storage will be shared.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Tensor of same shape as input, sharing its storage.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "AliasWithName", + "description": "\nSimilar to AliasOp, storing the alias name as an operator argument.\n", + "attributes": [ + { + "description": "name of the alias", + "name": "name", + "option": "optional" + }, + { + "description": "whether or not to alias forward or backward", + "name": "is_backward", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor whose storage will be shared.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Tensor of same shape as input, sharing its storage.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Allgather", + "description": "\nDoes an allgather operation among the nodes.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be allgathered.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The allgathered tensor, same on all nodes.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Allreduce", + "description": "\nDoes an allreduce operation among the nodes. Currently only Sum is supported.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be allreduced.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The allreduced tensor, same on all nodes.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "And", + "description": "\nPerforms element-wise logical operation **and** (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"And\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", (np.random.rand(3, 3) > 0.5))\nworkspace.FeedBlob(\"B\", (np.random.rand(3, 3) > 0.5))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n [[ True False False]\n [False True False]\n [False False True]]\nB:\n [[ True False True]\n [False False False]\n [False False False]]\nC:\n [[ True False False]\n [False False False]\n [False False False]]\n\n```\n\n
\n\n ", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor of booleans. Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "APMeter", + "description": "\nAPMeter computes Average Precision for binary or multi-class classification.\nIt takes two inputs: prediction scores P of size (n_samples x n_classes), and\ntrue labels Y of size (n_samples x n_classes). It returns a single float number\nper class for the average precision of that class.\n", + "attributes": [ + { + "description": "(int32_t) indicates how many predictions should the op buffer. defaults to 1000", + "name": "buffer_size", + "option": "optional" + } + ], + "inputs": [ + { + "description": "2-D tensor (Tensor) of size (num_samples xnum_classes) containing prediction scores", + "name": "predictions" + }, + { + "description": "2-D tensor (Tensor) of size (num_samples) containing true labels for each sample", + "name": "labels" + } + ], + "outputs": [ + { + "description": "1-D tensor (Tensor) of size num_classes containing average precision for each class", + "name": "AP" + } + ], + "support_level": "default" + }, + { + "name": "Append", + "description": "\nAppend input `B` to the end of input `A`.\n\n- It is required that this operation run in-place, meaning that the input `A` blob must match the output blob.\n- All except the outer-most dimension must be the same between `A` and `B`.\n- Input `A` may have to be re-allocated in order for accommodate to the new size. Currently, an exponential growth ratio is used in order to ensure amortized constant time complexity.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/dataset_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Append\",\n [\"A\", \"B\"],\n [\"A\"],\n)\n\nworkspace.FeedBlob(\"A\", np.random.randint(10, size=(1,3,3)))\nworkspace.FeedBlob(\"B\", np.random.randint(10, size=(2,3,3)))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"A:\", workspace.FetchBlob(\"A\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[[3 8 7]\n [1 6 6]\n [5 0 6]]]\nB:\n[[[4 3 1]\n [7 9 6]\n [9 4 5]]\n\n [[7 7 4]\n [9 8 7]\n [1 6 6]]]\nA:\n[[[3 8 7]\n [1 6 6]\n [5 0 6]]\n\n [[4 3 1]\n [7 9 6]\n [9 4 5]]\n\n [[7 7 4]\n [9 8 7]\n [1 6 6]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor*): base input tensor of shape $(N, d_1, d_2, ..., d_n)$", + "name": "A" + }, + { + "description": "(*Tensor*): second input tensor of shape $(M, d_1, d_2, ..., d_n)$ to be appended to the base", + "name": "B" + } + ], + "outputs": [ + { + "description": "(*Tensor*): output tensor of shape $(N+M, d_1, d_2, ..., d_n)$", + "name": "A" + } + ], + "support_level": "default" + }, + { + "name": "ArgMax", + "description": "\nRetrieve the argmax of an axis dimension specified by the `axis`\nargument. Given an input tensor and two arguments (`axis` and\n`keepdims`), returns a tensor containing the indices of the largest\nelement along the given axis. If the `keepdims` arg is *True* (default),\nthe shape of the output tensor matches the input tensor except the\n`axis` dimension equals 1. Else, the `axis` dimension of the output\ntensor is removed.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/arg_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ArgMax\",\n [\"X\"],\n [\"Indices\"],\n axis=2,\n keepdims=False\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(3,3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Indices:\", workspace.FetchBlob(\"Indices\"))\n\n```\n\n**Result**\n\n```\nX: [[[4. 9. 6.]\n [6. 6. 1.]\n [9. 5. 4.]]\n\n [[6. 7. 4.]\n [7. 9. 1.]\n [3. 2. 8.]]\n\n [[3. 4. 6.]\n [5. 2. 7.]\n [1. 5. 7.]]]\nIndices: [[1 0 0]\n [1 1 2]\n [2 2 2]]\n\n```\n\n
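The example above passes `keepdims=False`, which removes the reduced axis. As a complementary sketch (same setup and blobs as the example above; only the shape behavior stated in the schema is asserted), the default `keepdims=True` keeps the `axis` dimension with size 1:

```
# Hedged sketch: ArgMax with the default keepdims=True, so the reduced
# axis is kept with size 1 and Indices has shape (3, 3, 1) for a (3, 3, 3) X.
op = core.CreateOperator(
    "ArgMax",
    ["X"],
    ["Indices"],
    axis=2,
)
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("Indices").shape)  # (3, 3, 1)
```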
\n\n ", + "attributes": [ + { + "default": -1, + "description": "The axis to get argmax.", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "default": true, + "description": "If True (default), the output tensor shape will match the input tensor shape except the `axis` dimension equals 1. Else, the `axis` dimension of the output tensor is removed.", + "name": "keepdims", + "option": "optional", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Tensor of indices for the largest values.", + "name": "Indices" + } + ], + "support_level": "default" + }, + { + "name": "ArgMin", + "description": "\nRetrieve the argmin of an axis dimension specified by the `axis`\nargument. Given an input tensor and two arguments (`axis` and\n`keepdims`), returns a tensor containing the indices of the smallest\nelement along the given axis. If the `keepdims` arg is *True* (default),\nthe shape of the output tensor matches the input tensor except the\n`axis` dimension equals 1. Else, the `axis` dimension of the output\ntensor is removed.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/arg_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ArgMin\",\n [\"X\"],\n [\"Indices\"],\n axis=1\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(5,5))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Indices:\", workspace.FetchBlob(\"Indices\"))\n\n```\n\n**Result**\n\n```\n\nX: [[9. 4. 6. 4. 1.]\n [5. 9. 8. 3. 4.]\n [6. 1. 0. 2. 9.]\n [7. 8. 2. 4. 9.]\n [3. 9. 4. 9. 4.]]\nIndices: [[4]\n [3]\n [2]\n [2]\n [0]]\n\n```\n\n
\n\n ", + "attributes": [ + { + "default": -1, + "description": "The axis to get argmin.", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "default": true, + "description": "If True (default), the output tensor shape will match the input tensor shape except the `axis` dimension equals 1. Else, the `axis` dimension of the output tensor is removed.", + "name": "keepdims", + "option": "optional", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Tensor of indices for the smallest values.", + "name": "Indices" + } + ], + "support_level": "default" + }, + { + "name": "Asin", + "description": "\nCalculates the arcsine of the given input tensor, element-wise.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The arcsine of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "AsinGradient", + "support_level": "default" + }, + { + "name": "Assert", + "description": "\nTakes in a tensor of type *bool*, *int*, *long*, or *long long* and checks if all values are True when coerced into a boolean. In other words, for non-bool types this asserts that all values in the tensor are non-zero. If a value is False after coerced into a boolean, the operator throws an error. Else, if all values are True, nothing is returned. For tracability, a custom error message can be set using the `error_msg` argument.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/assert_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Assert\",\n [\"A\"],\n [],\n error_msg=\"Failed assertion from Assert operator\"\n)\n\nworkspace.FeedBlob(\"A\", np.random.randint(10, size=(3,3)).astype(np.int32))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\ntry:\n workspace.RunOperatorOnce(op)\nexcept RuntimeError:\n print(\"Assertion Failed!\")\nelse:\n print(\"Assertion Passed!\")\n\n```\n\n**Result**\n\n```\n\nA:\n[[7 5 6]\n [1 2 4]\n [5 3 7]]\nAssertion Passed!\n\n```\n\n
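For completeness, a hedged sketch of the failing path: per the description, a zero entry in a non-bool tensor coerces to False, so the run raises and the `except` branch is taken (same `op` and setup as the example above):

```
# Hedged sketch: the failing path of Assert. A zero entry coerces to
# False, so RunOperatorOnce throws (carrying the custom error_msg).
workspace.FeedBlob("A", np.zeros((3, 3), dtype=np.int32))
try:
    workspace.RunOperatorOnce(op)
except RuntimeError:
    print("Assertion Failed!")
else:
    print("Assertion Passed!")
```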
\n\n ", + "attributes": [ + { + "description": "(*string*): custom error message to be thrown when the input does not pass assertion", + "name": "error_msg", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor*): input tensor", + "name": "X" + } + ], + "support_level": "default" + }, + { + "name": "AsyncNetBarrier", + "description": "\nThis is a pretty much no-op operator, since it's only purposes is make sure that\nasync_scheduling will schedule certian operations earlier than others.\n\nExaple where this operator can work well - mixture of data-parallel and model-\nparallel training, where one wants to force that all copies are started before\ndata-parallel part starts.\n", + "attributes": [ + { + "description": "Specifies either inputs should be across different devices in dev inference options", + "name": "cross_device", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "Atan", + "description": "\nCalculates the arctangent of the given input tensor, element-wise.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The arctangent of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "AtanGradient", + "support_level": "default" + }, + { + "name": "ATen", + "support_level": "contribution" + }, + { + "name": "AtomicAppend", + "support_level": "default" + }, + { + "name": "AtomicFetchAdd", + "description": "\nGiven a mutex and two int32 scalar tensors, performs an atomic fetch add\nby mutating the first argument and adding it to the second input\nargument. Returns the updated integer and the value prior to the update.\n", + "inputs": [ + { + "description": "Blob containing to a unique_ptr", + "name": "mutex_ptr" + }, + { + "description": "Value to be mutated after the sum.", + "name": "mut_value" + }, + { + "description": "Value to add to the first operand.", + "name": "increment" + } + ], + "outputs": [ + { + "description": "Mutated value after sum. Usually same as input 1.", + "name": "mut_value" + }, + { + "description": "Value of the first operand before sum.", + "name": "fetched_value" + } + ], + "support_level": "default" + }, + { + "name": "AtomicFetchAdd64", + "description": "\nLike, AtomicFetchAdd but with int64_t scalar tensors,\nperforms an atomic fetch add\nby mutating the first argument and adding it to the second input\nargument. Returns the updated integer and the value prior to the update.\n", + "inputs": [ + { + "description": "Blob containing to a unique_ptr", + "name": "mutex_ptr" + }, + { + "description": "Value to be mutated after the sum.", + "name": "mut_value" + }, + { + "description": "Value to add to the first operand.", + "name": "increment" + } + ], + "outputs": [ + { + "description": "Mutated value after sum. Usually same as input 1.", + "name": "mut_value" + }, + { + "description": "Value of the first operand before sum.", + "name": "fetched_value" + } + ], + "support_level": "default" + }, + { + "name": "AtomicIter", + "description": "\nSimilar to Iter, but takes a mutex as the first input to make sure that\nupdates are carried out atomically. This can be used in e.g. 
Hogwild SGD\nalgorithms.\n", + "inputs": [ + { + "description": "The mutex used to do atomic increment.", + "name": "mutex" + }, + { + "description": "The iter counter as an int64_t TensorCPU.", + "name": "iter" + } + ], + "support_level": "default" + }, + { + "name": "AveragedLoss", + "description": "\nThe *AveragedLoss* op takes a single 1-D input tensor *input* and returns a single output float value *output*. The output represents the average of the values in *input*. This op is commonly used for averaging losses, hence the name; however, it does not exclusively operate on losses.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/loss_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/loss_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragedLoss\",\n [\"input\"],\n [\"output\"],\n)\n\nworkspace.FeedBlob(\"input\", np.array([8, 10, 12]).astype(np.float32))\nprint(\"input:\\n\", workspace.FetchBlob(\"input\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"output: \\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n [ 8. 10. 12.]\noutput:\n 10.0\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "The input data as Tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The output tensor of size 1 containing the averaged value.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "AveragedLossGradient", + "support_level": "default" + }, + { + "name": "AveragePool", + "category": "Pool", + "description": "AveragePool \nconsumes an input blob and applies average pooling across the the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) # NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n
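As a quick worked check of the $dim_{out}$ formula above against this example: with $dim_{in}=6$, kernel $2$, pad $0$ and stride $2$, the formula gives $(6-2+0)/2+1=3$, matching the 3x3 spatial shape of `Y`. In code:

```
# Worked check of the pooling output-size formula from the description:
# dim_out = (dim_in - kernel + 2 * pad) / stride + 1
dim_in, kernel, pad, stride = 6, 2, 0, 2
dim_out = (dim_in - kernel + 2 * pad) // stride + 1
print(dim_out)  # 3, matching the 3x3 spatial shape of Y above
```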
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "AveragePool1D", + "description": "AveragePool1D \nconsumes an input blob and applies average pooling across the the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) # NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "AveragePool1DGradient", + "support_level": "default" + }, + { + "name": "AveragePool2D", + "description": "AveragePool2D \nconsumes an input blob and applies average pooling across the the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) # NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "AveragePool2DGradient", + "support_level": "default" + }, + { + "name": "AveragePool3D", + "description": "AveragePool3D \nconsumes an input blob and applies average pooling across the the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) # NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "AveragePool3DGradient", + "support_level": "default" + }, + { + "name": "AveragePoolGradient", + "support_level": "default" + }, + { + "name": "AveragePut", + "description": "\n Consume a value and pushes it to the global stat registry as an average.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_put_ops.cc\n\n ", + "attributes": [ + { + "description": "(*str*): name of the stat. If not present, then uses name of input blob", + "name": "name", + "option": "optional" + }, + { + "description": "(*int64_t*): number to multiply input values by (used when inputting floats, as stats can only receive integers", + "name": "magnitude_expand", + "option": "optional" + }, + { + "description": "(*boolean*): whether or not to clamp inputs to the max inputs allowed", + "name": "bound", + "option": "optional" + }, + { + "description": "(*float*): Optionally provide a default value for receiving empty tensors", + "name": "default_value", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): A scalar tensor, representing any numeric value", + "name": "value" + } + ], + "support_level": "default" + }, + { + "name": "Barrier", + "description": "\nDoes a barrier operation among the nodes.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + } + ], + "support_level": "default" + }, + { + "name": "BatchBoxCox", + "description": "\nInput `data` is a N * D matrix. Apply box-cox transform for each column.\n`lambda1` and `lambda2` is of size D that defines the hyper-parameters for\nthe transform of each column `x` of the input `data`:\n\n ln(x + lambda2), if lambda1 == 0\n ((x + lambda2)^lambda1 - 1)/lambda1, if lambda1 != 0\n\n", + "inputs": [ + { + "description": "input float or double N * D matrix", + "name": "data" + }, + { + "description": "tensor of size D with the same type as data", + "name": "lambda1" + }, + { + "description": "tensor of size D with the same type as data", + "name": "lambda2" + } + ], + "outputs": [ + { + "description": "output matrix that applied box-cox transform", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "BatchBucketize", + "description": "\nBucketize the float_features into sparse features.\nThe float_features is a N * D tensor where N is the batch_size, and D is the feature_dim.\nThe indices is a 1D tensor containing the indices of the features that need to be bucketized.\nThe lengths is a 1D tensor that splits the following 'boundaries' argument.\nThe boundaries is a 1D tensor containing the border list for each feature.\n\nWith in each batch, `indices` should not have duplicate number,\nand the number of elements in `indices` should be less than or equal to `D`.\nEach element in `lengths` vector (lengths[`i`]) represents\nthe number of boundaries in the sub border list.\nThe sum of all elements in `lengths` must be equal to the size of `boundaries`.\nIf lengths[0] = 2, the first sub border list is [0.5, 1.0], which separate the\nvalue to (-inf, 0.5], (0,5, 1.0], (1.0, inf). The bucketized feature will have\nthree possible values (i.e. 
0, 1, 2).\n\n\nFor example, with input:\n\n float_features = [[1.42, 2.07, 3.19, 0.55, 4.32],\n [4.57, 2.30, 0.84, 4.48, 3.09],\n [0.89, 0.26, 2.41, 0.47, 1.05],\n [0.03, 2.97, 2.43, 4.36, 3.11],\n [2.74, 5.77, 0.90, 2.63, 0.38]]\n indices = [0, 1, 4]\n lengths = [2, 3, 1]\n boundaries = [0.5, 1.0, 1.5, 2.5, 3.5, 2.5]\n\nThe output is:\n\n output = [[2, 1, 1],\n [2, 1, 1],\n [1, 0, 0],\n [0, 2, 1],\n [2, 3, 0]]\n\nafter running this operator.\n", + "inputs": [ + { + "description": "2-D dense tensor, the second dimension must be greater than or equal to the indices dimension", + "name": "float_features" + }, + { + "description": "Flatten tensor, containing the indices of `float_features` to be bucketized. The datatype must be int32.", + "name": "indices" + }, + { + "description": "Flatten tensor, the size must be equal to that of `indices`. The datatype must be int32.", + "name": "lengths" + }, + { + "description": "Flatten tensor, dimension has to match the sum of lengths", + "name": "boundaries" + } + ], + "outputs": [ + { + "description": "2-D dense tensor, with 1st dim = float_features.dim(0), 2nd dim = size(indices) in the arg list, the tensor is of the same data type as `feature`.", + "name": "bucktized_feat" + } + ], + "support_level": "default" + }, + { + "name": "BatchBucketOneHot", + "description": "\nInput is a matrix tensor. Its first dimension is the batch\nsize. For each column, bucketize it based on the boundary values and then do\none hot encoding. The `lengths` specifies the number of boundary values for each\ncolumn. The final number of buckets is this number plus 1. This would also be\nthe expanded feature size. `boundaries` specifies all the boundary values.\nNote that each bucket is right-inclusive. That is, given boundary values\n[b1, b2, b3], the buckets are defined as (-inf, b1], (b1, b2], (b2, b3], (b3, inf).\nFor example\n\n data = [[2, 3], [4, 1], [2, 5]], lengths = [2, 3],\n If boundaries = [0.1, 2.5, 1, 3.1, 4.5], then\n output = [[0, 1, 0, 0, 1, 0, 0], [0, 0, 1, 1, 0, 0, 0], [0, 1, 0, 0, 0, 0, 1]]\n\n If boundaries = [0.1, 2.5, 1, 1, 3.1], then\n output = [[0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 1]]\n\n", + "inputs": [ + { + "description": "input tensor matrix", + "name": "data" + }, + { + "description": "the size is the same as the width of the `data`", + "name": "lengths" + }, + { + "description": "bucket boundaries", + "name": "boundaries" + } + ], + "outputs": [ + { + "description": "output matrix that expands each input column with one hot encoding based on the bucketization", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "BatchDenseToSparse", + "description": "\nThis Op is an inverse of BatchSparseToDenseOp.\nBasically, given a `lengths` vector, an `indices` vector,\nand a dense matrix `dense`, output a `values` vector so that, along with the\n`lengths` vector and `indices` vector, it forms a sparse representation\nof the dense matrix.\n\nA sparse matrix is represented by `lengths` vector, `indices` vector,\nand `values` vector. 
Each element in `lengths` vector (lengths[`i`]) represents\nthe number of indices in this batch (batch `i`).\nWithin each batch, `indices` should not have duplicate numbers.\n\nFor example, with input:\n\n lengths = [2, 3, 1]\n indices = [0, 1, 2, 3, 4, 5]\n output = [[6, 7, 0, 0, 0, 0],\n [0, 0, 8, 9, 10, 0],\n [0, 0, 0, 0, 0, 11]]\n\nThe output is:\n\n values = [6, 7, 8, 9, 10, 11]\n\nafter running this operator.\n", + "inputs": [ + { + "description": "Flatten lengths, used to break down indices into per batch indices", + "name": "lengths" + }, + { + "description": "Flatten indices, tensor of total size = \\sum lengths, containing the indices ", + "name": "indices" + }, + { + "description": "dense 2-D tensor, first dim = len(lengths), last dim > Any(indices)", + "name": "dense" + } + ], + "outputs": [ + { + "description": "Values, tensor of the same size as `indices` and same data type as dense tensor.", + "name": "values" + } + ], + "support_level": "default" + }, + { + "name": "BatchGather", + "description": "\nBatch gather operation, first dimension in DATA is the batch size.\nGiven DATA tensor of rank r >= 2, and INDICES tensor of rank q >= 1, gather\nentries of the second outer dimension (axis == 1) of DATA indexed by INDICES,\nand concatenate them in an output tensor of rank q + (r - 1).\n\nExample:\n DATA = [\n [1.0, 1.2, 2.4, 4.5],\n [2.3, 3.4, 3.6, 2.3],\n [4.5, 5.7, 1.2, 4.5],\n ]\n INDICES = [0, 2]\n\n OUTPUT = [\n [1.0, 2.4],\n [2.3, 3.6],\n [4.5, 1.2],\n ]\n", + "inputs": [ + { + "description": "Tensor of rank r >= 2.", + "name": "DATA" + }, + { + "description": "Tensor of int32/int64 indices, of any rank q.", + "name": "INDICES" + } + ], + "outputs": [ + { + "description": "Tensor of rank q + (r - 1).", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "BatchGatherGradient", + "support_level": "default" + }, + { + "name": "BatchMatMul", + "description": "\nBatch Matrix multiplication Yi = Ai * Bi, where A has shape (dim0, dim1, ... M, K),\nB has shape (dim0, dim1, ... K, N), Y has shape (dim0, dim1, ... M, N) and i ranges\nfrom 0 to (dim0 * dim1 ...) - 1. rank(A) == rank(B) >= 2. In case of A and B being\ntwo dimensional, it behaves like normal matrix multiplication.\n", + "attributes": [ + { + "description": "Pass 1 to transpose the last two dimensions of A before doing multiplication", + "name": "trans_a", + "option": "optional" + }, + { + "description": "Pass 1 to transpose the last two dimensions of B before doing multiplication", + "name": "trans_b", + "option": "optional" + }, + { + "description": "Pass 1 to allow broadcasting of dimensions. Behavior is the same as numpy.matmul. Gradient is currently not supported when running in broadcast mode.", + "name": "broadcast", + "option": "optional" + } + ], + "inputs": [ + { + "description": "tensor of shape (dim0, dim1 ... M, K)", + "name": "A" + }, + { + "description": "tensor of shape (dim0, dim1 ... K, N)", + "name": "B" + } + ], + "outputs": [ + { + "description": "tensor of shape (dim0, dim1 ... M, N)", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "BatchMoments", + "support_level": "default" + }, + { + "name": "BatchMomentsGradient", + "support_level": "default" + }, + { + "name": "BatchOneHot", + "description": "\nInput is a matrix tensor. Its first dimension is the batch\nsize. Expand each column of it using one hot encoding. 
The `lengths` specifies\nthe size of each column after encoding, and the `values` is the dictionary value\nof one-hot encoding for each column. For example\n\n If data = [[2, 3], [4, 1], [2, 5]], lengths = [2, 3],\n and values = [2, 4, 1, 3, 5], then\n\n output = [[1, 0, 0, 1, 0], [0, 1, 1, 0, 0], [1, 0, 0, 0, 1]]\n", + "inputs": [ + { + "description": "input tensor matrix", + "name": "data" + }, + { + "description": "the size is the same as the width of the `data`", + "name": "lengths" + }, + { + "description": "one hot encoding dictionary values", + "name": "values" + } + ], + "outputs": [ + { + "description": "output matrix that expands each input column with one hot encoding", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "BatchPermutation", + "description": "\nBatch permutation of an input tensor X given input indices. First dimension of\nX equals batch size N. The indices store a permutation of N.\nThe output Y is a tensor of same shape as X, with data re-ordered according to\nthe indices within the batch size.\n\nExample of batch permutation on a 2-D tensor with batch size 4:\n X = [\n [1, 5, 2, 3, 4, 6, 0],\n [4, 3, 3, 5, 2, 3, 1],\n [2, 2, 3, 6, 0, 0, 1],\n [0, 0, 1, 1, 2, 2, 3]\n ]\n indices = [2, 0, 1, 3]\n Y = [\n [2, 2, 3, 6, 0, 0, 1],\n [1, 5, 2, 3, 4, 6, 0],\n [4, 3, 3, 5, 2, 3, 1],\n [0, 0, 1, 1, 2, 2, 3]\n ]\n\nExample of batch permutation on a 3-D tensor with batch size 4:\n X = [\n [[1, 5, 2], [3, 4, 6, 0]],\n [[4, 3, 3], [5, 2, 3, 1]],\n [[2, 2, 3], [6, 0, 0, 1]],\n [[0, 0, 1], [1, 2, 2, 3]]\n ]\n indices = [2, 0, 1, 3]\n Y = [\n [[2, 2, 3], [6, 0, 0, 1]],\n [[1, 5, 2], [3, 4, 6, 0]],\n [[4, 3, 3], [5, 2, 3, 1]],\n [[0, 0, 1], [1, 2, 2, 3]]\n ]\n", + "inputs": [ + { + "description": "Input tensor, where 1st dimension equals batch size", + "name": "X" + }, + { + "description": "Input indices of batch to permute", + "name": "indices" + } + ], + "outputs": [ + { + "description": "Output permuted tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "BatchPermutationGradient", + "support_level": "default" + }, + { + "name": "BatchSparseToDense", + "description": "\nConvert sparse matrix representation into dense matrix.\n\nA sparse matrix is represented by `lengths` vector, `indices` vector,\nand `values` vector. Each element in `lengths` vector (lengths[`i`]) represents\nthe number of indices in this batch (batch `i`).\nWithin each batch, `indices` should not have duplicate numbers.\n\nFor example, with input:\n\n lengths = [2, 3, 1]\n indices = [0, 1, 2, 3, 4, 5]\n values = [6, 7, 8, 9, 10, 11]\n dense_dim = 6\n default_value = 0\n\nThe output is:\n\n output = [[6, 7, 0, 0, 0, 0],\n [0, 0, 8, 9, 10, 0],\n [0, 0, 0, 0, 0, 11]]\n\nafter running this operator.\n", + "attributes": [ + { + "description": "Optional, output dense last dimension. 
If both this argument and output_shape_inference are set, it should be consistent with output_shape_inference's last dim", + "name": "dense_last_dim", + "option": "optional" + }, + { + "description": "Optional, missing values are filled with this value. default_value = 0 when not set", + "name": "default_value", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Flatten tensor, used to break down indices and values into per batch indices and values.", + "name": "lengths" + }, + { + "description": "Flatten tensor of total size = \\sum lengths, containing the indices ", + "name": "indices" + }, + { + "description": "Data tensor, dimension has to match `indices`", + "name": "values" + }, + { + "description": "Optional, a dense tensor whose shape defines the output shape", + "name": "output_shape_inference" + } + ], + "outputs": [ + { + "description": "2-D dense tensor, with 1st dim = len(lengths), 2nd dim = dense_last_dim in the arg list; the tensor is of the same data type as `values`. Missing values are filled with default_value", + "name": "dense" + } + ], + "support_level": "default" + }, + { + "name": "BatchToSpace", + "description": "\nRearranges (permutes) data from batch into blocks of spatial data, followed by cropping. This is the reverse transformation of `SpaceToBatch`. More specifically, this op outputs a copy of the input tensor where values from the batch dimension are moved in spatial blocks to the height and width dimensions, followed by cropping along the height and width dimensions. Only \"NCHW\" order is currently supported.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/space_batch_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BatchToSpace\",\n [\"X\"],\n [\"Y\"],\n pad=3\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(10,3,32,32).astype(np.float32))\nprint(\"X.shape:\", workspace.FetchBlob(\"X\").shape)\nworkspace.RunOperatorOnce(op)\nprint(\"Y.shape:\", workspace.FetchBlob(\"Y\").shape)\n\n```\n\n**Result**\n\n```\n\nX.shape: (10, 3, 32, 32)\nY.shape: (2, 3, 58, 58)\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "(*int*): exclusive axis that divides the first and second dimension of matrix `A` (default=0)", + "name": "pad", + "option": "optional" + }, + { + "description": "(*int*): height/width of spatial blocks to be moved (default=2)", + "name": "block_size", + "option": "optional" + }, + { + "description": "(*string*): order of dimensions of input and output blobs; only \"NCHW\" order is currently supported (default=\"NCHW\")", + "name": "order", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor (NCHW order)", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): output tensor (NCHW order)", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "BBoxTransform", + "description": "\nTransform proposal bounding boxes to target bounding box using bounding box\n regression deltas.\n", + "attributes": [ + { + "description": "vector weights [wx, wy, ww, wh] for the deltas", + "name": "weights", + "option": "optional" + }, + { + "description": "bool (default true), transform the boxes to the scaled image space after applying the bbox deltas.Set to false to match the detectron code, set to true for keypoint models and for backward compatibility", + "name": "apply_scale", + "option": "optional" + }, + { + "description": "bool (default false), Correct bounding box transform coordates, see bbox_transform() in boxes.py Set to true to match the detectron code, set to false for backward compatibility", + "name": "correct_transform_coords", + "option": "optional" + }, + { + "description": "bool (default false). If true, then boxes (rois and deltas) include angle info to handle rotation. The format will be [ctr_x, ctr_y, width, height, angle (in degrees)].", + "name": "rotated", + "option": "optional" + }, + { + "description": "bool (default true). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_on", + "option": "optional" + }, + { + "description": "int (default -90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_lo", + "option": "optional" + }, + { + "description": "int (default 90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_hi", + "option": "optional" + }, + { + "description": "float (default 1.0 degrees). For RRPN, clip almost horizontal boxes within this threshold of tolerance for backward compatibility. Set to negative value for no clipping.", + "name": "clip_angle_thresh", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Bounding box proposals in pixel coordinates, Size (M, 4), format [x1, y1, x2, y2], orSize (M, 5), format [batch_index, x1, y1, x2, y2]. If proposals from multiple images in a batch are present, they should be grouped sequentially and in incremental order.For rotated boxes, this would have an additional angle (in degrees) in the format [, ctr_x, ctr_y, w, h, angle].", + "name": "rois" + }, + { + "description": "bounding box translations and scales,size (M, 4*K), format [dx, dy, dw, dh], K = # classes. 
For rotated boxes, size (M, 5*K), format [dx, dy, dw, dh, da].", + "name": "deltas" + }, + { + "description": "Image dimensions, size (batch_size, 3), format [img_height, img_width, img_scale]", + "name": "im_info" + } + ], + "outputs": [ + { + "description": "Pixel coordinates of the transformed bounding boxes, Size (M, 4*K), format [x1, y1, x2, y2]. For rotated boxes, size (M, 5*K), format [ctr_x, ctr_y, w, h, angle].", + "name": "box_out" + }, + { + "description": "Tensor of shape (batch_size) with each element denoting the number of RoIs belonging to the corresponding image in batch", + "name": "roi_batch_splits" + } + ], + "support_level": "default" + }, + { + "name": "BernoulliJSD", + "description": "\nComputes the Jensen-Shannon divergence (JSD) between two Bernoulli distributions\nwhere each is parametrized by a single probability.\n", + "inputs": [ + { + "description": "array of probabilities for target", + "name": "T" + } + ], + "outputs": [ + { + "description": "array of JSD losses", + "name": "L" + } + ], + "support_level": "default" + }, + { + "name": "BernoulliJSDGradient", + "support_level": "default" + }, + { + "name": "BisectPercentile", + "description": "\n This operator maps raw feature values into percentile\n representations based on Bisection for more than one feature.\n\n The input is the batch of input feature values, with the size of (batch_size,\n num_feature), where num_feature = F (F >= 1).\n\n For each feature, we also need additional information regarding the feature\n value distribution.\n There are several vectors to keep data-to-percentile mapping information\n as arguments (context):\n 1. feature raw values (R)\n 2. feature percentile mapping (P)\n 3. feature percentile lower bound (L)\n 4. feature percentile upper bound (U)\n\n A toy example:\n Suppose the sampled data distribution is as follows:\n 1, 1, 2, 2, 2, 2, 2, 2, 3, 4\n We have the mapping vectors as follows:\n R = [1, 2, 3, 4]\n P = [0.15, 0.55, 0.9, 1.0]\n L = [0.1, 0.3, 0.9, 1.0]\n U = [0.2, 0.8, 0.9, 1.0]\n Where P is computed as (L + U) / 2.\n\n For a given list of feature values, X = [x_0, x_1, ..., x_i, ...], for each\n feature value (x_i) we first apply bisection to find the right index (t),\n such that R[t] <= x_i < R[t+1].\n If x_i = R[t], P[t] is returned;\n otherwise, the interpolation is applied using (R[t], R[t+1]) and (L[t], U[t]).\n\n As there are F features (F >= 1), we concatenate all the R_f, P_f, L_f, and\n U_f for each feature f and use an additional input length to keep track of\n the number of points for each set of raw feature value to percentile mapping.\n For example, there are two features:\n R_1 = [0.1, 0.4, 0.5];\n R_2 = [0.3, 1.2];\n We will build R = [0.1, 0.4, 0.5, 0.3, 1.2]; besides, we have\n lengths = [3, 2]\n to indicate the boundaries of the percentile information.\n\n", + "attributes": [ + { + "description": "1D tensor, which is the concatenation of all sorted raw feature values for all features.", + "name": "percentile_raw", + "option": "optional" + }, + { + "description": "1D tensor. There is one-one mapping between percentile_mapping and percentile_raw such that each element in percentile_mapping corresponds to the percentile value of the corresponding raw feature value.", + "name": "percentile_mapping", + "option": "optional" + }, + { + "description": "1D tensor. 
There is one-one mapping between percentile_lower and percentile_raw such that each element in percentile_lower corresponds to the percentile lower bound of the corresponding raw feature value.", + "name": "percentile_lower", + "option": "optional" + }, + { + "description": "1D tensor. There is one-one mapping between percentile_upper and percentile_raw such that each element in percentile_upper corresponds to the percentile upper bound of the corresponding raw feature value.", + "name": "percentile_upper", + "option": "optional" + }, + { + "description": "1D tensor. The number of raw-value-to-percentile points for each feature; the sum of its elements must equal the size of percentile_raw.", + "name": "lengths", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input 2D tensor of floats of size (N, D), where N is the batch size and D is the feature dimension.", + "name": "raw_values" + } + ], + "outputs": [ + { + "description": "2D tensor of output with the same dimensions as the input raw_values.", + "name": "percentile" + } + ], + "support_level": "default" + }, + { + "name": "BitwiseAnd", + "description": "\nPerforms element-wise bitwise operation `bitwise_and` (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Output tensor. Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "BitwiseOr", + "description": "\nPerforms element-wise bitwise operation `bitwise_or` (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. 
When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Output tensor. Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "BitwiseXor", + "description": "\nPerforms element-wise bitwise operation `bitwise_xor` (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Output tensor. 
Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "BooleanMask", + "description": "\nGiven a 1D `data` tensor and a boolean `mask` tensor of the same shape, returns a `masked_data` tensor containing only the elements corresponding to positions where the `mask` is True, and a `masked_indices` tensor containing the indices of the True elements.\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/boolean_mask_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BooleanMask\",\n [\"data\", \"mask\"],\n [\"masked_data\", \"masked_indices\"]\n)\n\nworkspace.FeedBlob(\"data\", np.array([1,2,3,4,5,6]))\nworkspace.FeedBlob(\"mask\", np.array([True,False,False,True,True,False]))\nprint(\"data:\", workspace.FetchBlob(\"data\"))\nprint(\"mask:\", workspace.FetchBlob(\"mask\"))\nworkspace.RunOperatorOnce(op)\nprint(\"masked_data:\", workspace.FetchBlob(\"masked_data\"))\nprint(\"masked_indices:\", workspace.FetchBlob(\"masked_indices\"))\n\n```\n\n**Result**\n\n```\n\ndata: [1 2 3 4 5 6]\nmask: [ True False False True True False]\nmasked_data: [1 4 5]\nmasked_indices: [0 3 4]\n\n```\n\n
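For reference, the same result can be reproduced with plain numpy boolean indexing (a hedged equivalence sketch, not part of the operator schema):

```
# Hedged numpy equivalent of BooleanMask: boolean indexing selects the
# kept elements, and np.nonzero recovers the indices of the True positions.
import numpy as np

data = np.array([1, 2, 3, 4, 5, 6])
mask = np.array([True, False, False, True, True, False])
print(data[mask])           # [1 4 5]  == masked_data
print(np.nonzero(mask)[0])  # [0 3 4]  == masked_indices
```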
\n\n", + "inputs": [ + { + "description": "(*Tensor*): 1D input tensor", + "name": "data" + }, + { + "description": "(*Tensor``*): tensor of bools which determines the input elements that will be left in the `masked_data` output tensor; same shape as `data`", + "name": "mask" + } + ], + "outputs": [ + { + "description": "(*Tensor*): 1D tensor of same type as `data` input that contains the masked input tensor", + "name": "masked_data" + }, + { + "description": "(*Tensor``*): 1D tensor of indices of the True elements in the `mask` tensor", + "name": "masked_indices" + } + ], + "support_level": "default" + }, + { + "name": "BooleanMaskGradient", + "support_level": "default" + }, + { + "name": "BooleanMaskLengths", + "description": "\nGiven a tensor of int32 `lengths` tensor representing segment lengths and a `mask` (boolean) tensor, return the segment lengths of the corresponding segmented tensor after **BooleanMask** is applied.\n\nIf `lengths` tensor is $[a_1, a_2, ..., a_n]$, then length of `mask` tensor must be $a_1 + a_2 + ... + a_n$.\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/boolean_mask_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BooleanMaskLengths\",\n [\"lengths\", \"mask\"],\n [\"masked_lengths\"]\n)\n\nworkspace.FeedBlob(\"lengths\", np.array([1,3,2], dtype=np.int32))\nworkspace.FeedBlob(\"mask\", np.array([False,True,True,False,True,True]))\nprint(\"lengths:\", workspace.FetchBlob(\"lengths\"))\nprint(\"mask:\", workspace.FetchBlob(\"mask\"))\nworkspace.RunOperatorOnce(op)\nprint(\"masked_lengths:\", workspace.FetchBlob(\"masked_lengths\"))\n\n```\n\n**Result**\n\n```\n\nlengths: [1 3 2]\nmask: [False True True False True True]\nmasked_lengths: [0 2 2]\n\n```\n\n
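The result can be re-derived in numpy by splitting the mask at the segment boundaries given by `lengths` and counting the kept entries per segment (a hedged sketch using the values from the example above):

```
# Hedged numpy re-derivation of masked_lengths: count the True entries
# of the mask within each segment delimited by the lengths vector.
import numpy as np

lengths = np.array([1, 3, 2], dtype=np.int32)
mask = np.array([False, True, True, False, True, True])
segments = np.split(mask, np.cumsum(lengths)[:-1])
print([int(seg.sum()) for seg in segments])  # [0, 2, 2] == masked_lengths
```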
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor containing segment lengths", + "name": "lengths" + }, + { + "description": "(*Tensor``*): A 1D bool tensor of values to keep.", + "name": "mask" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): 1D tensor of same type as inputs that contains the sequence", + "name": "masked_lengths" + } + ], + "support_level": "default" + }, + { + "name": "BooleanUnmask", + "description": "\nGiven a series of masks and values, reconstruct values together according to masks. A comprehensive example:\n```\nmask1 = True, False, True, False, False\nvalues1 = 1.0, 3.0\nmask2 = False, True, False, False, False\nvalues2 = 2.0\nmask3 = False, False, False, True, True\nvalues3 = 4.0, 5.0\n```\n\nReconstruct by:\n\n```\noutput = net.BooleanUnmask([mask1, values1, mask2, values2, mask3, values3], [\"output\"])\noutput = 1.0, 2.0, 3.0, 4.0, 5.0\n```\n\nNote that for all mask positions, there must be at least one True. This is not allowed:\n\n```\nmask1 = True, False\nvalues1 = 1.0\nmask2 = False, False\nvalues2 =\n\noutput = net.BooleanUnmask([mask1, values1, mask2, values2], [\"output\"])\n```\n\nIf there are multiple True values for a field, we accept the first value, and no longer expect a value for that location:\n\n```\nmask1 = True, False\nvalues1 = 1.0\nmask2 = True, True\nvalues2 = 2.0\n\noutput = net.BooleanUnmask([mask1, values1, mask2, values2], [\"output\"])\noutput = 1.0, 2.0\n```\n\n*** Note that we alternate `data` and `mask` inputs\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/boolean_unmask_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BooleanUnmask\",\n [\"mask1\", \"data1\", \"mask2\", \"data2\"],\n [\"unmasked_data\"]\n)\n\nworkspace.FeedBlob(\"mask1\", np.array([True,False,False,True,True,False]))\nworkspace.FeedBlob(\"data1\", np.array([1,4,5]))\nworkspace.FeedBlob(\"mask2\", np.array([False,True,True,False,False,True]))\nworkspace.FeedBlob(\"data2\", np.array([2,3,6]))\n\nprint(\"data1:\", workspace.FetchBlob(\"data1\"))\nprint(\"mask1:\", workspace.FetchBlob(\"mask1\"))\nprint(\"data2:\", workspace.FetchBlob(\"data2\"))\nprint(\"mask2:\", workspace.FetchBlob(\"mask2\"))\nworkspace.RunOperatorOnce(op)\nprint(\"unmasked_data:\", workspace.FetchBlob(\"unmasked_data\"))\n\n```\n\n**Result**\n\n```\n\ndata1: [1 4 5]\nmask1: [ True False False True True False]\ndata2: [2 3 6]\nmask2: [False True True False False True]\nunmasked_data: [1 2 3 4 5 6]\n\n```\n\n
\n", + "inputs": [ + { + "description": "(*Tensor*): 1D input tensor(s)", + "name": "data" + }, + { + "description": "(*Tensor``*): 1D boolean mask tensor(s)", + "name": "mask" + } + ], + "outputs": [ + { + "description": "(*Tensor*): 1D tensor of same type as `data` input that contains the unmasked input tensor", + "name": "unmasked_data" + } + ], + "support_level": "default" + }, + { + "name": "BoxWithNMSLimit", + "description": "\nApply NMS to each class (except background) and limit the number of\nreturned boxes.\n", + "attributes": [ + { + "description": "(float) TEST.SCORE_THRESH", + "name": "score_thresh", + "option": "optional" + }, + { + "description": "(float) TEST.NMS", + "name": "nms", + "option": "optional" + }, + { + "description": "(int) TEST.DEECTIONS_PER_IM", + "name": "detections_per_im", + "option": "optional" + }, + { + "description": "(bool) TEST.SOFT_NMS.ENABLED", + "name": "soft_nms_enabled", + "option": "optional" + }, + { + "description": "(string) TEST.SOFT_NMS.METHOD", + "name": "soft_nms_method", + "option": "optional" + }, + { + "description": "(float) TEST.SOFT_NMS.SIGMA", + "name": "soft_nms_sigma", + "option": "optional" + }, + { + "description": "(float) Lower bound on updated scores to discard boxes", + "name": "soft_nms_min_score_thres", + "option": "optional" + }, + { + "description": "bool (default false). If true, then boxes (rois and deltas) include angle info to handle rotation. The format will be [ctr_x, ctr_y, width, height, angle (in degrees)].", + "name": "rotated", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Scores, size (count, num_classes)", + "name": "scores" + }, + { + "description": "Bounding box for each class, size (count, num_classes * 4). For rotated boxes, this would have an additional angle (in degrees) in the format [, ctr_x, ctr_y, w, h, angle]. Size: (count, num_classes * 5).", + "name": "boxes" + }, + { + "description": "Tensor of shape (batch_size) with each element denoting the number of RoIs/boxes belonging to the corresponding image in batch. Sum should add up to total count of scores/boxes.", + "name": "batch_splits" + } + ], + "outputs": [ + { + "description": "Filtered scores, size (n)", + "name": "scores" + }, + { + "description": "Filtered boxes, size (n, 4). For rotated boxes, size (n, 5), format [ctr_x, ctr_y, w, h, angle].", + "name": "boxes" + }, + { + "description": "Class id for each filtered score/box, size (n)", + "name": "classes" + }, + { + "description": "Output batch splits for scores/boxes after applying NMS", + "name": "batch_splits" + }, + { + "description": "Optional filtered indices, size (n)", + "name": "keeps" + }, + { + "description": "Optional number of filtered indices per class, size (num_classes)", + "name": "keeps_size" + } + ], + "support_level": "default" + }, + { + "name": "BRGNCHWCToPackedInt8BGRAStylizerDeprocess", + "support_level": "default" + }, + { + "name": "Broadcast", + "description": "\nDoes a broadcast operation from the root node to every other node. 
The tensor\non each node should have been pre-created with the same shape and data type.\n", + "attributes": [ + { + "description": "(int, default 0) the root to run broadcast from.", + "name": "root", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be broadcasted.", + "name": "X" + } + ], + "outputs": [ + { + "description": "In-place as input 1.", + "name": "X" + } + ], + "support_level": "default" + }, + { + "name": "Bucketize", + "description": "\nThis operator works as bucketize in tensorflow and digitize\nin numpy. It bucketizes the input 'X' based on argument 'boundaries'.\nFor each value x in input 'data', the operator returns index i given\nboundaries[i-1] < x <= boundaries[i].\nIf values in 'data' are beyond the bounds of boundaries, 0 or\nlen(boundaries) is returned as appropriate.\nThe boundaries need to be monotonically increasing.\nFor example\n\nIf data = [2, 4, 1] and boundaries = [0.1, 2.5], then\n\noutput = [1, 2, 1]\n\nIf data = [[2, 3], [4, 1], [2, 5]] and boundaries = [0.1, 2.5], then\n\noutput = [[1, 2], [2, 1], [1, 2]]\n\n", + "attributes": [ + { + "description": "bucketization boundaries", + "name": "boundaries", + "option": "optional" + } + ], + "inputs": [ + { + "description": "input tensor", + "name": "data" + } + ], + "outputs": [ + { + "description": "indices of bins given by boundaries to which each valuein data belongs", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "ByteWeightDequant", + "support_level": "default" + }, + { + "name": "Cast", + "description": "\nCasts the elements of a given input tensor to a data type specified by the `to`\nargument and returns an output tensor of the same size in the converted type.\nThe `to` argument must be one of the data types specified in the *DataType*\nenum field in the TensorProto message (see below). If the `to` argument is not\nprovided or is not one of the enumerated types in *DataType*, Caffe2 throws an\nEnforce error.\n\nNOTE: Casting from strings is not supported, and casting to strings is only\nsupported on CPU.\n\nTensorProto *DataType* field:\n```\nmessage TensorProto {\n ...\n enum DataType {\n UNDEFINED = 0;\n FLOAT = 1; // float\n INT32 = 2; // int\n BYTE = 3; // BYTE, when deserialized, is going to be restored as uint8.\n STRING = 4; // string\n BOOL = 5; // bool\n UINT8 = 6; // uint8_t\n INT8 = 7; // int8_t\n UINT16 = 8; // uint16_t\n INT16 = 9; // int16_t\n INT64 = 10; // int64_t\n FLOAT16 = 12; // at::Half\n DOUBLE = 13; // double\n }\n```\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/cast_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Cast\",\n [\"X\"],\n [\"Y\"],\n to=2\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32)*10)\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX: [[9.436466 5.8529844 0.54932857]\n [1.1583444 2.9936118 0.22950427]\n [3.9143739 3.4040766 8.905341 ]]\nY: [[9 5 0]\n [1 2 0]\n [3 3 8]]\n```\n\n
\n\n", + "attributes": [ + { + "description": "Data type to which the elements of the input tensor are cast. Strictly must be one of the types from *DataType* enum in TensorProto.", + "name": "to", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor to be cast.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor`<'to' type>`)* Output tensor with the same shape as input with type specified by the `to` argument.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Cbrt", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor calculated as the cbrt of the input tensor, element-wise.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "CbrtGradient", + "support_level": "default" + }, + { + "name": "Ceil", + "description": "\nElement-wise application of the ceil function ($y=ceil(x)$) to the input tensor\n`X`. Output tensor shape is the same as the input tensor.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/ceil_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Ceil\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.uniform(-10, 10, (5,5))).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[ 8.44598 -6.5098248 -2.2993476 -7.6859694 0.58566964]\n [-7.846551 -0.03689406 6.9362907 -4.0521703 4.4969673 ]\n [ 0.33355865 -7.895527 -8.393201 9.374202 -2.3930092 ]\n [-6.3061996 3.1403487 3.782099 -8.516556 -2.8387244 ]\n [-2.0164998 4.7663913 -3.422966 0.3636999 8.75713 ]]\nX after running op:\n[[ 9. -6. -2. -7. 1.]\n [-7. -0. 7. -4. 5.]\n [ 1. -7. -8. 10. -2.]\n [-6. 4. 4. -8. -2.]\n [-2. 5. -3. 1. 9.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ChannelBackpropStats", + "description": "\nGiven an input tensor in NCHW format, the gradient for the output of SpatialBN\nand the per-channel mean and inverse std var vectors for the input, computes the\nper-channel bias and scale gradient to be used during the backward pass for\nsubsequent spatial batch normalization gradient calculation. Typically, the\nresults of this op are subsequently reduced over multiple devices to obtain\nstatistics over a larger batch size in cases where the batch size for a single\nmodel copy is too low to yield the full benefit of batch normalization. The\nresulting bias and scale can then be plugged back into SpatialBNGradient to get\nresults over the larger batch size ", + "inputs": [ + { + "description": "The input 4-dimensional tensor of shape NCHW", + "name": "X" + }, + { + "description": "The mean saved from the forward pass as a 1-dimensional tensor of size C.", + "name": "mean" + }, + { + "description": "The saved inverse standard deviation as a 1-dimensional tensor of size C.", + "name": "inv_std" + }, + { + "description": "Gradient for the output layer of SpatialBN, here used as input because we are on the backward pass", + "name": "output_grad" + } + ], + "outputs": [ + { + "description": "Gradient for the scale vector", + "name": "scale_grad" + }, + { + "description": "Gradient for the bias vector", + "name": "bias_grad" + } + ], + "support_level": "default" + }, + { + "name": "ChannelShuffle", + "support_level": "default" + }, + { + "name": "ChannelShuffleGradient", + "support_level": "default" + }, + { + "name": "ChannelStats", + "description": "\nGiven an input tensor in NCHW format, computes the sum of all elements per\nchannel and the sum of all elements squared per channel. These values can be\nreduced across multiple batches and used to obtain the mean and variance across\nthe full set of batches. Using the new mean and variance as input to SpatialBN\nhas the effect of changing the batch size over which SpatialBN is applied.\n", + "inputs": [ + { + "description": "The input 4-dimensional tensor of shape NCHW", + "name": "X" + } + ], + "outputs": [ + { + "description": "The output 1-dimensional tensor of size C containing the sum of elements of X per channel.", + "name": "sum" + }, + { + "description": "The output 1-dimensional tensor of size C containing the sum of elements squared per channel.", + "name": "sumsq" + } + ], + "support_level": "default" + }, + { + "name": "CheckAtomicBool", + "description": "Copy the value of an atomic to a bool", + "inputs": [ + { + "description": "Blob containing a unique_ptr>", + "name": "atomic_bool" + } + ], + "outputs": [ + { + "description": "Copy of the value for the atomic", + "name": "value" + } + ], + "support_level": "default" + }, + { + "name": "CheckCounterDone", + "description": "\nIf the internal count value <= 0, outputs true, otherwise outputs false.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n# Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n# Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n# Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n# Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n# Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr, std::__1::default_delete > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.", + "name": "counter" + } + ], + "outputs": [ + { + "description": "*(type: bool)* True if the internal count is zero or negative, otherwise False.", + "name": "done" + } + ], + "support_level": "default" + }, + { + "name": "CheckDatasetConsistency", + "description": "\nChecks that the given data fields represents a consistent dataset under\nthe schema specified by the `fields` argument. Operator fails if the fields\nare not consistent. If data is consistent, each field's data can be safely\nappended to an existing dataset, keeping it consistent.\n", + "attributes": [ + { + "description": "List of strings representing the string names in the formatspecified in the doc for CreateTreeCursor.", + "name": "fields", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Data for field 0.", + "name": "field_0" + } + ], + "support_level": "default" + }, + { + "name": "Checkpoint", + "description": "\nThe Checkpoint operator is similar to the Save operator, but allows one to save\nto db every few iterations, with a db name that is appended with the iteration\ncount. It takes [1, infinity) number of inputs and has no output. The first\ninput has to be a TensorCPU of type int and has size 1 (i.e. the iteration\ncounter). This is determined whether we need to do checkpointing.\n", + "attributes": [ + { + "description": "(int, default 0) if set, use the db path directly and do not prepend the current root folder of the workspace.", + "name": "absolute_path", + "option": "optional" + }, + { + "description": "(string) a template string that one can combine with the iteration to create the final db name. For example, \"/home/lonestarr/checkpoint_%08d.db\"", + "name": "db", + "option": "optional" + }, + { + "description": "(string) the type of the db.", + "name": "db_type", + "option": "optional" + }, + { + "description": "(int, default 1) the checkpointing is carried out when (iter mod every) is zero.", + "name": "every", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "Clip", + "description": "\nThis operator limits the given input within an interval. The interval is\nspecified by the `min` and `max` arguments. They default to\n*numeric_limits::lowest()* and *numeric_limits::max()* respectively. The\nclipping operation can be done in an in-place fashion by using the same output\nblob as the input blob.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/clip_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Clip\",\n [\"X\"],\n [\"Y\"],\n min=20.0,\n max=60.0\n\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(100, size=(5,5))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\nX: [[45. 16. 59. 99. 48.]\n [12. 44. 46. 82. 28.]\n [ 1. 91. 18. 9. 71.]\n [24. 37. 61. 12. 81.]\n [36. 38. 30. 84. 40.]]\nY: [[45. 20. 59. 60. 48.]\n [20. 44. 46. 60. 28.]\n [20. 60. 20. 20. 60.]\n [24. 37. 60. 20. 60.]\n [36. 38. 30. 60. 40.]]\n```\n\n
\n\n", + "attributes": [ + { + "description": "Minimum value, under which element is replaced by min (default=*numeric_limits::lowest()*).", + "name": "min", + "option": "optional", + "type": "float32" + }, + { + "description": "Maximum value, under which element is replaced by max (default=*numeric_limits::max()*).", + "name": "max", + "option": "optional", + "type": "float32" + } + ], + "inputs": [ + { + "description": "*(Tensor``)* Input tensor within range [*numeric_limits::lowest()*, *numeric_limits::max()*].", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(Tensor``)* Output tensor clipped within range [`min`, `max`].", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ClipGradient", + "support_level": "default" + }, + { + "name": "ClipTensorByScaling", + "description": "\n Clips the input tensor by scaling based on the input value and the threshold.\n The value is usually the (pre-computed) norm of the tensor. If the value is\n larger than the threshold, scaling would be performed in this way:\n\n tensor *= (threshold / value).\n\n An optional input called additional_threshold can be provided which\n will scale the original threshold before it is used. That is,\n the final threshold will become threshold * additional_threshold.\n This op could be used for gradient clipping.\n", + "attributes": [ + { + "description": "Threshold to determine whether to scale down the tensor", + "name": "threshold", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Tensor of floats to be clipped.", + "name": "input_tensor" + }, + { + "description": "Value to be compared against the threshold", + "name": "val" + }, + { + "description": "An optional additional threshold to scale the original threshold", + "name": "additional_threshold" + } + ], + "outputs": [ + { + "description": "Tensor of floats, which is the same size as the input tensor, representing the clipped tensor.", + "name": "clipped" + } + ], + "support_level": "default" + }, + { + "name": "CloneCommonWorld", + "description": "\nClones existing common world.\n", + "inputs": [ + { + "description": "Existing common world to clone.", + "name": "existing_comm_world" + } + ], + "outputs": [ + { + "description": "A common world for collective operations.", + "name": "comm_world" + } + ], + "support_level": "default" + }, + { + "name": "CloseBlobsQueue", + "support_level": "default" + }, + { + "name": "CloseRebatchingQueue", + "description": "\nCloses the Queue.\n", + "inputs": [ + { + "description": "object representing the queue", + "name": "queue" + } + ], + "support_level": "default" + }, + { + "name": "Col2Im", + "support_level": "default" + }, + { + "name": "CollectAndDistributeFpnRpnProposals", + "description": "\nMerge RPN proposals generated at multiple FPN levels and then\ndistribute those proposals to their appropriate FPN levels for Faster RCNN.\nAn anchor at one FPN level may predict an RoI that will map to another level,\nhence the need to redistribute the proposals.\n\nOnly inference is supported. 
To train, please use the original Python\noperator in Detectron.\n\nInputs and outputs are examples only; if min/max levels change,\nthe number of inputs and outputs, as well as their level numbering,\nwill change.\n", + "attributes": [ + { + "description": "(int) ROI_CANONICAL_SCALE", + "name": "roi_canonical_scale", + "option": "optional" + }, + { + "description": "(int) ROI_CANONICAL_LEVEL", + "name": "roi_canonical_level", + "option": "optional" + }, + { + "description": "(int) ROI_MAX_LEVEL", + "name": "roi_max_level", + "option": "optional" + }, + { + "description": "(int) ROI_MIN_LEVEL", + "name": "roi_min_level", + "option": "optional" + }, + { + "description": "(int) RPN_MAX_LEVEL", + "name": "rpn_max_level", + "option": "optional" + }, + { + "description": "(int) RPN_MIN_LEVEL", + "name": "rpn_min_level", + "option": "optional" + }, + { + "description": "(int) RPN_POST_NMS_TOP_N", + "name": "rpn_post_nms_topN", + "option": "optional" + } + ], + "inputs": [ + { + "description": "RPN proposals for FPN level 2, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn2" + }, + { + "description": "RPN proposals for FPN level 3, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn3" + }, + { + "description": "RPN proposals for FPN level 4, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn4" + }, + { + "description": "RPN proposals for FPN level 5, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn5" + }, + { + "description": "RPN proposals for FPN level 6, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn6" + }, + { + "description": "RPN objectness probabilities for FPN level 2. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn2" + }, + { + "description": "RPN objectness probabilities for FPN level 3. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn3" + }, + { + "description": "RPN objectness probabilities for FPN level 4. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn4" + }, + { + "description": "RPN objectness probabilities for FPN level 5. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn5" + }, + { + "description": "RPN objectness probabilities for FPN level 6. 
See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn6" + } + ], + "outputs": [ + { + "description": "Top proposals limited to rpn_post_nms_topN total, format (image_index, x1, y1, x2, y2)", + "name": "rois" + }, + { + "description": "RPN proposals for ROI level 2, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn2" + }, + { + "description": "RPN proposals for ROI level 3, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn3" + }, + { + "description": "RPN proposals for ROI level 4, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn4" + }, + { + "description": "RPN proposals for ROI level 5, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn5" + }, + { + "description": "Permutation on the concatenation of all rois_fpni, i=min...max, such that when applied the RPN RoIs are restored to their original order in the input blobs.", + "name": "rois_idx_restore" + } + ], + "support_level": "default" + }, + { + "name": "CollectRpnProposals", + "description": "\n...\n", + "attributes": [ + { + "description": "(int) RPN_MAX_LEVEL", + "name": "rpn_max_level", + "option": "optional" + }, + { + "description": "(int) RPN_MIN_LEVEL", + "name": "rpn_min_level", + "option": "optional" + }, + { + "description": "(int) RPN_POST_NMS_TOP_N", + "name": "rpn_post_nms_topN", + "option": "optional" + } + ], + "inputs": [ + { + "description": "RPN proposals for FPN level 2, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn2" + }, + { + "description": "RPN proposals for FPN level 3, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn3" + }, + { + "description": "RPN proposals for FPN level 4, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn4" + }, + { + "description": "RPN proposals for FPN level 5, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn5" + }, + { + "description": "RPN proposals for FPN level 6, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn6" + }, + { + "description": "RPN objectness probabilities for FPN level 2. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn2" + }, + { + "description": "RPN objectness probabilities for FPN level 3. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn3" + }, + { + "description": "RPN objectness probabilities for FPN level 4. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn4" + }, + { + "description": "RPN objectness probabilities for FPN level 5. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn5" + }, + { + "description": "RPN objectness probabilities for FPN level 6. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn6" + } + ], + "outputs": [ + { + "description": "Top proposals limited to rpn_post_nms_topN total, format (image_index, x1, y1, x2, y2)", + "name": "rois" + } + ], + "support_level": "default" + }, + { + "name": "CollectTensor", + "description": "\nCollect tensor into tensor vector by reservoir sampling,\nargument num_to_collect indicates the max number of tensors that will be\ncollected. The first half of the inputs are tensor vectors, which are also the\noutputs. 
The second half of the inputs are the tensors to be collected into each\nvector (in the same order). The input tensors are collected in all-or-none\nmanner. If they are collected, they will be placed at the same index in the\noutput vectors.\n", + "attributes": [ + { + "description": "The max number of tensors to collect", + "name": "num_to_collect", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "ColwiseMax", + "description": "\nCompute column-wise max reduction of the input tensor. This op takes one input, $X$, of shape $BxMxN$, where $B$ is the batch size, $M$ is number of rows, and $N$ is number of columns. The output of this op, $Y$, is a matrix of shape $BxN$, with one row for each element of the batch, and the same number of columns as the input tensor.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.cc\n\n
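For intuition, this reduction is equivalent to a NumPy max over axis 1 of the $BxMxN$ input; a minimal sketch (assuming `numpy` as `np`):

```
import numpy as np

# Column-wise max per batch element: (B, M, N) -> (B, N).
X = np.random.randint(0, high=20, size=(2, 4, 4)).astype(np.float32)
Y = X.max(axis=1)  # reduce over the M rows, keeping the N columns
print(Y.shape)     # (2, 4)
```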
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ColwiseMax\",\n [\"X\"],\n [\"Y\"]\n)\n\n# Create X, simulating a batch of 2, 4x4 matrices\nX = np.random.randint(0,high=20,size=(2,4,4))\nprint(\"X:\\n\",X)\n\n# Feed X into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\n\n# Run op\nworkspace.RunOperatorOnce(op)\n\n# Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[[17 15 2 6]\n [ 8 12 6 0]\n [ 6 9 7 3]\n [ 4 13 16 13]]\n\n [[ 0 3 4 12]\n [18 1 17 12]\n [ 7 17 13 14]\n [12 17 2 1]]]\nY:\n [[17. 15. 16. 13.]\n [18. 17. 17. 14.]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "A tensor of dimensions $B x M x N$ to compute columnwise-max. Here, $B$ is batch size, and $M$ and $N$ are the number of rows and columns of each element of the batch, respectively.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The output tensor of shape $B x N$, where each row represents the column-wise maximums for that element of the input batch.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ColwiseMaxGradient", + "support_level": "default" + }, + { + "name": "ComputeOffset", + "description": "\nCompute the offsets matrix given cursor and data blobs. Need to be ran at\nbeginning or after reseting cursor\n\nInput(0) is a blob pointing to a TreeCursor, and\n[Input(1),... Input(num_fields)] a list of tensors containing the data for\neach field of the dataset.\n\nComputeOffset is thread safe.\n", + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + }, + { + "description": "First dataset field", + "name": "dataset_field_0" + } + ], + "outputs": [ + { + "description": "Tensor containing offset info for this chunk.", + "name": "field_0" + } + ], + "support_level": "default" + }, + { + "name": "Concat", + "category": "Tensor", + "description": "\nConcatenate a list of tensors into a single tensor. Similar functionality to\nNumpy's [concatenate](https://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html)\nfunction. The `axis` argument specifies what axis along which the arrays will be concatenated.\nWhen set to non-zero (default=0), the `add_axis` argument adds the axis specified in `axis` to\nall input tensors.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/concat_split_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/concat_split_op.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Concat\",\n [\"X1\", \"X2\"],\n [\"Y\", \"split_info\"],\n axis=0\n)\n\nworkspace.FeedBlob(\"X1\", np.array([[1,2],[3,4]]))\nworkspace.FeedBlob(\"X2\", np.array([[5,6]]))\nprint(\"X1:\", workspace.FetchBlob(\"X1\"))\nprint(\"X2:\", workspace.FetchBlob(\"X2\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"split_info:\", workspace.FetchBlob(\"split_info\"))\n\n```\n\n**Result**\n\n```\n\nX1: [[1 2]\n [3 4]]\nX2: [[5 6]]\nY: [[1 2]\n [3 4]\n [5 6]]\nsplit_info: [2 1]\n\n```\n\n
\n\n
\n\n Example 2 \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Concat\",\n [\"X1\", \"X2\"],\n [\"Y\", \"split_info\"],\n add_axis=1,\n axis=3\n)\n\nworkspace.FeedBlob(\"X1\", np.random.randint(10, size=(1, 1, 5, 5))) # NCHW\nworkspace.FeedBlob(\"X2\", np.random.randint(10, size=(1, 1, 5, 5))) # NCHW\nprint(\"X1:\", workspace.FetchBlob(\"X1\"))\nprint(\"X2:\", workspace.FetchBlob(\"X2\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"split_info:\", workspace.FetchBlob(\"split_info\"))\n\n```\n\n**Result**\n\n```\n\nX1: [[[[1 8 3 9 0]\n [6 4 6 5 6]\n [3 9 1 9 9]\n [5 1 0 7 7]\n [9 4 0 0 9]]]]\nX2: [[[[7 0 2 6 1]\n [3 9 4 0 3]\n [5 3 8 9 4]\n [3 4 2 1 0]\n [0 8 8 8 1]]]]\nY: [[[[[1 8 3 9 0]\n [7 0 2 6 1]]\n\n [[6 4 6 5 6]\n [3 9 4 0 3]]\n\n [[3 9 1 9 9]\n [5 3 8 9 4]]\n\n [[5 1 0 7 7]\n [3 4 2 1 0]]\n\n [[9 4 0 0 9]\n [0 8 8 8 1]]]]]\nsplit_info: [1 1]\n\n```\n\n
\n\n ", + "attributes": [ + { + "default": -1, + "description": "Axis to concatenate on.", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "description": "Order of blob dimensions. Concats on the C dimension.", + "name": "order", + "option": "optional", + "type": "string" + }, + { + "description": "Pass non-zero integer to add the axis specified in `axis` to all input tensors.", + "name": "add_axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "name": "inputs", + "option": "variadic" + }, + { + "description": "*(type: Tensor``)* List of input tensors.", + "name": "X1, X2, ..." + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Concatenated tensor.", + "name": "concat_result" + }, + { + "description": "*(type: Tensor``)* The dimensions of the inputs.", + "name": "split_info" + } + ], + "support_level": "default" + }, + { + "name": "ConcatBatchMatMulBatchGatherOp", + "support_level": "default" + }, + { + "name": "ConcatTensorVector", + "description": "\nConcat Tensors in the std::unique_ptr >\nalong the first dimension.\n ", + "inputs": [ + { + "description": "std::unique_ptr >", + "name": "vector of Tensor" + } + ], + "outputs": [ + { + "description": "tensor after concatenating", + "name": "tensor" + } + ], + "support_level": "default" + }, + { + "name": "Conditional", + "description": "\nGiven a 1-D tensor of boolean values, apply conditional operator along the first\ndimension of DataT and DataF and return DataO. Note, DataT and DataF must\nhave the exact same shape and type.\n", + "inputs": [ + { + "description": "Boolean tensor to select DataT or DataF", + "name": "Condition" + }, + { + "description": "Data to use when True", + "name": "DataT" + }, + { + "description": "Data to use when False", + "name": "DataF" + } + ], + "outputs": [ + { + "description": "Output data after applying ConditionalOp", + "name": "DataO" + } + ], + "support_level": "default" + }, + { + "name": "ConditionalSetAtomicBool", + "description": "\nSet an atomic to true if the given condition bool variable is true\n ", + "inputs": [ + { + "description": "Blob containing a unique_ptr>", + "name": "atomic_bool" + }, + { + "description": "Blob containing a bool", + "name": "condition" + } + ], + "support_level": "default" + }, + { + "name": "ConstantFill", + "description": "\nThis operator fills the elements of the output tensor with a constant value\nspecified by the `value` argument.\n\n- The data type is specified by the `dtype` argument\n\n- Currently, the data types supported are *float*, *int32*, *int64*, and *bool*\n\n- If the `dtype` argument is not provided, the data type of `value` is used\n\n- The output tensor shape is either specified by the `shape` argument or will\nmatch the shape of the input tensor if one is provided (if an input tensor is\nprovided, a shape argument should not be set)\n\n- Optional additional dimensions can be appended at the end as specified by\n`extra_shape` argument\n\n- If `input_as_shape` is set to True, the input should be a 1D tensor\ncontaining the desired output shape (the dimensions specified in `extra_shape`\nwill also be appended)\n\n- If a second input V is passed, fill the output with the first element of V\n\nWhen specifying `dtype` argument, use the integer keys from the *DataType* enum\nin TensorProto:\n\n```\nmessage TensorProto {\n ...\n enum DataType {\n UNDEFINED = 0;\n FLOAT = 1; // float\n INT32 = 2; // int\n BYTE = 3; // BYTE, when deserialized, is going to be restored as uint8.\n STRING = 4; // 
string\n BOOL = 5; // bool\n UINT8 = 6; // uint8_t\n INT8 = 7; // int8_t\n UINT16 = 8; // uint16_t\n INT16 = 9; // int16_t\n INT64 = 10; // int64_t\n FLOAT16 = 12; // at::Half\n DOUBLE = 13; // double\n }\n```\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/filler_op.cc\n\n
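In the plain shape-argument case the op behaves like NumPy's `full`; a minimal sketch (assuming `numpy` as `np`, and ignoring the input-blob, `extra_shape`, and `input_as_shape` variants):

```
import numpy as np

# shape=(1,5,5) with the default value 0.0 and dtype FLOAT,
# matching Example 1 below.
Y = np.full((1, 5, 5), 0.0, dtype=np.float32)
print(Y)
```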
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ConstantFill\",\n [],\n [\"Y\"],\n shape=(1,5,5)\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nY: [[[0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]]]\n```\n
\n\n
\n Example 2 \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ConstantFill\",\n [\"X\"],\n [\"Y\"],\n value=4.0,\n dtype=1,\n extra_shape=(1,2)\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(100, size=(3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX: [[86. 30. 84.]\n [34. 51. 9.]\n [29. 86. 59.]]\nY: [[[[4. 4.]]\n\n [[4. 4.]]\n\n [[4. 4.]]]\n\n\n [[[4. 4.]]\n\n [[4. 4.]]\n\n [[4. 4.]]]\n\n\n [[[4. 4.]]\n\n [[4. 4.]]\n\n [[4. 4.]]]]\n```\n\n
\n\n", + "attributes": [ + { + "description": "value to populate output tensor with.", + "name": "value", + "option": "optional" + }, + { + "description": "The data type for the elements of the output tensor. Strictly must be one of the types from *DataType* enum in TensorProto.", + "name": "dtype", + "option": "optional", + "type": "int64" + }, + { + "description": "Shape of the output tensor. Cannot pass an input blob and this arg at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "Additional dimensions appended at the end of the shape indicated by the input blob. Cannot set thisargument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "inputs": [ + { + "description": "*(type: Tensor)* [OPTIONAL] Input tensor to provide shape information.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Output tensor of constant values.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Conv", + "category": "Layer", + "description": "\nThe convolution operator consumes an input vector, a filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n# Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n# Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n# Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n# Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n# Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n
\n\n\n", + "attributes": [ + { + "default": 0, + "name": "pad" + }, + { + "default": 1, + "name": "stride" + }, + { + "name": "exhaustive_search", + "type": "boolean", + "visible": false + } + ], + "inputs": [ + { + "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.", + "name": "X" + }, + { + "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.", + "name": "filter" + }, + { + "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Conv1D", + "description": "\nThe convolution operator consumes an input vector, a 1D filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n# Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n# Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n# Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n# Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n# Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.", + "name": "X" + }, + { + "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.", + "name": "filter" + }, + { + "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Conv1DGradient", + "support_level": "default" + }, + { + "name": "Conv2D", + "description": "\nThe convolution operator consumes an input vector, a 2D filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n# Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n# Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n# Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n# Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n# Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.", + "name": "X" + }, + { + "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.", + "name": "filter" + }, + { + "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Conv2DGradient", + "support_level": "default" + }, + { + "name": "Conv3D", + "description": "\nThe convolution operator consumes an input vector, a 3D filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n# Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n# Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n# Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n# Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n# Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.", + "name": "X" + }, + { + "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.", + "name": "filter" + }, + { + "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Conv3DGradient", + "support_level": "default" + }, + { + "name": "ConvGradient", + "support_level": "default" + }, + { + "name": "ConvRelu", + "support_level": "default" + }, + { + "name": "ConvTranspose", + "category": "Layer", + "description": "\nThe ConvTranspose op takes an input data tensor $X$, an input weight tensor $filter$, and optionally an input bias tensor $bias$. It then computes the transposed convolution, sometimes referred to as deconvolution, and produces a single output tensor $Y$. The hyperparameters of the op such as kernel size, stride, and padding are specified as args. At each stride, the filter is deconvolved with a subset of $X$ and the $bias$ is added. This is done throughout the input data until the output computation is complete.\n\nThe output shapes are computed as follows. The number of channels in the output feature map is the number of kernels specified in the filter blob. The spatial height and width are computed as:\n\n$$H_{out} = (H_{in}-1)*strides[0] - 2*pads[0] + kernels[0]$$\n\n\n$$W_{out} = (W_{in}-1)*strides[1] - 2*pads[1] + kernels[1]$$\n\nNote on the implementation layout: conv_transpose_op_impl.h is the templated implementation of the conv_transpose_op.h file, which is why they are separate files. Also, in the implementation this operator inherits from the *ConvTransposeUnpoolOpBase* operator.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/tree/master/caffe2/operators/conv_transpose_op.h\n- https://github.com/pytorch/pytorch/tree/master/caffe2/operators/conv_transpose_op.cc\n- https://github.com/pytorch/pytorch/tree/master/caffe2/operators/conv_transpose_unpool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ConvTranspose\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernels=[2,2],\n pads=[4,4,4,4],\n strides=[2,2]\n)\n\n# Create X: (N,C,H,W)\ndata = np.random.randn(2,3,5,5).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n# Create filter: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,2,2).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n# Create b: M\nbias = np.array([1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n# Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n# Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (2, 3, 5, 5)\nFilter shape: (3, 1, 2, 2)\nBias shape: (1,)\nY:\n [[[[0.53606427 0.5775447 ]\n [0.40148795 1.5188271 ]]]\n\n\n [[[1.9903406 3.2794335 ]\n [0.09960175 0.31917763]]]]\n\n```\n\n
\n\n ", + "attributes": [ + { + "description": "Should the legacy padding be VALID or SAME. When used, pads should not be used.", + "name": "legacy_pad", + "option": "optional", + "type": "int64" + }, + { + "description": "Desired kernel size. If left at default the kernel size will be inferred from the input $filter$ blob.", + "name": "kernels", + "option": "optional", + "type": "int64[]" + }, + { + "description": "Controls the stride of the kernel as it traverses the input blob.", + "name": "strides", + "option": "optional", + "type": "int64[]" + }, + { + "description": "Controls the amount of padding applied to the input feature map before computation.", + "name": "pads", + "option": "optional", + "type": "int64[]" + }, + { + "description": "", + "name": "adjs", + "option": "optional", + "type": "int64[]" + }, + { + "default": "NCHW", + "description": "Specifies the order of the input data blob, where $N$ is batch size, $C$ is number of channels, $H$ is spatial height, and $W$ is spatial width. The only other valid option is \"NHWC\".", + "name": "order", + "option": "optional", + "type": "string" + }, + { + "default": 0, + "description": "", + "name": "shared_buffer", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "", + "name": "no_bias", + "option": "optional", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be operated on.", + "name": "X" + }, + { + "description": "The filter blob, of shape $(M, C_{out}, K_H, K_W)$, containing the filters to be used in the transposed convolution.", + "name": "filter" + }, + { + "description": "The bias blob, of length $C_{out}$, containing the biases for the operation, one bias per output channel. If not passed, biases assumed to be zeros.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the operation.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ConvTransposeGradient", + "support_level": "default" + }, + { + "name": "Copy", + "description": "\nCopy input tensor into output, potentially across devices.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/copy_op.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/copy_op.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Copy\",\n [\"input\"],\n [\"output\"]\n)\n\nworkspace.FeedBlob(\"input\", np.random.rand(3,3))\nprint(\"input:\", workspace.FetchBlob(\"input\"))\nworkspace.RunOperatorOnce(op)\nprint(\"output:\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n[[0.16826761 0.68168217 0.55196001]\n [0.19735483 0.34837823 0.69015595]\n [0.09448514 0.57390828 0.37097193]]\noutput:\n[[0.16826761 0.68168217 0.55196001]\n [0.19735483 0.34837823 0.69015595]\n [0.09448514 0.57390828 0.37097193]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor*): input tensor to copy", + "name": "input" + } + ], + "outputs": [ + { + "description": "(*Tensor*): copy of input tensor", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "CopyFromCPUInput", + "description": "\nTake a CPU input tensor and copy it to an output in the current\nContext (GPU or CPU). This may involves cross-device MemCpy.\n", + "inputs": [ + { + "description": "The input CPU tensor.", + "name": "input" + } + ], + "outputs": [ + { + "description": "either a TensorCUDA or a TensorCPU", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "CopyOnDeviceLike", + "description": "Copy input tensor into output to the specific device.", + "inputs": [ + { + "description": "The input tensor.", + "name": "input" + }, + { + "description": "Tensor, on which device the copy will be performed.", + "name": "dst" + } + ], + "outputs": [ + { + "description": "Tensor that will contain a copy of the input.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "CopyRowsToTensor", + "description": "\n This operator takes in a 2d tensor, a list of indices, and a 1d tensor\n with the same width of the 2d tensor. It will replace the rows in 2d\n tensor specified in indices with the 2d tensor. The operator does an\n in-place change to the input tensor.\n Example:\n INPUT_TENSOR = [[1, 2], [3, 4], [5, 6]]\n INDICES = [1]\n ROW = [9, 0]\n OUTPUT_TENSOR = [[1, 2], [9, 0], [5, 6]]\n ", + "inputs": [ + { + "description": "Input tensor needs to be modified.", + "name": "input_tensor" + }, + { + "description": "Indices of rows need to be copied", + "name": "indices" + }, + { + "description": "1-d tensor that is going to replace the rows", + "name": "row" + } + ], + "outputs": [ + { + "description": "updated tensor", + "name": "output_tensor" + } + ], + "support_level": "default" + }, + { + "name": "CopyRowsToTensorGradient", + "support_level": "default" + }, + { + "name": "Cos", + "description": "\nCalculates the cosine of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/cos_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Cos\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [0.6816719 0.76771533 0.933932 0.01404487 0.11862425]\nY: [0.7765203 0.71949923 0.5946774 0.99990135 0.9929724 ]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor calculated as the cosine of the input tensor, element-wise.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "CosGradient", + "support_level": "default" + }, + { + "name": "Cosh", + "description": "\nCalculates the hyperbolic cosine of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/cosh_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Cosh\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [0.66423494 0.32074615 0.81523746 0.90423071 0.39275789]\nY: [1.22883528 1.05188156 1.35112322 1.43744212 1.07812598]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The hyperbolic cosine values of the input tensor, computed element-wise", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "CoshGradient", + "support_level": "default" + }, + { + "name": "CosineEmbeddingCriterion", + "description": "\nCosineEmbeddingCriterion takes two inputs: the similarity value and\nthe label, and computes the elementwise criterion output as\n\n output = 1 - s, if y == 1\n max(0, s - margin), if y == -1\n", + "inputs": [ + { + "description": "The cosine similarity as a 1-dim TensorCPU.", + "name": "S" + }, + { + "description": "The label as a 1-dim TensorCPU with int value of 1 or -1.", + "name": "Y" + } + ], + "outputs": [ + { + "description": "The output loss with the same dimensionality as S.", + "name": "loss" + } + ], + "support_level": "default" + }, + { + "name": "CosineEmbeddingCriterionGradient", + "support_level": "default" + }, + { + "name": "CosineSimilarity", + "description": "\nThis op takes two input float tensors of the same size, $X$ and $Y$, and produces one output float tensor , $Z$, calculated as the cosine similarity between $X$ and $Y$. Recall, the cosine similarity between two tensors $X$ and $Y$ is defined as:\n\n$$\\mathbf{Z}=CosineSimilarity(\\mathbf{X},\\mathbf{Y}) = \\frac{\\mathbf{X}\\cdot\\mathbf{Y}}{\\|\\mathbf{X}\\|\\|\\mathbf{Y}\\|} = \\frac{\\sum_n^{i=1}X_iY_i}{\\sqrt{\\sum_n^{i=1}X_i^2}\\sqrt{\\sum_n^{i=1}Y_i^2}}$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"CosineSimilarity\",\n [\"X\", \"Y\"],\n [\"Z\"]\n)\n\n# Create X\nX = np.random.randn(3, 3)\nprint(\"X:\\n\",X)\n\n# Create Y\nY = np.random.randn(3, 3)\nprint(\"Y:\\n\",Y)\n\n# Feed X & Y into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"Y\", Y.astype(np.float32))\n\n# Run op\nworkspace.RunOperatorOnce(op)\n\n# Collect Output\nprint(\"Z:\\n\", workspace.FetchBlob(\"Z\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-0.42635564 -0.23831588 -0.25515547]\n [ 1.43914719 -1.05613228 1.01717373]\n [ 0.06883105 0.33386519 -1.46648334]]\nY:\n [[-0.90648691 -0.14241514 -1.1070837 ]\n [ 0.92152729 -0.28115511 -0.17756722]\n [-0.88394254 1.34654037 -0.80080998]]\nZ:\n [-1.7849885e-23 1.7849885e-23 -1.0842022e-07]\n\n```\n\n
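**NumPy cross-check**\n\nAs a sanity check (a minimal sketch, not part of the operator schema), the same row-wise quantity can be reproduced directly in NumPy, reusing the `X` and `Y` arrays created above:\n\n```\n\nimport numpy as np\n\n# One similarity per row i: dot(X_i, Y_i) / (||X_i|| * ||Y_i||)\nnum = np.sum(X * Y, axis=1)\nden = np.linalg.norm(X, axis=1) * np.linalg.norm(Y, axis=1)\nprint(\"Z (reference):\", num / den)\n\n```\n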
\n\n", + "inputs": [ + { + "description": "1D or 2D input tensor", + "name": "X" + }, + { + "description": "1D or 2D input tensor (must have the same shape as X)", + "name": "Y" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Z" + } + ], + "support_level": "default" + }, + { + "name": "CosineSimilarityGradient", + "support_level": "default" + }, + { + "name": "CountDown", + "description": "\nIf the internal count value > 0, decreases count value by 1 and outputs False,\notherwise outputs True.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n# Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n# Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n# Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n# Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n# Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr<caffe2::Counter<long long>, std::__1::default_delete<caffe2::Counter<long long> > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.", + "name": "counter" + } + ], + "outputs": [ + { + "description": "*(type: bool)* False unless the internal count is zero.", + "name": "done" + } + ], + "support_level": "default" + }, + { + "name": "CountUp", + "description": "\nIncreases count value by 1 and outputs the previous value atomically.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n# Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n# Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n# Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n# Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n# Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr<caffe2::Counter<long long>, std::__1::default_delete<caffe2::Counter<long long> > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.", + "name": "counter" + } + ], + "outputs": [ + { + "description": "*(type: int)* Count value BEFORE this operation.", + "name": "previous_count" + } + ], + "support_level": "default" + }, + { + "name": "CreateAtomicBool", + "description": "Create an unique_ptr blob to hold an atomic", + "outputs": [ + { + "description": "Blob containing a unique_ptr>", + "name": "atomic_bool" + } + ], + "support_level": "default" + }, + { + "name": "CreateBlobsQueue", + "support_level": "default" + }, + { + "name": "CreateBlobsQueueDB", + "description": "Create a DBReader from a BlobsQueue", + "attributes": [ + { + "description": "(default: -1 (no key)) index of blob for DB key in the BlobsQueue.", + "name": "key_blob_index", + "option": "optional" + }, + { + "description": "(default: 0) index of blob for DB value in the BlobsQueue.", + "name": "value_blob_index", + "option": "optional" + }, + { + "description": "(default: 0.0 (no timeout)) Timeout in seconds for reading from the BlobsQueue.", + "name": "timeout_secs", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The shared pointer to a queue containing Blobs.", + "name": "queue" + } + ], + "outputs": [ + { + "description": "The DBReader for the given BlobsQueue", + "name": "reader" + } + ], + "support_level": "default" + }, + { + "name": "CreateCommonWorld", + "description": "\nCreates a common world for communication operators.\n", + "attributes": [ + { + "description": "(int) size of the common world.", + "name": "size", + "option": "optional" + }, + { + "description": "(int) rank of this node in the common world.", + "name": "rank", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Key/value handler for rendezvous (optional).", + "name": "kv_handler" + } + ], + "outputs": [ + { + "description": "A common world for collective operations.", + "name": "comm_world" + } + ], + "support_level": "default" + }, + { + "name": "CreateCounter", + "description": "\nCreates a count-down counter with initial value specified by the `init_count`\nargument.\n\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n# Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n# Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n# Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n# Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n# Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr<caffe2::Counter<long long>, std::__1::default_delete<caffe2::Counter<long long> > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
\n\n", + "attributes": [ + { + "default": 0, + "description": "Initial count for the counter, must be >= 0.", + "name": "init_count", + "option": "optional", + "type": "int64" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a new counter.", + "name": "counter" + } + ], + "support_level": "default" + }, + { + "name": "CreateDB", + "support_level": "default" + }, + { + "name": "CreateMap", + "description": "Create an empty map blob", + "attributes": [ + { + "description": "Key's TensorProto::DataType (default INT32)", + "name": "key_dtype", + "option": "optional" + }, + { + "description": "Value's TensorProto::DataType (default INT32)", + "name": "value_dtype", + "option": "optional" + } + ], + "outputs": [ + { + "description": "Blob reference to the map", + "name": "map blob" + } + ], + "support_level": "default" + }, + { + "name": "CreateMutex", + "description": "Creates an unlocked mutex and returns it in a unique_ptr blob.", + "outputs": [ + { + "description": "Blob containing a std::unique_ptr.", + "name": "mutex_ptr" + } + ], + "support_level": "default" + }, + { + "name": "CreateRebatchingQueue", + "description": "\nCreates the Queue.\n", + "attributes": [ + { + "description": "Number of input tensors the queue will support", + "name": "num_blobs", + "option": "optional" + }, + { + "description": "Maximal number of elements the queue can hold at any given point", + "name": "capacity", + "option": "optional" + } + ], + "outputs": [ + { + "description": "object representing the queue", + "name": "queue" + } + ], + "support_level": "default" + }, + { + "name": "CreateScope", + "description": "\n'CreateScope' operator initializes and outputs empty scope that is used\nby Do operator to store local blobs\n ", + "support_level": "default" + }, + { + "name": "CreateTensorVector", + "description": "Create a std::unique_ptr >", + "support_level": "default" + }, + { + "name": "CreateTextFileReader", + "description": "Create a text file reader. Fields are delimited by .", + "attributes": [ + { + "description": "Path to the file.", + "name": "filename", + "option": "optional" + }, + { + "description": "Number of passes over the file.", + "name": "num_passes", + "option": "optional" + }, + { + "description": "List with type of each field. Type enum is found at core.DataType.", + "name": "field_types", + "option": "optional" + } + ], + "outputs": [ + { + "description": "Pointer to the created TextFileReaderInstance.", + "name": "handler" + } + ], + "support_level": "default" + }, + { + "name": "CreateTreeCursor", + "description": "\nCreates a cursor to iterate through a list of tensors, where some of those\ntensors contain the lengths in a nested schema. 
The schema is determined by\nthe `fields` argument.\n\nFor example, to represent the following schema:\n\n Struct(\n a=Int(),\n b=List(List(Int)),\n c=List(\n Struct(\n c1=String,\n c2=List(Int),\n ),\n ),\n )\n\nthe field list will be:\n [\n \"a\",\n \"b:lengths\",\n \"b:values:lengths\",\n \"b:values:values\",\n \"c:lengths\",\n \"c:c1\",\n \"c:c2:lengths\",\n \"c:c2:values\",\n ]\n\nAnd for the following instance of the struct:\n\n Struct(\n a=3,\n b=[[4, 5], [6, 7, 8], [], [9]],\n c=[\n Struct(c1='alex', c2=[10, 11]),\n Struct(c1='bob', c2=[12]),\n ],\n )\n\nThe values of the fields will be:\n {\n \"a\": [3],\n \"b:lengths\": [4],\n \"b:values:lengths\": [2, 3, 0, 1],\n \"b:values:values\": [4, 5, 6, 7, 8, 9],\n \"c:lengths\": [2],\n \"c:c1\": [\"alex\", \"bob\"],\n \"c:c2:lengths\": [2, 1],\n \"c:c2:values\": [10, 11, 12],\n }\n\nIn general, every field name in the format \"{prefix}:lengths\" defines a domain\n\"{prefix}\", and every subsequent field in the format \"{prefix}:{field}\" will\nbe in that domain, and the length of the domain is provided for each entry of\nthe parent domain. In the example, \"b:lengths\" defines a domain of length 4, so\nevery field under domain \"b\" will have 4 entries.\nThe \"lengths\" field for a given domain must appear before any reference to\nthat domain.\n\nReturns a pointer to an instance of the Cursor, which keeps the current offset\non each of the domains defined by `fields`. Cursor also ensures thread-safety\nsuch that ReadNextBatch and ResetCursor can be used safely in parallel.\n\nA cursor does not contain data per se, so calls to ReadNextBatch actually need\nto pass a list of blobs containing the data to read for each one of the fields.\n", + "attributes": [ + { + "description": "A list of strings each one representing a field of the dataset.", + "name": "fields", + "option": "optional" + } + ], + "outputs": [ + { + "description": "A blob pointing to an instance of a new TreeCursor.", + "name": "cursor" + } + ], + "support_level": "default" + }, + { + "name": "CrossEntropy", + "description": "\nThis operator computes the cross entropy between an $NxD$ dimensional input data tensor $X$ and an $NxD$ dimensional input label tensor $label$. The op produces a single length $N$ output tensor $Y$. Here, $N$ is considered the batch size and $D$ is the size of each element in the batch. In practice, it is most commonly used at the end of models as a part of the loss computation, after the SoftMax operator and before the AveragedLoss operator. The cross entropy operation is defined as follows\n\n$$Y_i = -\\sum_j label_{ij} \\log(X_{ij})$$\n\nwhere $i$ indexes the $N$ examples in the batch and $j$ indexes the $D$ classes, so $X_{ij}$ is the classifier's predicted probability of class $j$ for example $i$. Each log has a lower limit for numerical stability.\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"CrossEntropy\",\n [\"X\", \"label\"],\n [\"Y\"]\n)\n\n# Create X: Sample softmax output for 5-class model\nX = np.array([[.01, .05, .02, .02, .9],[.03, .1, .42, .05, .4]])\nprint(\"X:\\n\",X)\n\n# Create label: Sample 1-hot ground truth label vectors\nlabel = np.array([[0.,0.,0.,0.,1.],[0.,0.,1.,0.,0.]])\nprint(\"label:\\n\",label)\n\n# Feed X & label into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"label\", label.astype(np.float32))\n\n# Run op\nworkspace.RunOperatorOnce(op)\n\n# Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[0.01 0.05 0.02 0.02 0.9 ]\n [0.03 0.1 0.42 0.05 0.4 ]]\nlabel:\n [[0. 0. 0. 0. 1.]\n [0. 0. 1. 0. 0.]]\nY:\n [0.10536055 0.8675006 ]\n\n```\n\n
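**NumPy cross-check**\n\nAn illustrative sketch (not part of the operator schema) that mirrors the per-row definition $Y_i = -\\sum_j label_{ij} \\log(X_{ij})$, reusing the `X` and `label` arrays created above; the epsilon stands in for the op's lower limit on the log:\n\n```\n\nimport numpy as np\n\neps = 1e-20  # assumed stand-in for the op's internal lower limit on the log\nY_ref = -np.sum(label * np.log(np.maximum(X, eps)), axis=1)\nprint(\"Y (reference):\", Y_ref)  # approx [0.10536 0.86750]\n\n```\n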
\n\n\n", + "inputs": [ + { + "description": "Input tensor which is almost always the result of a softmax operation. $X$ is a 2D array of size $NxD$, where $N$ is the batch size and $D$ is the number of classes.", + "name": "X" + }, + { + "description": "Blob containing the labels used to compare the input. $label$ is the same shape as $X$.", + "name": "label" + } + ], + "outputs": [ + { + "description": "Output blob from the cross entropy computation. $Y$ is 1D length $N$ tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "CrossEntropyGradient", + "support_level": "default" + }, + { + "name": "CTCBeamSearchDecoder", + "description": "Prefix beam search decoder for connectionist temporal classification.", + "attributes": [ + { + "description": "Maximum number of candidates to carry over to next activation step.", + "name": "beam_width", + "option": "optional" + }, + { + "description": "Probability threshold below which outputs are ignored.", + "name": "prune_threshold", + "option": "optional" + } + ], + "inputs": [ + { + "description": "3D float Tensor sized [max_activation_length, batch_size, alphabet_size] of network logits (before softmax application).", + "name": "INPUTS" + }, + { + "description": "(optional) 1D int vector containing sequence lengths, having size [batch_size] seq_len will be set to max_time if not provided.", + "name": "SEQ_LEN" + } + ], + "outputs": [ + { + "description": "Output_len matrix size (batch_size * num_candidates). Each index stores lengths of candidates for its corresponding batch item.", + "name": "OUTPUT_LEN" + }, + { + "description": "Values vector, size (total_decoded_outputs). The flattened vector of final output sequences, in batch order.", + "name": "VALUES" + }, + { + "description": "Probability vector, size (total_decoded_outputs). Each index stores final output probability of its corresponding batch item.", + "name": "OUTPUT_PROB" + } + ], + "support_level": "default" + }, + { + "name": "CTCGreedyDecoder", + "description": "Greedy decoder for connectionist temporal classification.", + "attributes": [ + { + "description": "When merge_repeated is true, merge repeated classes in output.", + "name": "merge_repeated", + "option": "optional" + } + ], + "inputs": [ + { + "description": "3D float Tensor sized [max_time, batch_size, num_classes]", + "name": "INPUTS" + }, + { + "description": "(optional) 1D int vector containing sequence lengths, having size [batch_size]seq_len will be set to max_time if not provided", + "name": "SEQ_LEN" + } + ], + "outputs": [ + { + "description": "Output_len matrix size (batch). The row store: [decoded_length]", + "name": "OUTPUT_LEN" + }, + { + "description": "Values vector, size (total_decoded_outputs). The vector stores the decoded classes", + "name": "VALUES" + } + ], + "support_level": "default" + }, + { + "name": "Cube", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor calculated as the cube of the input tensor, element-wise.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "CubeGradient", + "support_level": "default" + }, + { + "name": "DataCouple", + "description": "\n\nA one to one operator that takes an arbitrary number of input and output blobs\nsuch that each input blob is inplace with it's matching output blob. It then proceedes\nto do nothing with each of these operators. This serves two purposes. 
It can make it\nappear as if a blob has been written to, as well as tie together different blobs\nin a data dependency.\n\n", + "support_level": "default" + }, + { + "name": "DBExists", + "description": "\nChecks if the db described by the arguments exists.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/load_save_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"DBExists\",\n [],\n [\"exists\"],\n db_name=\"test_db\",\n db_type=\"leveldb\",\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"exists:\", workspace.FetchBlob(\"exists\"))\n\n```\n\n
\n\n", + "attributes": [ + { + "default": 0, + "description": "If set to non-zero, save the db directly to the path specified by the `db` arg. If not set (default), prepend the path of the current root folder of the workspace to the path specified by the `db` arg.", + "name": "absolute_path", + "option": "optional", + "type": "int64" + }, + { + "description": "Path to the db in question; see the `absolute_path` arg details for options regarding the current root folder of the workspace.", + "name": "db_name", + "option": "optional", + "type": "string" + }, + { + "description": "Type of db to save (options: \"lmdb\", \"leveldb\", \"minidb\").", + "name": "db_type", + "option": "optional", + "type": "string" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Scalar boolean output tensor. True if the db exists, else false.", + "name": "exists" + } + ], + "support_level": "default" + }, + { + "name": "DenseVectorToIdList", + "description": "\nDenseVectorToIdList: Convert a blob with dense feature into a ID_LIST.\n\nAn ID_LIST is a list of IDs (may be ints, often longs) that represents a single\nfeature. As described in https://caffe2.ai/docs/sparse-operations.html, a batch\nof ID_LIST examples is represented as a pair of lengths and values where the\n`lengths` (int32) segment the `values` or ids (int32/int64) into examples.\n\nInput is a single blob where the first dimension is the batch size and the\nsecond dimension is the length of dense vectors. This operator produces a\nID_LIST where out_values are the indices of non-zero entries\nand out_lengths are the number of non-zeros entries in each row.\n\n", + "inputs": [ + { + "description": "A data blob of dense vectors", + "name": "values" + } + ], + "outputs": [ + { + "description": "Lengths of the sparse feature", + "name": "out_lengths" + }, + { + "description": "Values of the sparse feature", + "name": "out_values" + } + ], + "support_level": "default" + }, + { + "name": "DepthConcat", + "description": "Backward compatible operator name for Concat.", + "support_level": "default" + }, + { + "name": "DepthSplit", + "description": "Backward compatible operator name for Split.", + "support_level": "default" + }, + { + "name": "DequeueBlobs", + "description": "\n Dequeue the blobs from queue.\n ", + "attributes": [ + { + "description": "Timeout in secs, default: no timeout", + "name": "timeout_secs", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The shared pointer for the BlobsQueue", + "name": "queue" + } + ], + "outputs": [ + { + "description": "The blob to store the dequeued data", + "name": "blob" + } + ], + "support_level": "default" + }, + { + "name": "DequeueRebatchingQueue", + "description": "\nDequeue Tensors from the Queue.\nIf the Queue is closed this might return less elements than asked.\nIf num_elements > 1 the returned elements will be concatenated into one\ntensor per component.\n", + "attributes": [ + { + "description": "Number of elements to dequeue. 
By default we dequeue one element.", + "name": "num_elements", + "option": "optional" + } + ], + "inputs": [ + { + "description": "object representing the queue", + "name": "rebatching_queue" + }, + { + "description": "First tensor to enqueue", + "name": "tensor" + } + ], + "support_level": "default" + }, + { + "name": "DestroyCommonWorld", + "description": "Closes all connections managed by a common world.", + "inputs": [ + { + "description": "The common world to be destroyed.", + "name": "common_world" + } + ], + "support_level": "default" + }, + { + "name": "DiagonalFill", + "description": "\nThe operator fills the diagonal elements of the output tensor (>= 2D)\nwith a constant value specified by the 'value' argument, and all other elements to 0. If the\nnumber of dimensions of the output tensor is greater than 2, all dimensions\nmust be equal.\n\nThe data type is specified by the 'dtype' argument. The 'dtype' argument must\nbe one of the data types specified in the 'DataType' enum field in the\nTensorProto message. If the 'dtype' argument is not provided, the data type of\n'value' is used.\n\nThe output tensor shape is specified by the 'shape' argument. If the number of\ninputs is 1, the shape will be identical to that of the input at run time with\noptional additional dimensions appended at the end as specified by the 'extra_shape'\nargument. In that case the 'shape' argument should not be set.\n\nIf input_as_shape is set to true, then the input should be a 1D tensor\ncontaining the desired output shape (the dimensions specified in extra_shape\nwill also be appended).\n\nNOTE: Currently, it supports data type of float, int32, int64, and bool.\n", + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "value", + "option": "optional" + }, + { + "description": "The data type for the elements of the output tensor. Strictly must be one of the types from DataType enum in TensorProto.", + "name": "dtype", + "option": "optional" + }, + { + "description": "The shape of the output tensor. Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicated by the input blob. Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "If set to true, the input is interpreted as a 1D tensor containing the desired output shape", + "name": "input_as_shape", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor (optional) to provide shape information.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output tensor whose type is specified by the 'dtype' argument", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "DistributeFpnProposals", + "description": "\n...\n", + "attributes": [ + { + "description": "(int) ROI_CANONICAL_SCALE", + "name": "roi_canonical_scale", + "option": "optional" + }, + { + "description": "(int) ROI_CANONICAL_LEVEL", + "name": "roi_canonical_level", + "option": "optional" + }, + { + "description": "(int) ROI_MAX_LEVEL", + "name": "roi_max_level", + "option": "optional" + }, + { + "description": "(int) ROI_MIN_LEVEL", + "name": "roi_min_level", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Top proposals limited to rpn_post_nms_topN total, format (image_index, x1, y1, x2, y2)", + "name": "rois" + } + ], + "outputs": [ + { + "description": "RPN proposals for ROI level 2, format 
(image_index, x1, y1, x2, y2)", + "name": "rois_fpn2" + }, + { + "description": "RPN proposals for ROI level 3, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn3" + }, + { + "description": "RPN proposals for ROI level 4, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn4" + }, + { + "description": "RPN proposals for ROI level 5, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn5" + }, + { + "description": "Permutation on the concatenation of all rois_fpni, i=min...max, such that when applied the RPN RoIs are restored to their original order in the input blobs.", + "name": "rois_idx_restore" + } + ], + "support_level": "default" + }, + { + "name": "Div", + "description": "\nPerforms element-wise binary division (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Div\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[18,8],[2,9]]))\nworkspace.FeedBlob(\"B\", np.array([[9,2],[3,2]]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[18 8]\n [ 2 9]]\nB:\n[[9 2]\n [3 2]]\nC:\n[[2 4]\n [0 4]]\n\n```\n\n
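**Broadcast sketch**\n\nA minimal sketch of the `broadcast=1` path described above (same workspace conventions as the example); here `B` is a size-1 tensor, i.e. the scalar case from the shape table:\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Div\",\n [\"A\", \"B\"],\n [\"C\"],\n broadcast=1,\n)\n\nworkspace.FeedBlob(\"A\", np.array([[18., 8.], [2., 9.]]).astype(np.float32))\nworkspace.FeedBlob(\"B\", np.array([2.]).astype(np.float32))  # scalar divisor\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))  # expected approx [[9. 4.] [1. 4.5]]\n\n```\n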
\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions and type as A.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "DivGradient", + "support_level": "default" + }, + { + "name": "Do", + "description": "\n'Do' control operator, executes a subnet in a separate workspace.\nLast blobs in the input and output lists should be the same blob created with\nCreateScope op. Arguments 'inner_blobs' and 'outer_blobs_idx' provide a mapping\nbetween selected inner blob names and corresponding outer blob indices.\n ", + "attributes": [ + { + "description": "Subnet with blob bindings", + "name": "net", + "option": "optional" + }, + { + "description": "List of inner net blob names to bind to outer workspace", + "name": "inner_blobs", + "option": "optional" + }, + { + "description": "Indices of corresponding outer workspace blobs, in order: operator inputs, operator outputs (skipping workspace blobs)", + "name": "outer_blobs_idx", + "option": "optional" + }, + { + "description": "List of blobs from the forward Do operator workspace needed in backward pass, used in gradient Do operator", + "name": "saved_fwd_blobs", + "option": "optional" + }, + { + "description": "Whether to reuse workspace or create a new one in a given scope", + "name": "reuse_workspace", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "DotProduct", + "description": "\nComputes and outputs the dot product of the two input float tensors `X` and `Y`.\nNote that `X` and `Y` must be either 1D or 2D, and they must be the same shape.\nThe output tensor is 1D, which represents either the product of each element in\na respective dimension if the inputs are 1D, or the sum of the products in a\ngiven dimension if the inputs are 2D matrices. Note that the actual dot product\nis a scalar value, which is effectively the sum of the elements in the 1D\noutput tensor.\n\nFor 1D inputs:\nGiven two vectors $X = [x_0, x_1, x_2]$ and $Y = [y_0, y_1, y_2]$; $Z = [x_0 * y_0, x_1 * y_1, x_2 * y_2]$\n\nFor 2D inputs:\nGiven two matrices:\n$$X = [[x_0^0, x_1^0, x_2^0], \\\\ [x_0^1, x_1^1, x_2^1], \\\\ [x_0^2, x_1^2, x_2^2], \\\\ ..., \\\\ [x_0^n, x_1^n, x_2^n]]$$\n\nand\n\n$$Y = [[y_0^0, y_1^0, y_2^0], \\\\ [y_0^1, y_1^1, y_2^1], \\\\ [y_0^2, y_1^2, y_2^2], \\\\ ..., \\\\ [y_0^n, y_1^n, y_2^n]]$$\n\nthen\n\n$$Z = \\biggl[\\Big((x_0^0 * y_0^0) + (x_1^0 * y_1^0) + (x_2^0 * y_2^0)\\Big), \\\\ \\Big((x_0^1 * y_0^1) + (x_1^1 * y_1^1) + (x_2^1 * y_2^1)\\Big), \\\\ \\Big((x_0^2 * y_0^2) + (x_1^2 * y_1^2) + (x_2^2 * y_2^2)\\Big), \\\\ ..., \\\\ \\Big((x_0^n * y_0^n) + (x_1^n * y_1^n) + (x_2^n * y_2^n)\\Big)\\biggr]$$\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"DotProduct\",\n [\"X\", \"Y\"],\n [\"Z\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(20, size=(5)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", np.random.randint(20, size=(5)).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Z:\\n\", workspace.FetchBlob(\"Z\"))\n\n\nworkspace.ResetWorkspace()\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", np.random.randint(10, size=(3,3)).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Z:\\n\", workspace.FetchBlob(\"Z\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [ 2. 15. 2. 7. 12.]\nY:\n [ 3. 12. 9. 3. 18.]\nZ:\n [ 6. 180. 18. 21. 216.]\nX:\n [[2. 0. 4.]\n [7. 7. 4.]\n [7. 9. 9.]]\nY:\n [[2. 0. 8.]\n [9. 6. 1.]\n [7. 8. 0.]]\nZ:\n [ 36. 109. 121.]\n\n```\n\n
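**NumPy cross-check**\n\nA minimal reference sketch for the 2D case above: fetching the blobs back and summing the elementwise products per row should reproduce `Z`:\n\n```\n\nimport numpy as np\n\n# Row-wise dot products for the 2D run above; for 1D inputs this\n# reduces to the plain elementwise product X * Y\nX2 = workspace.FetchBlob(\"X\")\nY2 = workspace.FetchBlob(\"Y\")\nprint(\"Z (reference):\", np.sum(X2 * Y2, axis=1))\n\n```\n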
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* 1D or 2D input tensor.", + "name": "X" + }, + { + "description": "*(type: Tensor``)* 1D or 2D input tensor (must have the same shape as X).", + "name": "Y" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* 1D output tensor.", + "name": "Z" + } + ], + "support_level": "default" + }, + { + "name": "DotProductGradient", + "support_level": "default" + }, + { + "name": "DotProductWithPadding", + "description": "\nGiven two input float tensors X, Y with different shapes and produces one\noutput float tensor of the dot product between X and Y. We currently support\ntwo kinds of strategies to achieve this. Before doing normal dot_product 1)\npad the smaller tensor (using pad_value) to the same shape as the other one.\n2) replicate the smaller tensor to the same shape as the other one. Note the\nfirst dimension of X, Y must be equal. Only the second dimension of X or Y\ncan be padded.\n", + "attributes": [ + { + "description": "the padding value for tensors with smaller dimension", + "name": "pad_value", + "option": "optional" + }, + { + "description": "whether to replicate the smaller tensor or not", + "name": "replicate", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1D or 2D input tensor", + "name": "X" + }, + { + "description": "1D or 2D input tensor", + "name": "Y" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Z" + } + ], + "support_level": "default" + }, + { + "name": "DotProductWithPaddingGradient", + "support_level": "default" + }, + { + "name": "Dropout", + "category": "Dropout", + "description": "\n\n`Dropout` takes one input data tensor (`X`) and produces two tensor outputs, `Y` and\n`mask`. If the `is_test` argument is zero (default=0), the output `Y` will be the input\nwith random elements zeroed. The probability that a given element is zeroed is\ndetermined by the `ratio` argument.\n\nIf the `is_test` argument is set to non-zero, the output `Y` is exactly the same as the\ninput `X`. Note that outputs are scaled by a factor of $\\frac{1}{1-ratio}$ during\ntraining, so that during test time, we can simply compute an identity function. This\nscaling is important because we want the output at test time to equal the expected value\nat training time. Dropout has been proven to be an effective regularization technique to\nprevent overfitting during training.\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/dropout_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/dropout_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Dropout\",\n [\"X\"],\n [\"Y\"] + [\"mask\"],\n ratio=0.5,\n is_test=0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(5, 5)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"mask:\", workspace.FetchBlob(\"mask\"))\n```\n\n**Result**\n\n```\nX: [[5. 4. 3. 6. 9.]\n [2. 1. 8. 0. 9.]\n [7. 3. 0. 6. 3.]\n [1. 8. 2. 6. 4.]\n [6. 2. 6. 4. 0.]]\nY: [[ 0. 0. 0. 12. 18.]\n [ 0. 0. 16. 0. 0.]\n [ 0. 0. 0. 12. 6.]\n [ 0. 0. 4. 0. 0.]\n [12. 0. 0. 0. 0.]]\nmask: [[False False False True True]\n [False False True True False]\n [False False True True True]\n [False False True False False]\n [ True False False False False]]\n```\n\n
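**Test-mode sketch**\n\nA brief sketch of the `is_test` behavior described above (reusing the `X` blob fed in the example): with `is_test=1` the op reduces to the identity, which is why training-time outputs are scaled by $\\frac{1}{1-ratio}$:\n\n```\n\nop_test = core.CreateOperator(\n \"Dropout\",\n [\"X\"],\n [\"Y_test\"],\n ratio=0.5,\n is_test=1\n)\n\nworkspace.RunOperatorOnce(op_test)\n# In test mode the output equals the input exactly\nprint(np.array_equal(workspace.FetchBlob(\"X\"), workspace.FetchBlob(\"Y_test\")))\n\n```\n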
\n\n", + "attributes": [ + { + "default": 0.5, + "description": "Probability of an element to be zeroed.", + "name": "ratio", + "option": "optional", + "type": "float32" + }, + { + "default": 0, + "description": "If zero (train mode), perform dropout. If non-zero(test mode), Y = X.", + "name": "is_test", + "type": "int64" + } + ], + "inputs": [ + { + "description": "The input data as Tensor.", + "name": "data" + }, + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The output.", + "name": "output" + }, + { + "description": "*(type: Tensor``)* The output mask containing boolean values foreach element, signifying which elements are dropped out. If `is_test` isnonzero, this output is not filled.", + "name": "mask" + }, + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "DropoutGrad", + "support_level": "default" + }, + { + "name": "ElementwiseLinear", + "description": "\nThis op computes the elementwise linear combination of a batch of input vectors with a weight vector and bias vector. As input, the op takes an input tensor $X$ of shape $NxD$, a weight vector $w$ of length $D$, and a bias vector $b$ of length $D$. Here, $N$ represents the batch size and $D$ represents the length of the feature vectors. The output, $Y$, is a tensor of shape $NxD$ and is calculated as\n\n$$Y_{ij} = X_{ij}w_j + b_j \\ for \\ i\\in{N}, j\\in{D}$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_linear_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_linear_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ElementwiseLinear\",\n [\"X\", \"w\", \"b\"],\n [\"Y\"]\n)\n\n# Create X\nX = np.array([[1,2,3,4,5],[6,8,9,16,10]])\nprint(\"X:\\n\",X)\n\n# Create w\nw = np.array([1,1/2.,1/3.,1/4.,1/5.])\nprint(\"w:\\n\",w)\n\n# Create b\nb = np.array([1.,1.,1.,1.,1.])\nprint(\"b:\\n\",b)\n\n\n# Feed X & w & b into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"w\", w.astype(np.float32))\nworkspace.FeedBlob(\"b\", b.astype(np.float32))\n\n# Run op\nworkspace.RunOperatorOnce(op)\n\n# Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 1 2 3 4 5]\n [ 6 8 9 16 10]]\nw:\n [1. 0.5 0.33333333 0.25 0.2]\nb:\n [1. 1. 1. 1. 1.]\nY:\n [[2. 2. 2. 2. 2.]\n [7. 5. 4. 5. 3.]]\n\n```\n\n
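**NumPy cross-check**\n\nA minimal sketch (reusing the `X`, `w`, and `b` arrays above) of the definition $Y_{ij} = X_{ij}w_j + b_j$; NumPy broadcasting applies `w` and `b` across each row:\n\n```\n\nimport numpy as np\n\n# Each column j of X is scaled by w_j, then shifted by b_j\nY_ref = X * w + b\nprint(\"Y (reference):\", Y_ref)\n\n```\n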
\n\n ", + "attributes": [ + { + "default": 1, + "description": "Describes the axis of the inputs; defaults to one because the 0th axis most likely describes the batch size.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "2D input tensor of size $NxD$. This input represents the input data to be operated on.", + "name": "X" + }, + { + "description": "1D scaling factors, or weights, of size $D$. This input contains the weights that will be multiplied by the data.", + "name": "w" + }, + { + "description": "1D biases of size $D$. This input contains the biases that will be added to the products of the weights and data.", + "name": "b" + } + ], + "outputs": [ + { + "description": "2D output tensor of size $NxD$. Calculated as described above.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ElementwiseLinearGradient", + "support_level": "default" + }, + { + "name": "Elu", + "description": "\n\nThis op implements the exponential linear unit (ELU) activation function as described in [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289). The op takes an input tensor $X$ of arbitrary shape, computes the elementwise elu operation, and returns a vector $Y$ of the same shape as output. The alpha parameter may be passed as an argument, but defaults to 1. The elu operation is defined as\n\n$$y=f(x) =\\begin{cases}\\alpha(e^x-1) & x < 0 \\\\ x & otherwise\\end{cases}$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elu_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Elu\",\n [\"X\"],\n [\"Y\"],\n alpha=1.1\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 0.35339102 1.1860217 -0.10710736]\n [-3.1173866 -0.1889988 -0.20330353]\n [ 1.8525308 -0.368949 0.506277 ]]\n\nY:\n [[ 0.35339102 1.1860217 -0.11172786]\n [-1.0513 -0.18943374 -0.20236646]\n [ 1.8525308 -0.33939326 0.506277 ]]\n\n```\n\n
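**NumPy reference sketch**\n\nA hedged reference implementation of the elu definition above, with `alpha` as in the example; fetching the `X` blob back lets it be compared against the op's output:\n\n```\n\nimport numpy as np\n\ndef elu_ref(x, alpha=1.1):\n    # alpha * (exp(x) - 1) where x < 0, identity elsewhere\n    return np.where(x < 0, alpha * (np.exp(x) - 1.0), x)\n\nprint(\"Y (reference):\", elu_ref(workspace.FetchBlob(\"X\")))\n\n```\n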
\n\n", + "attributes": [ + { + "default": 1.0, + "description": "Defines alpha parameter used in calculation.", + "name": "alpha", + "option": "optional", + "type": "float32" + } + ], + "inputs": [ + { + "description": "1D input tensor of data to be operated on.", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D input tensor, calculated as described above.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "EluGradient", + "description": "\nEluGradient takes both Y and dY and uses this to update dX according to the\nchain rule and derivatives of the rectified linear function.\n", + "support_level": "default" + }, + { + "name": "EnforceFinite", + "description": "\nRaise if there is NaN or Inf values in the input tensor.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "support_level": "default" + }, + { + "name": "EnqueueBlobs", + "support_level": "default" + }, + { + "name": "EnqueueRebatchingQueue", + "description": "\nEnqueues Tensors into the queue.\nNumber of input tensors should be equal to the number of components passed\nduring creation of the queue.\nIf the Queue is closed this operation will fail.\nIf enqueue_batch argument is set. We will split the input tensors by the\nfirst dimension to produce single queue elements.\n", + "attributes": [ + { + "description": "Are we enqueuing a batch or just a single element. By default we enqueue single element.", + "name": "enqueue_batch", + "option": "optional" + } + ], + "inputs": [ + { + "description": "object representing the queue", + "name": "queue" + }, + { + "description": "First tensor to enque. ", + "name": "tensor" + } + ], + "support_level": "default" + }, + { + "name": "EnsureClipped", + "description": "\nGiven a tensor, apply clip after gradient is applied; when the param is sparse as\nindicated by valid indices and grad, in-place is required\n", + "inputs": [ + { + "description": "Parameters to be normalized", + "name": "param" + }, + { + "description": "Sparse indices, only needed for sparse param", + "name": "indices" + }, + { + "description": "Gradient computed, only needed for sparse param", + "name": "grad" + } + ], + "outputs": [ + { + "description": "param ensured to be clipped within range", + "name": "output_param" + } + ], + "support_level": "default" + }, + { + "name": "EnsureCPUOutput", + "description": "\nThis Op always create TensorCPU output, and may involves cross-device MemCpy.\nUnder CPU Context, this Op takes TensorCPU as input. Under the CUDA Context,\nthis Op accepts either CUDA or CPU Tensor input.\n", + "inputs": [ + { + "description": "The input CUDA or CPU tensor.", + "name": "input" + } + ], + "outputs": [ + { + "description": "TensorCPU that is a copy of the input.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "EnsureDense", + "description": "\nThis operator converts dense or sparse gradients to dense ones.\nTherefore, sparse gradient can be back propagated to Operators that consume\ndense gradients only (e.g., FCGradient).\n\nThe operator's behaviors:\n\n- In forward, simply pass in place or copy input to the output.\n- In backward, if the gradient passed-in is sparse gradient, change it to dense gradient in linear time; otherwise, simply pass the dense gradient.\n", + "inputs": [ + { + "description": "Input tensors.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output tensor. 
Same dimension as inputs.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "EQ", + "description": "\nPerforms element-wise equal to comparison **==** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"EQ\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [ True False False True True False]\n```\n\n
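**Broadcast sketch**\n\nA minimal sketch of the `broadcast=1` path for this comparison (same conventions as above); a size-1 `B` acts as the scalar case from the shape table:\n\n```\n\nop = core.CreateOperator(\n \"EQ\",\n [\"A\", \"B\"],\n [\"C\"],\n broadcast=1,\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([9]))  # scalar comparand\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))  # expected [False False False True False False]\n\n```\n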
\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "Erf", + "description": "\nCalculates the arcsine of the given input tensor, element-wise.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The arcsine of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "ErfGradient", + "support_level": "default" + }, + { + "name": "Exp", + "description": "\nCalculates the exponential of the given input tensor ($exp(x)$), element-wise. This\noperation can be done in an in-place fashion too, by providing the same input\nand output blobs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/exp_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Exp\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[0.5821691 0.07719802 0.50159824]\n [0.40952456 0.36788362 0.84887683]\n [0.02472685 0.65730894 0.9066397 ]]\nX after running op:\n[[1.7899168 1.080256 1.6513585]\n [1.5061016 1.4446739 2.3370204]\n [1.0250351 1.9295927 2.4759884]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* The exponential of the input tensor computed element-wise.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Expand", + "description": "\n Broadcast the input tensor to a materialized new tensor using given shape.\n Broadcast rule is similar to \"numpy.array(input) * numpy.ones(shape)\":\n Dimensions are right alignment;\n Two corresponding dimensions must have the same value, or one of them\n equals to 1.\n In order to align with PyTorch's `expand`, `shape` is allowed to have entries\n equal to -1, which means to preserve the size of the corresponding dimension\n in `X` (so it's actually equivalent to equal to 1).\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): expand shape", + "name": "shape" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): expanded tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ExpandDims", + "description": "\nThe *ExpandDims* op inserts single-dimensional entries into the shape of the input tensor *data,* and produces a single output tensor *expanded*. The op also takes an argument *dims* with a list of dimensions for where to add the single dimensional entries. If the same blob is provided as input and output, the operation is copy-free. This is the exact inverse operation of *Squeeze*.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ExpandDims\",\n [\"data\"],\n [\"expanded\"],\n dims=[0,1],\n)\n\nworkspace.FeedBlob(\"data\", np.zeros((100,100)).astype(np.float32))\nprint(\"data.shape:\", workspace.FetchBlob(\"data\").shape)\n\nworkspace.RunOperatorOnce(op)\nprint(\"expanded.shape:\", workspace.FetchBlob(\"expanded\").shape)\n\n```\n\n**Result**\n\n```\n\ndata.shape: (100, 100)\nexpanded.shape: (1, 1, 100, 100)\n\n```\n\n
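The shape change in the embedded *ExpandDims* example matches plain numpy semantics, as the following sketch (not part of the original docs) shows:

```python
import numpy as np

x = np.zeros((100, 100), dtype=np.float32)
for d in sorted([0, 1]):          # the dims argument of ExpandDims
    x = np.expand_dims(x, axis=d)
print(x.shape)  # (1, 1, 100, 100), as in the example above
```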
\n\n\n\n", + "attributes": [ + { + "description": "List of dimensions of *data* to add single dimensional entry.", + "name": "dims", + "option": "optional", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "Input tensor of data to be operated on.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reshaped tensor with same data as input.", + "name": "expanded" + } + ], + "support_level": "default" + }, + { + "name": "ExpandGradient", + "support_level": "default" + }, + { + "name": "Fail", + "support_level": "default" + }, + { + "name": "FbFCPacked", + "description": "Same as FC,\n but the weight is prepacked as a fbgemm::PackedGemmMatrixFP16", + "support_level": "default" + }, + { + "name": "FbGemmPack", + "description": "Prepack weight for fbgemm", + "inputs": [ + { + "description": "row major format weight matrix", + "name": "X" + } + ], + "outputs": [ + { + "description": "Block row major packed format weight matrix", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "FbGemmPackTranspose", + "description": "Prepack weight for fbgemm", + "inputs": [ + { + "description": "col major format weight matrix", + "name": "X" + } + ], + "outputs": [ + { + "description": "Block col major packed format weight matrix", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "FC", + "category": "Layer", + "description": "\nThe FC operator computes an output $(Y)$ as a linear combination of the input data blob $(X)$ with a weight blob $(W)$ and bias blob $(b)$. More formally,\n\n$$Y = XW^T+b$$\n\nHere, $X$ is a matrix of shape $(M,K)$, $W$ is a matrix of shape $(N,K)$, $b$ is a vector of length $N$, and $Y$ is a matrix of shape $(M,N)$. $N$ can be thought of as the number of nodes in the layer, $M$ is the batch size, and $K$ is the number of features in an input observation.\n\n*NOTE: $X$ does not need to explicitly be a 2-dimensional matrix, however, if it is not it will be coerced into one. For an arbitrary $n$-dimensional tensor $X$, e.g. $[a_0, a_1, \\ldots ,a_{k-1}, a_k, \\ldots , a_{n-1}]$, where $a_i$ in $N$, and $k$ is the $axis$ arg provided, then $X$ will be coerced into a 2-dimensional tensor with dimensions $[a_0 * \\ldots * a_{k-1}, a_k * \\ldots * a_{n-1}]$. For the default case where axis=1, this means the $X$ tensor will be coerced into a 2D tensor of dimensions $[a_0, a_1 * \\ldots * a_{n-1}]$, where $a_0$ is often the batch size. In this situation, we must have $a_0 = M$ and $a_1 * \\ldots * a_{n-1} = K$. Lastly, even though $b$ is a vector of length $N$, it is copied and resized to shape $(M x N)$ implicitly, then added to each vector in the batch.*\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/fully_connected_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/fully_connected_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\n# In this example, our batch size is 1 (M=1), the input observation will have\n# 6 features (K=6), and the layer will have one hidden node (N=1). The\n# expected output is Y=7.\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"FC\",\n [\"X\", \"W\", \"b\"],\n [\"Y\"]\n)\n\n# Create X: MxK\ndata = np.array([1,2,3,4,5,6]).astype(np.float32)\ndata = data[np.newaxis,:]\n\n# Create W: NxK\nweights = np.array(np.array([1,1/2.,1/3.,1/4.,1/5.,1/6.])).astype(np.float32)\nweights = weights[np.newaxis,:]\n\n# Create b: N\nbias = np.array([1.]).astype(np.float32)\n\n# Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"W\", weights)\nworkspace.FeedBlob(\"b\", bias)\n\n# Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nY:\n [[7.]]\n\n```\n\n
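The expected value in the FC example can be recomputed by hand with numpy (a verification of $Y = XW^T + b$, not part of the original docs):

```python
import numpy as np

X = np.array([[1, 2, 3, 4, 5, 6]], dtype=np.float32)            # (M=1, K=6)
W = np.array([[1, 1/2, 1/3, 1/4, 1/5, 1/6]], dtype=np.float32)  # (N=1, K=6)
b = np.array([1.0], dtype=np.float32)                           # (N=1,)
# Each term X_k * W_k equals 1, so X @ W.T = 6; adding the bias gives 7.
print(X @ W.T + b)  # [[7.]]
```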
\n\n", + "attributes": [ + { + "default": 1, + "description": "Describes the axis of the input data $X$. Defaults to one because in the common case when the input $X$ has shape $(M,K)$, the first axis encodes the batch size.", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "default": 1, + "description": "Describes the axis of the input weight matrix $W$. Defaults to one because the first axis most likely describes the batch_size.", + "name": "axis_w", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "Whether to use float-16 compute kernel.", + "name": "float16_compute", + "option": "optional", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Input blob to be coerced into a 2D matrix of shape $(M,K)$, where $M$ is the batch size and $K$ is the number of features in a single observation.", + "name": "X" + }, + { + "description": "Input blob to be coerced into a 2D matrix of shape $(N,K)$ describing a fully connected weight matrix. Here, $K$ is the number of features in a single observation and $N$ is the number of nodes in the FC layer.", + "name": "W" + }, + { + "description": "Input blob containing vector of length $N$ which describes one bias for each node in the layer.", + "name": "b" + } + ], + "outputs": [ + { + "description": "Output blob containing a 2D output matrix of shape $(M,N)$, where $M$ is the batch size and $N$ is the number of nodes in the layer. The output is calculated as $Y=XW^T+b$.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "FCGradient", + "support_level": "default" + }, + { + "name": "FCTransposed", + "description": "\nSame as FC, but weight matrix is supposed to be already pretransposed.\nFCTransposed stands for calling blass with no noTrans, noTrans\n", + "support_level": "default" + }, + { + "name": "FCTransposedGradient", + "support_level": "default" + }, + { + "name": "FeedBlob", + "description": "\nFeedBlobs the content of the blobs. The input and output blobs should be\none-to-one inplace.", + "attributes": [ + { + "description": "(string) if provided then we will use this string as the value for theprovided output tensor", + "name": "value", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "FileStoreHandlerCreate", + "description": "\nCreates a unique_ptr that uses the filesystem as backing\nstore (typically a filesystem shared between many nodes, such as NFS).\nThis store handler is not built to be fast. Its recommended use is for\nintegration tests and prototypes where extra dependencies are\ncumbersome. 
Use an ephemeral path to ensure multiple processes or runs\ndon't interfere.\n", + "attributes": [ + { + "description": "base path used by the FileStoreHandler", + "name": "path", + "option": "optional" + }, + { + "description": "prefix for all keys used by this store", + "name": "prefix", + "option": "optional" + } + ], + "outputs": [ + { + "description": "unique_ptr", + "name": "handler" + } + ], + "support_level": "default" + }, + { + "name": "Find", + "description": "\nFinds elements of the second input in the first input,\noutputting the last (max) index for each query.\nIf a query is not found, missing_value is inserted.\nSee IndexGet() for a version that modifies the index when\nvalues are not found.\n", + "attributes": [ + { + "description": "Placeholder for items that are not found", + "name": "missing_value", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Index (integers)", + "name": "index" + }, + { + "description": "Needles / query", + "name": "query" + } + ], + "outputs": [ + { + "description": "Indices of the needles in index or 'missing value'", + "name": "query_indices" + } + ], + "support_level": "default" + }, + { + "name": "FindDuplicateElements", + "description": "\nThe *FindDuplicateElements* op takes a single 1-D tensor *data* as input and returns a single 1-D output tensor *indices*. The output tensor contains the indices of the duplicate elements of the input, excluding the first occurrences. If all elements of *data* are unique, *indices* will be empty.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/find_duplicate_elements_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/find_duplicate_elements_op.cc\n\n\n
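An illustrative sketch of the *Find* semantics documented above: the last matching index per query, with `missing_value` otherwise. The `find` helper is hypothetical, not the operator kernel:

```python
import numpy as np

def find(index, query, missing_value=-1):
    # For each query, return the last (max) index at which it occurs
    # in `index`, or `missing_value` if it is absent.
    out = []
    for q in query:
        hits = np.flatnonzero(index == q)
        out.append(hits.max() if hits.size else missing_value)
    return np.array(out)

print(find(np.array([1, 2, 3, 2]), np.array([2, 5])))  # [ 3 -1]
```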
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"FindDuplicateElements\",\n [\"data\"],\n [\"indices\"],\n)\n\nworkspace.FeedBlob(\"data\", np.array([8,2,1,1,7,8,1]).astype(np.float32))\nprint(\"data:\\n\", workspace.FetchBlob(\"data\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"indices: \\n\", workspace.FetchBlob(\"indices\"))\n\n```\n\n**Result**\n\n```\n\ndata:\n [8. 2. 1. 1. 7. 8. 1.]\nindices:\n [3 5 6]\n\n```\n\n
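The indices in the *FindDuplicateElements* example can be reproduced with a plain-Python sketch of the documented rule (illustrative only):

```python
def find_duplicates(data):
    # Indices of repeated values; the first occurrence of each value is kept.
    seen, out = set(), []
    for i, v in enumerate(data):
        if v in seen:
            out.append(i)
        else:
            seen.add(v)
    return out

print(find_duplicates([8, 2, 1, 1, 7, 8, 1]))  # [3, 5, 6]
```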
\n\n\n ", + "inputs": [ + { + "description": "a 1-D tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Indices of duplicate elements in data, excluding first occurrences.", + "name": "indices" + } + ], + "support_level": "default" + }, + { + "name": "Flatten", + "description": "\nFlattens the input tensor into a 2D matrix. If input tensor has shape\n$(d_0, d_1, ..., d_n)$ then the output will have shape\n$\\bigl((d_0 * d_1 * ... * d_{(axis-1)}), (d_{axis} * d_{(axis+1)} * ... * d_n)\\bigr)$.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/flatten_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Flatten\",\n [\"X\"],\n [\"Y\"],\n axis=1\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(1,3,2,2))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX: [[[[0.53432311 0.23734561]\n [0.56481598 0.52152617]]\n\n [[0.33662627 0.32472711]\n [0.17939016 0.97175851]]\n\n [[0.87226421 0.49045439]\n [0.92470531 0.30935077]]]]\nY: [[0.53432311 0.23734561 0.56481598 0.52152617 0.33662627 0.32472711\n 0.17939016 0.97175851 0.87226421 0.49045439 0.92470531 0.30935077]]\n```\n\n
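For axis=1, the *Flatten* result above matches a plain numpy reshape built from the output-shape formula given earlier (an equivalence check, not part of the original docs):

```python
import numpy as np

x = np.random.rand(1, 3, 2, 2)
axis = 1
# Outer dim: product of dims before `axis`; inner dim: the rest.
y = x.reshape(int(np.prod(x.shape[:axis])), -1)
print(y.shape)  # (1, 12), matching the Flatten example
```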
\n\n", + "attributes": [ + { + "default": 1, + "description": "Indicates up to which input dimensions (exclusive) should be flattened to the outer dimension of the output.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor)* Input Tensor of rank >= axis.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* A 2D tensor with the contents of the input tensor, with input dimensions up to `axis` flattened to the outer dimension of the output and the remaining input dimensions flattened into the inner dimension of the output.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "FlattenToVec", + "description": "\n\nThe *FlattenToVec* op flattens the input tensor into a 1-D vector. The op accepts a single input tensor and returns a single output tensor.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"FlattenToVec\",\n [\"input\"],\n [\"output\"],\n)\n\nworkspace.FeedBlob(\"input\", np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]).astype(np.float32))\nprint(\"input:\\n\", workspace.FetchBlob(\"input\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"output: \\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n [[ 1. 2. 3.]\n [ 4. 5. 6.]\n [ 7. 8. 9.]\n [10. 11. 12.]]\noutput:\n [ 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12.]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "A tensor of rank >= 1.", + "name": "input" + } + ], + "outputs": [ + { + "description": "A tensor of rank 1 (vector) with the contents of the input tensor.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "FlexibleTopK", + "description": "\nGiven two tensors: X and K,\nretrieve the top K[..., 1] elements from X on the last dimension.\nX is an input tensor of shape [a_1, a_2, ..., a_n, r].\nK is an input tensor of shape [a_1, a_2, ..., a_n, 1],\nwhere for each element, r >= K[..., 1] > 0\nOutput two outputs:\n-Flatten values tensor of shape [ \\sum_i K[i, 1] ] which contains the values of\n the top K[..., 1] elements along the last dimension\n-Flatten indices tensor of shape [ \\sum_i K[i, 1] ] which contains the indices\n of the top K[..., 1] elements, flatten indices from the input tensor).\nThese two outputs should be used with the input K, so that we know which indices\nin X are picked.\n\nGiven two equivalent values, this operator uses the indices along the last dim-\nension as a tiebreaker. That is, the element with the lower index will appear\nfirst.\n ", + "inputs": [ + { + "description": "Tensor of shape [a_1, a_2, ..., a_n, r]", + "name": "X" + }, + { + "description": "Tensor of shape [a_1, a_2, ..., a_n, 1]", + "name": "K" + } + ], + "outputs": [ + { + "description": "Tensor of shape [ \\sum_i K[i, 1] ] containing top K[..., 1] values from the input tensor", + "name": "Flatten values" + }, + { + "description": "Tensor of shape [ \\sum_i K[i, 1] ] containing the indices into the flatten input", + "name": "Flatten indices" + } + ], + "support_level": "default" + }, + { + "name": "FlexibleTopKGradient", + "support_level": "default" + }, + { + "name": "Float16ConstantFill", + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "value", + "option": "optional" + }, + { + "description": "The shape of the output tensor.", + "name": "shape", + "option": "optional" + } + ], + "outputs": [ + { + "description": "Output tensor of constant values specified by 'value'", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Float16SparseNormalize", + "description": "\nGiven a sparse matrix, apply max_norm or constant_norm sparse regularization.\n", + "attributes": [ + { + "description": "A bool variable to control whether to use max norm or constant norm. When use_max_norm = false, constant norm is used so that all the embedding vectors are scaled to have a L2 norm equals to A (see blow argument norm=A). If use_max_norm = true, max norm is used so that embedding is scaled so that its l2 norm is no larger than A. If an embedding's norm is less than A originally, the embedding is left unchanged. The default is True.", + "name": "use_max_norm", + "option": "optional" + }, + { + "description": "L2 norm of the embedding. 
The default is 1.0.", + "name": "norm", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be normalized", + "name": "param" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed (optional - not used, this argument is for backwards compatibility)", + "name": "grad" + } + ], + "outputs": [ + { + "description": "Normalized parameters", + "name": "output_param" + } + ], + "support_level": "default" + }, + { + "name": "Float16UniformFill", + "description": "Fills a half float tensor of a specified shape with values from a uniform distribution [min, max]", + "attributes": [ + { + "description": "Shape of the tensor", + "name": "shape", + "option": "optional" + }, + { + "description": "Minimum value to generate", + "name": "min", + "option": "optional" + }, + { + "description": "Maximum value to generate", + "name": "max", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "FloatToFused2BitFakeRowwiseQuantized", + "description": "\nApplies 2-bit row-wise fake quantization to a tensor of floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "FloatToFused2BitRowwiseQuantized", + "description": "\nApplies 2-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to a 2-bit number between 0 and\n3. To later de-quantize values, the scale (range / 3) and zero_point\nare stored alongside the data. More precisely, each row first has quantized\nvalues, and then 2-byte fp16 scale and 2-byte zero_offset.\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "FloatToFused4BitFakeRowwiseQuantized", + "description": "\nApplies 4-bit row-wise fake quantization to a tensor of floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "FloatToFused4BitRowwiseQuantized", + "description": "\nApplies 4-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to a 4-bit number between 0 and\n15. To later de-quantize values, the scale (range / 15) and zero_point\nare stored alongside the data. 
More precisely, each row first has quantized\nvalues, and then 2-byte fp16 scale and 2-byte zero_offset.\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "FloatToFused8BitRowwiseQuantized", + "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. More precisely, each row contains\nint8 elements for each quantized element, and the last 8 bytes\nof each row in the output matrix are a float storing the scale\nfollowed by another float containing the bias.\nFor N-dimensional input tensor, the first N-1 dimensions are interpreted as\nrows and the last dimension is interpreted as a column. For example, an\ninput tensor with dimension 5x2x4 is interpreted as 10 rows and 4 columns.\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "FloatToFused8BitRowwiseQuantizedHalfScaleBias", + "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. More precisely, each row contains\nint8 elements for each quantized element, and the last 4 bytes\nof each row in the output matrix are a half float storing the scale\nfollowed by another half float containing the bias.\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "FloatToFusedRandRowwiseQuantized", + "description": "\nApplies row-wise stochastic/random quantization by determining the range of\neach row in the input matrix, and then quantizing each element to one of the two\nclosest discrete levels by randomly drawing from a Bernoulli distribution.\nThe method is extended from TernGrad [1],\nwhich randomly quantizes gradients to three levels to reduce communication in distributed training.\nThe format of each row (x) in the output matrix is [bitwidth][tail][min][max][data]:\nbitwidth[1 Byte]: bitwidth per data [1, 2, 4 or 8];\ntail[1 Byte]: the number of unused buckets [1-8] (One byte is split to 8/bitwidth buckets and each bucket stores one low-precision data in bitwidth bits);\nmin[4 Bytes]: the minimum floating value min(x);\nmax[4 Bytes]: the maximum floating value max(x);\ndata: quantized data.\nThe quantization is uniform with levels q = min + (max-min)/(2^bitwidth - 1)*[0:1:2^bitwidth - 1].\nDuring stochastic/random quantization x'=Quantize(x), for q_j < x_i <= q_{j+1}, we draw quantization x'_i from Bernoulli distributions with\nP(x'_i = q_{j+1}) = (x_i - q_j)/(q_{j+1} - q_j), and\nP(x'_i = q_j) = (q_{j+1} - x_i)/(q_{j+1} - q_j) where x'_i is the quantized value of x_i.\n[1] proved E{x'_i}=x_i, which is an unbiased approximation. 
More details are in the paper.\nFor example, suppose targeted bitwidth = 2 and x = [0.3, -1.4, -0.6, 0.9, 1.0],\nthen tail = 3, min = -1.4, max = 1.0 and q = [-1.4, -0.6, 0.2, 1.0].\nx_1 = 0.3 will be quantized to x'_1 = 0.2 with probability 7/8 and to x'_1 = 1.0 with probability 1/8.\nThe storage format of quantized data is: [x'_1|x'_3|x'_5|xxx]-[x'_2|x'_4|xxx|xxx].\nIn general, an input row is split into multiple segments. One segment is a continuous subarray of the row,\nand its length is the number of bytes storing quantized data in the output matrix.\nThe b-th bucket of the i-th byte stores the i-th data of the b-th segment of input row.\n\n[1] Wen, Wei, Cong Xu, Feng Yan, Chunpeng Wu, Yandan Wang, Yiran Chen, and Hai Li.\n\"Terngrad: Ternary gradients to reduce communication in distributed deep learning.\"\nIn Advances in Neural Information Processing Systems, pp. 1508-1518. 2017.\n\n", + "attributes": [ + { + "description": "How many bits to quantize per data (defaults to 8).", + "name": "bitwidth", + "option": "optional" + }, + { + "description": "Whether the quantization is random (True) or deterministic (False); False is intended for unit tests.", + "name": "random", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused bitwidth, tail, min, max and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "FloatToHalf", + "support_level": "default" + }, + { + "name": "FloatToRowwiseQuantized8Bits", + "description": "\nThis operator applies 8-bit row-wise quantization to the\ninput tensor and returns the quantized tensor. Row-wise quantization of the\ninput tensor is the following process. We take a tensor of size\n(m_1, m_2,...,m_n), n >= 2, reshape it into a matrix of size\n(m_1, m_2 x ... x m_n) and apply row-wise quantization. After this,\nwe compute scale_i = (max_i - min_i) / 255 and bias_i = min_i for the\ni-th row r_i of the reshaped matrix, where min_i and max_i are the minimum\nand maximum elements of the i-th row, and quantize each element r_{ij} as\n0 <= round((r_{ij} - bias_i) / scale_i) < 256. In place of the input tensor\nwe obtain a uint8 tensor plus auxiliary scale and bias information to\nrestore the input tensor (with losses).\n", + "inputs": [ + { + "description": "input", + "name": "input" + } + ], + "outputs": [ + { + "description": "quantized_input", + "name": "quantized_input" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i", + "name": "scale_bias" + } + ], + "support_level": "default" + }, + { + "name": "Floor", + "description": "\nElement-wise application of the floor function ($y=floor(x)$) to the input\ntensor `X`. Output tensor shape is the same as the input tensor. This\noperator can be used in an in-place fashion by using the same input blob as the\noutput blob.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/floor_op.cc\n\n
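A minimal numpy sketch of the scale/bias arithmetic documented for *FloatToRowwiseQuantized8Bits* above. This is an assumed illustration, not the actual kernel; rows with zero range (where scale would be 0) are not handled:

```python
import numpy as np

def quantize_rowwise(x):
    # Per-row scale = (max - min) / 255 and bias = min, as documented above.
    mn = x.min(axis=1, keepdims=True)
    mx = x.max(axis=1, keepdims=True)
    scale = (mx - mn) / 255.0
    q = np.round((x - mn) / scale).astype(np.uint8)
    return q, scale.astype(np.float32), mn.astype(np.float32)

def dequantize_rowwise(q, scale, bias):
    # Lossy reconstruction: q * scale + bias.
    return q.astype(np.float32) * scale + bias

x = np.random.rand(4, 8).astype(np.float32)
q, s, b = quantize_rowwise(x)
print(np.abs(dequantize_rowwise(q, s, b) - x).max())  # small but nonzero
```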
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Floor\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.uniform(-10, 10, (5,5))).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[ 3.813361 -1.319647 5.2089314 -4.931328 0.6218652 ]\n [ 7.2757645 5.5552588 5.785643 -2.4790506 -0.41400087]\n [ 1.1541046 -6.933266 3.3754056 1.6569928 -1.7670316 ]\n [-3.4932013 4.891472 1.5530115 -3.2443287 -4.605099 ]\n [-4.574543 -7.360948 5.91305 -8.196495 -5.357458 ]]\nX after running op:\n[[ 3. -2. 5. -5. 0.]\n [ 7. 5. 5. -3. -1.]\n [ 1. -7. 3. 1. -2.]\n [-4. 4. 1. -4. -5.]\n [-5. -8. 5. -9. -6.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Free", + "description": "\nFrees the content of the blobs. The input and output blobs should be\none-to-one inplace.", + "support_level": "default" + }, + { + "name": "Ftrl", + "support_level": "default" + }, + { + "name": "Fused2BitRowwiseQuantizedToFloat", + "description": "\nDe-quantizes the result of the\nFloatToFused2BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_output" + } + ], + "support_level": "default" + }, + { + "name": "Fused2BitRowwiseQuantizedToHalf", + "description": "\nDe-quantizes the result of the\nFloatToFused2BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float16 data", + "name": "float16_output" + } + ], + "support_level": "default" + }, + { + "name": "Fused4BitRowwiseQuantizedToFloat", + "description": "\nDe-quantizes the result of the\nFloatToFused4BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_output" + } + ], + "support_level": "default" + }, + { + "name": "Fused4BitRowwiseQuantizedToHalf", + "description": "\nDe-quantizes the result of the\nFloatToFused4BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. 
The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float16 data", + "name": "float16_output" + } + ], + "support_level": "default" + }, + { + "name": "Fused8BitRowwiseQuantizedHalfScaleBiasToFloat", + "description": "\nDe-quantizes the result of the\nFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 16-bit float in the second to the last 2 bytes of each\nrow, followed by the bias as a 16-bit float in the next 2 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_output" + } + ], + "support_level": "default" + }, + { + "name": "Fused8BitRowwiseQuantizedHalfScaleBiasToHalfFloat", + "description": "\nDe-quantizes the result of the\nFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 16-bit float in the second to the last 2 bytes of each\nrow, followed by the bias as a 16-bit float in the next 2 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_output" + } + ], + "support_level": "default" + }, + { + "name": "Fused8BitRowwiseQuantizedToFloat", + "description": "\nDe-quantizes the result of the\nFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 32-bit float in the second to the last 4 bytes of each\nrow, followed by the bias as a 32-bit float in the next 4 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_output" + } + ], + "support_level": "default" + }, + { + "name": "Fused8BitRowwiseQuantizedToHalfFloat", + "description": "\nDe-quantizes the result of the\nHalfFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 32-bit float in the second to the last 4 bytes of each\nrow, followed by the bias as a 32-bit float in the next 4 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. 
De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float16 data", + "name": "float16_output" + } + ], + "support_level": "default" + }, + { + "name": "FusedRandRowwiseQuantizedToFloat", + "description": "\nDe-quantizes the result of the FloatToFusedRandRowwiseQuantized operator.\nRefer to the FloatToFusedRandRowwiseQuantized operator for details.\n", + "inputs": [ + { + "description": "Fused bitwidth, tail, min, max and quantized data", + "name": "quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_input" + } + ], + "support_level": "default" + }, + { + "name": "Gather", + "category": "Transform", + "description": "\n\nThe *Gather* op accepts a *DATA* tensor of rank $r >= 1$ and *INDICES* tensor of rank $q$ as inputs. It then gathers entries of the outer-most dimension of *DATA*, indexed by *INDICES*, and concatenates them into an output tensor of rank $q + (r - 1)$.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/gather_op.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/gather_op.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Gather\",\n [\"DATA\", \"INDICES\"],\n [\"OUTPUT\"]\n)\ndata = np.array([[1., 1.2],[2.3, 3.4],[4.5, 5.7]])\nprint(\"DATA:\\n\",data)\n\ninds = np.array([[0, 1],[1, 2]])\nprint(\"INDICES:\\n\",inds)\n\n# Feed the inputs into the workspace\nworkspace.FeedBlob(\"DATA\", data.astype(np.float32))\nworkspace.FeedBlob(\"INDICES\", inds.astype(np.int32))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT:\\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [[1. 1.2]\n [2.3 3.4]\n [4.5 5.7]]\nINDICES:\n [[0 1]\n [1 2]]\nOUTPUT:\n [[[1. 1.2]\n [2.3 3.4]]\n\n [[2.3 3.4]\n [4.5 5.7]]]\n\n```\n\n
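The gathered OUTPUT above matches plain numpy fancy indexing on the outermost axis (an equivalence check, not part of the original docs):

```python
import numpy as np

data = np.array([[1., 1.2], [2.3, 3.4], [4.5, 5.7]], dtype=np.float32)
inds = np.array([[0, 1], [1, 2]])
# Indexing the first axis with a rank-q index array yields rank q + (r - 1).
print(data[inds].shape)  # (2, 2, 2)
print(data[inds])        # same values as the Gather example's OUTPUT
```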
\n\n", + "inputs": [ + { + "description": "Input data tensor of rank $r>=1$", + "name": "DATA" + }, + { + "description": "Input indices tensor of rank $q$. This tensor must contain integers.", + "name": "INDICES" + } + ], + "outputs": [ + { + "description": "Output tensor of rank $q+(r-1)$", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "GatherByKey", + "description": "\nInverse operation of Partition.\n\nTakes the original, full 'keys' tensor followed by sharded value tensors,\nand returns the full value tensor, combined using the same hash used in\nPartition.\n", + "inputs": [ + { + "description": "The first input is the full keys tensor (same as the first input of Partition).", + "name": "keys" + }, + { + "description": "Subsequented inputs are sharded values tensors.", + "name": "sharded_values" + } + ], + "outputs": [ + { + "description": "Reconstructed values tensor.", + "name": "values" + } + ], + "support_level": "default" + }, + { + "name": "GatherFused8BitRowwise", + "description": "\nPerform the same operation as Gather, but operating on 8-bit rowwise quantized\nmatrices with fused storage (where each row stores quantized values, and then\nthe scale and offset).\nDATA needs to have rank 2 and INDICES needs to have rank 1.\n", + "inputs": [ + { + "description": "uint8 tensor with rank 2 obtained with operator FloatToFused8BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA forthe rows that are being gathered", + "name": "INDICES" + } + ], + "outputs": [ + { + "description": "output", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "GatherPadding", + "description": "\nGather the sum of start and end paddings in a padded input sequence. Used in\norder to compute the gradients of AddPadding w.r.t the padding tensors.\n", + "attributes": [ + { + "description": "Outer-size of padding present around each range.", + "name": "padding_width", + "option": "optional" + }, + { + "description": "(Optional) Specifies a different end-padding width.", + "name": "end_padding_width", + "option": "optional" + } + ], + "inputs": [ + { + "description": "T Padded input data", + "name": "data_in" + }, + { + "description": "(i64) Num of elements in each range. sum(lengths) = N. If not provided, considers all data as a single segment.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Sum of all start paddings, or of all paddings if end_padding_sum is not provided.", + "name": "padding_sum" + }, + { + "description": "T Sum of all end paddings, if provided.", + "name": "end_padding_sum" + } + ], + "support_level": "default" + }, + { + "name": "GatherRanges", + "description": "\nGiven DATA tensor of rank 1, and RANGES tensor of rank 3, gather\ncorresponding ranges into a 1-D tensor OUTPUT.\n\nRANGES dimentions description:\n1: represents list of examples within a batch\n2: represents list features\n3: two values which are start and length or a range (to be applied on DATA)\n\nAnother output LENGTHS represents each example length within OUTPUT\n\nExample:\n DATA = [1, 2, 3, 4, 5, 6]\n RANGES = [\n [\n [0, 1],\n [2, 2],\n ],\n [\n [4, 1],\n [5, 1],\n ]\n ]\n OUTPUT = [1, 3, 4, 5, 6]\n LENGTHS = [3, 2]\n", + "inputs": [ + { + "description": "Tensor of rank 1.", + "name": "DATA" + }, + { + "description": "Tensor of int32/int64 ranges, of dims (N, M, 2). Where N is number of examples and M is a size of each example. 
Last dimension represents a range in the format (start, length)", + "name": "RANGES" + } + ], + "outputs": [ + { + "description": "1-D tensor of size sum of range lengths", + "name": "OUTPUT" + }, + { + "description": "1-D tensor of size N with lengths over gathered data for each row in a batch. sum(LENGTHS) == OUTPUT.size()", + "name": "LENGTHS" + } + ], + "support_level": "default" + }, + { + "name": "GatherRangesToDense", + "description": "\nGiven DATA tensor of rank 1, and RANGES tensor of rank 3, gather values\ncorresponding to each range into a separate output tensor. If the optional input\nKEY tensor is also given, the output will be sorted by KEY for each example.\n\nRANGES dimensions description:\n1: represents the list of examples within a batch\n2: represents the list of features\n3: two values which are the start and length of a range (to be applied on DATA)\n\nEach feature has fixed lengths which are passed as the lengths argument, and a\nseparate tensor will be produced for each feature.\ni.e. DATA.dim(1) = len(lengths) = NumOutputs.\n\nMissing features (represented by empty ranges) are filled with default_value.\n\nExample 1:\n DATA = [1, 2, 3, 4, 5, 6, 7, 8]\n RANGES = [\n [\n [2, 4],\n [0, 2],\n ],\n [\n [0, 0],\n [6, 2],\n ]\n ]\n lengths = [4, 2]\n OUTPUT[0] = [[3, 4, 5, 6], [0, 0, 0, 0]]\n OUTPUT[1] = [[1, 2], [7, 8]]\n\nExample 2 (with KEY):\nDATA = [1, 2, 3, 4, 5, 6, 7, 8]\nKEY = [0, 1, 3, 2, 1, 0, 1, 0]\nRANGES = [\n [\n [2, 4],\n [0, 2],\n ],\n [\n [0, 0],\n [6, 2],\n ]\n]\nlengths = [4, 2]\nOUTPUT[0] = [[6, 5, 4, 3], [0, 0, 0, 0]]\nOUTPUT[1] = [[1, 2], [8, 7]]\n\nContrast Example 2 with Example 1. For each data point per feature, the values\nare sorted by the corresponding KEY.\n", + "attributes": [ + { + "description": "Expected lengths for ranges", + "name": "lengths", + "option": "optional" + }, + { + "description": "The number of observations needed before deciding that the ratio of mismatched ranges is alarming; also determines whether an info message summarizing the empty and mismatched ratios will be printed at the end.", + "name": "min_observation", + "option": "optional" + }, + { + "description": "An error is raised when the ratio of empty ranges exceeds this (default is 1, which means by default no error will be triggered).", + "name": "max_empty_ratio", + "option": "optional" + }, + { + "description": "An error is raised when the ratio of mismatched ranges exceeds this.", + "name": "max_mismatched_ratio", + "option": "optional" + }, + { + "description": "A log is recorded only after an error is triggered every n times.", + "name": "log_every_n", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Tensor of rank 1.", + "name": "DATA" + }, + { + "description": "Tensor of int32/int64 ranges, of dims (N, M, 2), where N is the number of examples and M is the size of each example. Last dimension represents a range in the format (start, length)", + "name": "RANGES" + }, + { + "description": "Tensor of rank 1 and type int64.", + "name": "KEY" + } + ], + "outputs": [ + { + "description": "1-D tensor of size sum of range lengths", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "GaussianFill", + "description": "\nThis op fills an output tensor with samples drawn from a normal distribution specified by the mean and standard deviation arguments. The output tensor shape is specified by the *shape* argument. 
However, if *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set.\n\n*Note: cannot set the shape argument and pass in an input at the same time.*\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GaussianFill\",\n [],\n [\"out\"],\n shape=[3,3],\n mean=2.0,\n std=1.1\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Out:\\n\", workspace.FetchBlob(\"out\"))\n\n```\n\n**Result**\n\n```\n\nOut:\n [[1.2084167 2.3336504 2.827349 ]\n [2.7108908 0.9374752 1.7173369 ]\n [0.03320992 2.1775863 1.0894578 ]]\n\n```\n\n
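Based on the *input_as_shape* attribute documented below, the output shape can also be supplied at runtime instead of via the *shape* argument. A hedged sketch in the same style as the example above (the int64 dtype for the shape tensor is an assumption):

```python
from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()
# 1D CPU tensor holding the desired output shape, per the docs below.
workspace.FeedBlob("shape_in", np.array([2, 4], dtype=np.int64))

op = core.CreateOperator(
    "GaussianFill",
    ["shape_in"],
    ["out"],
    input_as_shape=True,  # do not also set the shape argument
    mean=0.0,
    std=1.0,
)
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("out").shape)  # expected: (2, 4)
```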
\n\n", + "attributes": [ + { + "default": 0.0, + "description": "Mean of the distribution to draw from.", + "name": "mean", + "option": "optional", + "type": "float32" + }, + { + "default": 1.0, + "description": "Standard deviation of the distribution to draw from.", + "name": "std", + "option": "optional", + "type": "float32" + }, + { + "description": "Desired shape of the *output* tensor.", + "name": "shape", + "option": "optional", + "type": "int64[]" + }, + { + "description": "The additional dimensions appended at the end of the *shape* indicated by the input blob. Cannot set the *extra_shape* argument when there is no input blob.", + "name": "extra_shape", + "option": "optional", + "type": "int64[]" + }, + { + "default": false, + "description": "set to *True* to use the *input* as shape. First, input must be in CPU context.", + "name": "input_as_shape", + "option": "optional", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "(Optional) 1D tensor specifying the shape of the output. Must be used with *input_as_shape=True*", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output tensor of random values drawn from a normal distribution. If the shape argument is set, this is the shape specified, and if the *input* exists and *input_as_shape=True*, it is the shape specified by the *input* tensor.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "GE", + "description": "\nPerforms element-wise greater or equal than comparison **>=** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GE\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [ True True False True True False]\n\n```\n\n
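The GE description also allows suffix-matched broadcasting when `broadcast=1` is passed. A sketch in the same style as the embedded example, with shapes taken from the documented list (illustrative, not from the original docs):

```python
from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()
op = core.CreateOperator("GE", ["A", "B"], ["C"], broadcast=1)
workspace.FeedBlob("A", np.random.rand(2, 3, 4, 5).astype(np.float32))
workspace.FeedBlob("B", np.random.rand(4, 5).astype(np.float32))  # suffix-matched
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("C").shape)  # (2, 3, 4, 5), elementwise A >= B
```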
\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "Gelu", + "description": "\nRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = xP(X <= x) where X ~ N(0, 1),\nis applied to the tensor elementwise.\n", + "attributes": [ + { + "description": "If true, use y = 0.5x * (1 + tanh(sqrt(2/Pi) * (x + 0.044715x^3))).", + "name": "fast_gelu", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D input tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "GeluGradient", + "support_level": "default" + }, + { + "name": "GenerateProposals", + "description": "\nGenerate bounding box proposals for Faster RCNN. The propoasls are generated for\na list of images based on image score 'score', bounding box regression result\n'deltas' as well as predefined bounding box shapes 'anchors'. Greedy\nnon-maximum suppression is applied to generate the final bounding boxes.\n", + "attributes": [ + { + "description": "(float) spatial scale", + "name": "spatial_scale", + "option": "optional" + }, + { + "description": "(int) RPN_PRE_NMS_TOP_N", + "name": "pre_nms_topN", + "option": "optional" + }, + { + "description": "(int) RPN_POST_NMS_TOP_N", + "name": "post_nms_topN", + "option": "optional" + }, + { + "description": "(float) RPN_NMS_THRESH", + "name": "nms_thresh", + "option": "optional" + }, + { + "description": "(float) RPN_MIN_SIZE", + "name": "min_size", + "option": "optional" + }, + { + "description": "bool (default false), Correct bounding box transform coordates, see bbox_transform() in boxes.py Set to true to match the detectron code, set to false for backward compatibility", + "name": "correct_transform_coords", + "option": "optional" + }, + { + "description": "bool (default true). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_on", + "option": "optional" + }, + { + "description": "int (default -90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_lo", + "option": "optional" + }, + { + "description": "int (default 90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_hi", + "option": "optional" + }, + { + "description": "float (default 1.0 degrees). For RRPN, clip almost horizontal boxes within this threshold of tolerance for backward compatibility. 
Set to negative value for no clipping.", + "name": "clip_angle_thresh", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Scores from conv layer, size (img_count, A, H, W)", + "name": "scores" + }, + { + "description": "Bounding box deltas from conv layer, size (img_count, 4 * A, H, W)", + "name": "bbox_deltas" + }, + { + "description": "Image info, size (img_count, 3), format (height, width, scale)", + "name": "im_info" + }, + { + "description": "Bounding box anchors, size (A, 4)", + "name": "anchors" + } + ], + "outputs": [ + { + "description": "Proposals, size (n x 5), format (image_index, x1, y1, x2, y2)", + "name": "rois" + }, + { + "description": "scores of proposals, size (n)", + "name": "rois_probs" + } + ], + "support_level": "default" + }, + { + "name": "GenerateProposalsCPP", + "support_level": "default" + }, + { + "name": "GetAllBlobNames", + "description": "\nReturn a 1D tensor of strings containing the names\nof each blob in the active workspace.\n", + "attributes": [ + { + "description": "(bool, default true) Whether to include blobs inherited from parent workspaces.", + "name": "include_shared", + "option": "optional" + } + ], + "outputs": [ + { + "description": "1D tensor of strings containing blob names.", + "name": "blob_names" + } + ], + "support_level": "default" + }, + { + "name": "GetCursorOffset", + "description": "Get the current offset in the cursor.", + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + } + ], + "outputs": [ + { + "description": "Tensor containing the offsets for the cursor.", + "name": "offsets" + } + ], + "support_level": "default" + }, + { + "name": "GFtrl", + "support_level": "default" + }, + { + "name": "GivenTensorBoolFill", + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "GivenTensorByteStringToUInt8Fill", + "description": "\nThis op fills a uint8 output tensor with the data specified by the *value* argument. The data must previously be serialized as a byte string. The output tensor shape is specified by the *shape* argument. Beware, when using this argument *value* should have a value for every element of the *output*, as missing values will not be initialized automatically. If *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set.\n\nThis op allows us to write uint8 tensors to Protobuf as byte strings and read them back as uint8 tensors in order to avoid the Protobuf uint32_t varint encoding size penalty.\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nval = np.array([1, 2, 3], dtype=np.uint8)\nop = core.CreateOperator(\n \"GivenTensorByteStringToUInt8Fill\",\n [],\n [\"out\"],\n values=[val.tobytes()],\n shape=val.shape,\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Out:\\n\", workspace.FetchBlob(\"out\"))\n\n```\n\n**Result**\n\n```\n\nOut:\n [1 2 3]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "GivenTensorDoubleFill", + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "GivenTensorFill", + "description": "\nThis op fills an output tensor with the data specified by the *value* and *dtype* arguments. The output tensor shape is specified by the *shape* argument. Beware, when using this argument *value* should have a value for every element of the *output*, as missing values will not be initialized automatically. If *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set.\n\n*Note: Do not set the shape argument and pass in an input at the same time.*\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/given_tensor_fill_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/given_tensor_fill_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GivenTensorFill\",\n [],\n [\"out\"],\n values=[1., 2., 3.],\n shape=[3],\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Out:\\n\", workspace.FetchBlob(\"out\"))\n\n```\n\n**Result**\n\n```\n\nOut:\n [1. 2. 3.]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "The value of the elements to go in the *output* tensor.", + "name": "values" + }, + { + "description": "The data type for the elements of the output tensor. Strictly must be one of the types from DataType enum in TensorProto.", + "name": "dtype", + "option": "optional" + }, + { + "description": "Desired shape of the *output* tensor.", + "name": "shape", + "option": "optional", + "type": "int64[]" + }, + { + "description": "The additional dimensions appended at the end of the *shape* indicated by the input blob. Cannot set the *extra_shape* argument when there is no input blob.", + "name": "extra_shape", + "option": "optional", + "type": "int64[]" + }, + { + "default": false, + "description": "set to *True* to use the *input* as shape. First, input must be in CPU context.", + "name": "input_as_shape", + "option": "optional", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "(Optional) 1D tensor specifying the shape of the output. Must be used with *input_as_shape=True*", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output tensor with desired dimension filled with specified data. If the shape argument is set, this is the shape specified, and if the *input* exists and *input_as_shape=True*, it is the shape specified by the *input* tensor.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "GivenTensorInt16Fill", + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "GivenTensorInt64Fill", + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "GivenTensorIntFill", + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. 
First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "GivenTensorStringFill", + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "Glu", + "description": "\nApplies gated linear unit to the input Tensor X. The output Y is half the size\nof the input X, so if the shape of X is [d1, d2, ..., N] shape of Y will be\n[d1, d2, ..., dn/2] and Y(:dn-1, i) = GLU(X(:dn-1, i), X(:dn-1, i+N/2)) =\nX(dn-1, i) * sigmoid(X(dn-1, i+N/2))\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "GroupNorm", + "description": "\nGroup Normalization (GN) operation: https://arxiv.org/abs/1803.08494\n", + "attributes": [ + { + "description": "(int) default 32; number of groups used by GN.", + "name": "num_groups", + "option": "optional" + }, + { + "description": "(float) default 1e-5; small constant added to var.", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": ">=4D feature map input of shape (N, C, H, W) or (N, C, T, H, W)", + "name": "X" + }, + { + "description": "The scale as a 1-dimensional tensor of size C to be applied to the output.", + "name": "gamma" + }, + { + "description": "The bias as a 1-dimensional tensor of size C to be applied to the output.", + "name": "beta" + } + ], + "outputs": [ + { + "description": "The output >=4-dimensional tensor of the same shape as X.", + "name": "Y" + }, + { + "description": "The mean of shape (N, G). For backward usage or reference. Cannot be used as activations.", + "name": "mean" + }, + { + "description": "The std of shape (N, G). For backward usage or reference. 
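The *Glu* entry above documents the split-and-gate formula but carries no example. A minimal sketch in the style of this file's other examples, assuming the standard `caffe2.python` imports and the default split along the last dimension; the plain-numpy line restates the schema's formula for comparison:

```python
from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()

# GLU halves the last dimension: Y = X[..., :N/2] * sigmoid(X[..., N/2:]).
op = core.CreateOperator(
    "Glu",
    ["X"],
    ["Y"],
)

X = np.random.randn(2, 4).astype(np.float32)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob("Y")

# Plain-numpy restatement of the documented formula.
ref = X[:, :2] * (1.0 / (1.0 + np.exp(-X[:, 2:])))
print("max abs diff:", np.abs(Y - ref).max())
```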
Cannot be used as activations.", + "name": "std" + } + ], + "support_level": "default" + }, + { + "name": "GroupNormGradient", + "support_level": "default" + }, + { + "name": "GRUUnit", + "description": "\nGRUUnit computes the activations of a standard GRU,\nin a sequence-length aware fashion.\n\nConcretely, given the (fused) inputs X (TxNxD), the previous hidden\nstate (NxD), and the sequence lengths (N), computes the GRU\nactivations, avoiding computation where the input is invalid (that is, at\npositions X[t][n] with t >= seqLengths[n]).\n\n", + "attributes": [ + { + "description": "Bool to determine if the hidden state is zeroed or passed along for timesteps past the given sequence_length.", + "name": "drop_states", + "option": "optional" + }, + { + "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.", + "name": "sequence_lengths", + "option": "optional" + } + ], + "outputs": [ + { + "description": "The new GRU hidden state calculated by this op.", + "name": "hidden" + } + ], + "support_level": "default" + }, + { + "name": "GRUUnitGradient", + "attributes": [ + { + "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.", + "name": "sequence_lengths", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "GT", + "description": "\nPerforms element-wise greater than comparison **>** (with limited broadcast support).\n\n\nIf necessary, the right-hand-side argument will be broadcast to match the\nshape of the left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or have its shape be a\ncontiguous subset of the first tensor's shape. The start of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
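For the *GroupNorm* entry above, a plain-numpy sketch of the normalization it describes (shapes and `num_groups` chosen for illustration; this restates the formula, not the operator's internals):

```python
import numpy as np

N, C, H, W = 2, 8, 4, 4
G = 4                      # num_groups; C must be divisible by G
eps = 1e-5                 # epsilon, the schema's documented default
X = np.random.randn(N, C, H, W).astype(np.float32)
gamma = np.ones(C, dtype=np.float32)   # per-channel scale
beta = np.zeros(C, dtype=np.float32)   # per-channel bias

# Normalize over each group of C // G channels together with H and W.
Xg = X.reshape(N, G, C // G, H, W)
mean = Xg.mean(axis=(2, 3, 4), keepdims=True)
var = Xg.var(axis=(2, 3, 4), keepdims=True)
Y = ((Xg - mean) / np.sqrt(var + eps)).reshape(N, C, H, W)
Y = Y * gamma.reshape(1, C, 1, 1) + beta.reshape(1, C, 1, 1)
print(Y.shape)  # (2, 8, 4, 4), same as X
```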
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GT\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [False True False False False False]\n\n```\n\n
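The example above compares equal-shaped tensors; a short hedged sketch of the `broadcast=1` path described in the entry, using a size-1 `B` (values chosen for illustration):

```python
from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()

# With broadcast=1, a size-1 B is compared against every element of A.
op = core.CreateOperator(
    "GT",
    ["A", "B"],
    ["C"],
    broadcast=1,
)

workspace.FeedBlob("A", np.array([[1, 5, 2], [9, 12, 3]]))
workspace.FeedBlob("B", np.array([4]))
workspace.RunOperatorOnce(op)
print("C:\n", workspace.FetchBlob("C"))  # elementwise A > 4
```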
\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "HalfFloatToFused8BitRowwiseQuantized", + "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. More precisely, each row contains\nint8 elements for each quantized element, and the last 8 bytes\nof each row in the output matrix are a float storing the scale\nfollowed by another float containing the scale.)\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "HalfFloatToFused8BitRowwiseQuantizedHalfScaleBias", + "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. More precisely, each row contains\nint8 elements for each quantized element, and the last 4 bytes\nof each row in the output matrix are a float storing the scale\nfollowed by another float containing the scale.)\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "HalfToFloat", + "support_level": "default" + }, + { + "name": "HalfToFused2BitFakeRowwiseQuantized", + "description": "\nApplies 2-bit row-wise fake quantization to a tensor of half floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "HalfToFused2BitRowwiseQuantized", + "description": "\nApplies 2-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 2-bit number between 0 and\n3. To later de-quantize values, the scale (range / 3) and zero_point\nare stored alongside the data. 
More precisely, each row first has quantized\nvalues, and then a 2-byte fp16 scale and a 2-byte zero_offset.\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "HalfToFused4BitFakeRowwiseQuantized", + "description": "\nApplies 4-bit row-wise fake quantization to a tensor of half floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "HalfToFused4BitRowwiseQuantized", + "description": "\nApplies 4-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to a 4-bit number between 0 and\n15. To later de-quantize values, the scale (range / 15) and zero_point\nare stored alongside the data. More precisely, each row first has quantized\nvalues, and then a 2-byte fp16 scale and a 2-byte zero_offset.\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "HardSigmoid", + "description": "\nApplies the hard sigmoid operation to the input data element-wise.\nThe HardSigmoid operation takes one input $X$, produces one output $Y$, and is defined as:\n\n$$Y = \max(0, \min(1, x * alpha + beta))$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/hard_sigmoid_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/hard_sigmoid_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"HardSigmoid\",\n [\"X\"],\n [\"Y\"],\n alpha = 0.2,\n beta = 0.5,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(5).astype(np.float32))\nprint(\"input:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"hard_sigmoid:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\ninput: [ 1.5744036 0.31632107 1.7842269 1.4450722 -2.1726978 ]\nhard_sigmoid: [ 0.81488073, 0.56326419, 0.85684538, 0.78901446, 0.06546044]\n\n```\n\n
\n\n\n", + "attributes": [ + { + "description": "float: the slope of the function. Defaults to 0.2", + "name": "alpha", + "option": "optional" + }, + { + "description": "float: the bias value of the function. Defaults to 0.5", + "name": "beta", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor with same shape as input", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "HardSigmoidGradient", + "description": "\nHardSigmoidGradient takes both Y and dY as well as an argument alpha and uses\nthis to update dX according to the chain rule and derivatives of the hard\nsigmoid function.\n", + "support_level": "default" + }, + { + "name": "HasElements", + "description": "\nThe *HasElements* op accepts a single or multiple input tensors, and produces a single boolean output $has\\_elements$. The output is *True* if and only if any of the input tensor has size > 0. Note, this op is the opposite of the *IsEmpty* op.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"HasElements\",\n [\"tensor\"],\n [\"has_elements\"],\n)\n\n# Use a non-empty tensor\nworkspace.FeedBlob(\"tensor\", np.random.randn(2, 2).astype(np.float32))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"has_elements: \", workspace.FetchBlob(\"has_elements\"),\"\\n\")\n\n# Use an empty tensor\nworkspace.FeedBlob(\"tensor\", np.empty(0))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"has_elements: \", workspace.FetchBlob(\"has_elements\"))\n\n```\n\n**Result**\n\n```\n\ntensor:\n [[ 0.6116506 -0.54433197]\n [ 0.19406661 -0.7338629 ]]\nhas_elements: True\n\ntensor:\n []\nhas_elements: False\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input data tensor to check for elements.", + "name": "tensor" + }, + { + "description": "List of input data tensors to check for elements.", + "name": "X1, X2, ..." + } + ], + "outputs": [ + { + "description": "Output scalar boolean tensor. True if input has size > 0.", + "name": "has_elements" + } + ], + "support_level": "default" + }, + { + "name": "HasScope", + "description": "\nChecks whether scope blob has any saved scopes left\n ", + "support_level": "default" + }, + { + "name": "HeatmapMaxKeypoint", + "support_level": "default" + }, + { + "name": "Histogram", + "description": "\n Computes a histogram for values in the given list of tensors.\n For logging activation histograms for post-hoc analyses, consider using the\n HistogramObserver observer.\n For iteratively computing a histogram for all input tensors encountered through\n history, consider using the AccumulateHistogram operator.\n ", + "attributes": [ + { + "description": "length-(k + 1) sequence of float values wherein the i-th element represents the inclusive left boundary of the i-th bin for i in [0, k - 1] and the exclusive right boundary of the (i-1)-th bin for i in [1, k].", + "name": "bin_edges", + "option": "optional" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* List of input tensors.", + "name": "X1, X2, ..." + } + ], + "outputs": [ + { + "description": "1D tensor of length k, wherein the i-th element expresses the count of tensor values that fall within range [bin_edges[i], bin_edges[i + 1])", + "name": "histogram" + } + ], + "support_level": "default" + }, + { + "name": "HSoftmax", + "description": "\nHierarchical softmax is an operator which approximates the softmax operator\nwhile giving significant training speed gains and reasonably comparable\nperformance. In this operator, instead of calculating the probabilities of all\nthe classes, we calculate the probability of each step in the path from root to\nthe target word in the hierarchy.\n\nThe operator takes a 2-D tensor (Tensor) containing a batch of layers, a\nset of parameters represented by the weight matrix and bias terms, and a 1-D\ntensor (Tensor) holding labels, or the indices of the target class. The\nhierarchy has to be specified as an argument to the operator.\n\nThe operator returns a 1-D tensor holding the computed log probability of the\ntarget class and a 2-D tensor of intermediate outputs (from the weight matrix\nand softmax from each step in the path from root to target class) which will be\nused by the gradient operator to compute gradients for all samples in the batch.\n", + "attributes": [ + { + "description": "Serialized HierarchyProto string containing list of vocabulary words and their paths from root of hierarchy to the leaf", + "name": "hierarchy", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input data from previous layer", + "name": "X" + }, + { + "description": "2D blob containing 'stacked' fully connected weight matrices. Each node in the hierarchy contributes one FC weight matrix if it has children nodes. 
Dimension is N*D, where D is the input dimension of the data (X) and N is the sum of all output dimensions, i.e. the total number of nodes (excluding the root)", + "name": "W" + }, + { + "description": "1D blob with N parameters", + "name": "b" + }, + { + "description": "int word_id of the target word", + "name": "labels" + } + ], + "outputs": [ + { + "description": "1-D tensor of log probability outputs, one per sample", + "name": "Y" + }, + { + "description": "Extra blob to store the intermediate FC and softmax outputs for each node in the hierarchical path of a word. The outputs from samples are stored in consecutive blocks in the forward pass and are used in reverse order in the backward gradientOp pass", + "name": "intermediate_output" + } + ], + "support_level": "default" + }, + { + "name": "HSoftmaxGradient", + "support_level": "default" + }, + { + "name": "HSoftmaxSearch", + "description": "\nHSoftmaxSearch is an operator to generate the most probable paths given a\nwell-trained model and an input vector. A greedy algorithm is used to prune the\nsearch tree.\n", + "attributes": [ + { + "description": "Serialized TreeProto string containing a tree including all intermediate nodes and leaves. All nodes must have names for correct outputs", + "name": "tree", + "option": "optional" + }, + { + "description": "Beam used for pruning the tree: only children whose score is smaller than the parent's score plus beam will be propagated.", + "name": "beam", + "option": "optional" + }, + { + "description": "Number of nodes in outputs", + "name": "topN", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input data from previous layer", + "name": "X" + }, + { + "description": "The matrix trained from Softmax Ops", + "name": "W" + }, + { + "description": "The bias trained from Softmax Ops", + "name": "b" + } + ], + "outputs": [ + { + "description": "The names of the selected nodes and leaves. For nodes, this is the name defined in the tree; for leaves, the index of the word in the tree.", + "name": "Y_names" + }, + { + "description": "The corresponding scores of Y_names", + "name": "Y_scores" + } + ], + "support_level": "default" + }, + { + "name": "HuffmanTreeHierarchy", + "description": "\nHuffmanTreeHierarchy is an operator to generate a Huffman tree hierarchy given\nthe input labels. It returns the tree as a serialized HierarchyProto\n", + "attributes": [ + { + "description": "The number of classes used to build the hierarchy.", + "name": "num_classes", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The labels vector", + "name": "Labels" + } + ], + "outputs": [ + { + "description": "Huffman coding hierarchy of the labels", + "name": "Hierarch" + } + ], + "support_level": "default" + }, + { + "name": "If", + "description": "\n'If' control operator; the first input is a scalar boolean blob that stores the condition\nvalue. Accepts 'then_net' (required) and 'else_net' (optional) arguments for 'then' and\n'else' subnets respectively. 
Subnets are executed in the same workspace as 'If'.\n ", + "attributes": [ + { + "description": "Net executed when condition is true", + "name": "then_net", + "option": "optional" + }, + { + "description": "Net executed when condition is false (optional)", + "name": "else_net", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Scalar boolean condition", + "name": "condition" + } + ], + "support_level": "default" + }, + { + "name": "Im2Col", + "description": "The Im2Col operator from Matlab.", + "inputs": [ + { + "description": "4-tensor in NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "4-tensor. For NCHW: N x (C x kH x kW) x outH x outW. For NHWC: N x outH x outW x (kH x kW x C).", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "IncrementPut", + "description": "\n Consumes a value and pushes it to the global stat registry as a sum.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_put_ops.cc\n\n ", + "attributes": [ + { + "description": "(*str*): name of the stat. If not present, then uses name of input blob", + "name": "name", + "option": "optional" + }, + { + "description": "(*int64_t*): number to multiply input values by (used when inputting floats, as stats can only receive integers)", + "name": "magnitude_expand", + "option": "optional" + }, + { + "description": "(*boolean*): whether or not to clamp inputs to the max inputs allowed", + "name": "bound", + "option": "optional" + }, + { + "description": "(*float*): Optionally provide a default value for receiving empty tensors", + "name": "default_value", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): A scalar tensor, representing any numeric value", + "name": "value" + } + ], + "support_level": "default" + }, + { + "name": "IndexFreeze", + "description": "\nFreezes the given index, disallowing creation of new index entries.\nShould not be called concurrently with IndexGet.\n", + "inputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + } + ], + "outputs": [ + { + "description": "The input handle.", + "name": "handle" + } + ], + "support_level": "default" + }, + { + "name": "IndexGet", + "description": "\nGiven an index handle and a tensor of keys, return an Int tensor of same shape\ncontaining the indices for each of the keys. If the index is frozen, unknown\nentries are given index 0. Otherwise, new entries are added into the index.\nIf an insert is necessary but max_elements has been reached, fail.\n", + "inputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + }, + { + "description": "Tensor of keys to be looked up.", + "name": "keys" + } + ], + "outputs": [ + { + "description": "Indices for each of the keys.", + "name": "indices" + } + ], + "support_level": "default" + }, + { + "name": "IndexHash", + "description": "\nThis operator translates a list of indices into a list of hashed indices.\nA seed can be fed as an argument to change the behavior of the hash function.\nIf a modulo is specified, all the hashed indices will be modulo the\nspecified number. 
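*IndexGet* and *IndexFreeze* above operate on a handle produced by one of the Index*Create operators (e.g. *IntIndexCreate*, documented at the end of this file). A hedged round-trip sketch, assuming these operators and the `max_elements` argument are available in the build:

```python
from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()

# Create an int32 index; ids start at 1 (0 is reserved for unknowns).
workspace.RunOperatorOnce(core.CreateOperator(
    "IntIndexCreate", [], ["handle"], max_elements=10,
))

workspace.FeedBlob("keys", np.array([7, 7, 42], dtype=np.int32))
workspace.RunOperatorOnce(core.CreateOperator(
    "IndexGet", ["handle", "keys"], ["indices"],
))
print(workspace.FetchBlob("indices"))  # e.g. [1 1 2]

# After freezing, lookups of unseen keys return 0 instead of inserting.
workspace.RunOperatorOnce(core.CreateOperator(
    "IndexFreeze", ["handle"], ["handle"],
))
```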
All input and output indices are enforced to be positive.\n", + "attributes": [ + { + "description": "seed for the hash function", + "name": "seed", + "option": "optional" + }, + { + "description": "must be > 0, hashed ids will be modulo this number", + "name": "modulo", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input feature indices.", + "name": "Indices" + } + ], + "outputs": [ + { + "description": "Hashed feature indices.", + "name": "HashedIndices" + } + ], + "support_level": "default" + }, + { + "name": "IndexLoad", + "description": "\nLoads the index from the given 1-D tensor. Elements in the tensor will be given\nconsecutive indexes starting at 1. Fails if tensor contains repeated elements.\n", + "attributes": [ + { + "description": "If set, skips the first entry of the tensor. This allows loading tensors that are aligned with an embedding, where the first entry corresponds to the default 0 index entry.", + "name": "skip_first_entry", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + }, + { + "description": "1-D tensor with elements starting with index 1.", + "name": "items" + } + ], + "outputs": [ + { + "description": "The input handle.", + "name": "handle" + } + ], + "support_level": "default" + }, + { + "name": "IndexSize", + "description": "\nReturns the number of entries currently present in the index.\n", + "inputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + } + ], + "outputs": [ + { + "description": "Scalar int64 tensor with number of entries.", + "name": "items" + } + ], + "support_level": "default" + }, + { + "name": "IndexStore", + "description": "\nStores the keys of this index in a 1-D tensor. Since element 0 is reserved\nfor unknowns, the first element of the output tensor will be element of index 1.\n", + "inputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + } + ], + "outputs": [ + { + "description": "1-D tensor with elements starting with index 1.", + "name": "items" + } + ], + "support_level": "default" + }, + { + "name": "InferenceLSTM", + "attributes": [ + { + "description": "(*long*): number of layers in the lstm stack", + "name": "num_layers", + "option": "optional" + }, + { + "description": "(*bool*): whether the cells have biases or not", + "name": "has_biases", + "option": "optional" + }, + { + "description": "(*bool*): whether the batch is at dim 0", + "name": "batch_first", + "option": "optional" + }, + { + "description": "(*bool*): if bidirectional", + "name": "bidirectional", + "option": "optional" + } + ], + "outputs": [ + { + "description": "the output of the last layer of lstm", + "name": "output" + }, + { + "description": "hidden state at t = seq_len", + "name": "hidden" + }, + { + "description": "cell state at t = seq_len", + "name": "cell" + } + ], + "support_level": "default" + }, + { + "name": "InstanceNorm", + "description": "\nThe *InstanceNorm* op applies Instance Normalization over a 4D input as described in [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022).\n\n$$output = \\frac{input-\\mu_{input}}{\\sqrt{\\sigma_{input}^2 + \\epsilon}}*scale + bias$$\n\nNote that two of the outputs are optional, so there are three output cases for this op. 
Case 1: output; Case 2: output, saved_mean; Case 3: output, saved_mean, saved_inv_stdev.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/instance_norm_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/instance_norm_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"InstanceNorm\",\n [\"input\", \"scale\", \"bias\"],\n [\"output\"],\n epsilon=1e-5,\n)\n\nworkspace.FeedBlob(\"input\", np.random.randn(2, 1, 3, 3).astype(np.float32))\nprint(\"input:\\n\", workspace.FetchBlob(\"input\"), \"\\n\")\n\nworkspace.FeedBlob(\"scale\", np.array([1.5]).astype(np.float32))\nprint(\"scale: \", workspace.FetchBlob(\"scale\"))\n\nworkspace.FeedBlob(\"bias\", np.array([1.]).astype(np.float32))\nprint(\"bias: \", workspace.FetchBlob(\"bias\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"output:\\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n [[[[ 0.97856593 -1.1832817 -0.2540021 ]\n [-1.3315694 -0.7485018 0.3787225 ]\n [-0.6826597 -1.4637762 0.57116514]]]\n\n\n [[[-0.44948956 0.85544354 -0.9315333 ]\n [-0.37202677 -0.22266895 -0.27194235]\n [ 0.4948163 -0.7296504 1.3393803 ]]]]\n\nscale: [1.5]\nbias: [1.]\noutput:\n [[[[ 3.5017493 -0.3791256 1.2890853 ]\n [-0.6453266 0.40137637 2.4249308 ]\n [ 0.5195738 -0.8826599 2.7703972 ]]]\n\n\n [[[ 0.12639964 2.856744 -0.8821926 ]\n [ 0.28847694 0.60098207 0.49788612]\n [ 2.1021945 -0.45978796 3.869297 ]]]]\n\n```\n\n
\n\n", + "attributes": [ + { + "default": 1e-05, + "description": "The epsilon value to use to avoid division by zero.", + "name": "epsilon", + "option": "optional", + "type": "float32" + }, + { + "default": "NCHW", + "description": "Specifies the order of the input data blob, where $N$ is batch size, $C$ is number of channels, $H$ is spatial height, and $W$ is spatial width. The only other valid option is \"NHWC\".", + "name": "order", + "option": "optional", + "type": "string" + } + ], + "inputs": [ + { + "description": "The input 4-dimensional NCHW tensor to be operated on.", + "name": "input" + }, + { + "description": "The input 1-dimensional scale tensor of size *C*.", + "name": "scale" + }, + { + "description": "The input 1-dimensional bias tensor of size *C*.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "The output 4-dimensional tensor of the same shape as input.", + "name": "output" + }, + { + "description": "(Optional) Saved mean used during training to speed up gradient computation. Should not be used for testing.", + "name": "saved_mean" + }, + { + "description": "(Optional) Saved inverse stdev used during training to speed up gradient computation. Should not be used for testing.", + "name": "saved_inv_stdev" + } + ], + "support_level": "default" + }, + { + "name": "InstanceNormGradient", + "support_level": "default" + }, + { + "name": "Int8Add", + "description": "\n Performs element-wise binary Add (with no broadcast support).\n", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "Second operand. It should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "Int8AddRelu", + "description": "\n Performs element-wise binary Add (with no broadcast support). \"\n \"Output will go through rectified linear \"\n \"function, where y = max(0, x).\n", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "Second operand. It should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "Int8AveragePool", + "category": "Pool", + "description": "AveragePool \nconsumes an input blob X and applies average pooling across the\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. 
Average pooling consists of averaging all values of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n", + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data tensor from average pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8AveragePoolRelu", + "description": "AveragePool \nconsumes an input blob X and applies average pooling across\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. Average pooling consists of averaging all values of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data tensor from average pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. 
Output will go through rectified linear function, where y = max(0, x).", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8ChannelShuffle", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "Int8Concat", + "description": "Concatenate a list of tensors into a single tensor", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "Which axis to concat on", + "name": "axis", + "option": "optional" + }, + { + "description": "Pass 1 to add the axis specified in arg 'axis' to all input tensors", + "name": "add_axis", + "option": "optional" + } + ], + "outputs": [ + { + "description": "Concatenated tensor", + "name": "concat_result" + }, + { + "description": "The dimensions of the inputs.", + "name": "split_info" + } + ], + "support_level": "default" + }, + { + "name": "Int8Conv", + "category": "Layer", + "description": "\nThe convolution operator consumes an input vector, a filter blob\nand a bias blob and computes the output. \n[Only NHWC order is supported now.] Note that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is convolved with a subset of the\nimage and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nconv_op_impl.h is the templated implementation of the conv_op.h file, which is\nwhy they are separate files.\n", + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "default": 0, + "name": "pad" + }, + { + "default": 1, + "name": "stride" + } + ], + "inputs": [ + { + "description": "Input data blob from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the NCHW usage. On the other hand, the NHWC Op has a different set of dimension constraints. ", + "name": "X" + }, + { + "description": "The filter blob that will be used in the convolutions; has size (M x C x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the convolution; has size (M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8ConvRelu", + "description": "\nThe convolution operator consumes an input vector, a filter blob\nand a bias blob and computes the output. 
\n[Only NHWC order is supported now.] Note that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is convolved with a subset of the\nimage and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nconv_op_impl.h is the templated implementation of the conv_op.h file, which is\nwhy they are separate files.\n", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input data blob from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the NCHW usage. On the other hand, the NHWC Op has a different set of dimension constraints. ", + "name": "X" + }, + { + "description": "The filter blob that will be used in the convolutions; has size (M x C x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the convolution; has size (M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths. Output will go through rectified linear function, where y = max(0, x).", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8ConvTranspose", + "description": "\nThe transposed convolution consumes an input vector, the filter blob, and\nthe bias blob, and computes the output. Note that other parameters, such as\nthe stride and kernel size, or the pads' sizes in each direction are not\nnecessary for input because they are provided by the\nConvTransposeUnpoolOpBase operator. Various dimension checks are done\nimplicitly, and the sizes are specified in the Input docs for this operator.\nAs is expected, the filter is deconvolved with a subset of the\nimage and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nconv_transpose_op_impl.h is the templated implementation of the\nconv_transpose_op.h file, which is why they are separate files.\n ", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input data blob from previous layer; has size (N x H x W x C), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that NHWC is supported now", + "name": "X" + }, + { + "description": "The filter blob that will be used in the transposed convolution; has size (M x kH x kW x C), where C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the convolution; has size (C). 
Optional, if not passed, will treat it as all 0.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the transposed convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8Dequantize", + "inputs": [ + { + "description": "Int8 Tensor qX.", + "name": "qX" + } + ], + "outputs": [ + { + "description": "FP32 Tensor that represents mapped real value of qX.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8FC", + "category": "Layer", + "description": "\nComputes the result of passing an input vector X into a fully\nconnected layer with 2D weight matrix W and 1D bias vector b. That is,\nthe layer computes Y = X * W^T + b, where X has size (M x K),\nW has size (N x K), b has size (N), and Y has size (M x N),\nwhere M is often the batch size.\n\n\nNOTE: X does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor X, the coerced 2D\nshape is [a_0, a_1 * ... * a_{n-1}]; only this case is supported!\nLastly, even though b is a 1D vector of size N, it is copied/resized to\nbe size (M x N) implicitly and added to each vector in the batch.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n", + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "input tensor that's coerced into a 2D matrix of size (MxK) as described above", + "name": "X" + }, + { + "description": "A tensor that is coerced into a 2D blob of size (KxN) containing fully connected weight matrix", + "name": "W" + }, + { + "description": "1D blob containing bias vector", + "name": "b" + }, + { + "description": "Optional scale quantization param computed on activation histogram data. Will overwrite the Y_scale argument if specified", + "name": "Scale qparam" + }, + { + "description": "Optional zero-point quantization param computed on activation data. Will overwrite the Y_zero_point argument if specified", + "name": "Zero-point qparam" + }, + { + "description": "Optional Qparam blob that contains quant param computed on activation histogram data. Will overwrite the Y_scale and Y_zero_point arguments if specified", + "name": "Qparam" + } + ], + "outputs": [ + { + "description": "2D output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8FCPackWeight", + "description": "Prepack weight for Int8FC", + "attributes": [ + { + "description": "See FC operator", + "name": "axis_w", + "option": "optional" + }, + { + "description": "Default false. Per output channel quantization", + "name": "quantize_channelwise", + "option": "optional" + }, + { + "description": "Default false. Store unpacked quantized weights to W_q.original_tensor", + "name": "save_unpacked_weights", + "option": "optional" + }, + { + "description": "The scale of the input activation tensor. 
Only meaningful when bias is provided (NOTE: this is not the scale of the weight).", + "name": "in_scale", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Weight tensor in KRSC layout", + "name": "W" + }, + { + "description": "Bias tensor", + "name": "b" + } + ], + "outputs": [ + { + "description": "Weight/bias tensor in a packed format with type Int8FCDNNLowPPackedWeightBlob", + "name": "W_q" + }, + { + "description": "Bias int32 quantized tensor", + "name": "B_q" + } + ], + "support_level": "default" + }, + { + "name": "Int8Flatten", + "description": "\nFlattens the input tensor into a 2D matrix. If input tensor has shape\n(d_0, d_1, ... d_n) then the output will have shape\n(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn)\n", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "(Default to 1) Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output", + "name": "axis", + "option": "optional" + } + ], + "inputs": [ + { + "description": "A Int8 tensor of rank >= axis.", + "name": "input" + } + ], + "outputs": [ + { + "description": "A 2D Int8 tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Int8GenQuantParams", + "description": "Operator wrapper for generating int8 tensor quantization parameters given the input data and quant scheme", + "inputs": [ + { + "description": "The input data, or last N samples of the output activations.", + "name": "X" + }, + { + "description": "Int8QuantSchemeBlob that specifies the quantization kind and preserve_sparsity options when generating the quant params.", + "name": "quant_scheme" + } + ], + "outputs": [ + { + "description": "Int8QuantParamsBlob that contains the scale and zero_point info in TensorQuantizationParams type.", + "name": "quant_param" + } + ], + "support_level": "default" + }, + { + "name": "Int8GivenIntTensorFill", + "description": "\n Creates a quantized tensor of type int32 with scale and zero point info.\n", + "attributes": [ + { + "description": "Input array of type int32", + "name": "values", + "option": "optional" + }, + { + "description": "Input tensor shape", + "name": "shape", + "option": "optional" + }, + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "outputs": [ + { + "description": "An Int8TensorCPU with scale and zero point info", + "name": "Tensor" + } + ], + "support_level": "default" + }, + { + "name": "Int8GivenTensorFill", + "description": "\n Creates a quantized tensor of type char(byte) with scale and zero point info.\n", + "attributes": [ + { + "description": "Input array of type char(byte)", + "name": "values", + "option": "optional" + }, + { + "description": "Input tensor shape", + "name": "shape", + "option": "optional" + }, + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], 
+ "outputs": [ + { + "description": "An Int8TensorCPU with scale and zero point info", + "name": "Tensor" + } + ], + "support_level": "default" + }, + { + "name": "Int8LeakyRelu", + "description": "\nLeakyRelu takes input data (Tensor) and an argument alpha, and produces one\noutput data (Tensor) where the function `f(x) = alpha * x for x < 0`,\n`f(x) = x for x >= 0`, is applied to the data tensor elementwise.\n", + "attributes": [ + { + "description": "Coefficient of leakage, default value is 0.01", + "name": "alpha", + "option": "optional" + }, + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D input tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8MaxPool", + "description": "MaxPool \nconsumes an input blob X and applies max pooling across the\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. Max pooling consisting of taking the maximum value of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data tensor from max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Output will go through rectified linear", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8MaxPoolRelu", + "description": "MaxPool \nconsumes an input blob X and applies max pooling across the\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. Max pooling consisting of taking the maximum value of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data tensor from max pooling across the input tensor. 
Dimensions will vary based on various kernel, stride, and pad sizes. Output will go through rectified linear function, where y = max(0, x).", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8Quantize", + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "FP32 Tensor X.", + "name": "X" + }, + { + "description": "Optional scale quantization param computed on activation histogram data. Will overwrite the Y_scale argument if specified", + "name": "Scale qparam" + }, + { + "description": "Optional zero-point quantization param computed on activation data. Will overwrite the Y_zero_point argument if specified", + "name": "Zero-point qparam" + }, + { + "description": "Optional Qparam blob that contains quant param computed on activation histogram data. Will overwrite the Y_scale and Y_zero_point arguments if specified", + "name": "Qparam" + } + ], + "outputs": [ + { + "description": "Int8 Tensor qX representing X with linear quantization.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8Relu", + "category": "Activation", + "description": "\nRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = max(0, x), is applied to\nthe tensor elementwise.\n", + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8Reshape", + "description": "\nReshape the input tensor similar to numpy.reshape.\n\nIt takes a tensor as input and an optional tensor specifying the new shape.\nWhen the second input is absent, an extra argument `shape` must be specified.\nIt outputs the reshaped tensor as well as the original shape.\n\nAt most one dimension of the new shape can be -1. In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is going to be copied\nfrom the input tensor.\n", + "attributes": [ + { + "description": "New shape", + "name": "shape", + "option": "optional" + }, + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data" + }, + { + "description": "New shape.", + "name": "new_shape" + } + ], + "outputs": [ + { + "description": "Reshaped data.", + "name": "reshaped" + }, + { + "description": "Original shape.", + "name": "old_shape" + } + ], + "support_level": "default" + }, + { + "name": "Int8ResizeNearest", + "description": "\nResizes the spatial dimensions of the input using nearest neighbor\ninterpolation. 
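A hedged sketch tying *Int8Quantize* and *Int8Dequantize* above together, assuming the int8 operators are registered in the build; the scale and zero point are chosen for illustration:

```python
from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()
workspace.FeedBlob("X", np.random.rand(1, 4, 4, 3).astype(np.float32))

# Map FP32 values in [0, 1) onto uint8 codes with an explicit scale/offset...
workspace.RunOperatorOnce(core.CreateOperator(
    "Int8Quantize", ["X"], ["X_q"], Y_scale=1.0 / 255.0, Y_zero_point=0,
))
# ...and back; X_d approximates X up to one quantization step.
workspace.RunOperatorOnce(core.CreateOperator(
    "Int8Dequantize", ["X_q"], ["X_d"],
))
err = np.abs(workspace.FetchBlob("X") - workspace.FetchBlob("X_d")).max()
print("max abs quantization error:", err)
```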
The `width_scale` and `height_scale` arguments\ncontrol the size of the output, which is given by:\noutput_width = floor(input_width * width_scale)\noutput_height = floor(input_height * height_scale)\n", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + }, + { + "description": "Output dimensions (HxW). If specified this takes precedence over scale values.", + "name": "output_size", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input Int8 tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output Int8 tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8RoIAlign", + "description": "\nRegion of Interest (RoI) align operation as used in Mask R-CNN.\n", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "(float) default 1.0; Spatial scale of the input feature map X relative to the input image. E.g., 0.0625 if X has a stride of 16 w.r.t. the input image.", + "name": "spatial_scale", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's height.", + "name": "pooled_h", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's width.", + "name": "pooled_w", + "option": "optional" + }, + { + "description": "(int) default -1; number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If <= 0, then an adaptive number of grid points are used (computed as ceil(roi_width / pooled_w), and likewise for height).", + "name": "sampling_ratio", + "option": "optional" + } + ], + "inputs": [ + { + "description": "4D Int8 Tensor feature map input of shape (N, C, H, W).", + "name": "X" + }, + { + "description": "2D input of shape (R, 4 or 5) specifying R RoIs representing: batch index in [0, N - 1], x1, y1, x2, y2. The RoI coordinates are in the coordinate system of the input image. For inputs corresponding to a single image, batch index can be excluded to have just 4 columns.", + "name": "RoIs" + } + ], + "outputs": [ + { + "description": "4D Int8 Tensor output of shape (R, C, pooled_h, pooled_w). The r-th batch element is a pooled feature map corresponding to the r-th RoI.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Int8Sigmoid", + "description": "\nApply the Sigmoid function element-wise to the input tensor. This is often used\nas a non-linear activation function in a neural network. 
The sigmoid function is\ndefined as:\n\n$$Sigmoid(x) = \\frac{1}{1+\\exp(-x)}$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sigmoid_op.cc\n", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input" + } + ], + "outputs": [ + { + "description": "The sigmoid normalized output values with the same shape as input tensor.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Int8Slice", + "description": "\nProduces a slice of the input Int8 tensor. Currently, only slicing in a single\ndimension is supported.\nSlices are passed as 2 1D vectors or as two keyword argument lists with starting\nand end indices for each dimension of the input `data` tensor. If a negative\nvalue is passed for any of the start or end indices, it represents the number of\nelements before the end of that dimension. End indices are non-inclusive unless\nnegative (end index -1 means up to and including the last element).\n\n\nExample:\n\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n starts = [0, 1]\n ends = [-1, 3]\n\n result = [\n [2, 3],\n [6, 7],\n ]\n", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "List of starting indices", + "name": "starts", + "option": "optional" + }, + { + "description": "List of ending indices", + "name": "ends", + "option": "optional" + }, + { + "description": "(Optional) The dimension to slice over. If specified start_idx and end_idx should also be given and it takes precedence over starts and ends", + "name": "dim", + "option": "optional" + }, + { + "description": "(Optional) The dimension to start slice from. Default is 0", + "name": "start_idx", + "option": "optional" + }, + { + "description": "(Optional) The dimension to end the slice. Default is -1", + "name": "end_idx", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Int8 Tensor of data to extract slices from.", + "name": "data" + }, + { + "description": "1D tensor: start-indices for each dimension of data.", + "name": "starts" + }, + { + "description": "1D tensor: end-indices for each dimension of data.", + "name": "ends" + } + ], + "outputs": [ + { + "description": "Sliced Int8 data tensor.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Int8Softmax", + "category": "Activation", + "description": "\nThe operator computes the softmax normalized values for each layer in the batch\n of the given input. The input is a 2-D tensor (Tensor) of size\n(batch_size x input_feature_dimensions). The output tensor has the same shape\nand contains the softmax normalized values of the corresponding input.\n\nX does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\nX \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then X will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. 
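The Int8Slice end-index convention above differs from Python slicing in one respect: a negative end is inclusive. A NumPy sketch reproducing the worked example (hypothetical helper, not the Caffe2 kernel):

```
import numpy as np

def caffe2_slice(data, starts, ends):
    # Non-negative ends are exclusive; negative ends are inclusive,
    # so end -1 maps to Python's None and end -k maps to -(k - 1).
    idx = []
    for s, e in zip(starts, ends):
        if e < 0:
            e = None if e == -1 else e + 1
        idx.append(slice(s, e))
    return data[tuple(idx)]

data = np.array([[1, 2, 3, 4],
                 [5, 6, 7, 8]])
print(caffe2_slice(data, starts=[0, 1], ends=[-1, 3]))
# [[2 3]
#  [6 7]]
```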
For the default\ncase where axis=1, this means the X tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n", + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "(int) default to 1; describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size", + "name": "axis", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input" + } + ], + "outputs": [ + { + "description": "The softmax normalized output values with the same shape as input tensor.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Int8Sum", + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "Int8SumRelu", + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "Int8Transpose", + "description": "\nTranspose the input tensor by permuting the axes of the input according\nto the `axes` argument. Similar to numpy's\n[transpose](https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html)\nfunction.\n\nFor example, when axes=(1, 0, 2), given an input tensor of shape\n(1, 2, 3), the output shape will be (2, 1, 3).\n", + "attributes": [ + { + "description": "Order to permute axes of input tensor. Reverses the dimensions by default.", + "name": "axes", + "option": "optional" + }, + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "Transposed output", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "IntegralImage", + "description": "\nComputes an integral image, which contains the sum of pixel values within\nan image vertically and horizontally. This integral image can then be used\nwith other detection and tracking techniques.\n", + "inputs": [ + { + "description": "Images tensor of the form (N, C, H, W)", + "name": "X" + } + ], + "outputs": [ + { + "description": "Integrated image of the form (N, C, H+1, W+1)", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "IntegralImageGradient", + "support_level": "default" + }, + { + "name": "IntIndexCreate", + "description": "\nCreates a dictionary that maps int32 keys to consecutive integers\nfrom 1 to max_elements. 
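The 2D coercion that the Int8Softmax description walks through is easy to mirror in NumPy (a float sketch only; the Int8 op's dequantize/requantize steps are omitted):

```
import numpy as np

def softmax_with_axis(x, axis=1):
    # Coerce X into [a_0*...*a_{k-1}, a_k*...*a_{n-1}], softmax each row,
    # then restore the original shape.
    rows = int(np.prod(x.shape[:axis]))
    x2d = x.reshape(rows, -1)
    e = np.exp(x2d - x2d.max(axis=1, keepdims=True))  # stabilized
    return (e / e.sum(axis=1, keepdims=True)).reshape(x.shape)

x = np.random.randn(2, 3, 4).astype(np.float32)
y = softmax_with_axis(x, axis=1)
print(y.reshape(2, -1).sum(axis=1))  # each coerced row sums to 1
```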
Zero is reserved for unknown keys.\n", + "attributes": [ + { + "description": "Max number of elements, including the zero entry.", + "name": "max_elements", + "option": "optional" + } + ], + "outputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handler" + } + ], + "support_level": "default" + }, + { + "name": "IsEmpty", + "description": "\nThe *IsEmpty* op accepts a single input $tensor$, and produces a single boolean output $is\\_empty$. The output is *True* if and only if $tensor$ has size == 0.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"IsEmpty\",\n [\"tensor\"],\n [\"is_empty\"],\n)\n\n# Use a not-empty tensor\nworkspace.FeedBlob(\"tensor\", np.random.randn(2, 2).astype(np.float32))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"is_empty: \", workspace.FetchBlob(\"is_empty\"),\"\\n\")\n\n# Use an empty tensor\nworkspace.FeedBlob(\"tensor\", np.empty(0))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"is_empty: \", workspace.FetchBlob(\"is_empty\"))\n\n```\n\n**Result**\n\n```\n\ntensor:\n [[ 0.26018378 0.6778789 ]\n [-1.3097627 -0.40083608]]\nis_empty: False\n\ntensor:\n []\nis_empty: True\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input data tensor to check if empty.", + "name": "tensor" + } + ], + "outputs": [ + { + "description": "Output scalar boolean tensor. True if input has size == 0.", + "name": "is_empty" + } + ], + "support_level": "default" + }, + { + "name": "IsMemberOf", + "description": "\nThe *IsMemberOf* op takes an input tensor *X* and a list of values as argument, and produces one output data tensor *Y*. The output tensor is the same shape as *X* and contains booleans. The output is calculated as the function *f(x) = x in value* and is applied to *X* elementwise.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/elementwise_logical_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/elementwise_logical_ops.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"IsMemberOf\",\n [\"X\"],\n [\"Y\"],\n value=[0,2,4,6,8],\n)\n\n# Feed the input tensor X\nworkspace.FeedBlob(\"X\", np.array([0,1,2,3,4,5,6,7,8]).astype(np.int32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y: \\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n# value=[0,2,4,6,8]\n\nX:\n [0 1 2 3 4 5 6 7 8]\nY:\n [ True False True False True False True False True]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "List of values to check for membership.", + "name": "value", + "option": "optional" + }, + { + "description": "The data type for the elements of the output tensor. Strictly must be one of the types from DataType enum in TensorProto.", + "name": "dtype", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor of any shape", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output tensor (same size as X containing booleans)", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "IsNaN", + "description": "Returns a new tensor with boolean elements representing if each element is NaN or not.", + "inputs": [ + { + "description": "Tensor to check for nan", + "name": "tensor" + } + ], + "outputs": [ + { + "description": "Tensor containing a 1 at each location of NaN elements.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Iter", + "description": "\nStores a singe integer, that gets incremented on each call to Run().\nUseful for tracking the iteration count during SGD, for example.\n", + "support_level": "default" + }, + { + "name": "KeySplit", + "support_level": "default" + }, + { + "name": "KeyValueToMap", + "description": "Convert key and value blob pairs into a map blob", + "inputs": [ + { + "description": "Blob reference to the key", + "name": "key blob" + }, + { + "description": "Blob reference to the value", + "name": "value blob" + } + ], + "outputs": [ + { + "description": "Blob reference to the map", + "name": "map blob" + } + ], + "support_level": "default" + }, + { + "name": "L1Distance", + "description": "\nComputes the row-wise L1 Distance between the two input tensors $X$ and $Y$, which is defined as\n\n$$L1Distance(\\mathbf{x},\\mathbf{y}) = \\sum_{i}\\mid x_i - y_i\\mid$$\n\nNote, both inputs must either be 1-dimensional or 2-dimensional and both must have the same shape. The output $Z$ will be 1-dimensional regardless and its length will equal the number of rows in the inputs.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"L1Distance\",\n [\"X\", \"Y\"],\n [\"Z\"]\n)\n\n# Create X\nX = 5*np.ones((1, 4))\nprint(\"X:\\n\",X)\n\n# Create Y\nY = np.ones((1, 4))\nprint(\"Y:\\n\",Y)\n\n# Feed X & Y into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"Y\", Y.astype(np.float32))\n\n# Run op\nworkspace.RunOperatorOnce(op)\n\n# Collect Output\nprint(\"Z:\\n\", workspace.FetchBlob(\"Z\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[5. 5. 5. 5.]]\nY:\n [[1. 1. 1. 1.]]\nZ:\n [16.]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "First input tensor. (1D or 2D)", + "name": "X" + }, + { + "description": "Second input tensor. (must have the same shape as $X$)", + "name": "Y" + } + ], + "outputs": [ + { + "description": "1D output tensor. One value for each row of the inputs.", + "name": "Z" + } + ], + "support_level": "default" + }, + { + "name": "L1DistanceGradient", + "support_level": "default" + }, + { + "name": "LabelCrossEntropy", + "description": "\nThis operator computes the cross entropy between a $NxD$ dimensional input data tensor $X$ and a one dimensional input label tensor $label$. The op produces a single length $N$ output tensor $Y$. Here, $N$ is considered the batch size and $D$ is the size of each element in the batch. In practice, it is most commonly used at the end of models as a part of the loss computation, after the SoftMax operator and before the AveragedLoss operator. The cross entropy operation is defined as follows\n\n$$Y_i = -log(X_{ij})$$\n\nwhere ($i$, $j$) is the classifier's prediction of the $j$th class (the correct one), and $i$ is the batch size. Each log has a lower limit for numerical stability.\n\nThe difference between *LabelCrossEntropy* and *CrossEntropy* is how the labels are specified. Here, the labels are a length $N$ list of integers, whereas in CrossEntropy the labels are a $NxD$ dimensional matrix of one hot label vectors. However, the results of computation should be the same, as shown in the two examples where ($i$, $j$) is the classifier's prediction of the $j$th class (the correct one), and $i$ is the batch size. Each log has a lower limit for numerical stability.\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LabelCrossEntropy\",\n [\"X\", \"label\"],\n [\"Y\"]\n)\n\n# Create X: sample softmax output for 5-class model\nX = np.array([[.01, .05, .02, .02, .9],[.03, .1, .42, .05, .4]])\nprint(\"X:\\n\",X)\n\n# Create label: integer ground truth labels (class indices, not one-hot)\nlabel = np.array([4,2])\nprint(\"label:\\n\",label)\n\n# Feed X & label into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"label\", label.astype(np.int32))\n\n# Run op\nworkspace.RunOperatorOnce(op)\n\n# Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[0.01 0.05 0.02 0.02 0.9 ]\n [0.03 0.1 0.42 0.05 0.4 ]]\nlabel:\n [4 2]\nY:\n [0.10536055 0.8675006 ]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input tensor which is almost always the result of a softmax operation. $X$ is a 2D array of size $NxD$, where $N$ is the batch size and $D$ is the number of classes.", + "name": "X" + }, + { + "description": "Blob containing the labels used to compare the input. $label$ is a length $N$ list of integers, where each element is the integer label for the $n$th element of the batch.", + "name": "label" + } + ], + "outputs": [ + { + "description": "Output blob from the cross entropy computation. $Y$ is 1D length $N$ tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "LabelCrossEntropyGradient", + "support_level": "default" + }, + { + "name": "LambdaRankNdcg", + "description": "\nIt implements the LambdaRank as appeared in Wu, Qiang, et al. \"Adapting boosting\nfor information retrieval measures.\" Information Retrieval 13.3 (2010): 254-270.\n\nThis method heuristically optimizes the NDCG.\n", + "support_level": "default" + }, + { + "name": "LambdaRankNdcgGradient", + "support_level": "default" + }, + { + "name": "Lars", + "description": "\nImplement Layer-wise Adaptive Rate Scaling (LARS) with clipping. Before adding weight\ndecay, given a parameter tensor X and its gradient dX, the local learning rate\nfor X will be\n\nlocal_lr = trust * norm(X) / ( norm(dX) + wd * norm(X) + offset * norm(X) )\n\n = trust / ( norm(dX) / norm(X) + wd + offset ),\n\nwhere offset is a preset hyper-parameter to avoid numerical issue and trust\nindicates how much we trust the layer to change its parameters during one update.\nIn this implementation, we uses l2 norm and the computed local learning rate is\nclipped based on the upper bound lr_max and the lower bound lr_min:\n\nlocal_lr = min(local_lr, lr_max) and local_lr = max(local_lr, lr_min)\n\n", + "attributes": [ + { + "description": "rescaling offset parameter", + "name": "offset", + "option": "optional" + }, + { + "description": "minimum learning rate for clipping", + "name": "lr_min", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameter tensor", + "name": "X" + }, + { + "description": "Gradient tensor", + "name": "dX" + }, + { + "description": "Weight decay", + "name": "wd" + }, + { + "description": "Trust", + "name": "trust" + }, + { + "description": "Upper bound of learning rate", + "name": "lr_max" + } + ], + "outputs": [ + { + "description": "Rescaled local learning rate", + "name": "lr_rescaled" + } + ], + "support_level": "default" + }, + { + "name": "LastNWindowCollector", + "description": "\nCollect the last N rows from input data. The purpose is to keep track of data\naccross batches, so for example suppose the LastNWindowCollector is called\nsuccessively with the following input data\n\n [1, 2, 3, 4]\n [5, 6, 7]\n [8, 9, 10, 11]\n\nAnd the number of items is set to 6, then the output after the 3rd call\nwill contain the following elements:\n\n [6, 7, 8, 9, 10, 11]\n\nNo guarantee is made on the ordering of elements in input. So a valid value for\noutput could have been\n\n [11, 10, 9, 8, 7, 6]\n\nAlso, this method works for any order tensor, treating the first dimension as\ninput rows and keeping the last N rows seen as input. 
So for instance:\n\n [[1, 2], [2, 3], [3, 4], [4, 5]]\n [[5, 6], [6, 7], [7, 8]]\n [[8, 9], [9, 10], [10, 11], [11, 12]]\n\nA possible output would be\n\n [[6, 7], [7, 8], [8, 9], [9, 10], [10, 11], [11, 12]]\n\nThis is not thread safe unless a mutex is given.\n", + "attributes": [ + { + "description": "The number of most recent rows, N, to collect", + "name": "num_to_collect", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The buffer for last-N record. Should be initialized to empty tensor", + "name": "last-N buffer" + }, + { + "description": "The cursor pointing to the next position that should be replaced. Should be initialized to 0.", + "name": "next cursor" + }, + { + "description": "tensor to collect from", + "name": "DATA" + }, + { + "description": "(optional) mutex to use to make this thread-safe", + "name": "MUTEX" + }, + { + "description": "", + "name": "NUM_VISITED" + } + ], + "outputs": [ + { + "description": "Data stored in sessions", + "name": "last-N buffer" + }, + { + "description": "Updated input cursor", + "name": "next cursor" + }, + { + "description": "number of records seen so far", + "name": "NUM_VISITED" + } + ], + "support_level": "default" + }, + { + "name": "LayerNorm", + "description": "\nComputes layer normalization as described in https://arxiv.org/pdf/1607.06450.pdf.\nGiven an input vector x \\in [a_0, a_1, ...,a_{k-1}, a_k, ..., a_{n-1}],\nthis op treats dimensions a_k through a_{n-1} as feature vectors. For each\nfeature vector, the op computes the mean and standard deviation. Then,\nit returns the normalized values (with respect to the feature vector).\n\nNote that this op does not include the scale and bias terms described in the\npaper. Simply follow this op with an FC op to add those. Concretely, this op\nimplements:\n\nh = \\frac{1}{\\sigma}(a - \\mu)\nwhere \\mu = \\frac{1}{H}\\sum_{i=1}^{H} a_i\nand \\sigma = \\sqrt{\\frac{1}{H}\\sum_{i=1}^{H}(a_i - \\mu)^2}\nwhere H is the number of hidden units (i.e. product of dimensions from 'axis'\nto the end.)\n", + "attributes": [ + { + "description": "(int) default to 1; Describes axis of the inputs. Defaults to one because the 0th axis most likely describes the batch size", + "name": "axis", + "option": "optional" + }, + { + "description": "(float) default to 0.001. Small value to be added to the stdev when dividing out by that value.
This prevents division by zero.", + "name": "epsilon", + "option": "optional" + }, + { + "description": "(bool) default to False; If true, this op will do affine transformation after normalization.", + "name": "elementwise_affine", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor which layer normalization will be applied to", + "name": "input" + }, + { + "description": "scale tensor for elementwise_affine, the shape should be the same as the dimensions of X begin from axis", + "name": "gamma" + }, + { + "description": "bias tensor for elementwise_affine, the shape should be the same as the dimensions of X begin from axis", + "name": "beta" + } + ], + "outputs": [ + { + "description": "Normalized values", + "name": "output" + }, + { + "description": "Mean values for each feature vector", + "name": "mean" + }, + { + "description": "Standard deviations for each feature vector", + "name": "stddev" + } + ], + "support_level": "default" + }, + { + "name": "LayerNormGradient", + "support_level": "default" + }, + { + "name": "LC", + "description": "\nThe locally connected operator consumes an input vector, a filter blob\nand a bias blob and computes the output. \nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n", + "inputs": [ + { + "name": null + }, + { + "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the locally connected op. The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "LC1D", + "description": "\nThe locally connected operator consumes an input vector, a 1D filter blob\nand a bias blob and computes the output. \nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed.
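A NumPy sketch of the LayerNorm math above, with epsilon added to the stdev as the attribute describes and no gamma/beta (matching the note that scale and bias are applied separately):

```
import numpy as np

def layer_norm(x, axis=1, epsilon=1e-3):
    # Treat dims [axis:] as one feature vector per row: h = (a - mu) / sigma.
    rows = int(np.prod(x.shape[:axis]))
    x2d = x.reshape(rows, -1)
    mu = x2d.mean(axis=1, keepdims=True)
    sigma = np.sqrt(((x2d - mu) ** 2).mean(axis=1, keepdims=True)) + epsilon
    return ((x2d - mu) / sigma).reshape(x.shape), mu.ravel(), sigma.ravel()

x = np.random.randn(2, 8).astype(np.float32)
y, mean, stddev = layer_norm(x)
print(y.mean(axis=1))  # approximately 0 per feature vector
```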
As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n", + "inputs": [ + { + "name": null + }, + { + "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the locally connected op. The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "LC1DGradient", + "support_level": "default" + }, + { + "name": "LC2D", + "description": "\nThe locally connected operator consumes an input vector, a 2D filter blob\nand a bias blob and computes the output. \nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n", + "inputs": [ + { + "name": null + }, + { + "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the locally connected op. The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "LC2DGradient", + "support_level": "default" + }, + { + "name": "LC3D", + "description": "\nThe locally connected operator consumes an input vector, a 3D filter blob\nand a bias blob and computes the output. \nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed.
As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n", + "inputs": [ + { + "name": null + }, + { + "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the locally connected op. The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "LC3DGradient", + "support_level": "default" + }, + { + "name": "LCGradient", + "support_level": "default" + }, + { + "name": "LE", + "description": "\nPerforms element-wise less or equal than comparison **<=** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LE\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [ True False True True True True]\n\n```\n\n
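The example above compares equal-sized tensors; the broadcast path described earlier also works, assuming a size-1 right-hand side and `broadcast=1` (same workspace API as the other examples in this file):

```
from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()
op = core.CreateOperator(
    "LE",
    ["A", "B"],
    ["C"],
    broadcast=1,  # required; without it the shapes must match exactly
)
workspace.FeedBlob("A", np.array([1, 5, 2, 9, 12, 3]))
workspace.FeedBlob("B", np.array([4]))  # size-1 right-hand side
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("C"))  # [ True False  True False False  True]
```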
\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "LeakyRelu", + "description": "\nThe *LeakyRelu* op takes one input tensor $X$ and an argument $alpha$, and produces one output tensor $Y$ of the same shape as $X.$ The op performs the element wise leaky relu operation, defined as\n\n$$y=LeakyRelu(x) =\\begin{cases}\\alpha x & x < 0\\\\x & otherwise\\end{cases}$$\n\nThe default value of *alpha* is 0.01.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/leaky_relu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/leaky_relu_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LeakyRelu\",\n [\"X\"],\n [\"Y\"],\n alpha=0.01\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-0.91060215 0.09374836 2.1429708 ]\n [-0.748983 0.19164062 -1.5130422 ]\n [-0.29539835 -0.8530696 0.7673204 ]]\n\nY:\n [[-0.00910602 0.09374836 2.1429708 ]\n [-0.00748983 0.19164062 -0.01513042]\n [-0.00295398 -0.0085307 0.7673204 ]]\n\n```\n\n
\n\n\n", + "attributes": [ + { + "default": 0.01, + "description": "Coefficient of leakage.", + "name": "alpha", + "option": "optional", + "type": "float32" + } + ], + "inputs": [ + { + "description": "Input tensor of data to be operated on.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output tensor, calculated as described above.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "LeakyReluGradient", + "attributes": [ + { + "description": "Coefficient of leakage", + "name": "alpha", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "LearningRate", + "description": "\nLearning rate is a decreasing function of time. With low learning rates the\nimprovements will be linear. With high learning rates they will start to look\nmore exponential. Learning rate is controlled by the following arguments:\n\n\nRequired:\n `iterations`\n `base_lr`: base learning rate\n `policy`: this controls how the learning rate is applied, options are:\n `fixed`\n `step`: uses `stepsize`, `gamma`\n `exp`: uses `gamma`\n `gate`: uses 'multiplier_1', 'multiplier_2', `num_iter``\n `inv`: uses `gamma`, `power`\n `linearWarmup`: uses `start_multiplier`, `num_iter`\n `constantWarmup`: uses `multiplier`, `num_iter`\n `alter`: uses `active_first`, `active_period`, `inactive_period`\n `hill`: uses those in both `linearWarmup` and `inv`, plus `end_multiplier`\n `composite`: uses `sub_policy_num_iters` and additional args with format\n `cyclic`: uses `max_lr`, `stepsize`\n `cosine`: uses `min_lr`, `max_lr`, `period`, `t_mult`, `lr_shrink`\n `constantThenLinearWarmup`: uses `start_warmup_multiplier`, `constant_warmup_num_iter`, `linear_warmup_num_iter`\n `compositeCyclical`: uses `start_warmup_multiplier`, `constant_warmup_num_iter`, `linear_warmup_num_iter`, `cyclical_max_lr`, `cyclical_step_size`, `cyclical_decay`\n `compositeCosine`: uses `start_warmup_multiplier`, `constant_warmup_num_iter`, `linear_warmup_num_iter`, `cosine_max_lr`, `cosine_period`, `cosine_t_mult`, `cosine_lr_shrink`\n sub_policy_{sub_policy_index}_{sub_policy_arg}, for example:\n sub_policy_0_policy: \"exp\", sub_policy_0_gamma: 0.99,\n sub_policy_0_lr_scale: 1.2\n sub_policy_0_policy: \"fixed\", sub_policy_0_lr_scale: 1.0\n sub_policy_num_iters: [1000, 1000]\n\nOptional:\n `stepsize`: defaults to 0\n `max_lr`: defaults to 0.005\n `gamma`: defaults to 0\n `power`: defaults to 0\n `num_iter`: defaults to 0\n `start_multiplier`: defaults to 0\n `multiplier`: defaults to 0.5\n `multiplier_1`: defaults to 1\n `multiplier_2`: defaults to 1\n `m1`: defaults to 0.5, the first piece lr of piece warmup\n `n1`: defaults to 0, iter threshold of the first piece lr\n `m2`: defaults to 0.5, the second piece lr of piece warmup\n `n2`: defaults to 0, iter threshold of the second piece lr\n `m3`: defaults to 0.5, the third piece lr of piece warmup\n `start_warmup_multiplier`: defaults to 0.1, part of constantThenLinearWarmup\n `constant_warmup_num_iter`: defaults to 10000000, part of constantThenLinearWarmup and constantThenLinearWarmup\n `linear_warmup_num_iter`: defaults to 10000000, part of constantThenLinearWarmup, CompositeCyclicalLRPolicy, CompositeCosineLRPolicy\n `cyclical_max_lr`: defaults to 0.05, part of CompositeCyclicalLRPolicy\n `cyclical_step_size`: defaults to 1000000, part of CompositeCyclicalLRPolicy\n `cyclical_decay`: defaults to 1.0, part of CompositeCyclicalLRPolicy\n `cosine_min_lr`:defaults to 0.01, part of CompositeCosineLRPolicy\n `cosine_max_lr`:defaults to 0.05, part of 
CompositeCosineLRPolicy\n `cosine_period`:defaults to 50, part of CompositeCosineLRPolicy\n `cosine_t_mult`:defaults to 1.0, part of CompositeCosineLRPolicy\n `cosine_lr_shrink`:defaults to 0.99, part of CompositeCosineLRPolicy\n\nUsage:\n train_net.LearningRate(*iterations*, \"*label*\", base_lr=*float*,\n policy=\"policy_name\", stepsize=*int*, gamma=*float*)\n\n\nExample usage:\n train_net.LearningRate(200, \"LR\", base_lr=-0.1,\n policy=\"step\", stepsize=20, gamma=0.9)\n", + "attributes": [ + { + "description": "(float, required) base learning rate", + "name": "base_lr", + "option": "optional" + }, + { + "description": "(float, default 1.0) strategy for gamma enforcement", + "name": "policy", + "option": "optional" + }, + { + "description": "(float, default 1.0) used only for inv policy type", + "name": "power", + "option": "optional" + }, + { + "description": "(float, default 1.0) momentum of change", + "name": "gamma", + "option": "optional" + }, + { + "description": "(float, default 1.0) sampling rate on iterations", + "name": "stepsize", + "option": "optional" + }, + { + "description": "(boolean, default True) in alter policy", + "name": "active_first", + "option": "optional" + }, + { + "description": "(int64_t, required) in alter policy", + "name": "active_period", + "option": "optional" + }, + { + "description": "(int64_t, required) in alter policy", + "name": "inactive_period", + "option": "optional" + }, + { + "description": "(int, default -1) maximum iterations in this training run", + "name": "max_iter", + "option": "optional" + }, + { + "description": "(int, default 0) number of iterations over which to warmup lr", + "name": "num_iter", + "option": "optional" + }, + { + "description": "(float, default 0) starting multiplier for learning rate", + "name": "start_multiplier", + "option": "optional" + }, + { + "description": "(float, default 0) end multiplier for learning rate", + "name": "end_multiplier", + "option": "optional" + }, + { + "description": "(float, default 0.5) constant multiplier for learning rate", + "name": "multiplier", + "option": "optional" + }, + { + "description": "(float, default 1) start multiplier for learning rate", + "name": "multiplier_1", + "option": "optional" + }, + { + "description": "(float, default 1) end multiplier for learning rate", + "name": "multiplier_2", + "option": "optional" + }, + { + "description": "(int array, default empty) number of iterations for each sub learning rate policy in composite policy", + "name": "sub_policy_num_iters", + "option": "optional" + }, + { + "description": "", + "name": "m1", + "option": "optional" + }, + { + "description": "", + "name": "n1", + "option": "optional" + }, + { + "description": "", + "name": "m2", + "option": "optional" + }, + { + "description": "", + "name": "n2", + "option": "optional" + }, + { + "description": "", + "name": "m3", + "option": "optional" + }, + { + "description": "(float, default 0.005) max learning rate", + "name": "max_lr", + "option": "optional" + }, + { + "description": "defaults to 0.1", + "name": "start_warmup_multiplier", + "option": "optional" + }, + { + "description": "defaults to 10000000", + "name": "constant_warmup_num_iter", + "option": "optional" + }, + { + "description": "defaults to 10000000", + "name": "linear_warmup_num_iter", + "option": "optional" + }, + { + "description": "defaults to 0.05, part of CompositeCyclicalLRPolicy", + "name": "cyclical_max_lr", + "option": "optional" + }, + { + "description": "defaults to 1000000, part of 
CompositeCyclicalLRPolicy", + "name": "cyclical_step_size", + "option": "optional" + }, + { + "description": "defaults to 0.999, part of CompositeCyclicalLRPolicy", + "name": "cyclical_decay", + "option": "optional" + }, + { + "description": "defaults to 0.01, part of CompositeCosineLRPolicy", + "name": "cosine_min_lr", + "option": "optional" + }, + { + "description": "defaults to 0.05, part of CompositeCosineLRPolicy", + "name": "cosine_max_lr", + "option": "optional" + }, + { + "description": "defaults to 50, part of CompositeCosineLRPolicy", + "name": "cosine_period", + "option": "optional" + }, + { + "description": "defaults to 1,0, part of CompositeCosineLRPolicy", + "name": "cosine_t_mult", + "option": "optional" + }, + { + "description": "defaults to 0.99, part of CompositeCosineLRPolicy", + "name": "cosine_lr_shrink", + "option": "optional" + } + ], + "inputs": [ + { + "description": "description needed", + "name": "input" + } + ], + "outputs": [ + { + "description": "description needed", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "LearningRateAdaption", + "description": "\n Learning Rate Adaption is an operation that perform one iteration of\n gradient descent based on learning rate:\n lr(k) = lr(k-1) - lr_alpha * df(k-1)/dlr,\n where df(k-1)/dlr is the gradient of objective function f on lr, and\n lr_alpha is a learning rate hyperparameter. It can be prove that\n df(k-1)/dlr equals INNERPRODUCT(grad(k-1), -grad(k-2)), where grad(k-1) is\n the grad of f(k-1) on parameters. When the argument\n \"normalized_lr_adaption\" is false, we simply perform the\n following update:\n lr(k) = lr(k-1) - lr_alpha * INNERPRODUCT(grad(k-1), grad(k-2)).\n If we set \"normalized_lr_adaption\" to be true, we do not directly apply\n INNERPRODUCT(grad(k-1), -grad(k-2)) as the grad. Instead, we perform the\n following update:\n lr(k) = lr(k-1) + lr_alpha * cosineSimilarity(grad(k-1), grad(k-2)).\n", + "attributes": [ + { + "description": "the learning rate for performing gradient descent on learning rate lr", + "name": "lr_alpha", + "option": "optional" + }, + { + "description": "whether to apply normalized lr adaption or not", + "name": "normalized_lr_adaption", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Learning rate", + "name": "lr" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "The effective grad", + "name": "effgrad" + } + ], + "outputs": [ + { + "description": "Updated learning rate", + "name": "output_lr" + } + ], + "support_level": "default" + }, + { + "name": "LengthsGather", + "description": "\nGather items from sparse tensor. Sparse tensor is described by items and\nlengths. This operator gathers items corresponding to lengths at the given\nindices. This deliberately doesn't return lengths of OUTPUTS so that both lists\nand maps can be supported without special cases. 
If you need lengths tensor for\n OUTPUT, use `Gather`.\n\nExample:\n ITEMS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n LENGTHS = [0, 2, 3, 1, 4]\n INDICES = [0, 2, 4]\n\n OUTPUT = [2, 3, 4, 6, 7, 8, 9]\n", + "inputs": [ + { + "description": "items tensor", + "name": "ITEMS" + }, + { + "description": "lengths tensor", + "name": "LENGTHS" + }, + { + "description": "indices into LENGTHS where items should be gathered", + "name": "INDICES" + } + ], + "outputs": [ + { + "description": "1-D tensor containing gathered items", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "LengthsIndicesInGradientMeanGradient", + "support_level": "default" + }, + { + "name": "LengthsIndicesInGradientSumGradient", + "support_level": "default" + }, + { + "name": "LengthsMax", + "description": "\nApplies 'Max' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. Values belonging to the same segment are\naggregated together and considered for the 'Max' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nMax computes the element-wise max of the input slices. Operation doesn't change the shape of the individual blocks.\n\n\nThe *LengthsMax* op takes two inputs *DATA* and *LENGTHS*, and produces a single output *OUTPUT*. The op finds the maximum value in each of the segments of *DATA*, where segments are defined by their lengths.\nFor example, if $DATA = [2,4,3,1,2,10]$ and $LENGTHS = [2,3,1]$ then $OUTPUT = [max([2,4]), max([3,1,2]), max([10])] = [4,3,10]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsMax\",\n [\"DATA\", \"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [ 4. 3. 10.]\n\n```\n\n
\n\n\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "LengthsMaxWithMainInputAndForwardOutputGradient", + "support_level": "default" + }, + { + "name": "LengthsMean", + "description": "\nApplies 'Mean' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. Values belonging to the same segment are\naggregated together and considered for the 'Mean' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nMean computes the element-wise mean of the input slices. Operation doesn't change the shape of the individual blocks.\n\n\nThe *LengthsMean* op takes two inputs *DATA* and *LENGTHS*, and produces a single output *OUTPUT*. The op finds the mean value in each of the segments of *DATA*, where segments are defined by their lengths.\nFor example, if $DATA = [2,4,3,1,2,10]$ and $LENGTHS = [2,3,1]$ then $OUTPUT = [mean([2,4]), mean([3,1,2]), mean([10])] = [3,2,10]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsMean\",\n [\"DATA\", \"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [ 3. 2. 10.]\n\n```\n\n
\n\n\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "LengthsMeanGradient", + "support_level": "default" + }, + { + "name": "LengthsPad", + "description": "\nGiven DATA tensor of rank r >= 1, and LENGTHS tensor of rank 1, pad each\nsegment in DATA with `value`, so that each segment's length is `target_length`.\nIf will throw, if there is segment of length larger than `target_length`.\n\nExample:\n DATA = [\n [2.3, 3.4],\n [4.5, 5.7],\n [6.8, 7.9],\n ]\n LENGTHS = [0, 1, 1, 1]\n and target_length = 2, padding value = -1.0\n OUTPUT = [\n [-1.0, -1.0],\n [-1.0, -1.0],\n [2.3, 3.4],\n [-1.0, -1.0],\n [4.5, 5.7],\n [-1.0, -1.0],\n [6.8, 7.9],\n [-1.0, -1.0],\n ]\n", + "attributes": [ + { + "description": "The value to pad the data", + "name": "padding_value", + "option": "optional" + }, + { + "description": "The target length of each segment", + "name": "target_length", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 1. First dimension must be equal to the size of lengths", + "name": "DATA" + }, + { + "description": "Tensor of int32 lengths of rank 1", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Padded DATA tensor", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "LengthsPartition", + "description": "\nLengthsPartition splits the input int tensor into multiple ones according to the\nsecond tensor. The first dimension is expected to be the tensor that describes\nlengths of the elements.\n\nTakes the second input and partitions it to shards according to the remainder of\nvalues modulo the number of partitions. It requires the second tensor to be\na 1D-tensor of the integral type. The first tensor should be 1D-tensor of int32\nthat would represent the lengths of the elements in the input. The number of\npartitions is derived as (num_output / num_input).\n\nIf additional inputs are present they must have the same shape as the first\ninput, optionally with extra trailing dimensions. They will be partitioned\naccordingly to the first input.\n\nOptional arg 'pack_first_input' transforms the first tensor values as\nX_ij / num_partitions.\n\nOutputs are ordered as\nX_0_part_0, X_1_part_0, ..., X_N-1_part_0, X_0_part_1, ..., X_N-1_part_K-1\n", + "attributes": [ + { + "description": "(int, default 0) If set, the operator transforms the first tensor values as floor(X_ij / num_partitions)", + "name": "pack_first_input", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor containing data to be partitioned. The number of input tensors might be greater than 1 but must have the same shape as the previous tensors.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output Partitions. The number of output tensors has to be a multiple of the number of input tensors.", + "name": "partitions" + } + ], + "support_level": "default" + }, + { + "name": "LengthsRangeFill", + "description": "\nThe *LengthsRangeFill* op takes a single input *lengths* and outputs a single tensor *range_sequence*. For each element of *lengths*, the op appends the range(0,lengths) vector to the end of *range_sequence*. 
For example, if input=[2,4,1], the output would be [0,1,0,1,2,3,0].\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsRangeFill\",\n [\"lengths\"],\n [\"range_sequence\"],\n)\n\nworkspace.FeedBlob(\"lengths\", np.array([2,4,1]).astype(np.int32))\nprint(\"lengths:\\n\", workspace.FetchBlob(\"lengths\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"range_sequence: \\n\", workspace.FetchBlob(\"range_sequence\"))\n\n```\n\n**Result**\n\n```\n\nlengths:\n [2 4 1]\nrange_sequence:\n [0 1 0 1 2 3 0]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "1D tensor of int32 or int64 segment lengths.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "1D tensor whose size is the sum of *lengths*", + "name": "range_sequence" + } + ], + "support_level": "default" + }, + { + "name": "LengthsSplit", + "description": "\nGiven input vector LENGTHS, and input n_split, LengthsSplit returns\na single output vector. It \"splits\" each length into n_split values which add\nup to the original length. It will attempt to do equal splits, and if not possible,\nit orders larger values first. If the n_split is larger than the length, zero\npadding will be applied.\n\ne.g. LENGTHS = [9 4 5]\n n_split = 3\n Y = [3 3 3 2 1 1 2 2 1]\n\ne.g. LENGTHS = [2, 1, 2]\n n_split = 3\n Y = [1 1 0 1 0 0 1 1 0]\n", + "attributes": [ + { + "description": "Number of splits for each element in LENGTHS", + "name": "n_split", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Mx1 Input tensor denoting INT32 lengths", + "name": "LENGTHS" + }, + { + "description": "(Optional) Number of splits for each element in LENGTHS (overrides argument)", + "name": "n_split" + } + ], + "outputs": [ + { + "description": "(M*n_split)x1 Output vector denoting split lengths", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "LengthsSum", + "description": "\nApplies 'Sum' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. Values belonging to the same segment are\naggregated together and considered for the 'Sum' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n\n\nThe *LengthsSum* op takes two inputs *DATA* and *LENGTHS*, and produces a single output *OUTPUT*. The op finds the sum in each of the segments of *DATA*, where segments are defined by their lengths.\nFor example, if $DATA = [2,4,3,1,2,10]$ and $LENGTHS = [2,3,1]$ then $OUTPUT = [sum([2,4]), sum([3,1,2]), sum([10])] = [6,6,10]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsSum\",\n [\"DATA\", \"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [ 6. 6. 10.]\n\n```\n\n
\n\n\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "LengthsSumGradient", + "support_level": "default" + }, + { + "name": "LengthsTile", + "description": "\nGiven DATA tensor of rank r >= 1, and LENGTHS tensor of rank 1, duplicate each\nentry of the outer-most dimension of DATA according to LENGTHS, and concatenate\nthem in an output tensor of rank r.\n\nExample:\n DATA = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n [6.8, 7.9],\n ]\n LENGTHS = [0, 1, 3, 2]\n OUTPUT = [\n [2.3, 3.4],\n [4.5, 5.7],\n [4.5, 5.7],\n [4.5, 5.7],\n [6.8, 7.9],\n [6.8, 7.9],\n ]\n", + "inputs": [ + { + "description": "Tensor of rank r >= 1. First dimension must be equal to the size of lengths", + "name": "DATA" + }, + { + "description": "Tensor of int32 lengths of rank 1", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Tensor of rank r", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "LengthsToOffsets", + "description": "\nGiven a vector of segment lengths, returns a vector of offsets from these lengths,\nwhich will have the same size as the input vector. Output is going to have\nthe same type as input. For long tensors explicit casting from int32 to int64\nmight be necessary prior to this op.\n\nFor example, `[1, 3, 0, 2]` transforms into `[0, 1, 4, 4]`.\n", + "inputs": [ + { + "description": "1D tensor of int32 or int64 segment lengths.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "1D tensor of the same shape and type as `lengths`", + "name": "offsets" + } + ], + "support_level": "default" + }, + { + "name": "LengthsTopK", + "description": "\nApply TopK to each segment of the input tensor, where segments are defined by\ntheir LENGTHS, and concatenate them in an output tensor of\nshape=(SIZE(LENGTHs), k). In case there's less than k values in a segment,\nthe output value will be padded by 0, and the corresponding output indices will\nbe padded by -1.\n", + "attributes": [ + { + "description": "the number of top values to return for each segment, if the number of values is smaller than k, the values would be padded with 0 and indices would be padded with -1.", + "name": "k", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Tensor of rank 1. First dimension must be equal to the sum of lengths", + "name": "DATA" + }, + { + "description": "Tensor of int32 lengths of rank 1", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Output top k elements for each segment, withshape=(SIZE(lengths), k)", + "name": "TopKValue" + }, + { + "description": "Output indices in DATA corresponding to value in TopKValue", + "name": "TopKIndices" + } + ], + "support_level": "default" + }, + { + "name": "LengthsTopKGradient", + "support_level": "default" + }, + { + "name": "LengthsToRanges", + "description": "\nGiven a vector of segment lengths, calculates offsets of each segment and packs\nthem next to the lengths. 
For an input vector of length N, the output is an N x 2\nmatrix of (offset, length) pairs, one per segment.\n\nFor example, `[1, 3, 0, 2]` transforms into `[[0, 1], [1, 3], [4, 0], [4, 2]]`.\n", + "inputs": [ + { + "description": "1D tensor of int32 segment lengths.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "2D tensor of shape len(lengths) x 2 and the same type as `lengths`", + "name": "ranges" + } + ], + "support_level": "default" + }, + { + "name": "LengthsToSegmentIds", + "description": "\nGiven a vector of segment lengths (*lengths*), the *LengthsToSegmentIds* op returns a zero-based, consecutive vector of segment ids (*segment_ids*). For example, *lengths=[1, 3, 0, 2]* will produce *segment_ids=[0, 1, 1, 1, 3, 3]*. In general, the inverse operation is *SegmentIdsToLengths*. Notice though that trailing empty sequence lengths can't be properly recovered from segment ids.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsToSegmentIds\",\n [\"lengths\"],\n [\"segment_ids\"],\n)\n\nworkspace.FeedBlob(\"lengths\", np.array([1, 3, 0, 2]).astype(np.int32))\nprint(\"lengths:\\n\", workspace.FetchBlob(\"lengths\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"segment_ids: \\n\", workspace.FetchBlob(\"segment_ids\"))\n\n```\n\n**Result**\n\n```\n\nlengths:\n [1 3 0 2]\nsegment_ids:\n [0 1 1 1 3 3]\n\n```\n\n
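\nThe caveat above about trailing empty sequences can be checked directly; this extra snippet (ours, not from the upstream docs) appends a trailing zero to the lengths and produces the same segment ids:\n\n```\n\nop = core.CreateOperator(\n \"LengthsToSegmentIds\",\n [\"lengths\"],\n [\"segment_ids\"],\n)\n\nworkspace.FeedBlob(\"lengths\", np.array([1, 3, 0, 2, 0]).astype(np.int32))\nworkspace.RunOperatorOnce(op)\n# Expected: [0 1 1 1 3 3] -- identical to the result for [1, 3, 0, 2],\n# so the trailing zero cannot be recovered from the segment ids.\nprint(\"segment_ids: \\n\", workspace.FetchBlob(\"segment_ids\"))\n\n```\n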
\n\n", + "inputs": [ + { + "description": "1D tensor of int32 or int64 segment lengths.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "1D tensor of length *sum(lengths)*", + "name": "segment_ids" + } + ], + "support_level": "default" + }, + { + "name": "LengthsToShape", + "description": "\nThis operator takes a list of $N$ equal integers as input which represent the lengths of $N$ vectors. The output is the calculated shape of the matrix if the $N$ integers were combined into a single matrix.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsToShape\",\n [\"X\"],\n [\"Y\"]\n)\n\n# Create X: a list of 10 lengths, each equal to 2\nX = np.array([2,2,2,2,2,2,2,2,2,2])\nprint(\"X:\\n\",X)\n\n# Feed X into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.int32))\n\n# Run op\nworkspace.RunOperatorOnce(op)\n\n# Collect output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [2 2 2 2 2 2 2 2 2 2]\nY:\n [10 2]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "List, of length $N$, of equal integers representing the lengths of several vectors.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Vector of length 2 describing the dimensions of the data if the $N$ vectors from the input were combined to a single matrix.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "LengthsToWeights", + "description": "\nSimilar as LengthsToSegmentIds but output vector of segment\nweights derived by lengths. i.e 1/pow(length, power)\n", + "attributes": [ + { + "description": "n of 1/pow(length,n) for normalization", + "name": "power", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1-D int32_t or int64_t tensor of lengths", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "1-D float tensor of weights by length", + "name": "a vector of weights" + } + ], + "support_level": "default" + }, + { + "name": "LengthsWeightedSum", + "description": "\nApplies 'WeightedSum' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. Values belonging to the same segment are\naggregated together and considered for the 'WeightedSum' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n\n\nThe *LengthsWeightedSum* op takes three inputs *DATA*, *LENGTHS*, and *SCALARS*, and produces a single output *OUTPUT*. The op finds the weighted sum in each of the segments of *DATA*, where segments are defined by their lengths. Before calculating the sums, the input *DATA* is weighted by the contents of *SCALARS*.\nFor example, if $DATA = [2,4,3,1,2,10]$, $SCALARS = [8, 2, 1, 4, 1, 0.6]$, and $LENGTHS = [2,3,1]$, then $OUTPUT = [sum([8*2,2*4]), sum([1*3,4*1,1*2]), sum([0.6*10])] = [24,9,6]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsWeightedSum\",\n [\"DATA\", \"SCALARS\",\"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"SCALARS\", np.array([8, 2, 1, 4, 1, 0.6]).astype(np.float32))\nprint(\"SCALARS:\\n\", workspace.FetchBlob(\"SCALARS\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nSCALARS:\n [8. 2. 1. 4. 1. 0.6]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [24. 9. 6.]\n\n```\n\n
\n\n\n ", + "attributes": [ + { + "description": "Produce also gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "LengthsWeightedSumGradient", + "support_level": "default" + }, + { + "name": "LengthsWeightedSumWithMainInputGradient", + "support_level": "default" + }, + { + "name": "Load", + "description": "\nThe Load operator loads a set of serialized blobs from a db or multiple dbs. It\ntakes $[0, \\infty)$ number of inputs and $[0, \\infty)$ number of outputs, using\nthe db keys to match the db entries with the outputs.\n\nIf at least one input is passed, then it is assumed that that input blobs are a\nset of DBReaders to load from. Otherwise the `db` or `dbs` argument is used to load\nblobs from one single db or multiple dbs respectively. `db_type` argument is used\nto specify the type of the input db/dbs.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/load_save_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Load\",\n [],\n [\"X\", \"Y\"],\n db=\"test_db\",\n db_type=\"lmdb\"\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n
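\nTo pull every blob in the db into the workspace without naming outputs, the `load_all` argument documented below can be set; a minimal sketch (ours), reusing the same hypothetical \"test_db\":\n\n```\n\nop = core.CreateOperator(\n \"Load\",\n [],\n [],\n db=\"test_db\",\n db_type=\"lmdb\",\n load_all=1\n)\n\nworkspace.RunOperatorOnce(op)\n# All blobs stored in the db are now present in the workspace.\nprint(workspace.Blobs())\n\n```\n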
\n\n", + "attributes": [ + { + "default": 0, + "description": "If set to non-zero, save the db directly to the path specified by the `db` arg. If not set (default), prepend the path of the current root folder of the workspace to the path specified by the `db` arg.", + "name": "absolute_path", + "option": "optional", + "type": "int64" + }, + { + "default": "", + "description": "Blobs will be prefixed with this when loading. Useful for avoiding collisions with blobs existing in the workspace. The output blob names specified to this op should include this prefix.", + "name": "add_prefix", + "option": "optional", + "type": "string" + }, + { + "default": "", + "description": "Characters in the provided blob names that match `strip_prefix` will be removed prior to saving. Also, characters that precede `strip_prefix` will be removed. Useful for removing device scope from blob names.", + "name": "strip_prefix", + "option": "optional", + "type": "string" + }, + { + "description": "The output path of the db. See the `absolute_path` arg details for options regarding the current root folder of the workspace.", + "name": "db", + "option": "optional", + "type": "string" + }, + { + "description": "List of paths to dbs to load blobs from. See the `absolute_path` arg details for options regarding the current root folder of the workspace.", + "name": "dbs", + "option": "optional", + "type": "string[]" + }, + { + "description": "(type: string)* Type of db to save (options: \"lmdb\", \"leveldb\", \"minidb\").", + "name": "db_type", + "option": "optional" + }, + { + "default": 0, + "description": "If nonzero, the blobs are loaded into the device that is specified in the serialized `BlobProto`. Otherwise, the device will be set as the one that the `Load` operator is being run under.", + "name": "keep_device", + "option": "optional", + "type": "int64" + }, + { + "default": 0, + "description": "If nonzero, will load all blobs pointed to by the db to the workspace overwriting/creating blobs as needed.", + "name": "load_all", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "If True, will allow not loading all the output blobs specified in the outputs.", + "name": "allow_incomplete", + "option": "optional", + "type": "boolean" + }, + { + "description": "If set, used instead of output blob names to specify which blobs in the db shall be loaded. Must be the same length as number of output blobs.", + "name": "source_blob_names", + "option": "optional", + "type": "string[]" + } + ], + "inputs": [ + { + "description": "*(type: List(DBReader))* [OPTIONAL] List of DBReaders to load from. Can use this instead of the `db`/`dbs` args.", + "name": "X, Y, ..." + } + ], + "support_level": "default" + }, + { + "name": "Log", + "description": "\nCalculates the natural log of the given input tensor ($ln(x)$), element-wise. This\noperation can be done in an in-place fashion too, by providing the same input\nand output blobs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/log_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Log\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[0.07341351 0.15404125 0.386613 ]\n [0.34090295 0.99727786 0.24141751]\n [0.32016268 0.8724168 0.93515724]]\nX after running op:\n[[-2.6116474 -1.8705349 -0.9503311 ]\n [-1.0761575 -0.00272586 -1.4212275 ]\n [-1.138926 -0.13648799 -0.06704059]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor computed as the natural log of the input tensor computed, element-wise.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "LogFatal", + "support_level": "default" + }, + { + "name": "Logit", + "description": "\nElementwise logit transform: logit(x) = log(x / (1 - x)), where x is the\ninput data clampped in (eps, 1-eps).\n", + "attributes": [ + { + "description": "small positive epsilon value, the default is 1e-6.", + "name": "eps (optional)", + "option": "optional" + } + ], + "inputs": [ + { + "description": "input float tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "output float tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "LogitGradient", + "attributes": [ + { + "description": "small positive epsilon value, the default is 1e-6.", + "name": "eps", + "option": "optional" + } + ], + "inputs": [ + { + "description": "input float tensor", + "name": "X" + }, + { + "description": "input float tensor", + "name": "dY" + } + ], + "outputs": [ + { + "description": "output float tensor", + "name": "dX" + } + ], + "support_level": "default" + }, + { + "name": "LongIndexCreate", + "description": "\nCreates a dictionary that maps int64 keys to consecutive integers\nfrom 1 to max_elements. Zero is reserved for unknown keys.\n", + "attributes": [ + { + "description": "Max number of elements, including the zero entry.", + "name": "max_elements", + "option": "optional" + } + ], + "outputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handler" + } + ], + "support_level": "default" + }, + { + "name": "LpNorm", + "description": "\nThis op computes the $L_p$ norm of the one dimensional input tensor $X$, and outputs a one dimensional output tensor $Y$. Here, the $L_p$ norm is calculated as\n\n$$L_p(\\mathbf{x}) = \\sum_i x_i^p$$\n\nThis op supports $p$ values of 1 or 2. If the average argument is set, the norm is calculated as Lp_averaged_norm(x) is defined as Lp_averaged_norm(x) = LpNorm(x) / size(x).\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/lpnorm_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/lpnorm_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LpNorm\",\n [\"X\"],\n [\"Y\"],\n p=2\n)\nX = np.array([5., 2.])\nprint(\"X:\\n\",X)\n\n# Feed X into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [5. 2.]\nY:\n [29.]\n\n```\n\n
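\nA follow-up sketch (ours, not from the upstream docs) for the `average` argument documented below, which divides the norm by the number of elements:\n\n```\n\nop = core.CreateOperator(\n \"LpNorm\",\n [\"X\"],\n [\"Y\"],\n p=2,\n average=True\n)\n\nworkspace.FeedBlob(\"X\", np.array([5., 2.]).astype(np.float32))\nworkspace.RunOperatorOnce(op)\n# Expected: [14.5], i.e. (5^2 + 2^2) / size(X) = 29 / 2\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n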
\n\n", + "attributes": [ + { + "default": 2, + "description": "Order of the norm in p-norm.", + "name": "p", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "Whether we calculate norm or averaged_norm.The Lp_averaged_norm(x) is defined as Lp_averaged_norm(x) = LpNorm(x) / size(x)", + "name": "average", + "option": "optional", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "1D Input tensor of data to be operated on.", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Z" + } + ], + "support_level": "default" + }, + { + "name": "LpNormGradient", + "description": "\nGiven one input float tensor X, derivative dout, and produces one output\nfloat tensor dX. dX is the derivative of the Lp norm of tensor X, computed as\ndx = d(sum over |x^p|)/dx, in which p is either 1 or 2(currently only\nsupports l1 and l2 norm) determined by the argument p.\n", + "attributes": [ + { + "description": "Order of the norm in p-norm", + "name": "p", + "option": "optional" + }, + { + "description": "whehther we calculate norm or averaged_norm.The Lp_averaged_norm(x) is defined asLp_averaged_normgradient(x) = LpNormGradient(x) / size(x)", + "name": "average", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + }, + { + "description": "1D input tensor", + "name": "dout" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "dx" + } + ], + "support_level": "default" + }, + { + "name": "LpPool", + "description": "\n`LpPool` consumes an input blob and applies max pooling across the the blob according to kernel sizes, stride sizes, pad lengths and dilation. $L_p$ pooling consists of taking the $L_p$ norm of a subset of the input tensor according to the kernel size and downsampling the data into the output blob for further processing.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the output blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/lp_pool_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LpPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n p=2.0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) # NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[[[-1.1113514 -1.1173418 -0.1504435 0.1327146 -1.2221841 -0.5654315 ]\n [-1.9209646 -0.04675794 0.8604731 1.2042469 0.28154245 0.38656202]\n [-0.8772837 -0.03264008 0.26222762 0.28526652 0.321102 -2.5891325 ]\n [-0.9248281 1.440776 -0.56832 -0.6017927 1.2262512 -2.1443934 ]\n [ 0.5194415 -1.6858683 0.45221648 0.65029615 -0.8574544 0.8121054 ]\n [ 0.25902653 0.4934758 0.49870652 -0.48134378 -0.9178449 -0.07626943]]]]\n\nY:\n [[[[2.4851248 1.49361 1.4290358]\n [1.9240153 0.9139378 3.5928857]\n [1.8500228 1.0525136 1.4976646]]]]\n\n```\n\n
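\nPlugging this example into the dimension formula above: with $dim_{in}=6$, $kernel=2$, $pad=0$, and $stride=2$, each spatial dimension becomes $\\frac{6-2+2*0}{2}+1=3$, which matches the 3x3 output.\n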
\n\n", + "attributes": [ + { + "description": "(*float*): type of $L_p$ norm to use (default=2.0)", + "name": "p", + "option": "optional" + }, + { + "description": "(*int*): the size of the window to take a max over", + "name": "kernel", + "option": "optional" + }, + { + "description": "(*int*): the stride of the window", + "name": "stride", + "option": "optional" + }, + { + "description": "(*int*): implicit zero padding to be added on both sides", + "name": "pad", + "option": "optional" + }, + { + "description": "(*int*): parameter that controls the stride of elements in the window", + "name": "dilation", + "option": "optional" + }, + { + "description": "(*string*): order of blob dimensions (default=\"NCHW\")", + "name": "order", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "LpPoolGradient", + "support_level": "default" + }, + { + "name": "LRN", + "category": "Normalization", + "description": "\n\n`LRN` applies Local Response Normalization to an input blob. This operation performs\na kind of \"lateral inhibition\" by normalizing over local input regions, where\nnormalization is applied across channels. This operator is typically used to\nnormalize an unbounded activation (such as ReLU). The output shape is the same as\nthe input shape. The `brew` module has a wrapper for this operator for use in a\n`ModelHelper` object.\n\nThe formula for LRN is as follows:\n\n$$b_{c} = a_{c}(bias + \\frac{\\alpha}{n}\\sum_{c'=max(0,c-n/2)}^{min(N-1,c+n/2)} a_{c'}^2 )^{-\\beta}$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/local_response_normalization_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/local_response_normalization_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\"LRN\",\n [\"X\"],\n [\"Y\", \"Y_scale\"],\n size=11,\n alpha=0.001,\n beta=0.5,\n bias=2.0,\n order=\"NHWC\"\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 6, 6, 1).astype(np.float32)) # NHWC\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\nprint(\"Y_scale:\\n\", workspace.FetchBlob(\"Y_scale\"))\n```\n\n**Result**\n\n```\nX:\n [[[[ 0.72985137]\n [-0.3753357 ]\n [ 2.7344604 ]\n [-0.5937792 ]\n [ 0.38440478]\n [-2.1659644 ]]\n\n [[-0.92846817]\n [-0.9996144 ]\n [ 0.212943 ]\n [-1.968045 ]\n [-0.77839696]\n [ 0.45492038]]\n\n [[-0.11263168]\n [ 1.9901097 ]\n [ 0.19275683]\n [ 0.15630436]\n [ 0.7536298 ]\n [-0.77339894]]\n\n [[ 0.8353551 ]\n [-0.7784452 ]\n [ 1.779317 ]\n [ 0.22421335]\n [ 1.3846219 ]\n [-3.0546608 ]]\n\n [[ 0.09977621]\n [ 2.2071757 ]\n [ 0.79971045]\n [ 3.563886 ]\n [-0.7169287 ]\n [ 0.77170426]]\n\n [[-1.4296649 ]\n [ 0.19181213]\n [ 0.45961624]\n [-1.0201577 ]\n [ 0.62854475]\n [-0.6395456 ]]]]\n\nY:\n [[[[ 0.5160766 ]\n [-0.26540157]\n [ 1.9332271 ]\n [-0.41986194]\n [ 0.27181432]\n [-1.5314047 ]]\n\n [[-0.6565133 ]\n [-0.7068181 ]\n [ 0.15057328]\n [-1.3914955 ]\n [-0.5504022 ]\n [ 0.32167578]]\n\n [[-0.0796426 ]\n [ 1.4070934 ]\n [ 0.13629955]\n [ 0.11052381]\n [ 0.53288984]\n [-0.5468682 ]]\n\n [[ 0.5906759 ]\n [-0.5504363 ]\n [ 1.2580767 ]\n [ 0.1585426 ]\n [ 0.9790328 ]\n [-2.1595135 ]]\n\n [[ 0.07055242]\n [ 1.5605361 ]\n [ 0.5654725 ]\n [ 2.5193207 ]\n [-0.50693923]\n [ 0.54567 ]]\n\n [[-1.0108787 ]\n [ 0.13563155]\n [ 0.3249962 ]\n [-0.72134334]\n [ 0.44444424]\n [-0.45222285]]]]\nY_scale:\n [[[[2.0000484]\n [2.0000129]\n [2.0006797]\n [2.000032 ]\n [2.0000134]\n [2.0004265]]\n\n [[2.0000784]\n [2.0000908]\n [2.000004 ]\n [2.0003521]\n [2.000055 ]\n [2.0000188]]\n\n [[2.0000012]\n [2.00036 ]\n [2.0000033]\n [2.0000021]\n [2.0000517]\n [2.0000544]]\n\n [[2.0000634]\n [2.000055 ]\n [2.0002878]\n [2.0000045]\n [2.0001743]\n [2.0008483]]\n\n [[2.000001 ]\n [2.000443 ]\n [2.0000582]\n [2.0011547]\n [2.0000467]\n [2.0000541]]\n\n [[2.0001857]\n [2.0000033]\n [2.0000193]\n [2.0000947]\n [2.000036 ]\n [2.0000372]]]]\n```\n\n
\n\n", + "attributes": [ + { + "default": 0, + "description": "Amount of neighboring channels to sum over for normalization", + "name": "size", + "option": "optional", + "type": "int64" + }, + { + "default": 0.0, + "description": "Multiplicative (scaling) factor.", + "name": "alpha", + "option": "optional", + "type": "float32" + }, + { + "default": 0.0, + "description": "Exponent.", + "name": "beta", + "option": "optional", + "type": "float32" + }, + { + "default": 1.0, + "description": "Additive factor.", + "name": "bias", + "option": "optional", + "type": "float32" + }, + { + "default": 0, + "description": "Order of blob dimensions.", + "name": "order", + "option": "optional", + "type": "float32" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor (ReLU output).", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + }, + { + "description": "*(type: Tensor``)* Output scale.", + "name": "Y_scale" + } + ], + "support_level": "default" + }, + { + "name": "LRNGradient", + "support_level": "default" + }, + { + "name": "LSTMUnit", + "description": "\nLSTMUnit computes the activations of a standard LSTM (without peephole\nconnections), in a sequence-length aware fashion.\n\nConcretely, given the (fused) inputs X (TxNxD), the previous cell\nstate (NxD), and the sequence lengths (N), computes the LSTM\nactivations, avoiding computation if the input is invalid (as in, the\nvalue at X{t][n] >= seqLengths[n].\n\n", + "attributes": [ + { + "description": "Bias term to add in while calculating forget gate", + "name": "forget_bias", + "option": "optional" + }, + { + "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.", + "name": "sequence_lengths", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "LSTMUnitGradient", + "attributes": [ + { + "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.", + "name": "sequence_lengths", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "LT", + "description": "\nPerforms element-wise less than comparison **<** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LT\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [False False True False False True]\n\n```\n\n
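\nA broadcast sketch (ours, not from the upstream docs), comparing a (2, 3) tensor against a length-3 vector via the suffix-matching rule listed above:\n\n```\n\nop = core.CreateOperator(\n \"LT\",\n [\"A\", \"B\"],\n [\"C\"],\n broadcast=1\n)\n\nworkspace.FeedBlob(\"A\", np.array([[1, 5, 2], [9, 12, 3]]))\nworkspace.FeedBlob(\"B\", np.array([4, 5, 4]))\nworkspace.RunOperatorOnce(op)\n# Expected: [[ True False True] [False False True]]\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n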
\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "MakeTwoClass", + "description": "\nGiven a vector of probabilities, this operator transforms this into a 2-column\n matrix with complimentary probabilities for binary classification. In explicit\n terms, given the vector X, the output Y is vstack(1 - X, X).\n ", + "inputs": [ + { + "description": "Input vector of probabilities", + "name": "X" + } + ], + "outputs": [ + { + "description": "2-column matrix with complimentary probabilities of X for binary classification", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "MakeTwoClassGradient", + "support_level": "default" + }, + { + "name": "MapToKeyValue", + "description": "Convert a map blob into key and value blob pairs", + "inputs": [ + { + "description": "Blob reference to the map", + "name": "map blob" + } + ], + "outputs": [ + { + "description": "Blob reference to the key", + "name": "key blob" + }, + { + "description": "Blob reference to the value", + "name": "value blob" + } + ], + "support_level": "default" + }, + { + "name": "MarginRankingCriterion", + "description": "\nMarginRankingCriterion takes two input data X1 (Tensor),\nX2 (Tensor), and label Y (Tensor) to produce the\nloss (Tensor) where the loss function,\nloss(X1, X2, Y) = max(0, -Y * (X1 - X2) + margin), is applied to\nthe tensor elementwise.\n\nIf y == 1 then it assumed the first input should be ranked higher\n(have a larger value) than the second input, and vice-versa for\ny == -1.\n", + "attributes": [ + { + "description": "The margin value as a float. Default is 1.0.", + "name": "margin", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The left input vector as a 1-dim TensorCPU.", + "name": "X1" + }, + { + "description": "The right input vector as a 1-dim TensorCPU.", + "name": "X2" + }, + { + "description": "The label as a 1-dim TensorCPU with int value of 1 or -1.", + "name": "Y" + } + ], + "outputs": [ + { + "description": "The output loss with the same dimensionality as X1.", + "name": "loss" + } + ], + "support_level": "default" + }, + { + "name": "MarginRankingCriterionGradient", + "description": "\nMarginRankingCriterionGradient takes both X1, X2, Y and dY and\nuses them to update dX1, and dX2 according to the chain rule\nand derivatives of the loss function.\n", + "support_level": "default" + }, + { + "name": "MatMul", + "description": "\nMatrix multiplication $Y = A * B$, where `A` has size (M x K), `B` has size\n(K x N), and `Y` will have a size (M x N). 
To transpose `A` or `B` before\nmultiplication, pass 1 to the `trans_a` and/or `trans_b` arguments; the\n`axis_a` and `axis_b` arguments control where the first and second dimensions\nof the respective matrices are divided.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/matmul_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MatMul\",\n [\"A\", \"B\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"A\", np.random.randint(10, size=(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"B\", np.random.randint(10, size=(3,3)).astype(np.float32))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nA: [[1. 8. 3.]\n [6. 4. 4.]\n [5. 4. 7.]]\nB: [[4. 0. 3.]\n [3. 1. 1.]\n [8. 5. 8.]]\nY: [[52. 23. 35.]\n [68. 24. 54.]\n [88. 39. 75.]]\n```\n\n
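\nA transpose sketch (ours, not from the upstream docs) for the `trans_b` argument documented below; `B` is fed as (N x K) and transposed by the op:\n\n```\n\nop = core.CreateOperator(\n \"MatMul\",\n [\"A\", \"B\"],\n [\"Y\"],\n trans_b=1\n)\n\n# A is (M=2 x K=3); B is (N=2 x K=3) and is transposed to (K x N) internally.\nworkspace.FeedBlob(\"A\", np.array([[1., 2., 3.], [4., 5., 6.]]).astype(np.float32))\nworkspace.FeedBlob(\"B\", np.array([[1., 0., 1.], [0., 1., 0.]]).astype(np.float32))\nworkspace.RunOperatorOnce(op)\n# Expected: [[ 4. 2.] [10. 5.]]\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n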
\n\n", + "attributes": [ + { + "default": 1, + "description": "Exclusive axis that divides the first and second dimension of matrix `A`.", + "name": "axis_a", + "option": "optional", + "type": "int64" + }, + { + "default": 1, + "description": "Exclusive axis that divides the first and second dimension of matrix `B`.", + "name": "axis_b", + "option": "optional", + "type": "int64" + }, + { + "default": 0, + "description": "Pass 1 to transpose `A` before multiplication and after the dimension adjustment using `axis_a`.", + "name": "trans_a", + "option": "optional", + "type": "int64" + }, + { + "default": 0, + "description": "Pass 1 to transpose `B` before multiplication and after the dimension adjustment using `axis_b`.", + "name": "trans_b", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* 2D matrix of size (M x K).", + "name": "A" + }, + { + "description": "*(type: Tensor``)* 2D matrix of size (K x N).", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* 2D matrix of size (M x N).", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Max", + "description": "\nElement-wise max of an arbitrary number of input tensors. This operation can be\nperformed in-place, by using the first input blob as the output blob. All inputs\nmust have the same shape and data type, and the output will have the same shape\nas the inputs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/minmax_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Max\",\n [\"X\", \"Y\", \"Z\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Z\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"Z:\", workspace.FetchBlob(\"Z\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Max:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[0.4496477 0.07061381 0.7139333 ]\n [0.83203 0.05970785 0.72786295]\n [0.75988126 0.04601283 0.32820013]]\nY:\n[[0.05683139 0.16872478 0.671098 ]\n [0.70739156 0.09878621 0.03416285]\n [0.34087983 0.94986707 0.67263436]]\nZ:\n[[0.48051122 0.07141234 0.85264146]\n [0.77086854 0.22082241 0.13154659]\n [0.42401117 0.995431 0.4263775 ]]\nMax:\n[[0.48051122 0.16872478 0.85264146]\n [0.83203 0.22082241 0.72786295]\n [0.75988126 0.995431 0.67263436]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* List of input tensors with the same shape.", + "name": "X, Y, ..." + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as input(s).Contains the maximum valued element at each location.", + "name": "M" + } + ], + "support_level": "default" + }, + { + "name": "MaxGradient", + "support_level": "default" + }, + { + "name": "MaxPool", + "category": "Pool", + "description": "MaxPool \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) # NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n
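\nApplying the dimension formula above to this example: $\\frac{6-2+2*0}{2}+1=3$, so the 6x6 input pools down to the 3x3 output shown.\n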
\n\n", + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "name": "pad" + }, + { + "name": "cudnn_exhaustive_search", + "type": "boolean", + "visible": false + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "MaxPool1D", + "description": "MaxPool1D \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) # NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "MaxPool1DGradient", + "support_level": "default" + }, + { + "name": "MaxPool2D", + "description": "MaxPool2D \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) # NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "MaxPool2DGradient", + "support_level": "default" + }, + { + "name": "MaxPool3D", + "description": "MaxPool3D \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) # NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "MaxPool3DGradient", + "support_level": "default" + }, + { + "name": "MaxPoolGradient", + "support_level": "default" + }, + { + "name": "Mean", + "description": "\nElement-wise mean of an arbitrary number of input tensors. This operation can be\nperformed in-place, by using the first input blob as the output blob. All inputs\nmust have the same shape and data type, and the output will have the same shape\nas the inputs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/mean_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Mean\",\n [\"X\", \"Y\", \"Z\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Z\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"Z:\", workspace.FetchBlob(\"Z\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Mean:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[0.6035237 0.5305746 0.6298913 ]\n [0.9169737 0.01280353 0.16286302]\n [0.6017664 0.9946255 0.05128575]]\nY:\n[[0.07544111 0.45371833 0.08460239]\n [0.9708728 0.7422064 0.7933344 ]\n [0.97671497 0.3411384 0.73818344]]\nZ:\n[[0.08837954 0.90187573 0.46734726]\n [0.6308827 0.8719029 0.39888734]\n [0.90059936 0.92883426 0.5695987 ]]\nMean:\n[[0.25578147 0.6287229 0.39394698]\n [0.8395764 0.5423043 0.45169494]\n [0.8263602 0.75486606 0.45302266]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* List of input tensors with the same shape.", + "name": "X, Y, ..." + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with the same dimensions as inputs. Contains the mean values of the input tensors calculated element-wise.", + "name": "M" + } + ], + "support_level": "default" + }, + { + "name": "MeanGradient", + "support_level": "default" + }, + { + "name": "MergeDenseFeatureTensors", + "description": "Merge given multi-feature dense tensors into one multi-feature tensor.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "attributes": [ + { + "description": "feature ids", + "name": "feature_ids", + "option": "optional" + } + ], + "inputs": [ + { + "description": "", + "name": "in1" + }, + { + "description": ".presence", + "name": "in1_presence" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": "out_keys" + }, + { + "description": ".values", + "name": "out_values" + } + ], + "support_level": "default" + }, + { + "name": "MergeDim", + "description": "\nMerge first two dimensions in a single dimension with size dim(0) * dim(1).\n", + "inputs": [ + { + "description": "An input tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reshaped tensor.", + "name": "reshaped" + } + ], + "support_level": "default" + }, + { + "name": "MergeIdLists", + "description": "\nMergeIdLists: Merge multiple ID_LISTs into a single ID_LIST.\n\nAn ID_LIST is a list of IDs (may be ints, often longs) that represents a single\nfeature. As described in https://caffe2.ai/docs/sparse-operations.html, a batch\nof ID_LIST examples is represented as a pair of lengths and values where the\n`lengths` (int32) segment the `values` or ids (int32/int64) into examples.\n\nGiven multiple inputs of the form lengths_0, values_0, lengths_1, values_1, ...\nwhich correspond to lengths and values of ID_LISTs of different features, this\noperator produces a merged ID_LIST that combines the ID_LIST features. The\nfinal merged output is described by a lengths and values vector.\n\nWARNING: The merge makes no guarantee about the relative order of ID_LISTs\nwithin a batch. 
This can be an issue if ID_LISTs are order-sensitive.\n", + "inputs": [ + { + "description": "Lengths of the ID_LISTs batch for first feature", + "name": "lengths_0" + }, + { + "description": "Values of the ID_LISTs batch for first feature", + "name": "values_0" + } + ], + "outputs": [ + { + "description": "Lengths of the merged ID_LISTs batch", + "name": "merged_lengths" + }, + { + "description": "Values of the merged ID_LISTs batch", + "name": "merged_values" + } + ], + "support_level": "default" + }, + { + "name": "MergeMultiListFeatureTensors", + "description": "Merge given multi-feature tensors with list features into one.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".keys", + "name": "in1_keys" + }, + { + "description": ".values.lengths", + "name": "in1_values_lengths" + }, + { + "description": ".values.values", + "name": "in1_values_values" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": "out_keys" + }, + { + "description": ".values.lengths", + "name": "out_values_lengths" + }, + { + "description": ".values.values", + "name": "out_values_values" + } + ], + "support_level": "default" + }, + { + "name": "MergeMultiListFeatureTensorsGradient", + "description": "Explode given multi-feature tensors with list features into many.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".values.lengths", + "name": "in1_values_lengths" + }, + { + "description": ".values.values_grad", + "name": "out_values_values_grad" + } + ], + "outputs": [ + { + "description": ".values.values_grad", + "name": "in1_values_values_grad" + } + ], + "support_level": "default" + }, + { + "name": "MergeMultiMapFeatureTensors", + "description": "Merge given multi-feature tensors with map features into one.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence 
bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".keys", + "name": "in1_keys" + }, + { + "description": ".values.lengths", + "name": "in1_values_lengths" + }, + { + "description": ".values.keys", + "name": "in1_values_keys" + }, + { + "description": ".values.values", + "name": "in1_values_values" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": "out_keys" + }, + { + "description": ".values_lengths", + "name": "out_values_lengths" + }, + { + "description": ".values.keys", + "name": "out_values_keys" + }, + { + "description": ".values.values", + "name": "out_values_values" + } + ], + "support_level": "default" + }, + { + "name": "MergeMultiMapFeatureTensorsGradient", + "description": "Explode given multi-feature tensors with map features into many.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".values.lengths", + "name": "in1_values_lengths" + }, + { + "description": ".values.values_grad", + "name": "out_values_values_grad" + } + ], + "outputs": [ + { + "description": ".values.values_grad", + "name": "in1_values_values_grad" + } + ], + "support_level": "default" + }, + { + "name": "MergeMultiScalarFeatureTensors", + "description": "Merge given multi-feature tensors with scalar features into one.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".keys", + "name": "in1_keys" + }, + { + "description": ".values", + "name": "in1_values" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": 
".keys", + "name": "out_keys" + }, + { + "description": ".values", + "name": "out_values" + } + ], + "support_level": "default" + }, + { + "name": "MergeMultiScalarFeatureTensorsGradient", + "description": "Explode given multi-feature tensors with scalar features into many.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".values_grad", + "name": "out_values_grad" + } + ], + "outputs": [ + { + "description": ".values_grad", + "name": "in1_values_grad" + } + ], + "support_level": "default" + }, + { + "name": "MergeSingleListFeatureTensors", + "description": "Merge given single-feature tensors with list features into one multi-feature tensor.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "attributes": [ + { + "description": "feature ids", + "name": "feature_ids", + "option": "optional" + } + ], + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".values", + "name": "in1_values" + }, + { + "description": ".presence", + "name": "in1_presence" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": "out_keys" + }, + { + "description": ".values.lengths", + "name": "out_values_lengths" + }, + { + "description": ".values.values", + "name": "out_values_values" + } + ], + "support_level": "default" + }, + { + "name": "MergeSingleListFeatureTensorsGradient", + "description": "Explode multi-feature tensors with list features into single-feature tensors.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n 
https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".presence", + "name": "in1_presence" + }, + { + "description": ".values.values_grad", + "name": "out_values_values" + } + ], + "outputs": [ + { + "description": ".values_grad", + "name": "out1_values" + } + ], + "support_level": "default" + }, + { + "name": "MergeSingleMapFeatureTensors", + "description": "Merge given single-feature tensors with map features into one multi-feature tensor.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "attributes": [ + { + "description": "feature ids", + "name": "feature_ids", + "option": "optional" + } + ], + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".keys", + "name": "in1_keys" + }, + { + "description": ".values", + "name": "in1_values" + }, + { + "description": ".presence", + "name": "in1_presence" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": "out_keys" + }, + { + "description": ".values.lengths", + "name": "out_values_lengths" + }, + { + "description": ".values.keys", + "name": "out_values_keys" + }, + { + "description": ".values.values", + "name": "out_values_values" + } + ], + "support_level": "default" + }, + { + "name": "MergeSingleMapFeatureTensorsGradient", + "description": "Explode given multi-feature tensors with map features into multiple single-feature tensor.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".presence", + "name": "in1_presence" + }, + { + "description": ".values.values_grad", + "name": "out_values_values_grad" + } + ], + "outputs": [ + { + "description": ".values_grad", + "name": "in1_values_grad" + } + ], + "support_level": "default" + }, + { + "name": "MergeSingleScalarFeatureTensors", + "description": "Merge given single-feature tensors with scalar features into one multi-feature tensor.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n 
.keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "attributes": [ + { + "description": "feature ids", + "name": "feature_ids", + "option": "optional" + } + ], + "inputs": [ + { + "description": "", + "name": "in1" + }, + { + "description": ".presence", + "name": "in1_presence" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": "out_keys" + }, + { + "description": ".values", + "name": "out_values" + } + ], + "support_level": "default" + }, + { + "name": "MergeSingleScalarFeatureTensorsGradient", + "description": "Explode multi-feature tensor of scalar features into one or more single-feature tensors.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".presence", + "name": "in1_presence" + }, + { + "description": ".values_grad", + "name": ".values_grad" + } + ], + "outputs": [ + { + "description": "_grad of inputs", + "name": "in1_grad" + } + ], + "support_level": "default" + }, + { + "name": "Min", + "description": "\nElement-wise min of an arbitrary number of input tensors. This operation can be performed in-place, by using the first input blob as the output blob. All inputs must have the same shape and data type, and the output will have the same shape as the inputs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/minmax_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Min\",\n [\"X\", \"Y\", \"Z\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(2,2)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", (np.random.rand(2,2)).astype(np.float32))\nworkspace.FeedBlob(\"Z\", (np.random.rand(2,2)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"Z:\", workspace.FetchBlob(\"Z\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Min:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[0.32731926 0.4939747 ]\n [0.29242373 0.43460014]]\nY:\n[[0.40928316 0.916115 ]\n [0.77526504 0.29339448]]\nZ:\n[[0.7899794 0.90335774]\n [0.82599413 0.2843068 ]]\nMin:\n[[0.32731926 0.4939747 ]\n [0.29242373 0.2843068 ]]\n\n```\n\n
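For readers cross-checking the Min result above outside of Caffe2, the same element-wise reduction is reproducible in plain NumPy (a sketch with hypothetical inputs, not part of this schema):

```
import numpy as np

# Hypothetical stand-ins for the blobs fed in the example above.
X = np.random.rand(2, 2).astype(np.float32)
Y = np.random.rand(2, 2).astype(np.float32)
Z = np.random.rand(2, 2).astype(np.float32)

# Element-wise minimum across all inputs, the same reduction Min performs.
expected = np.minimum(np.minimum(X, Y), Z)
print(expected)
```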
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* List of input tensors with the same shape.", + "name": "X, Y, ..." + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as input(s).Contains the minimum valued element at each location.", + "name": "M" + } + ], + "support_level": "default" + }, + { + "name": "MinGradient", + "support_level": "default" + }, + { + "name": "Mish", + "description": "\nMish takes one input data (Tensor) and produces one output data\n(Tensor) where the Mish function, y = x * tanh(ln(1 + exp(x))), is applied to the\ntensor elementwise.\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "MishGradient", + "description": "\nMishGradient takes X, Y and dY and uses this to update dX according to the\nchain rule and derivatives of the Mish function.\n", + "support_level": "default" + }, + { + "name": "Mod", + "description": "\nElement-wise modulo operation. Each element in the output is the modulo result\nof the corresponding element in the input data. The divisor of the modulo is\nprovided by the `divisor` argument.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/mod_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Mod\",\n [\"X\"],\n [\"Y\"],\n divisor=10\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(100, size=(5,5))))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[56 22 43 13 60]\n [ 4 55 58 10 45]\n [64 66 4 3 66]\n [10 36 47 52 78]\n [91 4 36 47 95]]\nX after running op:\n[[6 2 3 3 0]\n [4 5 8 0 5]\n [4 6 4 3 6]\n [0 6 7 2 8]\n [1 4 6 7 5]]\n\n```\n\n
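Unlike Mod above, the *Mish* entry earlier in this section defines the activation only by its formula and ships no example. A minimal NumPy sketch of y = x * tanh(ln(1 + exp(x))) (hypothetical inputs, not from this file):

```
import numpy as np

def mish(x):
    # y = x * tanh(ln(1 + exp(x))); log1p(exp(x)) is softplus.
    # Note: for large x a numerically stable softplus would be needed.
    return x * np.tanh(np.log1p(np.exp(x)))

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0], dtype=np.float32)
print(mish(x))
```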
\n\n", + "attributes": [ + { + "default": 0, + "description": "Divisor of the modulo operation (must be >= 1).", + "name": "divisor", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "If true, sign of output matches divisor, else if false, sign follows dividend.", + "name": "sign_follow_divisor", + "option": "optional", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor with int32 or int64 data.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor of data with modulo operation applied.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Moments", + "description": "\n Computes the mean and variance of the input tensor's element along the\n provided axes. The resulted tensor has the same rank as the input if keepdims\n equals True.\n If keepdims equals False, then the resulted tensor have the reduced dimension\n pruned.\n", + "attributes": [ + { + "description": "A list of integers, along which to reduce. If axes is not provided, the op computes the element-wise mean and variance.", + "name": "axes", + "option": "optional" + }, + { + "description": "Keep the reduced dimension(s) or not, default True keeps the reduced dimension(s).", + "name": "keepdims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reduced mean tensor.", + "name": "mean" + }, + { + "description": "Reduced variance tensor.", + "name": "variance" + } + ], + "support_level": "default" + }, + { + "name": "MomentsGradient", + "support_level": "default" + }, + { + "name": "MomentumSGD", + "description": "\n\nComputes a momentum SGD update for an input gradient and momentum\nparameters. Concretely, given inputs (grad, m, lr) and parameters\n(momentum, nesterov), computes:\n\n if not nesterov:\n adjusted_gradient = lr * grad + momentum * m\n return (adjusted_gradient, adjusted_gradient)\n else:\n m_new = momentum * m + lr * grad\n return ((1 + momentum) * m_new - momentum * m, m_new)\n\nOutput is (grad, momentum)\n\nNote the difference to MomemtumSGDUpdate, which actually performs the\nparameter update (and is thus faster).\n", + "support_level": "default" + }, + { + "name": "MomentumSGDUpdate", + "description": "\n\nPerforms a momentum SGD update for an input gradient and momentum\nparameters. Concretely, given inputs (grad, m, lr, param) and arguments\n(momentum, nesterov), computes:\n\n if not nesterov:\n adjusted_gradient = lr * grad + momentum * m\n param = param - adjusted_gradient\n return (adjusted_gradient, adjusted_gradient, param)\n else:\n m_new = momentum * m + lr * grad\n param = param - ((1 + momentum) * m_new - momentum * m),\n return ((1 + momentum) * m_new - momentum * m, m_new, param)\n\nOutput is (grad, momentum, parameter).\n\nNote the difference to MomentumSGD, which returns a new gradient\nbut does not perform the parameter update.\n\n", + "support_level": "default" + }, + { + "name": "MSRAFill", + "support_level": "default" + }, + { + "name": "Mul", + "description": "\nPerforms element-wise binary multiplication (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. 
The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Mul\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[1,2],[3,4]]))\nworkspace.FeedBlob(\"B\", np.array([[5,6],[7,8]]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[1 2]\n [3 4]]\nB:\n[[5 6]\n [7 8]]\nC:\n[[ 5 12]\n [21 32]]\n\n```\n\n
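The example above multiplies two same-shaped tensors; per the description, broadcasting additionally requires `broadcast=1`. A sketch in the same workspace style (shapes chosen so B's shape is a suffix of A's, so no `axis` argument is needed; untested here):

```
op = core.CreateOperator(
    "Mul",
    ["A", "B"],
    ["C"],
    broadcast=1,
)

# B has shape (4,), a suffix of A's shape (3, 4), so suffix matching applies.
workspace.FeedBlob("A", np.random.rand(3, 4).astype(np.float32))
workspace.FeedBlob("B", np.random.rand(4).astype(np.float32))
workspace.RunOperatorOnce(op)
print("C:", workspace.FetchBlob("C"))
```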
\n\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions and type as A.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "MulGradient", + "support_level": "default" + }, + { + "name": "MultiClassAccuracy", + "description": "\nRespectively compute accuracy score for each class given a number of instances\nand predicted scores of each class for each instance.\n", + "inputs": [ + { + "description": "2-D float tensor (N,D,) of predicted scores of each class for each data. N is the number of instances, i.e., batch size. D is number of possible classes/labels.", + "name": "prediction" + }, + { + "description": "1-D int tensor (N,) of labels for each instance.", + "name": "labels" + } + ], + "outputs": [ + { + "description": "1-D float tensor (D,) of accuracy for each class. If a class has no instance in the batch, its accuracy score is set to zero.", + "name": "accuracies" + }, + { + "description": "1-D int tensor (D,) of number of instances for each class in the batch.", + "name": "amounts" + } + ], + "support_level": "default" + }, + { + "name": "NanCheck", + "description": "Identity operator, but checks all values for nan or inf", + "inputs": [ + { + "description": "Tensor to check for nan/inf", + "name": "tensor" + } + ], + "outputs": [ + { + "description": "Tensor to copy input into if no NaNs or inf. Can be in-place", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "NCHW2NHWC", + "description": "\nThe operator switches the order of data in a tensor from NCHW- sample index N,\nchannels C, height H and width W, to the NHWC order (this is for 2D images).\nIn general, this operator switches the order of data in a tensor from N C H_1\n... H_k to N H_1 ... H_k C for k-dimensional features, and currently supports\nk=1, 2, and 3.\n", + "inputs": [ + { + "description": "The input data (Tensor) in the NCHW order.", + "name": "data" + } + ], + "outputs": [ + { + "description": "The output tensor (Tensor) in the NHWC order.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "NE", + "description": "\nPerforms element-wise not equal to comparison **!=** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. 
B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"NE\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [False True True False False True]\n```\n\n
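Looking back at *MultiClassAccuracy* a few entries above, which ships no example: a NumPy sketch of the bookkeeping its schema describes (assumed shapes; taking the argmax as the predicted class is an assumption, matching the usual reading of "predicted scores"):

```
import numpy as np

def multi_class_accuracy(prediction, labels):
    # prediction: (N, D) scores; labels: (N,) integer class ids.
    N, D = prediction.shape
    amounts = np.zeros(D, dtype=np.int64)
    correct = np.zeros(D, dtype=np.int64)
    predicted = prediction.argmax(axis=1)
    for label, pred in zip(labels, predicted):
        amounts[label] += 1
        correct[label] += int(pred == label)
    # Classes with no instances get accuracy 0, as the schema specifies.
    accuracies = np.where(amounts > 0, correct / np.maximum(amounts, 1), 0.0)
    return accuracies, amounts
```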
\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "NegateGradient", + "description": "\nNegagteGradient operator in forward pass simply copies input to the\noutput, and in backward pass, flips the sign of the output gradient\n", + "support_level": "default" + }, + { + "name": "Negative", + "description": "\nComputes the element-wise negative of the input.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/negative_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Negative\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3).astype(np.float32)))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX: [[0.83296907 0.61407167 0.32562155]\n [0.59304523 0.03111175 0.29365504]\n [0.09478621 0.5424558 0.73940724]]\nY: [[-0.83296907 -0.61407167 -0.32562155]\n [-0.59304523 -0.03111175 -0.29365504]\n [-0.09478621 -0.5424558 -0.73940724]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* 1D input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* 1D output tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "NGramFromCategorical", + "support_level": "default" + }, + { + "name": "NHWC2NCHW", + "description": "\nThe operator switches the order of data in a tensor from NHWC- sample index N,\nheight H, width H and channels C, to the NCHW order (this is for 2D images).\nIn general, this operator switches the order of data in a tensor from N H_1 ...\nH_k C to N C H_1 ... H_k for k-dimensional features, and currently supports\nk=1, 2, and 3.\n", + "inputs": [ + { + "description": "The input data (Tensor) in the NHWC order.", + "name": "data" + } + ], + "outputs": [ + { + "description": "The output tensor (Tensor) in the NCHW order.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Normalize", + "description": "\nGiven a matrix, apply L2-normalization along the specified dimension.\n", + "attributes": [ + { + "description": "axis to normalize", + "name": "axis", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "NormalizeGradient", + "attributes": [ + { + "description": "axis to normalize", + "name": "axis", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "NormalizeL1", + "description": "\nGiven a matrix, apply L1-normalization along the specified axis.\n", + "attributes": [ + { + "description": "axis to normalize", + "name": "axis", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "NormalizePlanarYUV", + "support_level": "default" + }, + { + "name": "Not", + "description": "\nPerforms element-wise negation on input tensor `X`.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n\"Not\",\n[\"X\"],\n[\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3, 3) > 0.5))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[ True False False]\n[False False False]\n[ True True True]]\nY:\n[[False True True]\n[ True True True]\n[False False False]]\n\n```\n\n
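The *Normalize* and *NormalizeL1* operators earlier in this section carry no examples; a NumPy reference for L2/L1 normalization along an axis (a sketch, not the operators' implementation; the epsilon guard against zero vectors is an assumption):

```
import numpy as np

def normalize_l2(x, axis=-1, eps=1e-12):
    # Divide by the L2 norm along `axis`.
    norm = np.sqrt((x * x).sum(axis=axis, keepdims=True))
    return x / np.maximum(norm, eps)

def normalize_l1(x, axis=-1, eps=1e-12):
    # Divide by the L1 norm along `axis`.
    norm = np.abs(x).sum(axis=axis, keepdims=True)
    return x / np.maximum(norm, eps)

x = np.array([[3.0, 4.0], [1.0, 0.0]], dtype=np.float32)
print(normalize_l2(x, axis=1))  # each row now has unit L2 norm
```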
\n\n ", + "inputs": [ + { + "description": "*(Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(Tensor``)* Negated output tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "NumpyTile", + "inputs": [ + { + "description": "The input tensor.", + "name": "data" + }, + { + "description": "1-D Tensor specifying how many times to repeat each axis.", + "name": "repeats" + } + ], + "outputs": [ + { + "description": "Tensor that will contain input replicated along the given axis.", + "name": "tiled_data" + } + ], + "support_level": "default" + }, + { + "name": "OneHot", + "description": "\nThe *OneHot* op accepts two inputs *indices* and *index_size_tensor*, and produces a single output *one_hots*. For each index in *indices* the op creates a one-hot row in *one_hots* of length *index_size_tensor* where all entries are zero except the entry at the index is 1. The size of *one_hots* is *len(indices)* x *index_size_tensor*.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/one_hot_ops.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/one_hot_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"OneHot\",\n [\"indices\", \"index_size_tensor\"],\n [\"one_hots\"],\n)\n\nworkspace.FeedBlob(\"indices\", np.array([0,1,2,3,4]).astype(np.long))\nprint(\"indices:\\n\", workspace.FetchBlob(\"indices\"))\n\nworkspace.FeedBlob(\"index_size_tensor\", np.array([5]).astype(np.long))\nprint(\"index_size_tensor:\\n\", workspace.FetchBlob(\"index_size_tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"one_hots: \\n\", workspace.FetchBlob(\"one_hots\"))\n\n```\n\n**Result**\n\n```\n\nindices:\n [0 1 2 3 4]\nindex_size_tensor:\n [5]\none_hots:\n [[1. 0. 0. 0. 0.]\n [0. 1. 0. 0. 0.]\n [0. 0. 1. 0. 0.]\n [0. 0. 0. 1. 0.]\n [0. 0. 0. 0. 1.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "The active index for each example in the batch.", + "name": "indices" + }, + { + "description": "Scalar with the size of the index. Must be in CPU context", + "name": "index_size_tensor" + } + ], + "outputs": [ + { + "description": "Matrix of size len(indices) x index_size", + "name": "one_hots" + } + ], + "support_level": "default" + }, + { + "name": "Onnxifi", + "description": "\n The Onnxifi operator is a black-box operator to lower the computation to Onnxifi backend\n ", + "attributes": [ + { + "description": "(string default=\"\") Serialized ONNX model to be converted to backend representation", + "name": "onnx_model", + "option": "optional" + }, + { + "description": "Initialization pair indicating the mapping of the name between NetDef and ONNX model", + "name": "initializers", + "option": "optional" + }, + { + "description": "A list of key/value pairs indicating which input index to look up for real batch size for the given max output batch size", + "name": "output_resize_hints", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "ONNXWhile", + "description": "\n*** EXPERIMENTAL. This operator is a work-in-progress. No assumption should be\nmade about the stability or correctness of this op. ***\n\nGeneric Looping construct confirming to the ONNX Loop operator spec. This loop\nhas multiple termination conditions:\n\n1. Trip count. Iteration count specified at runtime. Set by specifying the\n input M. Optional. Set to empty string to omit. Note that a static trip\n count (specified at graph construction time) can be specified by passing\n in a constant node for input M.\n2. Loop termination condition. This is an input to the op that determines\n whether to run the first interation and also a loop-carried dependency for\n the body graph. The body graph must yield a value for the condition\n variable, whether this input is provided or not.\n\nThis table summarizes the operating modes of this operator with equivalent\nC-style code:\n\nOperator inputs defined as (max_trip_count, condition_var). Omitted optional\ninputs are represented as empty string. Concretely, in this caffe2 op an input\nis marked as omitted by setting its 'has_{name}' argument to False.\n\n input (\"\", \"\"):\n for (int i=0; ; ++i) {\n cond = ... // Note this value is ignored, but is required in the body\n }\n\n input (\"\", cond) // Note this is analogous to a while loop\n bool cond = ...;\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (\"\", 1) // Note this is analogous to a do-while loop\n bool cond = true\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (trip_count, \"\") // Note this is analogous to a for loop\n int trip_count = ...\n for (int i=0; i < trip_count; ++i) {\n cond = ...; // ignored\n }\n\n input (trip_count, cond)\n int trip_count = ...;\n bool cond = ...;\n for (int i=0; i < trip_count && cond; ++i) {\n cond = ...;\n }\n ", + "attributes": [ + { + "description": "Net executed on each iteration", + "name": "body", + "option": "optional" + }, + { + "description": "Whether to use the trip count input", + "name": "has_trip_count", + "option": "optional" + }, + { + "description": "Whether to use the condition input", + "name": "has_cond", + "option": "optional" + }, + { + "description": "Whether to save the scopes across iterations, as in for backprop", + "name": "save_scopes", + "option": "optional" + }, + { + "description": "Do not create new scopes. 
Use this only if you're certain there will be no name collision, for example if you're converting from a fully-SSA IR", + "name": "disable_scopes", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Number of iterations to go out to. Used if the flag has_trip_count is True.", + "name": "max_trip_count" + }, + { + "name": "condition" + }, + { + "name": "initial", + "option": "variadic" + }, + { + "description": "Dynamic condition value for the first iteration. For all subsequent iterations, the condition from the body graph is used. This input is used if the flag has_cond is true.", + "name": "first_iter_condition" + } + ], + "outputs": [ + { + "name": "final_and_scan_outputs", + "option": "variadic" + } + ], + "support_level": "default" + }, + { + "name": "Or", + "description": "\nPerforms element-wise logical operation **or** (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Or\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", (np.random.rand(3, 3) > 0.5))\nworkspace.FeedBlob(\"B\", (np.random.rand(3, 3) > 0.5))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[False True True]\n [False True True]\n [ True True True]]\nB:\n[[False True False]\n [ True True True]\n [False True False]]\nC:\n[[False True True]\n [ True True True]\n [ True True True]]\n\n```\n\n
\n\n ", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor of booleans. Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "PackedInt8BGRANHWCToNCHWCStylizerPreprocess", + "support_level": "default" + }, + { + "name": "PackRecords", + "description": "\nGiven a dataset under a schema specified by the `fields` argument, pack all\nthe input tensors into one, where each tensor element represents a row of data\n(batch of size 1). This format allows easier use with the rest of Caffe2\noperators.\n", + "attributes": [ + { + "description": "List of strings representing the string names in the formatspecified in the doc for CreateTreeCursor.", + "name": "fields", + "option": "optional" + } + ], + "outputs": [ + { + "description": "One dimensional tensor having a complex type of SharedTensorVectorPtr. In order to reverse it back to the original input it has to be inserted into UnPackRecordsOp.", + "name": "tensor" + } + ], + "support_level": "default" + }, + { + "name": "PackRNNSequence", + "description": "\nPack values based on the length blob. Each number from length blob represents\nthe corresponding values that need to be packed. The dimension for each pack\nis the same as the maximum number from the length blob (padding with zero is\nimplemented for smaller length value). The overall output dimension is:\nT * N * D, where T is the max number of lengths, N is the size of lengths,\nand D is the dimension of each feature value. The following example shows\nthe input and output of this operator:\n\n\nGiven:\n values = [v1, v2, v3, v4, v5, v6, v7, v8]\n lengths = [2, 3, 1, 2];\n\n\nOutput:\n output = [\n [v1, v3, v6, v7],\n [v2, v4, 0, v8],\n [0, v5, 0, 0 ],\n ]\n\n\nOne application for this operator is the transfer data into the format that is\nused for RNN models. Note that the gradient operator of PackRNNSequence is\nUnpackRNNSequence.\n", + "inputs": [ + { + "description": "Data tensor, contains a sequence of features", + "name": "values" + }, + { + "description": "lengths with each number representing the pack size.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Output tensor after packing", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "PackSegments", + "description": "Map N dim tensor to N+1 dim based on length blob. Sequences that are shorter than the longest sequence are padded with zeros.", + "attributes": [ + { + "description": "The pre-defined max_length for the packed segments", + "name": "max_length", + "option": "optional" + }, + { + "description": "Padding number in the packed segments. 
Use true to pad -infinity, otherwise pad zeros", + "name": "pad_minf", + "option": "optional" + }, + { + "description": "bool whether to return presence mask, false by default", + "name": "return_presence_mask", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1-d int/long tensor that contains the length of each segment in the output.", + "name": "lengths" + }, + { + "description": "N dim Tensor.", + "name": "tensor" + } + ], + "outputs": [ + { + "description": "N + 1 dim Tensor where dim(1) is the max length, dim(0) is the batch size.", + "name": "packed_tensor" + }, + { + "description": "2 dim boolean tensor, false where packed_tensor is padded, true otherwise.", + "name": "presence_mask" + } + ], + "support_level": "default" + }, + { + "name": "PadEmptySamples", + "description": "\nPad empty field given lengths and index features.\n\nInput(0) is a blob pointing to the lengths of samples in one batch,\n[Input(1),... Input(num_fields)] a list of tensors containing the data for\neach field of the features.\n\nPadEmptySamples is thread safe.\n", + "inputs": [ + { + "description": "A blob containing a pointer to the lengths.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Tensor containing lengths with empty sample padded.", + "name": "out_lengths" + } + ], + "support_level": "default" + }, + { + "name": "PadImage", + "description": "\nPadImage pads values around the boundary of an image according to the pad\nvalues and stride sizes defined by the ConvPoolOpBase operator.\n ", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case. ", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data tensor from padding the H and W dimensions on the tensor. Dimensions will vary based on various pad and stride sizes.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "PadImageGradient", + "support_level": "default" + }, + { + "name": "PairWiseLoss", + "description": "\nOperator computes the pairwise loss between all pairs within a batch\n using the logit loss function on the difference in scores between pairs\n", + "inputs": [ + { + "description": "Input blob from the previous layer, which is almost always the result of a softmax operation; X is a 2D array of size N x 1 where N is the batch size. For more info: D. Sculley, Large Scale Learning to Rank. https://www.eecs.tufts.edu/~dsculley/papers/large-scale-rank.pdf", + "name": "X" + }, + { + "description": "Blob containing the labels used to compare the input", + "name": "label" + }, + { + "description": "Optional input blob that contains the lengths of multiple sessions. The summation of this blob must be equal to the size of blob X. 
If lengths blob is provided, the output blob has the same size as lengths blob, and the cross entropy is computed within each session.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Output blob after the cross entropy computation", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "PairWiseLossGradient", + "support_level": "default" + }, + { + "name": "Partition", + "description": "\nSplits the input int tensor into multiple ones according to the first tensor.\n\nTakes the first input and partitions it to shards according to the remainder of\nvalues modulo the number of partitions. It requires that the first tensor is of\nintegral type. The number of partitions is derived as (num_output / num_input).\n\nIf additional inputs are present they must have the same shape as the first\ninput, optionally with extra trailing dimensions. They will be partitioned\naccording to the first input.\n\nOptional arg 'pack_first_input' transforms the first tensor values as\nX_ij / num_partitions.\n\nOutputs are ordered as\nX_0_part_0, X_1_part_0, ..., X_N-1_part_0, X_0_part_1, ..., X_N-1_part_K-1\n", + "attributes": [ + { + "description": "(int, default 0) If set, the operator transforms the first tensor values as floor(X_ij / num_partitions)", + "name": "pack_first_input", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor containing data to be partitioned. The number of input tensors might be greater than 1 but must have the same shape as the previous tensors.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output Partitions. The number of output tensors has to be a multiple of the number of input tensors.", + "name": "partitions" + } + ], + "support_level": "default" + }, + { + "name": "Percentile", + "description": "\n This operator is used to find percentile representations for raw values, given a sample\n set of raw values, labeled with their corresponding percentiles from the same distribution.\n In particular, this operator takes as input a tensor of floats to find the percentile values\n for, a 2D tensor of floats, where the first column of the tensor represents sampled values,\n and the second column represents the percentile labels, and a tensor of integer lengths.\n\n This lengths tensor is used because the operator works on multiple sets of raw values at the same time. For\n example, for an input:\n original_values=[[3, 5, 3],[5, 1, 6]], lengths = [2, 1, 1], value_to_pct = [[3, 0.2], [5, 0.5], [1, 0.3], [3, 0.6]]\n\n Our operator expects that each column i of the input tensor is sampled from distribution i. Lengths tells\n us that the first two elements in value_to_pct are sampled from distribution 1, the next is from distribution 2,\n and the last is from distribution 3. We expect the output of our operator to give us [[0.2, 1.0, 0.6], [0.5, 0.3, 1.0]].\n\n To calculate the percentile of an element, we check to see if its value is already mapped to\n a percentile in value_to_pct. If so, we return that value. If not, we linearly interpolate between\n the two closest values in value_to_pct. If the value is larger than all values in value_to_pct, we\n return 1. If it's smaller than all the values, we return 0.\n\n", + "inputs": [ + { + "description": "Input 2D tensor of floats, representing the original, raw data to calculate percentiles for.", + "name": "original_values" + }, + { + "description": "Sorted 2D tensor, with 2 columns. 
Each element in the first column is a float representing the raw value of a sample. Its corresponding element in the next column represents the percentile it maps to.", + "name": "value_to_pct" + }, + { + "description": "1D tensor, representing the length of each distribution. We expect that the sum of elements of this tensor is equal to the total length of value_to_pct.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "1D tensor of floats, with the same dimensions as the flattened input tensor. Each element of this tensor, percentile_values[i], corresponds to the percentile calculated for original_values[i].", + "name": "percentile_values" + } + ], + "support_level": "default" + }, + { + "name": "Perplexity", + "description": "\nPerplexity calculates how well a probability distribution predicts a sample.\nPerplexity takes a 1-D tensor containing a batch of probabilities. Each value\nin the tensor belongs to a different sample and represents the probability of\nthe model predicting the true label for that sample. The operator returns a\nsingle (float) perplexity value for the batch.\n", + "inputs": [ + { + "description": "The input data as Tensor. It contains a batch of true label or target probabilities", + "name": "probabilities" + } + ], + "outputs": [ + { + "description": "The output: a single (float) perplexity value for the batch", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "PiecewiseLinearTransform", + "description": "\nPiecewiseLinearTransform takes inputs -- predictions, a 2-D or 1-D tensor\n(Tensor) of size (batch_size x prediction_dimensions). The piecewise\nlinear functions are stored in bounds, slopes and intercepts. The output tensor\nhas the same shape as input `predictions` and contains the predictions\ntransformed by the piecewise linear functions. Each column of predictions has\nits own piecewise linear transformation functions. Therefore the size of\npiecewise function parameters are pieces x prediction_dimensions, except for\nbinary predictions where only the positive prediction needs them. Note that in\neach piece, low bound is excluded while high bound is included. Also the\npiecewise linear function must be continuous.\n\nNotes\n- If the input is binary predictions (Nx2 or Nx1 tensor), set the binary arg\nto true so that one group of piecewise linear functions is needed (see\ndetails below).\n- The transform parameters (bounds, slopes, intercepts) can be passed either\nthrough args or through input blobs.\n- If we have multiple groups of piecewise linear functions, each group has the\nsame number of pieces.\n- If a prediction is out of the bounds, it is capped to the smallest or largest\nbound.\n", + "attributes": [ + { + "description": "1-D vector of size (prediction_dimensions x (pieces+1)) containing the upper bounds of each piece of linear function. One special case is the first bound is the lower bound of whole piecewise function and we treat it the same as the left most functions. (bounds, slopes, intercepts) can be passed through either arg or input blobs.", + "name": "bounds", + "option": "optional" + }, + { + "description": "1-D vector of size (prediction_dimensions x pieces) containing the slopes of linear function", + "name": "slopes", + "option": "optional" + }, + { + "description": "1-D vector of size (prediction_dimensions x pieces) containing the intercepts of linear function", + "name": "intercepts", + "option": "optional" + }, + { + "description": "If set true, we assume the input is an Nx1 or Nx2 tensor. 
If it is an Nx1 tensor, it contains the positive predictions. If the input is an Nx2 tensor, its first column is the negative predictions, its second column is the positive predictions, and negative + positive = 1. We just need one group of piecewise linear functions for the positive predictions.", + "name": "binary", + "option": "optional" + } + ], + "inputs": [ + { + "description": "2-D tensor (Tensor) of size (num_batches x num_classes) containing scores", + "name": "predictions" + }, + { + "description": "See bounds in Arg. (bounds, slopes, intercepts) can be passed through either arg or input blobs.", + "name": "bounds (optional)" + }, + { + "description": "See slopes in Arg. (bounds, slopes, intercepts) can be passed through either arg or input blobs.", + "name": "slopes (optional)" + }, + { + "description": "See intercepts in Arg. (bounds, slopes, intercepts) can be passed through either arg or input blobs.", + "name": "intercepts (optional)" + } + ], + "outputs": [ + { + "description": "2-D tensor (Tensor) of size (num_batches x num_classes) containing transformed predictions", + "name": "transforms" + } + ], + "support_level": "default" + }, + { + "name": "Pow", + "description": "\nThe *Pow* op takes an input data tensor $X$ and an exponent parameter *exponent*, which can be a scalar or another tensor. As output, it produces a single output data tensor $Y$, where the function $f(x) = x^{exponent}$ has been applied to $X$ elementwise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pow_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pow_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Pow\",\n [\"X\", \"exponent\"],\n [\"Y\"],\n broadcast=1\n)\n\nworkspace.FeedBlob(\"X\", np.array([1,2,3,4,5,6]).astype(np.float32))\nprint(\"X: \", workspace.FetchBlob(\"X\"))\n\nworkspace.FeedBlob(\"exponent\", np.array([2]).astype(np.float32))\nprint(\"exponent: \", workspace.FetchBlob(\"exponent\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y: \", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [1. 2. 3. 4. 5. 6.]\nexponent: [2.]\nY: [ 1. 4. 9. 16. 25. 36.]\n\n```\n\n
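The example above feeds the exponent as a second input; per the attributes that follow, it can instead be passed as an argument (the two are mutually exclusive). A sketch in the same style (untested):

```
op = core.CreateOperator(
    "Pow",
    ["X"],
    ["Y"],
    exponent=2.0,
)

workspace.FeedBlob("X", np.array([1, 2, 3], dtype=np.float32))
workspace.RunOperatorOnce(op)
print("Y:", workspace.FetchBlob("Y"))  # expected: [1. 4. 9.]
```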
\n\n\n", + "attributes": [ + { + "description": "The exponent of the power function. Do not use if setting exponent via input.", + "name": "exponent", + "option": "optional" + }, + { + "default": -1, + "description": "", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "", + "name": "broadcast", + "option": "optional", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Input data blob to be operated on.", + "name": "X" + }, + { + "description": "Exponent blob containing the exponent(s) for calculation. Do not use if setting exponent via argument.", + "name": "exponent" + } + ], + "outputs": [ + { + "description": "Output data blob with the same shape as the input.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "PRelu", + "category": "Activation", + "description": "\n\nThe *PRelu* op takes input data tensor $X$, an input slope tensor $slope$, and produces one output tensor $Y$ of the same shape as $X.$ The op performs the element wise *PRelu* operation, defined as\n\n$$y=prelu(x) =\\begin{cases}slope * x & x < 0\\\\x & otherwise\\end{cases}$$\n\nNote, is slope is size 1, the value is shared across the channels, otherwise $X$ and $slope$ must be the same shape. See [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852) for more information.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/prelu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/prelu_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"PRelu\",\n [\"X\",\"Slope\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.FeedBlob(\"Slope\", np.array([0.1]).astype(np.float32))\nprint(\"Slope:\\n\", workspace.FetchBlob(\"Slope\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 0.3957382 -0.19725518 -0.26991343]\n [ 1.5513182 -0.27427664 -0.14584002]\n [-0.4121164 0.9292345 0.96426094]]\n\nSlope:\n [0.1]\n\nY:\n [[ 0.3957382 -0.01972552 -0.02699134]\n [ 1.5513182 -0.02742766 -0.014584 ]\n [-0.04121164 0.9292345 0.96426094]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input tensor of data to be operated on.", + "name": "X" + }, + { + "description": "1D input slope tensor. If `Slope` is of size 1, the value is shared across different channels", + "name": "Slope" + } + ], + "outputs": [ + { + "description": "Output tensor, with same shape as $X$.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "PReluGradient", + "description": "\n\nPReluGradient takes both Y and dY and uses this to update dX and dW according\nto the chain rule and derivatives of the rectified linear function.\n\n", + "support_level": "default" + }, + { + "name": "PrependDim", + "description": "\nReshape the tensor by prepending a dimension of fixed size and dividing the\nsize of the next dimension by that amount.\n", + "attributes": [ + { + "description": "Size of the dimension to prepend.", + "name": "dim_size", + "option": "optional" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reshaped tensor.", + "name": "reshaped" + } + ], + "support_level": "default" + }, + { + "name": "Print", + "description": "Logs shape and contents of input tensor to stderr or to a file.", + "attributes": [ + { + "description": "(bool) if 1, saves contents to the root folder of the current workspace, appending the tensor contents to a file named after the blob name. Otherwise, logs to stderr.", + "name": "to_file", + "option": "optional" + }, + { + "description": "(int, default 0) If set, prints the first `limit` elements of tensor. If 0, prints the first `k_limit_default`(1000) elements of tensor", + "name": "limit", + "option": "optional" + }, + { + "description": "(int, default 1) Print tensor every `every_n` runs", + "name": "every_n", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The tensor to print.", + "name": "tensor" + } + ], + "support_level": "default" + }, + { + "name": "Python", + "support_level": "default" + }, + { + "name": "PythonDLPack", + "support_level": "default" + }, + { + "name": "PythonDLPackGradient", + "support_level": "default" + }, + { + "name": "PythonGradient", + "support_level": "default" + }, + { + "name": "QuantDecode", + "description": "\nDecode inputs using codebook. 
This is a general LUT operator that returns\ntensors with values from codebook (input 0) based on given indices in\ncodes (input 1 ~ n).\n\n\nExample:\n\n\nInput:\n codebook = [1.5, 2.5, 3.5]\n codes_0 = [0, 1, 1, 2]\n codes_1 = [2, 0, 0]\n\n\nOutput:\n decoded_0 = [1.5, 2.5, 2.5, 3.5]\n decoded_1 = [3.5, 1.5, 1.5]\n", + "inputs": [ + { + "description": "Codebook in 1d tensor (float)", + "name": "codebook" + }, + { + "description": "Encoded codes 0 (uint8/uint16/int32)", + "name": "codes_0" + }, + { + "description": "Encoded codes 1 if existed (uint8/uint16/int32)", + "name": "codes_1" + }, + { + "description": "Encoded codes n if existed (uint8/uint16/int32)", + "name": "codes_n" + } + ], + "outputs": [ + { + "description": "Decoded tensor for codes_0 (float)", + "name": "decoded_0" + }, + { + "description": "Decoded tensor for codes_1 (float)", + "name": "decoded_1" + }, + { + "description": "Decoded tensor for codes_n (float)", + "name": "decoded_n" + } + ], + "support_level": "default" + }, + { + "name": "QuantDecodeGradient", + "support_level": "default" + }, + { + "name": "Quantile", + "description": "\n Calculate the quantile for the value in the given list of tensors.\n", + "attributes": [ + { + "description": "If true (default), apply abs() on the tensor values.", + "name": "abs", + "option": "optional" + }, + { + "description": "multiplicative tolerance of the quantile_value.", + "name": "tol", + "option": "optional" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* List of input tensors.", + "name": "X1, X2, ..." + } + ], + "outputs": [ + { + "description": "Value at the given quantile", + "name": "quantile_value" + } + ], + "support_level": "default" + }, + { + "name": "Range", + "description": "\nGenerates an output tensor within the half-open interval $[start, stop)$ (the interval including start but excluding stop).\n- The `start` input is optional, and defaults to 0 when not set.\n- The `step` input is optional, and defaults to 1 when not set.\n- The type of the `output` tensor is determined by the types of inputs used.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Range\",\n [\"start\", \"stop\", \"step\"],\n [\"output\"]\n)\n\nworkspace.FeedBlob(\"start\", np.array(4, dtype=np.int32))\nworkspace.FeedBlob(\"stop\", np.array(17, dtype=np.int32))\nworkspace.FeedBlob(\"step\", np.array(2, dtype=np.int32))\nprint(\"start:\", workspace.FetchBlob(\"start\"))\nprint(\"stop:\", workspace.FetchBlob(\"stop\"))\nprint(\"step:\", workspace.FetchBlob(\"step\"))\nworkspace.RunOperatorOnce(op)\nprint(\"output:\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\nstart: 4\nstop: 17\nstep: 2\noutput: [ 4 6 8 10 12 14 16]\n\n```\n\n
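Since `start` and `step` are optional (defaulting to 0 and 1 per the description), the op should also be drivable from `stop` alone; a sketch following the same pattern (untested, and the single-input form being accepted is an assumption based on the description):

```
op = core.CreateOperator(
    "Range",
    ["stop"],
    ["output"],
)

workspace.FeedBlob("stop", np.array(5, dtype=np.int32))
workspace.RunOperatorOnce(op)
print("output:", workspace.FetchBlob("output"))  # expected: [0 1 2 3 4]
```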
\n ", + "inputs": [ + { + "description": "(*Tensor*): [OPTIONAL] scalar or 1-element tensor containing the start of the interval (inclusive) (default=0)", + "name": "start" + }, + { + "description": "(*Tensor*): scalar or 1-element tensor containing the end of the interval (exclusive)", + "name": "stop" + }, + { + "description": "(*Tensor*): [OPTIONAL] scalar or 1-element tensor specifying the spacing between values (default=1)", + "name": "step" + } + ], + "outputs": [ + { + "description": "(*Tensor*): 1D tensor of same type as inputs that contains the sequence", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "RangeFill", + "support_level": "default" + }, + { + "name": "ReadNextBatch", + "description": "\nRead the next batch of examples out of the given cursor and data blobs.\n\nInput(0) is a blob pointing to a TreeCursor, and\n[Input(1),... Input(num_fields)] a list of tensors containing the data for\neach field of the dataset.\n\nReadNextBatch is thread safe.\n", + "attributes": [ + { + "description": "Number of top-level entries to read.", + "name": "batch_size", + "option": "optional" + } + ], + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + }, + { + "description": "First dataset field", + "name": "dataset_field_0" + } + ], + "outputs": [ + { + "description": "Tensor containing the next batch for field 0.", + "name": "field_0" + } + ], + "support_level": "default" + }, + { + "name": "ReadRandomBatch", + "description": "\nRead the next batch of examples out of the given cursor,\nidx blob, offset matrix and data blobs.\n\nInput(0) is a blob pointing to a TreeCursor,\nInput(1) is a blob pointing to the shuffled idx\nInput(2) is a blob pointing to the offset matrix and\n[Input(3),... Input(num_fields)] a list of tensors containing the data for\neach field of the dataset.\n\nReadRandomBatch is thread safe.\n", + "attributes": [ + { + "description": "Number of top-level entries to read.", + "name": "batch_size", + "option": "optional" + }, + { + "description": "(bool) Repeat the dataset indefinitely", + "name": "loop_over", + "option": "optional" + } + ], + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + }, + { + "description": "idx with a shuffled order.", + "name": "idx" + }, + { + "description": "offset matrix containing length offset info.", + "name": "offsetsmat" + }, + { + "description": "First dataset field", + "name": "dataset_field_0" + } + ], + "outputs": [ + { + "description": "Tensor containing the next batch for field 0.", + "name": "field_0" + } + ], + "support_level": "default" + }, + { + "name": "ReceiveTensor", + "description": "\nReceives the tensor from another node.\n", + "attributes": [ + { + "description": "(int) he rank to receive the tensor from.", + "name": "src", + "option": "optional" + }, + { + "description": "(int) a tag to receive the tensor with.", + "name": "tag", + "option": "optional" + }, + { + "description": "(bool) if set, only send the content and assume that the receiver has already known the tensor's shape and information.", + "name": "raw_buffer", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "In-place output. If raw_buffer is specified, Y should have pre-allocated data and type..", + "name": "Y" + }, + { + "description": "An int CPUtensor of size 1 specifying the rank. 
If given, this overrides the 'from' argument of the op.", + "name": "src" + }, + { + "description": "An int CPU tensor of size 1 specifying the tag to send the tensor with. This overrides the 'tag' argument of the op.", + "name": "tag" + } + ], + "outputs": [ + { + "description": "The received tensor.", + "name": "Y" + }, + { + "description": "The sender that sent the message as a CPUTensor of size 1 and of type int.", + "name": "src" + }, + { + "description": "The tag that the message is sent with as a CPUTensor of size 1 and of type int.", + "name": "tag" + } + ], + "support_level": "default" + }, + { + "name": "Reciprocal", + "description": "\nPerforms element-wise reciprocal ($1/x$) of input tensor $X$.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reciprocal_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Reciprocal\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[8. 3. 3.]\n [4. 0. 0.]\n [1. 2. 5.]]\nY:\n[[0.125 0.3333333 0.3333333 ]\n [0.25 inf inf ]\n [1 0.5 0.2 ]]\n\n```\n\n
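As the result above shows, zeros in $X$ map to `inf`. A quick numpy equivalence check for the run above (a sketch, not part of the operator docs):\n\n```\n\nwith np.errstate(divide=\"ignore\"):  # silence the divide-by-zero warning\n    expected = 1.0 / workspace.FetchBlob(\"X\")\nnp.testing.assert_allclose(workspace.FetchBlob(\"Y\"), expected)\n\n```\n\n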
\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReciprocalGradient", + "support_level": "default" + }, + { + "name": "RecurrentNetwork", + "description": "\nRun the input network in a recurrent fashion. This can be used to\nimplement fairly general recurrent neural networks (RNNs).\n\nThe operator proceeds as follows.\n\n- First, initialized the states from the input recurrent states\n- For each timestep T, apply the links (that map offsets from input/output\ntensors into the inputs/outputs for the `step` network)\n- Finally, alias the recurrent states to the specified output blobs.\n\nThis is a fairly special-case meta-operator, and so the implementation\nis somewhat complex. It trades of generality (and frankly usability)\nagainst performance and control (compared to e.g. TF\ndynamic_rnn, Theano scan, etc).\n\nSee the usage examples for a flavor of how to use it.\n", + "support_level": "default" + }, + { + "name": "RecurrentNetworkBlobFetcher", + "description": "\nRetrieves blobs from scratch workspaces (which contain intermediate recurrent\nnetwork computation for each timestep) and puts them in the global\nworkspace under CPUContext.\n", + "attributes": [ + { + "description": "Prefix string to prepend extracted blobs.", + "name": "prefix", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Name of scratch workspace blob returned by recurrent network.", + "name": "ScratchWorkspaceBlob" + } + ], + "outputs": [ + { + "description": "1D tensor of strings containing extracted blob names.", + "name": "blob_names" + } + ], + "support_level": "default" + }, + { + "name": "RecurrentNetworkGradient", + "support_level": "default" + }, + { + "name": "Reduce", + "description": "\nDoes a reduce operation from every node to the root node. Currently only\nSum is supported.\n", + "attributes": [ + { + "description": "(int, default 0) the root to run reduce into.", + "name": "root", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be reduced.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The reduced result on root, not set for other nodes.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceBackMax", + "description": "\nReduces the input tensor along the last dimension of the by applying **max**.\n\nCan reduce more than one of the \"last\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the max operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_0 * d_1 * d_2 * ... * d_{n-1})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{n-1}$ dimension.\n\nFor example if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1]$, then $Y = [max(1,5), max(4,1,8), max(2)] = [5, 8, 2]$\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_max_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceBackMax\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[2. 5. 1.]\n [6. 1. 9.]\n [8. 5. 9.]]\n\n [[5. 7. 8.]\n [9. 9. 6.]\n [6. 5. 0.]]]]\nY: [[9. 9.]]\n\n```\n\n
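A minimal sketch of the optional `lengths` input, reusing the worked values from the description above (default `num_reduce_dim=1`):\n\n```\n\nop = core.CreateOperator(\n    \"ReduceBackMax\",\n    [\"X\", \"lengths\"],\n    [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.array([[1,5,2,9],[4,1,8,2],[2,7,0,3]]).astype(np.float32))\nworkspace.FeedBlob(\"lengths\", np.array([2,3,1]).astype(np.int32))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))  # expected: [5. 8. 2.]\n\n```\n\n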
\n\n", + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceBackMaxGradient", + "support_level": "default" + }, + { + "name": "ReduceBackMean", + "description": "\nReduces the input tensor along the last dimension of the by applying **mean**.\n\nCan reduce more than one of the \"last\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the mean operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_0 * d_1 * d_2 * ... * d_{n-1})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{n-1}$ dimension.\n\nFor example if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1]$, then $Y = [mean(1,5), mean(4,1,8), mean(2)] = [3, 4.333, 2]$\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_mean_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceBackMean\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[5. 9. 0.]\n [8. 4. 0.]\n [2. 2. 4.]]\n\n [[9. 0. 9.]\n [7. 9. 7.]\n [1. 0. 2.]]]]\nY: [[3.7777777 4.888889 ]]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceBackMeanGradient", + "support_level": "default" + }, + { + "name": "ReduceBackSum", + "description": "\nReduces the input tensor along the last dimension of the by applying **sum**.\n\nCan reduce more than one of the \"last\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the sum operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_0 * d_1 * d_2 * ... * d_{n-1})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{n-1}$ dimension.\n\nFor example if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1]$, then $Y = [sum(1,5), sum(4,1,8), sum(2)] = [6, 13, 2]$\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_sum_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceBackSum\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[2. 7. 7.]\n [1. 1. 0.]\n [9. 7. 2.]]\n\n [[6. 6. 4.]\n [1. 2. 6.]\n [6. 6. 3.]]]]\nY: [[36. 40.]]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceBackSumGradient", + "support_level": "default" + }, + { + "name": "ReduceFrontMax", + "description": "\nReduces the input tensor along the last dimension of the by applying **max**.\n\nCan reduce more than one of the \"first\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the max operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_1 * d_2 * ... * d_{n})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{0}$ dimension.\n\nFor example if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1,2]$, then $Y = [max(1,4), max(5,1,7), max(2), max(9,2)] = [4, 7, 2, 9]$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_max_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceFrontMax\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[2. 8. 1.]\n [9. 6. 6.]\n [7. 7. 0.]]\n\n [[4. 3. 9.]\n [9. 2. 7.]\n [6. 4. 7.]]]\nY: [9. 8. 9.]\n\n```\n\n
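A minimal sketch of the optional `lengths` input for the front reduction, reusing the worked values from the description above (default `num_reduce_dim=1`, so each column is reduced down its first dimension):\n\n```\n\nop = core.CreateOperator(\n    \"ReduceFrontMax\",\n    [\"X\", \"lengths\"],\n    [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.array([[1,5,2,9],[4,1,8,2],[2,7,0,3]]).astype(np.float32))\nworkspace.FeedBlob(\"lengths\", np.array([2,3,1,2]).astype(np.int32))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))  # expected: [4. 7. 2. 9.]\n\n```\n\n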
\n\n", + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceFrontMaxGradient", + "support_level": "default" + }, + { + "name": "ReduceFrontMean", + "description": "\nReduces the input tensor along the last dimension of the by applying **mean**.\n\nCan reduce more than one of the \"first\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the mean operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_1 * d_2 * ... * d_{n})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{0}$ dimension.\n\nFor example if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1,2]$, then $Y = [mean(1,4), mean(5,1,7), mean(2), mean(9,2)] = [2.5, 4.333, 2, 5.5]$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_mean_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceFrontMean\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[5. 0. 9.]\n [4. 1. 1.]\n [9. 0. 8.]]\n\n [[2. 6. 7.]\n [6. 2. 6.]\n [0. 4. 5.]]]\nY: [4.3333335 2.1666667 6.]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceFrontMeanGradient", + "support_level": "default" + }, + { + "name": "ReduceFrontSum", + "description": "\nReduces the input tensor along the last dimension of the by applying **sum**.\n\nCan reduce more than one of the \"first\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the sum operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_1 * d_2 * ... * d_{n})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{0}$ dimension.\n\nFor example, if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1,2]$, then $Y = [sum(1,4), sum(5,1,7), sum(2), sum(9,2)] = [2.5, 4.333, 2, 5.5]$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_sum_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceFrontSum\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[4. 1. 1.]\n [0. 6. 7.]\n [7. 8. 6.]]\n\n [[5. 7. 7.]\n [0. 1. 6.]\n [2. 9. 0.]]]\nY: [18. 32. 27.]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceFrontSumGradient", + "support_level": "default" + }, + { + "name": "ReduceFrontWeightedSum", + "description": "\nReduces the input tensor along the first dimension of the input tensor by\napplying 'WeightedSum'. This op acts in a similar way to SortedSegmentWeightedSum and\nUnsortedSegmentWeightedSum but as if all input slices belong to a single segment.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "attributes": [ + { + "description": "Produce also gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "ReduceFrontWeightedSumGradient", + "support_level": "default" + }, + { + "name": "ReduceL1", + "description": "\nComputes the **L1 norm** of the input tensor's elements along the provided `axes`. The resulting tensor has the same rank as the input if the `keepdims` argument equals 1 (default). If `keepdims` is set to 0, then the `axes` dimensions are pruned.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceL1\",\n [\"X\"],\n [\"Y\"],\n axes=(0,1),\n keepdims=0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,5,5)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[ 2. 7. 6. 4. 5.]\n [ 2. 1. 9. 8. 7.]\n [ 4. 9. 1. 0. 0.]\n [ 6. 4. 0. 8. 1.]\n [ 1. 7. 1. 0. 2.]]\n\n [[ 5. 8. 1. 7. 7.]\n [ 4. 5. 6. 5. 4.]\n [ 1. 9. 6. 6. 3.]\n [ 6. 6. 8. 8. 4.]\n [ 2. 3. 5. 8. 1.]]]]\n\nY:\n[[ 7. 15. 7. 11. 12.]\n [ 6. 6. 15. 13. 11.]\n [ 5. 18. 7. 6. 3.]\n [ 12. 10. 8. 16. 5.]\n [ 3. 10. 6. 8. 3.]]\n\n```\n\n
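For the `axes=(0,1)`, `keepdims=0` case above, the reduction is equivalent to the following numpy expression (a sketch, reusing the blobs fed above):\n\n```\n\nX = workspace.FetchBlob(\"X\")\nexpected = np.abs(X).sum(axis=(0, 1))  # L1 norm along axes 0 and 1\nnp.testing.assert_allclose(workspace.FetchBlob(\"Y\"), expected)\n\n```\n\n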
\n\n\n", + "attributes": [ + { + "description": "(*Tuple(int)*): list of axes to reduce", + "name": "axes", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to keep the reduced dimension(s) (default=1), else set to 0 to not keep the reduced dimension(s)", + "name": "keepdims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceL1Gradient", + "support_level": "default" + }, + { + "name": "ReduceL2", + "description": "\nComputes the **L2 norm** of the input tensor's elements along the provided `axes`. The resulting tensor has the same rank as the input if the `keepdims` argument equals 1 (default). If `keepdims` is set to 0, then the `axes` dimensions are pruned.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceL2\",\n [\"X\"],\n [\"Y\"],\n axes=(0,1),\n keepdims=0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,5,5)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[ 8. 0. 2. 5. 1.]\n [ 1. 3. 0. 4. 0.]\n [ 1. 3. 6. 7. 7.]\n [ 6. 9. 8. 4. 6.]\n [ 6. 1. 5. 7. 3.]]\n\n [[ 2. 4. 6. 2. 8.]\n [ 1. 1. 8. 0. 8.]\n [ 5. 9. 0. 3. 2.]\n [ 1. 7. 3. 7. 3.]\n [ 6. 8. 9. 8. 7.]]]]\n\nY:\n[[ 8.24621105 4. 6.3245554 5.38516474 8.06225777]\n [ 1.41421354 3.1622777 8. 4. 8. ]\n [ 5.09901953 9.48683262 6. 7.6157732 7.28010988]\n [ 6.08276272 11.40175438 8.54400349 8.06225777 6.70820379]\n [ 8.48528099 8.06225777 10.29563046 10.63014603 7.6157732 ]]\n\n```\n\n
\n\n\n", + "attributes": [ + { + "description": "(*Tuple(int)*): list of axes to reduce", + "name": "axes", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to keep the reduced dimension(s) (default=1), else set to 0 to not keep the reduced dimension(s)", + "name": "keepdims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceL2Gradient", + "support_level": "default" + }, + { + "name": "ReduceMax", + "description": "\n Computes the max of the input tensor's element along the provided axes.\n The resulted tensor has the same rank as the input if keepdims equal True.\n If keepdims equal false, then the resulted tensor have the reduced dimension\n pruned.\n", + "attributes": [ + { + "description": "A list of integers, along which to reduce.", + "name": "axes", + "option": "optional" + }, + { + "description": "Keep the reduced dimension(s) or not, default True keeps the reduced dimension(s).", + "name": "keepdims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced" + } + ], + "support_level": "default" + }, + { + "name": "ReduceMaxGradient", + "support_level": "default" + }, + { + "name": "ReduceMean", + "description": "\nComputes the **mean** of the input tensor's elements along the provided `axes`. The resulting tensor has the same rank as the input if the `keepdims` argument equals 1 (default). If `keepdims` is set to 0, then the `axes` dimensions are pruned.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceMean\",\n [\"X\"],\n [\"Y\"],\n axes=(0,1),\n keepdims=0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,5,5)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[9. 0. 3. 6. 0.]\n [3. 4. 5. 0. 9.]\n [6. 9. 1. 1. 5.]\n [6. 2. 3. 7. 7.]\n [3. 1. 1. 0. 1.]]\n\n [[4. 3. 9. 8. 1.]\n [8. 2. 0. 4. 0.]\n [8. 9. 9. 0. 2.]\n [7. 2. 5. 8. 9.]\n [5. 9. 1. 9. 0.]]]]\nY:\n[[6.5 1.5 6. 7. 0.5]\n [5.5 3. 2.5 2. 4.5]\n [7. 9. 5. 0.5 3.5]\n [6.5 2. 4. 7.5 8. ]\n [4. 5. 1. 4.5 0.5]]\n\n```\n\n
\n\n\n", + "attributes": [ + { + "description": "(*Tuple(int)*): list of axes to reduce", + "name": "axes", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to keep the reduced dimension(s) (default=1), else set to 0 to not keep the reduced dimension(s)", + "name": "keepdims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceMeanGradient", + "support_level": "default" + }, + { + "name": "ReduceMin", + "description": "\n Computes the min of the input tensor's element along the provided axes.\n The resulted tensor has the same rank as the input if keepdims equal True.\n If keepdims equal false, then the resulted tensor have the reduced dimension\n pruned.\n", + "attributes": [ + { + "description": "A list of integers, along which to reduce.", + "name": "axes", + "option": "optional" + }, + { + "description": "Keep the reduced dimension(s) or not, default True keeps the reduced dimension(s).", + "name": "keepdims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced" + } + ], + "support_level": "default" + }, + { + "name": "ReduceMinGradient", + "support_level": "default" + }, + { + "name": "ReduceScatter", + "description": "\nDoes reduce-scatter operation among the nodes. Currently only Sum is supported.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be reduce-scattered.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The reduced tensor, scattered on all nodes.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceSum", + "description": "\nComputes the **sum** of the input tensor's elements along the provided `axes`. The resulting tensor has the same rank as the input if the `keepdims` argument equals 1 (default). If `keepdims` is set to 0, then the `axes` dimensions are pruned.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceSum\",\n [\"X\"],\n [\"Y\"],\n axes=(0,1),\n keepdims=0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,5,5)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[5. 3. 7. 9. 5.]\n [4. 5. 1. 8. 3.]\n [1. 0. 9. 7. 6.]\n [7. 5. 0. 3. 1.]\n [6. 4. 4. 8. 3.]]\n\n [[8. 9. 6. 7. 7.]\n [5. 5. 4. 7. 0.]\n [9. 7. 6. 6. 7.]\n [7. 5. 2. 4. 2.]\n [4. 5. 1. 9. 4.]]]]\nY:\n[[13. 12. 13. 16. 12.]\n [ 9. 10. 5. 15. 3.]\n [10. 7. 15. 13. 13.]\n [14. 10. 2. 7. 3.]\n [10. 9. 5. 17. 7.]]\n\n```\n\n
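To see the `keepdims` behavior described above, compare output shapes (a sketch, reusing the 4D input fed above):\n\n```\n\nkeep_op = core.CreateOperator(\n    \"ReduceSum\",\n    [\"X\"],\n    [\"Y_keep\"],\n    axes=(0,1),\n    keepdims=1\n)\n\nworkspace.RunOperatorOnce(keep_op)\nprint(\"keepdims=0 shape:\", workspace.FetchBlob(\"Y\").shape)       # (5, 5)\nprint(\"keepdims=1 shape:\", workspace.FetchBlob(\"Y_keep\").shape)  # (1, 1, 5, 5)\n\n```\n\n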
\n\n", + "attributes": [ + { + "description": "(*Tuple(int)*): list of axes to reduce", + "name": "axes", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to keep the reduced dimension(s) (default=1), else set to 0 to not keep the reduced dimension(s)", + "name": "keepdims", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReduceSumGradient", + "support_level": "default" + }, + { + "name": "ReduceTailSum", + "description": "\nReduce the tailing dimensions\n", + "inputs": [ + { + "description": "The matrix", + "name": "mat" + } + ], + "outputs": [ + { + "description": "Output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Relu", + "category": "Activation", + "description": "\nApplies rectified linear unit operation to the input data element-wise. The Relu operation takes one input $X$, produces one output $Y$, and is defined as:\n\n$$Y = max(0,X)$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/relu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/relu_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Relu\",\n [\"X\"],\n [\"Y\"]\n )\n\nworkspace.FeedBlob(\"X\", np.random.randn(4, 4).astype(np.float32)) # NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-1.4655551 0.64575136 0.7921748 0.4150579 ]\n [ 0.41085166 -0.2837964 0.9881425 -1.9300346 ]\n [ 0.39705405 0.44639114 0.9940703 0.2926532 ]\n [-0.6726489 0.01330667 1.101319 0.33858967]]\n\nY:\n [[0. 0.64575136 0.7921748 0.4150579 ]\n [0.41085166 0. 0.9881425 0. ]\n [0.39705405 0.44639114 0.9940703 0.2926532 ]\n [0. 0.01330667 1.101319 0.33858967]]\n\n```\n\n
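The run above matches the elementwise definition $Y = max(0,X)$ exactly; a one-line numpy check (a sketch, reusing the blobs fed above):\n\n```\n\nnp.testing.assert_allclose(workspace.FetchBlob(\"Y\"), np.maximum(workspace.FetchBlob(\"X\"), 0))\n\n```\n\n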
\n\n\n", + "attributes": [ + { + "name": "cudnn_exhaustive_search", + "type": "boolean", + "visible": false + } + ], + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor with same shape as input", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReluGradient", + "description": "\nReluGradient takes both Y and dY and uses this to update dX according to the\nchain rule and derivatives of the rectified linear function.\n", + "support_level": "default" + }, + { + "name": "ReluN", + "description": "\nRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = min(max(0, x), n),\nis applied to the tensor elementwise.\n", + "attributes": [ + { + "description": "the cap of output", + "name": "n", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D input tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ReluNGradient", + "description": "\nReluGradient takes both Y and dY and uses this to update dX according to the\nchain rule and derivatives of the rectified linear function.\n", + "attributes": [ + { + "description": "the cap of forward op output", + "name": "n", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "RemoveDataBlocks", + "description": "\nShrink the data tensor by removing data blocks with given zero-based indices in\nthe outermost dimension of the tensor. Indices are not assumed in any order or\nunique but with the range [0, blocks_size). Indices could be empty.\n ", + "inputs": [ + { + "description": "a N-D data tensor, N >= 1", + "name": "data" + }, + { + "description": "zero-based indices of blocks to be removed", + "name": "indices" + } + ], + "outputs": [ + { + "description": "data after removing data blocks indexed by 'indices'", + "name": "shrunk data" + } + ], + "support_level": "default" + }, + { + "name": "RemovePadding", + "description": "\nRemove padding around the edges of each segment of the input data. This is the\nreverse operation of **AddPadding**, and uses the same arguments and conventions\nfor input and output data format.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sequence_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\naddpad_op = core.CreateOperator(\n \"AddPadding\",\n [\"X\", \"lengths_add\"],\n [\"Y\", \"lengths_out_add\"],\n padding_width=1\n)\n\nrmpad_op = core.CreateOperator(\n \"RemovePadding\",\n [\"Y\", \"lengths_rm\"],\n [\"Z\", \"lengths_out_rm\"],\n padding_width=1\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(20, size=(3,5))))\nworkspace.FeedBlob(\"lengths_add\", np.array([3]).astype(np.int32))\nworkspace.FeedBlob(\"lengths_rm\", np.array([5]).astype(np.int32))\n\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(addpad_op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"lengths_out_add:\", workspace.FetchBlob(\"lengths_out_add\"))\n\nworkspace.RunOperatorOnce(rmpad_op)\nprint(\"Z:\", workspace.FetchBlob(\"Z\"))\nprint(\"lengths_out_rm:\", workspace.FetchBlob(\"lengths_out_rm\"))\n```\n\n**Result**\n\n```\nX: [[17 19 1 9 1]\n [19 3 5 19 1]\n [16 0 0 0 4]]\nY: [[ 0 0 0 0 0]\n [17 19 1 9 1]\n [19 3 5 19 1]\n [16 0 0 0 4]\n [ 0 0 0 0 0]]\nlengths_out_add: [5]\nZ: [[17 19 1 9 1]\n [19 3 5 19 1]\n [16 0 0 0 4]]\nlengths_out_rm: [3]\n```\n\n
\n\n", + "attributes": [ + { + "description": "Outer-size of padding to remove around each range.", + "name": "padding_width", + "option": "optional", + "type": "int64" + }, + { + "description": "[OPTIONAL] Specifies a different end-padding width. If this is not set, will use same as `padding_width`.", + "name": "end_padding_width", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "Input tensor ($T$).", + "name": "data_in" + }, + { + "description": "*(type: Tensor``)* Number of elements in each range. sum(lengths) = N. If not provided, considers all data as a single segment.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Padded data tensor ($T$).", + "name": "data_out" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Lengths for each padded range.", + "name": "lengths_out" + } + ], + "support_level": "default" + }, + { + "name": "ReplaceNaN", + "description": "\nReplace the NaN (not a number) element in the input tensor with argument `value`\n", + "attributes": [ + { + "description": "the value to replace NaN, the default is 0", + "name": "value (optional)", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input" + }, + { + "description": "Output tensor", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "ReservoirSampling", + "description": "\nCollect `DATA` tensor into `RESERVOIR` of size `num_to_collect`. `DATA` is\nassumed to be a batch.\n\nIn case where 'objects' may be repeated in data and you only want at most one\ninstance of each 'object' in the reservoir, `OBJECT_ID` can be given for\ndeduplication. If `OBJECT_ID` is given, then you also need to supply additional\nbook-keeping tensors. See input blob documentation for details.\n\nThis operator is thread-safe.\n", + "attributes": [ + { + "description": "The number of random samples to append for each positive samples", + "name": "num_to_collect", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The reservoir; should be initialized to empty tensor", + "name": "RESERVOIR" + }, + { + "description": "Number of examples seen so far; should be initialized to 0", + "name": "NUM_VISITED" + }, + { + "description": "Tensor to collect from. The first dimension is assumed to be batch size. If the object to be collected is represented by multiple tensors, use `PackRecords` to pack them into single tensor.", + "name": "DATA" + }, + { + "description": "Mutex to prevent data race", + "name": "MUTEX" + }, + { + "description": "(Optional, int64) If provided, used for deduplicating object in the reservoir", + "name": "OBJECT_ID" + }, + { + "description": "(Optional) Auxiliary bookkeeping map. 
This should be created from `CreateMap` with keys of type int64 and values of type int32", + "name": "OBJECT_TO_POS_MAP_IN" + }, + { + "description": "(Optional) Tensor of type int64 used for bookkeeping in deduplication", + "name": "POS_TO_OBJECT_IN" + } + ], + "outputs": [ + { + "description": "Same as the input", + "name": "RESERVOIR" + }, + { + "description": "Same as the input", + "name": "NUM_VISITED" + }, + { + "description": "(Optional) Same as the input", + "name": "OBJECT_TO_POS_MAP" + }, + { + "description": "(Optional) Same as the input", + "name": "POS_TO_OBJECT" + } + ], + "support_level": "default" + }, + { + "name": "ResetCounter", + "description": "\nResets a count-down counter with initial value specified by the `init_count`\nargument.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n# Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n# Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n# Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n# Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n# Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr<caffe2::Counter<long long>, std::__1::default_delete<caffe2::Counter<long long> > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
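The snippet above defines `resetcounter_op` but never runs it; a short continuation (a sketch) showing the reset itself. `ResetCounter` returns the count held before the reset and sets the counter back to `init_count=3`:\n\n```\n\nworkspace.RunOperatorOnce(resetcounter_op)\nprint(\"'previous_count':\", workspace.FetchBlob(\"previous_count\"))  # expected: -1, where the countdown left off\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' after reset:\", workspace.FetchBlob(\"count\"))  # expected: 3\n\n```\n\n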
\n\n", + "attributes": [ + { + "default": 0, + "description": "Resets counter to this value, must be >= 0.", + "name": "init_count", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.", + "name": "counter" + } + ], + "outputs": [ + { + "description": "*(type: int)* [OPTIONAL] count value BEFORE this operation.", + "name": "previous_value" + } + ], + "support_level": "default" + }, + { + "name": "ResetCursor", + "description": "\nResets the offsets for the given TreeCursor. This operation is thread safe.\n", + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + } + ], + "support_level": "default" + }, + { + "name": "Reshape", + "category": "Shape", + "description": "\nReshape the input tensor similar to numpy's\n[reshape](https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html).\n\nTakes a tensor as input and an optional tensor specifying the new shape. When\nthe second input is absent, an extra argument shape must be specified. Outputs\nthe reshaped tensor as well as the original shape.\n\nAt most one dimension of the new shape can be -1. In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is going to be copied\nfrom the input tensor.\n\nFor empty tensor, we will set the -1 dimension to be 0 (if one dimension is -1).\nWhen the tensor is empty, dimension of 0 will remain to be 0.\nE.g: data=np.empty(shape=[4, 0]), shape=[0, -1], the output tensor will be\nnp.emtpy(shape=[0, 0])\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reshape_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Reshape\",\n [\"data\"],\n [\"reshaped\", \"old_shape\"],\n shape=(3,2)\n)\n\nworkspace.FeedBlob(\"data\", (np.random.randint(100, size=(6))))\nprint(\"data:\", workspace.FetchBlob(\"data\"))\nworkspace.RunOperatorOnce(op)\nprint(\"reshaped:\", workspace.FetchBlob(\"reshaped\"))\nprint(\"old_shape:\", workspace.FetchBlob(\"old_shape\"))\n```\n\n**Result**\n\n```\ndata: [86 60 85 96 7 37]\nreshaped: [[86 60]\n [85 96]\n [ 7 37]]\nold_shape: [6]\n```\n\n
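A short sketch of the `-1` inference described above, reusing the 6-element `data` blob (so `-1` resolves to 2):\n\n```\n\ninfer_op = core.CreateOperator(\n    \"Reshape\",\n    [\"data\"],\n    [\"reshaped2\", \"old_shape2\"],\n    shape=(3,-1)\n)\n\nworkspace.RunOperatorOnce(infer_op)\nprint(\"reshaped2 shape:\", workspace.FetchBlob(\"reshaped2\").shape)  # expected: (3, 2)\n\n```\n\n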
\n\n", + "attributes": [ + { + "description": "New shape. Do not set if using `new_shape` input.", + "name": "shape", + "option": "optional" + } + ], + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor.", + "name": "data" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Tensor containing new shape.", + "name": "new_shape" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Reshaped output tensor.", + "name": "reshaped" + }, + { + "description": "*(type: Tensor``)* Tensor containing old shape of `data`.", + "name": "old_shape" + } + ], + "support_level": "default" + }, + { + "name": "ResizeLike", + "description": "\nProduces tensor containing data of first input and shape of second input.\n", + "inputs": [ + { + "description": "Tensor whose data will be copied into the output.", + "name": "data" + }, + { + "description": "Tensor whose shape will be applied to output.", + "name": "shape_tensor" + } + ], + "outputs": [ + { + "description": "Tensor with data of input 0 and shape of input 1.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "ResizeNearest", + "description": "\nResizes the spatial dimensions of the input using nearest neighbor\ninterpolation. The `width_scale` and `height_scale` arguments\ncontrol the size of the output, which is given by:\noutput_width = floor(input_width * width_scale)\noutput_height = floor(output_height * height_scale)\n", + "attributes": [ + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X" + }, + { + "description": "1D, 2-element, Scales tensor, [height_scale, width_scale]", + "name": "scales" + } + ], + "outputs": [ + { + "description": "Output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ResizeNearest3D", + "description": "\nResizes the spatial dimensions of the input tensor using nearest neighbor\ninterpolation. 
The `width_scale` and `height_scale` arguments\ncontrol the size of the output, which is given by:\noutput_width = floor(input_width * width_scale)\noutput_height = floor(input_height * height_scale)\nAssumptions:\n - Only resize height and width\n - Both width_scale and height_scale are 2\n", + "attributes": [ + { + "description": "Scale along temporal dimension", + "name": "temporal_scale", + "option": "optional" + }, + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ResizeNearest3DGradient", + "attributes": [ + { + "description": "Scale along temporal dimension", + "name": "temporal_scale", + "option": "optional" + }, + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "ResizeNearestGradient", + "attributes": [ + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "RetrieveCount", + "description": "\nRetrieve the current value from the counter as an integer.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n# Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n# Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n# Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n# Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n# Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr<caffe2::Counter<long long>, std::__1::default_delete<caffe2::Counter<long long> > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.", + "name": "counter" + } + ], + "outputs": [ + { + "description": "*(type: int)* Current count value.", + "name": "count" + } + ], + "support_level": "default" + }, + { + "name": "ReversePackedSegs", + "description": "\nReverse segments in a 3-D tensor (lengths, segments, embeddings,), leaving\npaddings unchanged. This operator is used to reverse input of a recurrent neural\nnetwork to make it a BRNN.\n ", + "inputs": [ + { + "description": "a 3-D (lengths, segments, embeddings,) tensor.", + "name": "data" + }, + { + "description": "length of each segment.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "a (lengths, segments, embeddings,) tensor with each segment reversedand paddings unchanged.", + "name": "reversed data" + } + ], + "support_level": "default" + }, + { + "name": "RMACRegions", + "description": "\nComputes a fixed-grid of RMAC region coordinates at various levels\nas described in https://arxiv.org/abs/1511.05879.\n", + "attributes": [ + { + "description": "Number of scales to sample regions at.", + "name": "scales", + "option": "optional" + }, + { + "description": "Overlap between consecutive regions.", + "name": "overlap", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The input 4D tensor of shape NCHW.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The output RMAC regions for all items in the batch. Tensor of shape (N x 5) following the ROIPoolOp format - each row is of the format (batch_index x1 y1 x2 y2) where x1, y1, x2, y2 are the region co-ordinates. Each region is repeated N times corresponding to each item in the batch.", + "name": "RMAC_REGIONS" + } + ], + "support_level": "default" + }, + { + "name": "RMSNorm", + "attributes": [ + { + "description": "(int) default to 1; Describes axis of the inputs. Defaults to one because the 0th axis most likely describes the batch size", + "name": "axis", + "option": "optional" + }, + { + "description": "(float) default to 0.001. Small value to be added to the stdev when dividing out by that value. 
This prevents division by zero.", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor which layer normalization will be applied to", + "name": "input" + }, + { + "description": "scale tensor for elementwise_affine, the shape should be the same as the dimensions of X beginning from axis", + "name": "gamma" + }, + { + "description": "bias tensor for elementwise_affine, the shape should be the same as the dimensions of X beginning from axis", + "name": "beta" + } + ], + "outputs": [ + { + "description": "Normalized values", + "name": "output" + }, + { + "description": "Reciprocal of root mean square for each feature vector", + "name": "rrms" + } + ], + "support_level": "default" + }, + { + "name": "RMSNormGradient", + "support_level": "default" + }, + { + "name": "RmsProp", + "description": "\nComputes the RMSProp update\n(http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf).\nConcretely, given inputs (grad, mean_squares, mom, lr), computes:\n\n mean_squares_o = mean_squares + (1 - decay) * (square(grad) - mean_squares)\n mom_o = momentum * mom + lr * grad / sqrt(epsilon + mean_squares_o)\n grad_o = mom_o\n\nReturns (grad_o, mean_squares_o, mom_o).\n", + "support_level": "default" + }, + { + "name": "rnn_internal_accumulate_gradient_input", + "description": "\nInternal RNN operator.\n", + "support_level": "default" + }, + { + "name": "rnn_internal_apply_link", + "description": "\nInternal RNN operator.\n", + "support_level": "default" + }, + { + "name": "RoIAlign", + "description": "\nRegion of Interest (RoI) align operation as used in Mask R-CNN.\n", + "attributes": [ + { + "description": "(float) default 1.0; Spatial scale of the input feature map X relative to the input image. E.g., 0.0625 if X has a stride of 16 w.r.t. the input image.", + "name": "spatial_scale", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's height.", + "name": "pooled_h", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's width.", + "name": "pooled_w", + "option": "optional" + }, + { + "description": "(int) default -1; number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If <= 0, then an adaptive number of grid points are used (computed as ceil(roi_width / pooled_w), and likewise for height).", + "name": "sampling_ratio", + "option": "optional" + } + ], + "inputs": [ + { + "description": "4D feature map input of shape (N, C, H, W).", + "name": "X" + }, + { + "description": "2D input of shape (R, 4 or 5) specifying R RoIs representing: batch index in [0, N - 1], x1, y1, x2, y2. The RoI coordinates are in the coordinate system of the input image. For inputs corresponding to a single image, batch index can be excluded to have just 4 columns.", + "name": "RoIs" + } + ], + "outputs": [ + { + "description": "4D output of shape (R, C, pooled_h, pooled_w). 
The r-th batch element is a pooled feature map corresponding to the r-th RoI.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "RoIAlignGradient", + "inputs": [ + { + "description": "See RoIPoolF.", + "name": "X" + }, + { + "description": "See RoIPoolF.", + "name": "RoIs" + }, + { + "description": "Gradient of forward output 0 (Y)", + "name": "dY" + } + ], + "outputs": [ + { + "description": "Gradient of forward input 0 (X)", + "name": "dX" + } + ], + "support_level": "default" + }, + { + "name": "RoIAlignRotated", + "description": "\nSimilar to RoIAlign but can handle rotated region proposals.\nBased on https://arxiv.org/abs/1703.01086.\n", + "attributes": [ + { + "description": "(float) default 1.0; Spatial scale of the input feature map X relative to the input image. E.g., 0.0625 if X has a stride of 16 w.r.t. the input image.", + "name": "spatial_scale", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's height.", + "name": "pooled_h", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's width.", + "name": "pooled_w", + "option": "optional" + }, + { + "description": "(int) default -1; number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If <= 0, then an adaptive number of grid points are used (computed as ceil(roi_width / pooled_w), and likewise for height).", + "name": "sampling_ratio", + "option": "optional" + } + ], + "inputs": [ + { + "description": "4D feature map input of shape (N, C, H, W).", + "name": "X" + }, + { + "description": "2D input of shape (R, 5 or 6) specifying R RoIs representing: batch index in [0, N - 1], center_x, center_y, width, height, angle. The RoI coordinates are in the coordinate system of the input image. `angle` should be specified in degrees and represents the RoI rotated counter-clockwise. For inputs corresponding to a single image, batch index can be excluded to have just 5 columns.", + "name": "RoIs" + } + ], + "outputs": [ + { + "description": "4D output of shape (R, C, pooled_h, pooled_w). The r-th batch element is a pooled feature map corresponding to the r-th RoI.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "RoIPool", + "description": "\nCarries out ROI Pooling for Faster-RCNN.\nDepending on the mode, there are multiple output cases:\n\n Output case #1: Y, argmaxes (train mode)\n Output case #2: Y (test mode)\n", + "attributes": [ + { + "description": "If set, run in test mode and skip computation of argmaxes (used for gradient computation). Only one output tensor is produced. (Default: false).", + "name": "is_test", + "option": "optional" + }, + { + "description": "A StorageOrder string (Default: \"NCHW\").", + "name": "order", + "option": "optional" + }, + { + "description": "The pooled output height (Default: 1).", + "name": "pooled_h", + "option": "optional" + }, + { + "description": "The pooled output width (Default: 1).", + "name": "pooled_w", + "option": "optional" + }, + { + "description": "Multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling (Default: 1.0).", + "name": "spatial_scale", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The input 4-D tensor of data. Only NCHW order is currently supported.", + "name": "X" + }, + { + "description": "RoIs (Regions of Interest) to pool over. 
Should be a 2-D tensor of shape (num_rois, 5) given as [[batch_id, x1, y1, x2, y2], ...].", + "name": "rois" + } + ], + "outputs": [ + { + "description": "RoI pooled output 4-D tensor of shape (num_rois, channels, pooled_h, pooled_w).", + "name": "Y" + }, + { + "description": "Argmaxes corresponding to indices in X used for gradient computation. Only output if arg \"is_test\" is false.", + "name": "argmaxes" + } + ], + "support_level": "default" + }, + { + "name": "RoIPoolGradient", + "support_level": "default" + }, + { + "name": "RowMul", + "description": "\nGiven a matrix A and column vector w, the output is the multiplication of row i\nof A and element i of w, e.g. C[i][j] = A[i][j] * w[i]. This operator should be\ndeprecated when the gradient operator of Mul with broadcast is implemented.\n", + "inputs": [ + { + "description": "The matrix", + "name": "mat" + }, + { + "description": "The column vector", + "name": "w" + } + ], + "outputs": [ + { + "description": "Output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Rowwise8BitQuantizedToFloat", + "description": "\nGiven a uint8 tensor, quantized using 8-bit row-wise\nquantization, and auxiliary scales and biases, this operator\nrestores the float tensor in the following way. We take the input 8-bit tensor\nof size (m_1, m_2, ..., m_n), n >= 2, and reshape it into a matrix of size\n(m_1, m_2 x ... x m_n). We compute element r_{ij} of the output matrix as\nq_{ij} * s_i + b_i, where q_{ij} is the corresponding element of the quantized\ninput, and after this we reshape the output matrix into an\noutput tensor of size (m_1, m_2, ..., m_n).\n", + "inputs": [ + { + "description": "quantized_input", + "name": "quantized_input" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i -- scale and bias for i-th row", + "name": "scale_bias" + } + ], + "outputs": [ + { + "name": null + }, + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "RowWiseCounter", + "description": "\n Count the number of recent updates on rows. Exponential decay is\n applied on the counter with decay rate r, such that\n r^{counter_halflife} = 0.5; If counter_halflife is nonpositive,\n this operator is turned off.\n", + "attributes": [ + { + "description": "Default -1: off", + "name": "counter_halflife", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Iter at last update", + "name": "prev_iter" + }, + { + "description": "update counter", + "name": "update_counter" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "current iteration", + "name": "iter" + } + ], + "outputs": [ + { + "description": "Updated iter at last update", + "name": "output_prev_iter" + }, + { + "description": "Output update counter", + "name": "output_update_counter" + } + ], + "support_level": "default" + }, + { + "name": "RowwiseMax", + "description": "\nCompute row-wise max reduction of the input tensor. This op takes one input, $X$, of shape $BxMxN$, where $B$ is the batch size, $M$ is number of rows, and $N$ is number of columns. The output of this op, $Y$, is a matrix of shape $BxM$, with one row for each element of the batch, and the same number of columns as the number of rows of the input tensor.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"RowwiseMax\",\n [\"X\"],\n [\"Y\"]\n)\n\n# Create X, simulating a batch of 2, 4x4 matrices\nX = np.random.randint(0,high=20,size=(2,4,4))\nprint(\"X:\\n\",X)\n\n# Feed X into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\n\n# Run op\nworkspace.RunOperatorOnce(op)\n\n# Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[[ 5 12 10 1]\n [ 4 16 2 15]\n [ 5 11 12 15]\n [15 4 17 19]]\n\n [[16 5 5 13]\n [17 2 1 17]\n [18 3 19 5]\n [14 16 10 16]]]\nY:\n [[12. 16. 15. 19.]\n [16. 17. 19. 16.]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "A tensor of dimensions $B x M x N$ to compute rowwise-max. Here, $B$ is batch size, and $M$ and $N$ are the number of rows and columns of each element of the batch, respectively.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The output tensor of shape $B x M$, where each row represents the row-wise maximums for that element of the input batch.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "RowwiseMaxGradient", + "support_level": "default" + }, + { + "name": "RowWiseSparseAdagrad", + "description": "\n\nGiven inputs (param, moment, indices, grad, lr), runs a modified sparse Adagrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_momwnr), where moment is a 1D tensor with length equal to the number of\nrows in param: shape(moment) == shape(param)[0]. Each element of moment is\napplied to an entire row of param, and the new moment is calculated by adding\nthe average squared sum of gradients across each row. Note that indices must\nalso be a 1D tensor indexing into the rows of param.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + }, + { + "name": "weight_decay", + "description": "Default 0", + "option": "optional" + }, + { + "name": "counter_halflife", + "description": "Optional arg when weight_decay is adjusted by frequency (default -1)", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "name": "counter", + "description": "Optional input when weight_decay is adjusted by frequency ignored when counter_halflife == -1" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment_1" + } + ], + "support_level": "default" + }, + { + "name": "RowWiseSparseAdagradFusedWithSparseLengthsMeanGradient", + "description": "\n\nFused operator of\nSparseLengthsIndicesInGradientMeanGradient (gradient of SparseLengthsMean) +\nRowWiseSparseAdagrad.\n\nGiven inputs (param, moment, indices, grad, lr), runs the row-wise sparse\nAdaGrad update on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case. 
An additional input (lengths) is for the fused\nSparseLengthsMeanGradient operator.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non-negative vector with sum of elements equal to indices length", + "name": "lengths" + }, + { + "name": "counter", + "description": "Optional input when weight_decay is adjusted by frequency; ignored when counter_halflife == -1" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + } + ], + "support_level": "default" + }, + { + "name": "RowWiseSparseAdagradFusedWithSparseLengthsMeanGradientApprox", + "description": "\n\nFused operator of\nSparseLengthsIndicesInGradientMeanGradient (gradient of SparseLengthsMean) +\nRowWiseSparseAdagrad.\n\nGiven inputs (param, moment, indices, grad, lr), runs the row-wise sparse\nAdaGrad update on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case. An additional input (lengths) is for the fused\nSparseLengthsMeanGradient operator.\n\n", + "attributes": [ + { + "description": "rounding option: 0 for nearest rounding, 1 for stochastic rounding", + "name": "round_option", + "option": "optional" + }, + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non-negative vector with sum of elements equal to indices length", + "name": "lengths" + }, + { + "name": "counter", + "description": "Optional input when weight_decay is adjusted by frequency; ignored when counter_halflife == -1" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + } + ], + "support_level": "default" + }, + { + "name": "RowWiseSparseAdagradFusedWithSparseLengthsSumGradient", + "description": "\n\nFused operator of\nSparseLengthsIndicesInGradientSumGradient (gradient of SparseLengthsSum) +\nRowWiseSparseAdagrad.\n\nGiven inputs (param, moment, indices, grad, lr), runs the row-wise sparse\nAdaGrad update on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case. 
An additional input (lengths) is for the fused\nSparseLengthsSumGradient operator.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + }, + { + "description": "rounding option: 0 for nearest rounding, 1 for stochastic rounding", + "name": "round_option", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non-negative vector with sum of elements equal to indices length", + "name": "lengths" + }, + { + "name": "counter", + "description": "Optional input when weight_decay is adjusted by frequency; ignored when counter_halflife == -1" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + } + ], + "support_level": "default" + }, + { + "name": "RowWiseSparseAdagradFusedWithSparseLengthsSumGradientApprox", + "description": "\n\nFused operator of\nSparseLengthsIndicesInGradientSumGradient (gradient of SparseLengthsSum) +\nRowWiseSparseAdagrad.\n\nGiven inputs (param, moment, indices, grad, lr), runs the row-wise sparse\nAdaGrad update on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case. An additional input (lengths) is for the fused\nSparseLengthsSumGradient operator.\n\n", + "attributes": [ + { + "description": "rounding option: 0 for nearest rounding, 1 for stochastic rounding", + "name": "round_option", + "option": "optional" + }, + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non-negative vector with sum of elements equal to indices length", + "name": "lengths" + }, + { + "name": "counter", + "description": "Optional input when weight_decay is adjusted by frequency; ignored when counter_halflife == -1" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + } + ], + "support_level": "default" + }, + { + "name": "RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradient", + "description": "\n\nFused operator of SparseLengthsIndicesInGradientWeightedSumWithMainInputGradient\n(gradient of SparseLengthsWeightedSum) + RowWiseSparseAdagrad, where weights are\npositional weights computed with the LengthsRangeFill + Gather pattern.\n\nGiven inputs (param, moment, indices, grad, lr), runs the row-wise sparse\nAdaGrad update on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case.\nThere are auxiliary inputs (aux_param) for which the gradient is computed and\nreturned (aux_grad).\nYet another input (lengths) is for the fused SparseLengthsWeightedSumGradient\noperator.\n\n", + 
"attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Auxiliary parameters to be updated", + "name": "aux_param" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + }, + { + "name": "counter", + "description": "Optional input when weight_decay is adjusted by frequency ignored when counter_halflife == -1" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + }, + { + "description": "Auxiliary gradient", + "name": "aux_grad" + } + ], + "support_level": "default" + }, + { + "name": "RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox", + "description": "\n\nApproximately fused operator of\nSparseLengthsIndicesInGradientWeightedSumWithMainInputGradient\n(gradient of SparseLengthsWeightedSum) + RowWiseSparseAdagrad, where weights are\npositional weights computed with LengthsRangeFill + Gather pattern.\n\nGiven inputs (param, moment, indices, grad, lr), runs the row-wise sparse\nAdaGrad update on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case.\nThere's race condition w.r.t. ordering between reading params and writing to\nparam, hence the name Approx.\nThere're auxiliary inputs (aux_param) for which gradient is computed\nand returns (aux_grad).\nYet additional input (lengths) is for fused SparseLengthsWeightedSumGradient\noperator.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Auxiliary parameters to be updated", + "name": "aux_param" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + }, + { + "name": "counter", + "description": "Optional input when weight_decay is adjusted by frequency ignored when counter_halflife == -1" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + }, + { + "description": "Auxiliary gradient", + "name": "aux_grad" + } + ], + "support_level": "default" + }, + { + "name": "RowWiseSparseAdam", + "description": "\n\n Computes a modified Adam Update for the sparse case.\n Given inputs (param, moment1, moment2, indices, grad, lr, iter), runs the\n Adam update on (param, moment1[indices], moment2[indices], lr, iter) and returns\n (new_param, new_moment1, new_moment2), where moment2 is a 1D tensor\n with length equal to the number of rows in param:\n shape(moment2) == 
shape(param)[0]. Each element of moment2 is\n applied to an entire row of param, and the new moment2 values are\n calculated by averaging across the row.\n\n ", + "attributes": [ + { + "description": "Default 0.9", + "name": "beta1", + "option": "optional" + }, + { + "description": "Default 0.999", + "name": "beta2", + "option": "optional" + }, + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "First moment history", + "name": "moment_1" + }, + { + "description": "Second moment history", + "name": "moment_2" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "iteration number", + "name": "iter" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated first moment", + "name": "output_moment_1" + }, + { + "description": "Updated second moment", + "name": "output_moment_2" + }, + { + "description": "Optional effective gradient", + "name": "output_grad" + } + ], + "support_level": "default" + }, + { + "name": "Rsqrt", + "description": "Computes the element-wise rsqrt of the input.", + "inputs": [ + { + "description": "ND input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "ND output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "RsqrtGradient", + "support_level": "default" + }, + { + "name": "SafeDequeueBlobs", + "description": "\nDequeue the blobs from the queue. When the queue is closed and empty, the output\nstatus will be set to true, which can be used as an exit criterion for an execution\nstep.\nThe 1st input is the queue and the last output is the status. The rest are\ndata blobs.\n", + "attributes": [ + { + "description": "(default 1) If > 1, multiple records will be dequeued and tensors for each column will be concatenated. This requires all tensors in the records to be at least 1D, and to have the same inner dimensions.", + "name": "num_records", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The shared pointer for the BlobsQueue", + "name": "queue" + } + ], + "outputs": [ + { + "description": "The blob to store the dequeued data", + "name": "blob" + }, + { + "description": "Is set to 0/1 depending on the success of dequeue", + "name": "status" + } + ], + "support_level": "default" + }, + { + "name": "SafeEnqueueBlobs", + "description": "\nEnqueue the blobs into the queue. When the queue is closed and full, the output\nstatus will be set to true, which can be used as an exit criterion for an execution\nstep.\nThe 1st input is the queue and the last output is the status. The rest are\ndata blobs.\n", + "inputs": [ + { + "description": "The shared pointer for the BlobsQueue", + "name": "queue" + } + ], + "support_level": "default" + }, + { + "name": "Save", + "description": "\nSaves a set of blobs to a db. It takes $[1, \\infty)$ inputs and has\nno output. The contents of the inputs are written into the db using the\nsettings specified by the arguments.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/load_save_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Save\",\n [\"X\", \"Y\", \"Z\"],\n [],\n db=\"test_db2\",\n db_type=\"leveldb\",\n blob_name_overrides=[\"x_scores\", \"y_scores\", \"z_scores\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(20, size=(5,5)))\nworkspace.FeedBlob(\"Y\", np.random.randint(20, size=(5,5)))\nworkspace.FeedBlob(\"Z\", np.random.randint(20, size=(5,5)))\nworkspace.RunOperatorOnce(op)\n\n```\n\n
\n\n", + "attributes": [ + { + "default": 0, + "description": "If set to non-zero, save the db directly to the path specified by the `db` arg. If not set (default), prepend the path of the current root folder of the workspace to the path specified by the `db` arg.", + "name": "absolute_path", + "option": "optional", + "type": "int64" + }, + { + "default": "", + "description": "Characters in the provided blob names that match `strip_prefix` will be removed prior to saving. Also, characters that precede `strip_prefix` will be removed. Useful for removing device scope from blob names.", + "name": "strip_prefix", + "option": "optional", + "type": "string" + }, + { + "description": "If set, used as blob names instead of original blob names. Must be same length as number of blobs.", + "name": "blob_name_overrides", + "option": "optional", + "type": "string[]" + }, + { + "description": "The output path of the db. See the `absolute_path` arg details for options regarding the current root folder of the workspace.", + "name": "db", + "option": "optional", + "type": "string" + }, + { + "description": "Type of db to save (options: \"lmdb\", \"leveldb\", \"minidb\").", + "name": "db_type", + "option": "optional", + "type": "string" + }, + { + "default": "kDefaultChunkSize", + "description": "The chunk size to split tensor data into. If not set, caffe2_tensor_chunk_size will be used", + "name": "chunk_size", + "option": "optional", + "type": "string" + } + ], + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor(s).", + "name": "X" + } + ], + "support_level": "default" + }, + { + "name": "Scale", + "description": "\nScale takes one input data (Tensor) and produces one output data\n(Tensor) whose value is the input data tensor scaled element-wise.\n", + "attributes": [ + { + "description": "(float, default 1.0) the scale to apply.", + "name": "scale", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "ScaleBlobs", + "description": "\nScaleBlobs takes one or more input data (Tensor) and produces one\nor more output data (Tensor) whose value is the input data tensor\nscaled element-wise.\n", + "attributes": [ + { + "description": "(float, default 1.0) the scale to apply.", + "name": "scale", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "Scatter", + "description": "\nUpdate values of the tensor by overriding current value specified by indices.\n\nWrites all values from the tensor UPDATES into DATA at the indices specified in the INDICES tensor.\nFor each value in DATA, its output index is specified by its index in UPDATES and by the corresponding value in INDICES for the specified axis.\n\nFor a 3-D tensor, DATA is updated as:\n\nDATA[INDICES[i][j][k]][j][k] = UPDATES[i][j][k] # if axis == 0\nDATA[i][INDICES[i][j][k]][k] = UPDATES[i][j][k] # if axis == 1\nDATA[i][j][INDICES[i][j][k]] = UPDATES[i][j][k] # if axis == 2\n\nCurrently only works on CPU because of access to INDICES.\n", + "attributes": [ + { + "default": 1, + "description": "Which dimension to scatter on.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "Tensor to be updated.", + "name": "DATA" + }, + { + "description": "1-D list of indices on the first dimensionof X_0 that need to be updated", + "name": "INDICES" + }, + { + "description": "Update slices, with shape len(INDICES) + shape(X_0)[1:]", + "name": "UPDATES" + } + ], + "outputs": [ + { + "description": "The updated output.", + "name": "OUTPUT" + } + ], + 
"support_level": "default" + }, + { + "name": "ScatterAssign", + "description": "\nUpdate slices of the tensor in-place by overriding current value.\n\nNote: The op pretty much ignores the exact shapes of the input arguments and\ncares only about sizes. It's done for performance consideration to avoid\nunnecessary reshapes. Only first dimension of X_0 is important, let's call it\nN. If M is the total size of X_0 and K is the size of INDICES then X_i is\nassumed to be of shape K x (M / N) regardless of the real shape.\n\nNote: Each update in INDICES is applied independently which means that if\nduplicated elements are present in INDICES arbitrary one will win.\n\nCurrently only works on CPU because of access to INDICES.\n", + "inputs": [ + { + "description": "Tensor to be updated.", + "name": "DATA" + }, + { + "description": "1-D list of indices on the first dimensionof X_0 that need to be updated", + "name": "INDICES" + }, + { + "description": "Update slices, with shape len(INDICES) + shape(X_0)[1:]", + "name": "SLICES" + } + ], + "outputs": [ + { + "description": "Has to be exactly the same tensor as the input 0", + "name": "DATA" + } + ], + "support_level": "default" + }, + { + "name": "ScatterWeightedSum", + "description": "\nSimilar to WeightedSum, computes the weighted sum of several tensors, with\nthe difference that inputs are sliced tensors. The first tensor has to be\nin-place and only slices of it on the first dimension as indexed by INDICES\nwill be updated.\n\nNote: The op pretty much ignores the exact shapes of the input arguments and\ncares only about sizes. It's done for performance consideration to avoid\nunnecessary reshapes. Only first dimension of X_0 is important, let's call it\nN. If M is the total size of X_0 and K is the size of INDICES then X_i is\nassumed to be of shape K x (M / N) regardless of the real shape.\n\nNote: Each update in INDICES is applied independently which means that if\nduplicated elements are present in INDICES the corresponding slice of X_0\nwill be scaled multiple times. Manual collapsing of INDICES is required\nbeforehand if necessary.\n\nNote: Updates are applied sequentially by inputs which might have undesired\nconsequences if the input tensor is accessed concurrently by different op\n(e.g. when doing Hogwild). Other threads might see intermediate results even\non individual slice level, e.g. X_0 scaled by weight_0 but without any\nupdates applied.\n\nCurrently only works on CPU because of access to INDICES.\n", + "inputs": [ + { + "description": "Tensor to be updated.", + "name": "X_0" + }, + { + "description": "Scalar weight for X_0, applied only to slices affected.", + "name": "Weight_0" + }, + { + "description": "1-D list of indices on the first dimension of X_0 that need to be updated", + "name": "INDICES" + }, + { + "description": "Update slices, with shape len(INDICES) + shape(X_0)[1:]", + "name": "X_1" + }, + { + "description": "Scalar weight for X_1 update", + "name": "Weight_1" + } + ], + "outputs": [ + { + "description": "Has to be exactly the same tensor as the input 0", + "name": "X_0" + } + ], + "support_level": "default" + }, + { + "name": "SegmentIdsToLengths", + "description": "\nTransfers a vector of segment ids to a vector of segment lengths. This operation\nsupports non-consecutive segment ids. Segments not appearing in the input vector\nwill have length 0. If the second input is provided, the number of segments =\nthe size of its first dimension. 
Otherwise, the number of segments = the last\nindex in the first input vector + 1.\n\nIn general, for consecutive, zero-based segment IDs, this is the inverse\noperation of LengthsToSegmentIds, except that a vector of segment IDs\ncannot represent empty segments at the end (if the second input is absent).\n", + "inputs": [ + { + "description": "1-D int32_t or int64_t tensor of segment ids", + "name": "segment_ids" + }, + { + "description": "if provided, number of segments = the size of its first dimension", + "name": "data (optional)" + } + ], + "outputs": [ + { + "description": "1-D int64_t tensor of segment lengths", + "name": "lengths" + } + ], + "support_level": "default" + }, + { + "name": "SegmentIdsToRanges", + "description": "\nTransfers a vector of segment ids to a vector of segment ranges. This operation\nsupports non-consecutive segment ids. Segments not appearing in the input vector\nwill have length 0. If the second input is provided, the number of segments =\nthe size of its first dimension. Otherwise, the number of segments = the last\nindex in the first input vector + 1.\n", + "inputs": [ + { + "description": "1-D int32_t or int64_t tensor of segment ids", + "name": "segment_ids" + }, + { + "description": "if provided, number of segments = the size of its first dimension", + "name": "data (optional)" + } + ], + "outputs": [ + { + "description": "1-D int64_t tensor of segment lengths", + "name": "lengths" + } + ], + "support_level": "default" + }, + { + "name": "SegmentOneHot", + "description": "\nGiven a sequence of indices, segmented by the lengths tensor, returns a matrix\nthat has the elements in each sequence set to 1.0, and 0.0 everywhere else.\n", + "inputs": [ + { + "description": "Size of each segment.", + "name": "lengths" + }, + { + "description": "Active indices, of size sum(lengths)", + "name": "indices" + }, + { + "description": "Size of the index", + "name": "index_size_tensor" + } + ], + "outputs": [ + { + "description": "Matrix of size len(lengths) x index_size", + "name": "one_hots" + } + ], + "support_level": "default" + }, + { + "name": "SelfBinningHistogram", + "description": "\n Computes a histogram for values in the given list of tensors.\n For logging activation histograms for post-hoc analyses, consider using the\n HistogramObserver observer.\n For iteratively computing a histogram for all input tensors encountered through\n history, consider using the AccumulateHistogram operator.\n ", + "attributes": [ + { + "description": "Number of bins to use for the histogram. Must be >= 1.", + "name": "num_bins", + "option": "optional" + }, + { + "description": "A string indicating 'linear' or 'logarithmic' spacing for the bins.", + "name": "bin_spacing", + "option": "optional" + }, + { + "description": "A float that's used as the starting point for logarithmic spacing. Since logarithmic spacing cannot contain <=0 values this value will be used to represent all such values.", + "name": "logspace_start", + "option": "optional" + }, + { + "description": "Apply abs() on every input value.", + "name": "abs", + "option": "optional" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* List of input tensors.", + "name": "X1, X2, ..." + } + ], + "outputs": [ + { + "description": "1D tensor of edges of the bins, of dimension [num_bins+1]. 
The range appears as: [first, ..., last), wherein the i-th element expresses the start of a bin and the (i+1)-th value represents the exclusive end of that bin.", + "name": "histogram_values" + }, + { + "description": "1D tensor of counts of each bin, of dimension [num_bins+1]. It is guaranteed to end with a 0 since the last edge is exclusive.", + "name": "histogram_counts" + } + ], + "support_level": "default" + }, + { + "name": "Selu", + "description": "\n\nThe *Selu* op takes one input tensor $X$, an argument $alpha$, an argument $scale$, and produces one output tensor $Y$ of the same shape as $X.$ The op performs the element-wise *Selu* operation, defined as\n\n$$y = selu(x) = \\begin{cases}scale \\cdot (\\alpha e^{x} - \\alpha) & x < 0\\\\scale \\cdot x & otherwise\\end{cases}$$\n\nThe default value of *alpha* is 1.6732632423543772848170429916717 and the default value of *scale* is 1.0507009873554804934193349852946. See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) for more information.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/selu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/selu_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Selu\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 1.1613879 -0.27111396 -1.2076733 ]\n [ 1.3442237 -1.0701777 1.2070968 ]\n [ 0.23810555 0.9740916 -1.7872391 ]]\n\nY:\n [[ 1.2202715 -0.4174965 -1.2326177 ]\n [ 1.4123772 -1.1551634 1.2682979 ]\n [ 0.25017774 1.023479 -1.4637551 ]]\n\n```\n\n
\n\n", + "attributes": [ + { + "default": 1.673263, + "description": "Alpha constant in equation.", + "name": "alpha", + "option": "optional", + "type": "float32" + }, + { + "default": 1.0507, + "description": "Scale constant in equation.", + "name": "scale", + "option": "optional", + "type": "float32" + } + ], + "inputs": [ + { + "description": "Input tensor of data to be operated on.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output tensor with same shape as input.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "SeluGradient", + "description": "\nSeluGradient takes both Y and dY and uses this to update dX according to the\nchain rule and derivatives of the selu function.\n", + "attributes": [ + { + "description": "(float) default to 1.6732~; affects the activation function itself.This should go with the weight initialization in the paper. See https://arxiv.org/abs/1706.02515", + "name": "alpha", + "option": "optional" + }, + { + "description": "(float) default to 1.0507~; affects the activation function itself.", + "name": "scale", + "option": "optional" + } + ], + "inputs": [ + { + "description": "input tensor", + "name": "Y" + }, + { + "description": "input tensor", + "name": "dY" + } + ], + "support_level": "default" + }, + { + "name": "SendTensor", + "description": "\nSends the tensor to another node.\n", + "attributes": [ + { + "description": "The rank to send the tensor to.", + "name": "dst", + "option": "optional" + }, + { + "description": "(int) a tag to send the tensor with.", + "name": "tag", + "option": "optional" + }, + { + "description": "(bool) if set, only send the content and assume that the receiver has already known the tensor's shape and information.", + "name": "raw_buffer", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be allgathered.", + "name": "X" + }, + { + "description": "An int CPUtensor of size 1 specifying the rank. If given, this overrides the 'to' argument of the op.", + "name": "dst" + }, + { + "description": "An int CPUtensor of size 1 specifying the tag to send the tensor with. This overrides the 'tag' argument of the op.", + "name": "tag" + } + ], + "support_level": "default" + }, + { + "name": "SequenceMask", + "description": "\nMask op designed for use in attention mechanisms for sequence modeling tasks.\nSupports batching: given batch_dim, collapses dims 0 through batch_dim into a\nsingle dimension, e.g. if tensor dims are [4,2,1,3,4] and batch_dim=2, first\ncollapse tensor to [4*2*1,3,4], then mask each batch [i,:,:].\n\n\nTwo current operating modes:\n\n\n1) Given a 2D input tensor and 1D tensor of sequence lengths, for each row i in\nthe input tensor, set elements in that row to -inf if their column index\nj >= sequence_lengths[i]. This mode takes two inputs and argument mode =\n'sequence'\n\n\n2) Triangular mask. Given row index i and column index j, set elements to -inf\ngiven the following conditions:\n\n mode='upper', x_ij = -inf if j < i\n mode='lower', x_ij = -inf if j > i\n mode='upperdiag', x_ij = -inf if j <= i\n mode='lowerdiag', x_ij = -inf if j >= i\n\nThis mode takes one input.\n\n\n3) Window Mask. 
Given a 2D input tensor and 1D tensor of window centers,\nfor each row i in the input tensor, set elements in that row to -inf\nif their column index j is outside [center - radius, center + radius].\nThis mode takes two inputs and argument mode = 'sequence'.\nArgument 'radius' should be provided.\n", + "attributes": [ + { + "description": "(string) Mode selection. Possible values: 'sequence', 'upper', 'lower', 'upperdiag', 'lowerdiag'", + "name": "mode", + "option": "optional" + }, + { + "description": "(int) Beginning axis of row elements. All dimensions to the left will be treated as row indices and those to the right (inclusive) will be treated as column indices in the 2D mask", + "name": "axis", + "option": "optional" + }, + { + "description": "(bool) operate in gradient mode", + "name": "grad", + "option": "optional" + }, + { + "description": "(int) radius of windows in window mode", + "name": "radius", + "option": "optional" + }, + { + "description": "(int) batch dimension of tensor (optional)", + "name": "batch", + "option": "optional" + }, + { + "description": "(int) used when mask should be repeated for one or more data dimensions (beginning at this axis). (currently only supported for sequence mode without batch argument)", + "name": "repeat_from_axis", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Tensor to apply masking to", + "name": "input" + }, + { + "description": "1D Tensor of sequence lengths for mode #1", + "name": "sequence_lengths" + } + ], + "outputs": [ + { + "description": "Input tensor with masking applied", + "name": "masked_tensor" + } + ], + "support_level": "default" + }, + { + "name": "Shape", + "description": "\nProduce a 1D int64 tensor with the shape of the input tensor.\nIf called with an optional argument `axes`, the result will only\ncontain the dimensions of the specified axes.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/shape_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Shape\",\n [\"X\"],\n [\"shape\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(2,3))))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"shape:\", workspace.FetchBlob(\"shape\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[3 2 5]\n [5 7 3]]\nshape: [2 3]\n\n```\n\n
\n\n ", + "attributes": [ + { + "description": "Array of interested axes.If given, this operator only returns the dimensions of the given axes.Otherwise, the operator returns the dimensions of all axes.", + "name": "axes", + "option": "optional", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Output tensor containing shape of input tensor.", + "name": "shape" + } + ], + "support_level": "default" + }, + { + "name": "Sigmoid", + "category": "Activation", + "description": "\nApply the Sigmoid function element-wise to the input tensor. This is often used\nas a non-linear activation function in a neural network. The sigmoid function is\ndefined as:\n\n$$Sigmoid(x) = \\frac{1}{1+\\exp(-x)}$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sigmoid_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sigmoid\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(5).astype(np.float32))\nprint(\"input:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"sigmoid:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\ninput: [ 1.5744036 0.31632107 1.7842269 1.4450722 -2.1726978 ]\nsigmoid: [0.8284105 0.57842743 0.85621804 0.80923885 0.10222916]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "SigmoidCrossEntropyWithLogits", + "description": "\nGiven two matrices logits and targets, of same shape,\n(batch_size, num_classes), computes the sigmoid cross entropy between the two.\nReturns a tensor of shape (batch_size,) of losses for each example.\n", + "attributes": [ + { + "description": "default is false; if enabled, will use the log d trick to avoid the vanishing\ngradients early on; see Goodfellow et. al (2014)", + "name": "log_D_trick", + "option": "optional" + }, + { + "description": "default is false; if enabled, the model will be allowed to train on an unjoined\ndataset, where some examples might be false negative and might appear\nin the dataset later as (true) positive example.", + "name": "unjoined_lr_loss", + "option": "optional" + } + ], + "inputs": [ + { + "description": "matrix of logits for each example and class.", + "name": "logits" + }, + { + "description": "matrix of targets, same shape as logits.", + "name": "targets" + } + ], + "outputs": [ + { + "description": "Vector with the total xentropy for each example.", + "name": "xentropy" + } + ], + "support_level": "default" + }, + { + "name": "SigmoidCrossEntropyWithLogitsGradient", + "support_level": "default" + }, + { + "name": "SigmoidGradient", + "description": "\nSigmoidGradient takes both Y and dY and uses this to update dX according to the\nchain rule and derivatives of the sigmoid function.\n", + "support_level": "default" + }, + { + "name": "Sign", + "description": "\nComputes sign for each element of the input: -1, 0 or 1.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n\"Sign\",\n[\"X\"],\n[\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3, 3).astype(np.float32) - np.random.rand(3, 3).astype(np.float32)))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[ 0.02816287 0.22408086 -0.30342305]\n[-0.18481976 0.03948995 0.39698976]\n[-0.63304734 -0.6919183 -0.31524038]]\nY:\n[[ 1. 1. -1.]\n[-1. 1. 1.]\n[-1. -1. -1.]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Sin", + "description": "\nCalculates the sine of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sin_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sin\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [0.8466114 0.1803606 0.5601509 0.04959291 0.64770824]\nY: [0.74903965 0.17938434 0.5313141 0.04957259 0.60336035]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor calculated as the sine of the input tensor, element-wise.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "SinGradient", + "support_level": "default" + }, + { + "name": "Sinh", + "description": "\nCalculates the hyperbolic sine of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sinh_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sinh\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [0.98907769 0.52907848 0.03216429 0.94983935 0.47881418]\nY: [1.15841695 0.5541099 0.03216984 1.09924557 0.49732079]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The hyperbolic sine values of the input tensor, computed element-wise", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SinhGradient", + "support_level": "default" + }, + { + "name": "SinusoidPositionEncoding", + "description": "\nCalculates a sinusoid position encoding tensor as described\nin https://arxiv.org/abs/1706.03762. Takes a 2-D tensor\n(of size M x K) of positions as input, the embedding size\nas an argument, and outputs a position encoding tensor of\nsize (M x K x embedding_size). Here M is typically the max\nsequence length and K is typically the batch size.\nThe input tensor must satisfy input[m, 0] == input[m, k] for all k.\n\nEncoded as amplitude * SIN(pos/alpha^(i/embedding_size)) if i is even,\nelse amplitude * COS(pos/alpha^(i/embedding_size)). Here, pos is the position,\nalpha and amplitude are tuning parameters, i is the current dimension for\nthe embedding, and embedding_size is the number of total dimensions in\nthe embedding.\n", + "attributes": [ + { + "description": "Desired embedding size/number of dimensions -- defaults to 100", + "name": "embedding_size", + "option": "optional" + }, + { + "description": "Sinusoid tuning parameter -- defaults to 10000", + "name": "alpha", + "option": "optional" + }, + { + "description": "Amplitude of Sin/Cos output", + "name": "amplitude", + "option": "optional" + } + ], + "inputs": [ + { + "description": "2-D tensor of positions to be encoded", + "name": "positions" + } + ], + "outputs": [ + { + "description": "3-D tensor representing the positional encoding", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Size", + "description": "\nReturn a 1D tensor of type *int64* that contains the number of elements of the input tensor.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Size\",\n [\"X\"],\n [\"size\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(3,3))))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"size:\", workspace.FetchBlob(\"size\"))\n\nworkspace.ResetWorkspace()\n\nworkspace.FeedBlob(\"X\", (np.random.rand(6,4)))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"size:\", workspace.FetchBlob(\"size\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[3 7 0]\n [0 1 6]\n [5 0 8]]\nsize: 9\nX:\n[[0.92017884 0.32115368 0.68692035 0.64135016]\n [0.8723328 0.77830265 0.80688656 0.25524236]\n [0.37970216 0.76407047 0.85689564 0.30692883]\n [0.69352573 0.42531502 0.16415212 0.59209324]\n [0.52684188 0.37094846 0.60670079 0.6489272 ]\n [0.94715906 0.34800557 0.61898769 0.28947359]]\nsize: 24\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor to calculate number of elements.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* 1D tensor of type int64 that contains the number of elements in the input tensor *X*.", + "name": "size" + } + ], + "support_level": "default" + }, + { + "name": "Slice", + "category": "Tensor", + "description": "\nProduces a slice of the input tensor.\n\n- Currently, only slicing in a single dimension is supported.\n\n- Start and end indices are either passed as two 1D input tensors or using the `starts` and `ends` arguments.\n\n- If a negative value is passed for any of the start or end indices, it represents |value| - 1 elements before the end of that dimension. End indices are non-inclusive unless negative (end index -1 means up to and including the last element).\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/slice_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Slice\",\n [\"X\"],\n [\"Y\"],\n starts=(0,1),\n ends=(-1,3)\n)\n\nworkspace.FeedBlob(\"X\", np.array([[1,2,3,4],[5,6,7,8]]))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[1 2 3 4]\n [5 6 7 8]]\nY:\n[[2 3]\n [6 7]]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "(*Tuple(int)*): list of starting indices", + "name": "starts", + "option": "optional" + }, + { + "description": "(*Tuple(int)*): list of ending indices", + "name": "ends", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor*): tensor to extract slices from", + "name": "X" + }, + { + "description": "(*Tensor``*): 1D tensor of start-indices for each dimension of data (dimensions following the sliced one might be omitted)", + "name": "starts" + }, + { + "description": "(*Tensor``*): 1D tensor of end-indices for each dimension of data (dimensions following the sliced one might be omitted)", + "name": "ends" + } + ], + "outputs": [ + { + "description": "(*Tensor*): sliced output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "SliceGradient", + "support_level": "default" + }, + { + "name": "Snapshot", + "support_level": "default" + }, + { + "name": "Softmax", + "category": "Activation", + "description": "\n\nApplies the Softmax function to an n-dimensional input Tensor rescaling them so\nthat the elements of the n-dimensional output Tensor lie in the range (0,1) and\nsum to 1. The softmax operator is typically the last layer in a classifier network,\nas its output can be interpreted as confidence probabilities of an input belonging\nto each class. The input is a 2-D tensor (Tensor) of size (batch_size x\ninput_feature_dimensions). The output tensor has the same shape and contains the\nsoftmax normalized values of the corresponding input. The softmax function is\ndefined as follows:\n\n$$softmax(x_i) = \\frac{\\exp(x_i)}{\\sum_{j} \\exp(x_j)}$$\n\nThe input does not need to explicitly be a 2D vector; rather, it will be coerced\ninto one. For an arbitrary n-dimensional tensor `X` in\n$[a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}]$, where k is the `axis` provided,\nthen `X` will be coerced into a 2-dimensional tensor with dimensions\n$[(a_0 * ... * a_{k-1}), (a_k * ... * a_{n-1})]$. For the default case where\n`axis`=1, the `X` tensor will be coerced into a 2D tensor of dimensions\n$[a_0, (a_1 * ... * a_{n-1})]$, where $a_0$ is often the batch size. In this\nsituation, we must have $a_0 = N$ and $a_1 * ... * a_{n-1} = D$. Each of these\ndimensions must be matched correctly, or else the operator will throw errors.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softmax_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softmax_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Softmax\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 5).astype(np.float32))\nprint(\"input:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"softmax:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\ninput: [[ 0.0417839 0.61960053 -0.23150268 -0.64389366 -3.0000346 ]]\nsoftmax: [[0.24422921 0.43525138 0.18582782 0.12303016 0.01166145]]\n\n```\n\n
\n\n\n\n", + "attributes": [ + { + "default": 1, + "description": "Axis of the inputs when coerced to 2D matrix.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input" + }, + { + "description": "*(type: Tensor``)* Input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The softmax normalized output values with the same shape as input tensor.", + "name": "output" + }, + { + "description": "*(type: Tensor``)* The softmax normalized output tensor with the same shape as input tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "SoftmaxGradient", + "support_level": "default" + }, + { + "name": "SoftmaxWithLoss", + "description": "\nCombined Softmax and Cross-Entropy loss operator. The operator first computes the softmax normalized values for each layer in the batch of the given input, then computes cross-entropy loss. This operator is numerically more stable than separate `Softmax` and `CrossEntropy` ops. The inputs are a 2-D tensor `logits` of size (batch_size x input_feature_dimensions), which represents the unscaled log probabilities, and a 1-dimensional integer `labels` tensor for ground truth. An optional third input blob (`weight_tensor`) can be used to weight the samples for the loss, which is useful if the training set is unbalanced. This operator outputs a `softmax` tensor which contains the probability for each label for each example (same shape is `logits` input), and a scalar `loss` value, which is the averaged cross-entropy loss between the softmax probabilities and the ground truth values. Use parameter `label_prob`=1 to enable inputting labels as a probability distribution.\n\nSoftmax cross-entropy loss function:\n\n$$loss(x, class) = -\\log{\\biggl(\\frac{\\exp(x[class])}{\\sum_{j} \\exp(x[j])}\\biggr)} = -x[class] + \\log{\\biggl(\\sum_{j} \\exp(x[j])\\biggr)}$$\n\nor if the `weight_tensor` has been passed:\n\n$$loss(x, class) = weight[class]\\biggl(-x[class] + \\log{\\biggl(\\sum_{j} \\exp(x[j])\\biggr)}\\biggr)$$\n\nThe `logits` input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor `X` in $[a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}]$, where k is the `axis` provided, then `X` will be coerced into a 2-dimensional tensor with dimensions $[(a_0 * ... * a_{k-1}), (a_k * ... * a_{n-1})]$. For the default case where `axis`=1, the `X` tensor will be coerced into a 2D tensor of dimensions $[a_0, (a_1 * ... * a_{n-1})]$, where $a_0$ is often the batch size. In this situation, we must have $a_0 = N$ and $a_1 * ... * a_{n-1} = D$. Each of these dimensions must be matched correctly, or else the operator will throw errors.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softmax_with_loss_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"SoftmaxWithLoss\",\n [\"logits\", \"labels\"],\n [\"softmax\", \"avgloss\"]\n)\n\nworkspace.FeedBlob(\"logits\", np.random.randn(1, 5).astype(np.float32))\nworkspace.FeedBlob(\"labels\", np.asarray([4]).astype(np.int32))\nprint(\"logits:\", workspace.FetchBlob(\"logits\"))\nprint(\"labels:\", workspace.FetchBlob(\"labels\"))\nworkspace.RunOperatorOnce(op)\nprint(\"softmax:\", workspace.FetchBlob(\"softmax\"))\nprint(\"avgloss:\", workspace.FetchBlob(\"avgloss\"))\n\n```\n\n**Result**\n\n```\n\nlogits: [[-0.3429451 -0.80375195 0.23104447 1.4569176 -0.5268362 ]]\nlabels: [4]\nsoftmax: [[0.09721052 0.0613179 0.17258129 0.58800864 0.0808817 ]]\navgloss: 2.5147676\n\n```\n\n
\n\n
\n\n Example 2 \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"SoftmaxWithLoss\",\n [\"logits\", \"labels\"],\n [\"softmax\", \"avgloss\"],\n scale=5.0\n)\n\nworkspace.FeedBlob(\"logits\", np.asarray([[.1, .4, .7, 1.5, .2]]).astype(np.float32))\nworkspace.FeedBlob(\"labels\", np.asarray([4]).astype(np.int32))\nprint(\"logits:\", workspace.FetchBlob(\"logits\"))\nprint(\"labels:\", workspace.FetchBlob(\"labels\"))\nworkspace.RunOperatorOnce(op)\nprint(\"softmax:\", workspace.FetchBlob(\"softmax\"))\nprint(\"avgloss:\", workspace.FetchBlob(\"avgloss\"))\n\n```\n\n**Result**\n\n```\n\nlogits: [[0.1 0.4 0.7 1.5 0.2]]\nlabels: [4]\nsoftmax: [[0.10715417 0.144643 0.19524762 0.4345316 0.11842369]]\navgloss: 10.667433\n\n```\n\n
\n\n", + "attributes": [ + { + "default": 0, + "description": "Setting to 1 enables inputting labels as probability distribution.", + "name": "label_prob", + "option": "optional", + "type": "int64" + }, + { + "default": 1, + "description": "Axis of the inputs when coerced to 2D.", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "description": "Average loss output scaling factor (must be >= 0).", + "name": "scale", + "option": "optional", + "type": "float32" + }, + { + "default": "'NCHW'", + "description": "Order of blob dimensions (only 'NCHW' is supported currently).", + "name": "order", + "option": "optional", + "type": "string" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "logits" + }, + { + "description": "*(type: Tensor``)* Ground truth label tensor.", + "name": "labels" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Blob used to weight the samples for the loss.", + "name": "weight_tensor" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Softmax output tensor.", + "name": "softmax" + }, + { + "description": "*(type: float)* Averaged cross-entropy loss output.", + "name": "loss" + } + ], + "support_level": "default" + }, + { + "name": "SoftmaxWithLossGradient", + "support_level": "default" + }, + { + "name": "Softplus", + "description": "\nSoftplus takes one input data tensor $X$ and produces one output data tensor $Y,$ where the softplus function, $y = ln(e^x + 1)$, is applied to $X$ elementwise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softplus_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softplus_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Softplus\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-0.5380011 0.65190786 0.55673236]\n [-0.16272168 0.5451048 0.30880353]\n [-0.76606876 -0.6238556 -0.40444514]]\n\nY:\n [[0.4598992 1.0713093 1.0097669 ]\n [0.61509246 1.0023911 0.8594219 ]\n [0.38174385 0.42909983 0.5112337 ]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input data blob to be operated on.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data blob with same shape as input.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "SoftplusGradient", + "support_level": "default" + }, + { + "name": "Softsign", + "description": "\n*Softsign* takes one input data tensor $X$ and produces one output data $Y,$ where the softsign function, $y = \\frac{x}{1+ |x|}$, is applied to $X$ elementwise. This operation can be done in an in-place fashion too, by providing the same input and output blobs.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softsign_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Softsign\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-1.3060539 0.7242748 -1.9907674 ]\n [-0.64802396 -0.03244735 0.7455406 ]\n [-0.298492 -0.5774271 2.8364444 ]]\n\nY:\n [[-0.5663588 0.420046 -0.6656376 ]\n [-0.39321268 -0.03142761 0.4271116 ]\n [-0.2298759 -0.36605626 0.739342 ]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input data blob to be operated on.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output data blob with same shape as input", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SoftsignGradient", + "description": "\nCalculates the softsign gradient (sgn(x)/(1+|x|)^2) of the given input tensor\nelement-wise.\n", + "inputs": [ + { + "description": "1-D input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The softsign gradient (sgn(x)/(1+|x|)^2) values of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SortAndShuffle", + "description": "\nCompute the sorted indices given a field index to sort by and break the sorted\nindices into chunks of shuffle_size * batch_size and shuffle each chunk,\nfinally we shuffle between batches. If sort_by_field_idx is -1 we skip sort.\n\nFor example, we have data sorted as\n1,2,3,4,5,6,7,8,9,10,11,12\n\nand batchSize = 2 and shuffleSize = 3, when we shuffle we get:\n[3,1,4,6,5,2] [12,10,11,8,9,7]\n\nAfter this we will shuffle among different batches with size 2\n[3,1],[4,6],[5,2],[12,10],[11,8],[9,7]\n\nWe may end up with something like\n[9,7],[5,2],[12,10],[4,6],[3,1],[11,8]\n\nInput(0) is a blob pointing to a TreeCursor, and\n[Input(1),... Input(num_fields)] a list of tensors containing the data for\neach field of the dataset.\n\nSortAndShuffle is thread safe.\n", + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + }, + { + "description": "First dataset field", + "name": "dataset_field_0" + } + ], + "outputs": [ + { + "description": "Tensor containing sorted indices.", + "name": "indices" + } + ], + "support_level": "default" + }, + { + "name": "SortedSegmentMean", + "description": "\nApplies 'Mean' to each segment of input tensor. Segments need to be sorted and\ncontiguous. See also UnsortedSegmentMean that doesn't have this requirement.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nMean computes the element-wise mean of the input slices. Operation doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SortedSegmentMeanGradient", + "support_level": "default" + }, + { + "name": "SortedSegmentRangeLogMeanExp", + "description": "\nApplies 'LogMeanExp' to each segment of input tensor. In order to allow for more\nefficient implementation of 'LogMeanExp', the input segments have to be contiguous\nand non-empty.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). 
Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nLogMeanExp computes the element-wise log of the mean of exponentials of input slices. Operation doesn't change the shape of individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor to be aggregated", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor with the first dimension of K and the other dimensions inherited from DATA", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SortedSegmentRangeLogMeanExpGradient", + "support_level": "default" + }, + { + "name": "SortedSegmentRangeLogSumExp", + "description": "\nApplies 'LogSumExp' to each segment of input tensor. In order to allow for more\nefficient implementation of 'LogSumExp', the input segments have to be contiguous\nand non-empty.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nLogSumExp computes the element-wise log of the sum of exponentials of input slices. Operation doesn't change the shape of individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor to be aggregated", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor with the first dimension of K and the other dimensions inherited from DATA", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SortedSegmentRangeLogSumExpGradient", + "support_level": "default" + }, + { + "name": "SortedSegmentRangeMax", + "description": "\nApplies 'Max' to each segment of input tensor. In order to allow for more\nefficient implementation of 'Max', the input segments have to be contiguous\nand non-empty.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nMax computation is done element-wise, so that each element of the output slice corresponds to the max value of the respective elements in the input slices. Operation doesn't change the shape of individual blocks. This implementation imitates torch nn.Max operator. If the maximum value occurs more than once, the operator will return the first occurrence of the value.
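As a sketch of the SortedSegmentRangeLogSumExp semantics (contiguous SEGMENT_IDS, K = SEGMENT_IDS[-1]+1 output rows), here is a numerically stable NumPy version; the names and shapes are illustrative, not the operator's implementation:

```

import numpy as np

data = np.random.randn(6, 4).astype(np.float32)
segment_ids = np.array([0, 0, 1, 1, 1, 2])  # sorted, contiguous, non-empty
k = segment_ids[-1] + 1

out = np.empty((k,) + data.shape[1:], dtype=data.dtype)
for s in range(k):
    block = data[segment_ids == s]
    m = block.max(axis=0)                    # shift by the max for stability
    out[s] = m + np.log(np.exp(block - m).sum(axis=0))
print(out.shape)                             # (3, 4)

```

SortedSegmentRangeLogMeanExp is the same computation with `.mean(axis=0)` in place of `.sum(axis=0)`.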
When computing the gradient using backward propagation, the gradient input corresponding to the first occurrence of the maximum value will be used.\n ", + "inputs": [ + { + "description": "Input tensor to be aggregated", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor with the first dimension of K and the other dimensions inherited from DATA", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SortedSegmentRangeMaxGradient", + "support_level": "default" + }, + { + "name": "SortedSegmentRangeMean", + "description": "\nApplies 'Mean' to each segment of input tensor. In order to allow for more\nefficient implementation of 'Mean', the input segments have to be contiguous\nand non-empty.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nMean computation is done element-wise, so that each element of the output slice corresponds to the average value of the respective elements in the input slices. Operation doesn't change the shape of individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor to be aggregated", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor with the first dimension of K and the other dimensions inherited from DATA", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SortedSegmentRangeMeanGradient", + "support_level": "default" + }, + { + "name": "SortedSegmentRangeSum", + "description": "\nApplies 'Sum' to each segment of input tensor. In order to allow for more\nefficient implementation of 'Sum', the input segments have to be contiguous\nand non-empty.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor to be aggregated", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor with the first dimension of K and the other dimensions inherited from DATA", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SortedSegmentRangeSumGradient", + "support_level": "default" + }, + { + "name": "SortedSegmentSum", + "description": "\nApplies 'Sum' to each segment of input tensor.
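Because the Range variants require sorted, contiguous, non-empty segments, their Sum and Mean reductions can be sketched with `np.add.reduceat` over the segment start offsets. An illustrative sketch, not the operator's implementation:

```

import numpy as np

data = np.arange(12, dtype=np.float32).reshape(6, 2)
segment_ids = np.array([0, 0, 1, 1, 1, 2])          # sorted and contiguous

# Offsets where each segment begins along the first dimension.
starts = np.concatenate(([0], np.flatnonzero(np.diff(segment_ids)) + 1))
seg_sum = np.add.reduceat(data, starts, axis=0)     # SortedSegmentRangeSum
lengths = np.diff(np.append(starts, len(data)))
seg_mean = seg_sum / lengths[:, None]               # SortedSegmentRangeMean

```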
Segments need to be sorted and\ncontiguous. See also UnsortedSegmentSum that doesn't have this requirement.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SortedSegmentSumGradient", + "support_level": "default" + }, + { + "name": "SortedSegmentWeightedSum", + "description": "\nApplies 'WeightedSum' to each segment of input tensor. Segments need to be sorted and\ncontiguous. See also UnsortedSegmentWeightedSum that doesn't have this requirement.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "attributes": [ + { + "description": "Also produce the gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SortedSegmentWeightedSumGradient", + "support_level": "default" + }, + { + "name": "SpaceToBatch", + "description": "\nZero-pads and then rearranges (permutes) blocks of spatial data into batch. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the batch dimension. After zero-padding according to the `pad` argument, both the height and width of the input must be divisible by `block_size`. Only \"NCHW\" order is currently supported.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/space_batch_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"SpaceToBatch\",\n [\"X\"],\n [\"Y\"],\n pad=2,\n block_size=3\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(1,3,5,5).astype(np.float32))\nprint(\"X.shape:\", workspace.FetchBlob(\"X\").shape)\nworkspace.RunOperatorOnce(op)\nprint(\"Y.shape:\", workspace.FetchBlob(\"Y\").shape)\n\n```\n\n**Result**\n\n```\n\nX.shape: (1, 3, 5, 5)\nY.shape: (9, 3, 3, 3)\n\n```\n\n
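The shapes in the example follow directly from the definition: the input is zero-padded by `pad` on each side of H and W, then each `block_size` x `block_size` spatial block is moved into the batch dimension. A quick sanity check of that arithmetic (assuming symmetric padding, as the example implies):

```

N, C, H, W = 1, 3, 5, 5
pad, block_size = 2, 3

H_out = (H + 2 * pad) // block_size    # (5 + 4) / 3 = 3
W_out = (W + 2 * pad) // block_size    # 3
N_out = N * block_size ** 2            # 1 * 9 = 9
print((N_out, C, H_out, W_out))        # (9, 3, 3, 3)

```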
\n\n", + "attributes": [ + { + "description": "(*int*): exclusive axis that divides the first and second dimension of matrix `A` (default=0)", + "name": "pad", + "option": "optional" + }, + { + "description": "(*int*): height/width of spatial blocks to be moved (default=2)", + "name": "block_size", + "option": "optional" + }, + { + "description": "(*string*): order of dimensions of input and output blobs; only \"NCHW\" order is currently supported (default=\"NCHW\")", + "name": "order", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): input tensor (NCHW order)", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): output tensor (NCHW order)", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "SparseAdadelta", + "description": "\n\nGiven inputs (param, moment, moment_delta, indices, grad, lr),\nruns the dense AdaDelta update on (param, grad, moment[indices],\n moment_delta[indices], lr), and returns (new_param, new_moment,\n new_moment_delta) as in the dense case.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + }, + { + "description": "Default 0.95, the squared gradient sum is decayed by this factor.", + "name": "decay", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Average of squared gradients", + "name": "moment" + }, + { + "description": "Average of squared parameter updates", + "name": "moment_delta" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated average squared gradient", + "name": "output_moment" + }, + { + "description": "Updated average of squared parameter updates", + "name": "output_moment_delta" + } + ], + "support_level": "default" + }, + { + "name": "SparseAdagrad", + "description": "\n\nGiven inputs (param, moment, indices, grad, lr), runs the dense AdaGrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment_1" + } + ], + "support_level": "default" + }, + { + "name": "SparseAdagradFusedWithSparseLengthsMeanGradient", + "description": "\n\nFused operator of\nSparseLengthsIndicesInGradientMeanGradient (gradient of SparseLengthsMean) +\nSparseAdagrad.\n\nGiven inputs (param, moment, indices, grad, lr), runs the sparse AdaGrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case. 
Additional input (lengths) is for fused\nSparseLengthsIndicesInGradientMeanGradient operator.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + } + ], + "support_level": "default" + }, + { + "name": "SparseAdagradFusedWithSparseLengthsMeanGradientApprox", + "description": "\n\nFused operator of\nSparseLengthsIndicesInGradientMeanGradient (gradient of SparseLengthsMean) +\nSparseAdagrad.\n\nGiven inputs (param, moment, indices, grad, lr), runs the sparse AdaGrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case. Additional input (lengths) is for fused\nSparseLengthsIndicesInGradientMeanGradient operator.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + } + ], + "support_level": "default" + }, + { + "name": "SparseAdagradFusedWithSparseLengthsSumGradient", + "description": "\n\nFused operator of\nSparseLengthsIndicesInGradientSumGradient (gradient of SparseLengthsSum) +\nSparseAdagrad.\n\nGiven inputs (param, moment, indices, grad, lr), runs the sparse AdaGrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case. 
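The sparse AdaGrad rule these fused operators apply is simply the dense update restricted to the touched rows. A minimal NumPy sketch, assuming illustrative shapes and no duplicate indices (duplicates would need `np.add.at`-style accumulation):

```

import numpy as np

def sparse_adagrad(param, moment, indices, grad, lr, epsilon=1e-5):
    # Accumulate squared gradients for the updated rows only ...
    moment[indices] += grad * grad
    # ... then take a step scaled by the per-element adaptive rate.
    param[indices] -= lr * grad / (np.sqrt(moment[indices]) + epsilon)
    return param, moment

param = np.zeros((10, 4), dtype=np.float32)
moment = np.zeros_like(param)
grad = np.ones((2, 4), dtype=np.float32)
sparse_adagrad(param, moment, np.array([3, 7]), grad, lr=0.1)

```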
Additional input (lengths) is for fused\nSparseLengthsIndicesInGradientSumGradient operator.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + } + ], + "support_level": "default" + }, + { + "name": "SparseAdagradFusedWithSparseLengthsSumGradientApprox", + "description": "\n\nFused operator of\nSparseLengthsIndicesInGradientSumGradient (gradient of SparseLengthsSum) +\nSparseAdagrad.\n\nGiven inputs (param, moment, indices, grad, lr), runs the sparse AdaGrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case. Additional input (lengths) is for fused\nSparseLengthsIndicesInGradientSumGradient operator.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + } + ], + "support_level": "default" + }, + { + "name": "SparseAdagradFusedWithSparseLengthsWeightedSumGradient", + "description": "\n\nFused operator of SparseLengthsIndicesInGradientWeightedSumWithMainInputGradient\n(gradient of SparseLengthsWeightedSum) + SparseAdagrad, where weights are\npositional weights computed with LengthsRangeFill + Gather pattern.\n\nGiven inputs (param, moment, indices, grad, lr), runs the sparse AdaGrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case.\nThere are auxiliary inputs (aux_param) for which the gradient is computed\nand returned (aux_grad).\nYet additional input (lengths) is for fused\nSparseLengthsIndicesInGradientWeightedSumWithMainInputGradient operator.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Auxiliary parameters to be updated", + "name": "aux_param" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + },
+ { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + }, + { + "description": "Auxiliary gradient", + "name": "aux_grad" + } + ], + "support_level": "default" + }, + { + "name": "SparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox", + "description": "\n\nApproximately fused operator of\nSparseLengthsIndicesInGradientWeightedSumWithMainInputGradient\n(gradient of SparseLengthsWeightedSum) + SparseAdagrad, where weights are\npositional weights computed with LengthsRangeFill + Gather pattern.\n\nGiven inputs (param, moment, indices, grad, lr), runs the sparse AdaGrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case.\nThere's race condition w.r.t. ordering between reading params and writing to\nparam, hence the name Approx.\nThere're auxiliary inputs (aux_param) for which gradient is computed and\nreturns (aux_grad).\nYet additional input (lengths) is for fused\nSparseLengthsIndicesInGradientWeightedSumWithMainInputGradient operator.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Auxiliary parameters to be updated", + "name": "aux_param" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + }, + { + "description": "Auxiliary gradients", + "name": "aux_grad" + } + ], + "support_level": "default" + }, + { + "name": "SparseAdam", + "description": "\n\n Computes the Adam Update for the sparse case.\n Given inputs (param, moment1, moment2, indices, grad, lr, iter), runs the dense\n Adam on (param, moment1[indices], momemnt2[indices], lr, iter) and returns\n (new_param, new_moment1, new_moment2) as in dense case.\n Adam can be customized as Rectified Adam (RAdam) by setting enableRAdam = true.\n\n ", + "attributes": [ + { + "description": "Default 0.9", + "name": "beta1", + "option": "optional" + }, + { + "description": "Default 0.999", + "name": "beta2", + "option": "optional" + }, + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + }, + { + "description": "Default false", + "name": "enableRAdam", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "First moment history", + "name": "moment_1" + }, + { + "description": "Second moment history", + "name": "moment_2" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "iteration number", + "name": "iter" + } + ], + 
"outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated first moment", + "name": "output_moment_1" + }, + { + "description": "Updated second moment", + "name": "output_moment_2" + }, + { + "description": "Optional Effective gradient", + "name": "output_grad" + } + ], + "support_level": "default" + }, + { + "name": "SparseDropoutWithReplacement", + "description": "\n\n`SparseDropoutWithReplacement` takes a 1-d input tensor and a lengths tensor.\nValues in the Lengths tensor represent how many input elements consitute each\nexample in a given batch. The set of input values for an example will be\nreplaced with the single dropout value with probability given by the `ratio`\nargument.\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"SparseDropoutWithReplacement\",\n [\"X\", \"Lengths\"],\n [\"Y\", \"OutputLengths\"],\n ratio=0.5,\n replacement_value=-1\n)\n\nworkspace.FeedBlob(\"X\", np.array([1, 2, 3, 4, 5]).astype(np.int64))\nworkspace.FeedBlob(\"Lengths\", np.array([2, 3]).astype(np.int32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Lengths:\", workspace.FetchBlob(\"Lengths\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"OutputLengths:\", workspace.FetchBlob(\"OutputLengths\"))\n```\n\n**Result**\n\n```\nX: [1, 2, 3, 4, 5]\nLengths: [2, 3]\nY: [1, 2, -1]\nOutputLengths: [2, 1]\n```\n\n
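In the result above, the first example ([1, 2]) survived and the second ([3, 4, 5]) was dropped, so its three values collapse to the single replacement_value -1 and its output length becomes 1. A sketch of that per-example rule (illustrative, not the operator's implementation):

```

import random

def sparse_dropout_with_replacement(x, lengths, ratio, replacement_value):
    out, out_lengths, offset = [], [], 0
    for n in lengths:
        example = x[offset:offset + n]
        offset += n
        if random.random() < ratio:          # drop the whole example ...
            out.append(replacement_value)    # ... replaced by one value
            out_lengths.append(1)
        else:
            out.extend(example)
            out_lengths.append(n)
    return out, out_lengths

```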
\n\n", + "attributes": [ + { + "default": 0.0, + "description": "Probability of an element to be replaced.", + "name": "ratio", + "option": "optional", + "type": "float32" + }, + { + "default": 0, + "description": "Value elements are replaced with.", + "name": "replacement_value", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + }, + { + "description": "*(type: Tensor``)* Lengths tensor for input.", + "name": "Lengths" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + }, + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "OutputLengths" + } + ], + "support_level": "default" + }, + { + "name": "SparseFtrl", + "support_level": "default" + }, + { + "name": "SparseLengthsIndicesInGradientMeanGradient", + "support_level": "default" + }, + { + "name": "SparseLengthsIndicesInGradientSumGradient", + "support_level": "default" + }, + { + "name": "SparseLengthsIndicesInGradientWeightedSumGradient", + "support_level": "default" + }, + { + "name": "SparseLengthsIndicesInGradientWeightedSumWithMainInputGradient", + "support_level": "default" + }, + { + "name": "SparseLengthsMean", + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Mean' to each segment. Segments are defined by their LENGTHS.\n\nThis op is basically Gather and LengthsMean fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nLENGTHS is a vector that defines slice sizes by first dimension of DATA. Values\nbelonging to the same segment are aggregated together. sum(LENGTHS) has\nto match INDICES size.\n\nThe first dimension of the output is equal to the number of input segment,\ni.e. `len(LENGTHS)`. Other dimensions are inherited from the input tensor.\n\nMean computes the element-wise mean of the input slices. Operation doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Non negative vector with sum of elements equal to INDICES length", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. 
Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsMean2BitRowwiseSparse", + "description": "\nPerforms SparseLengthsMean, but operating on 2-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 2-byte\nfp16 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsMean4BitRowwiseSparse", + "description": "\nPerforms SparseLengthsMean, but operating on 4-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 2-byte\nfp16 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsMean8BitRowwiseSparse", + "description": "\nPerforms SparseLengthsMean, but operating on 8-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 4-byte\nfp32 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused8BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsMean8BitsRowwise", + "description": "\nVariation of SparseLengthsMean operator, where DATA is\nstored using 8bits. DATA was quantized with 8Bit row-wise\nquantization (see doc to FloatToRowwiseQuantized8Bits operator).
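All of the 8BitsRowwise variants reconstruct each row as scale * quantized + bias before reducing. A sketch of just that dequantization step, with the per-row (s_i, b_i) layout the `scale_bias` input below describes (shapes are illustrative):

```

import numpy as np

quantized = np.random.randint(0, 256, size=(5, 8), dtype=np.uint8)
scale_bias = np.random.rand(5, 2).astype(np.float32)   # row i: (s_i, b_i)

# Restore approximate float rows: DATA[i] = s_i * q[i] + b_i
data = scale_bias[:, :1] * quantized.astype(np.float32) + scale_bias[:, 1:]

```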
To\nrestore DATA from 8Bit, we use additional input that stores scales\nand biases.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToRowwiseQuantized8Bits", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i -- scale and bias for i-th row", + "name": "scale_bias" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsMeanFused2BitRowwise", + "description": "\nPerforms the same operation as SparseLengthsMean, but\noperating on 2-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsMeanFused4BitRowwise", + "description": "\nPerforms the same operation as SparseLengthsMean, but\noperating on 4-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsMeanFused8BitRowwise", + "description": "\nPerforms the same operation as SparseLengthsMean, but\noperating on 8-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 4-byte scale and 4-byte bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused8BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsMeanGradient", + "support_level": "default" + }, + { + "name": "SparseLengthsPositionalWeightedSum", + "description": "\nVariation of SparseLengthsWeightedSum operator, where, for each row,\nweights are accessed by indices [0..L-1], where L is the length of given row.\nThis is basically a fused operator of LengthsRangeFill + Gather +\nSparseWeightedSum\n", + "inputs": [ + { + "description": "uint8 tensor obtained 
with operator FloatToRowwiseQuantized8Bits", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the length of DATA", + "name": "WEIGHT" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsSum", + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Sum' to each segment. Segments are defined by their LENGTHS.\n\nThis op is basically Gather and LengthsSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nLENGTHS is a vector that defines segment sizes along the first dimension of DATA. Values\nbelonging to the same segment are aggregated together. sum(LENGTHS) has\nto match INDICES size.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `len(LENGTHS)`. Other dimensions are inherited from the input tensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Non negative vector with sum of elements equal to INDICES length", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor.
Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsSum2BitRowwiseSparse", + "description": "\nPerforms SparseLengthsSum, but operating on 2-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 2-byte\nfp16 scale and 2-byte fp16 bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsSum4BitRowwiseSparse", + "description": "\nPerforms SparseLengthsSum, but operating on 4-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 2-byte\nfp16 scale and 2-byte fp16 bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsSum8BitRowwiseSparse", + "description": "\nPerforms SparseLengthsSum, but operating on 8-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 4-byte\nfp32 scale and 4-byte fp32 bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused8BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsSum8BitsRowwise", + "description": "\nVariation of SparseLengthsSum operator, where DATA is\nstored using 8bits. DATA was quantized with 8Bit row-wise\nquantization (see doc to FloatToRowwiseQuantized8Bits operator).
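The Gather + LengthsSum fusion the SparseLengthsSum family describes can be sketched directly: pull the indexed rows, then reduce them in runs given by LENGTHS (sum(LENGTHS) must equal len(INDICES)). An illustrative NumPy version:

```

import numpy as np

data = np.arange(12, dtype=np.float32).reshape(4, 3)
indices = np.array([0, 2, 1, 1, 3])
lengths = np.array([2, 3])                 # sum(lengths) == len(indices)

gathered = data[indices]                   # the Gather step
offsets = np.concatenate(([0], np.cumsum(lengths)[:-1]))
output = np.add.reduceat(gathered, offsets, axis=0)   # the LengthsSum step
print(output.shape)                        # (2, 3) -- one row per segment

```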
To\nrestore DATA from 8Bit, we use additional input that stores scales\nand biases.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToRowwiseQuantized8Bits", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i -- scale and bias for i-th row", + "name": "scale_bias" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsSumFused2BitRowwise", + "description": "\nPerforms the same operation as SparseLengthsSum, but operating on\n2-bit rowwise quantized matrices with fused storage (where each row\nstores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsSumFused4BitRowwise", + "description": "\nPerforms the same operation as SparseLengthsSum, but operating on\n4-bit rowwise quantized matrices with fused storage (where each row\nstores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsSumFused8BitRowwise", + "description": "\nPerforms the same operation as SparseLengthsSum, but operating on\n8-bit rowwise quantized matrices with fused storage (where each row\nstores quantized values, and then 4-byte scale and 4-byte bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused8BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsSumGradient", + "support_level": "default" + }, + { + "name": "SparseLengthsSumSparseLookup", + "description": "\nThis op converts compressed indices of SparseLengthsSum*Sparse to\nuncompressed indices of SparseLengthsSum*. A compressed index that maps\nto -1
corresponds to a zero row in the uncompressed data, so such indices are\nremoved and the lengths are adjusted accordingly.\n", + "inputs": [ + { + "description": "Integer vector containing compressed indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of INDICES", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction. Same size as INDICES.", + "name": "WEIGHTS" + } + ], + "outputs": [ + { + "description": "Uncompressed indices", + "name": "output_indices" + }, + { + "description": "Adjusted lengths", + "name": "output_lengths" + }, + { + "description": "Adjusted weights", + "name": "output_weights" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsWeightedMean8BitsRowwise", + "description": "\nVariation of SparseLengthsWeightedMean operator, where\nDATA is stored using 8bits. DATA was quantized with 8Bit row-wise\nquantization (see doc to FloatToRowwiseQuantized8Bits operator). To\nrestore DATA from 8Bit, we use additional input that stores scales\nand biases.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToRowwiseQuantized8Bits", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the length of INDICES", + "name": "SCALARS" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i -- scale and bias for i-th row", + "name": "scale_bias" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsWeightedSum", + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'WeightedSum' to each segment. Segments are defined by their LENGTHS.\n\nThis op is basically Gather and LengthsWeightedSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nLENGTHS is a vector that defines segment sizes along the first dimension of DATA. Values\nbelonging to the same segment are aggregated together. sum(LENGTHS) has\nto match INDICES size.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `len(LENGTHS)`. Other dimensions are inherited from the input tensor.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "attributes": [ + { + "description": "Also produce the gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices.
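A sketch of the pruning-and-length-adjustment mechanics that SparseLengthsSumSparseLookup describes: apply the index mapping, drop entries that map to -1 (pruned zero rows), and shrink the corresponding length. The direction of the mapping follows the INDICES/COMPRESSED_INDICES_MAPPING descriptions above; names and shapes here are illustrative:

```

import numpy as np

def sparse_lookup(indices, lengths, mapping):
    out_indices, out_lengths, offset = [], [], 0
    for n in lengths:
        seg = mapping[indices[offset:offset + n]]
        offset += n
        seg = seg[seg != -1]              # -1 marks a pruned (zero) row
        out_indices.extend(seg)
        out_lengths.append(len(seg))      # adjust length after pruning
    return np.array(out_indices), np.array(out_lengths)

```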
Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Non negative vector with sum of elements equal to INDICES length", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsWeightedSum2BitRowwiseSparse", + "description": "\nPerforms SparseLengthsWeightedSum, but operating on 2-bit rowwise quantized\nmatrices with fused storage (where each row stores quantized values, and then\n2-byte fp16 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsWeightedSum4BitRowwiseSparse", + "description": "\nPerforms SparseLengthsWeightedSum, but operating on 4-bit rowwise quantized\nmatrices with fused storage (where each row stores quantized values, and then\n2-byte fp16 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsWeightedSum8BitRowwiseSparse", + "description": "\nPerforms SparseLengthsWeightedSum, but operating on 8-bit rowwise quantized\nmatrices with fused storage (where each row stores quantized values, and then\n4-byte fp32 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused8BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", +
"name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsWeightedSum8BitsRowwise", + "description": "\nVariation of SparseLengthsWeightedSum operator, where\nDATA is stored using 8bits. DATA was quantized with 8Bit row-wise\nquantization (see doc to FloatToRowwiseQuantized8Bits operator). To\nrestore DATA from 8Bit, we use additional input that stores scales\nand biases.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToRowwiseQuantized8Bits", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the length of INDICES", + "name": "SCALARS" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i -- scale and bias for i-th row", + "name": "scale_bias" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsWeightedSumFused2BitRowwise", + "description": "\nPerforms the same operation as SparseLengthsWeightedSum,\nbut operating on 2-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsWeightedSumFused4BitRowwise", + "description": "\nPerforms the same operation as SparseLengthsWeightedSum,\nbut operating on 4-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsWeightedSumFused8BitRowwise", + "description": "\nPerforms the same operation as SparseLengthsWeightedSum,\nbut operating on 8-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 4-byte scale and 4-byte bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused8BitRowwiseQuantized", + "name": "DATA" + 
}, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseLengthsWeightedSumGradient", + "support_level": "default" + }, + { + "name": "SparseLengthsWeightedSumWithMainInputGradient", + "support_level": "default" + }, + { + "name": "SparseLpRegularizer", + "description": "\nGiven a sparse matrix, apply Lp regularization. Currently only L1 and L2 are implemented.\n", + "attributes": [ + { + "description": "Value of p in the Lp regularization to use. The default is 2.0.", + "name": "p", + "option": "optional" + }, + { + "description": "Value of lambda (multiplier for the regularization term). The default is 1e-5.", + "name": "reg_lambda", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be regularized", + "name": "param" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed (optional - not used, this argument is for backwards compatibility)", + "name": "grad" + } + ], + "outputs": [ + { + "description": "Regularized parameters", + "name": "output_param" + } + ], + "support_level": "default" + }, + { + "name": "SparseMomentumSGDUpdate", + "description": "\n\nPerforms a momentum SGD update analogous to MomentumSGDUpdate, but using a\nGradientSlice and indices into the full param and momentum tables. Both param\nand momentum should be in-place (corresponding inputs and outputs should be the\nsame blobs).\n\n\n\n", + "attributes": [ + { + "description": "Momentum hyperparameter.", + "name": "momentum", + "option": "optional" + }, + { + "description": "(boolean) Whether to use Nesterov Accelerated Gradient.", + "name": "nesterov", + "option": "optional" + } + ], + "inputs": [ + { + "description": "GradientSlice with gradients for updated indices.", + "name": "grad" + }, + { + "description": "Momentum blob, same shape as param.", + "name": "moment" + }, + { + "description": "Learning rate.", + "name": "lr" + }, + { + "description": "Full parameter blob.", + "name": "param" + }, + { + "description": "Indices (in first dimension of param) where updates are performed.", + "name": "indices" + } + ], + "outputs": [ + { + "description": "Adjusted gradient.", + "name": "output_grad" + }, + { + "description": "Updated momentum.", + "name": "output_moment" + }, + { + "description": "Updated parameter", + "name": "output_param" + } + ], + "support_level": "default" + }, + { + "name": "SparseNormalize", + "description": "\nGiven a sparse matrix, apply max_norm or constant_norm sparse regularization.\n", + "attributes": [ + { + "description": "A bool variable to control whether to use max norm or constant norm. When use_max_norm = false, constant norm is used so that all the embedding vectors are scaled to have a L2 norm equals to A (see blow argument norm=A). If use_max_norm = true, max norm is used so that embedding is scaled so that its l2 norm is no larger than A. If an embedding's norm is less than A originally, the embedding is left unchanged. 
The default is True.", + "name": "use_max_norm", + "option": "optional" + }, + { + "description": "L2 norm of the embedding. The default is 1.0.", + "name": "norm", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be normalized", + "name": "param" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed (optional - not used, this argument is for backwards compatibility)", + "name": "grad" + } + ], + "outputs": [ + { + "description": "Normalized parameters", + "name": "output_param" + } + ], + "support_level": "default" + }, + { + "name": "SparseSortedSegmentMean", + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Mean' to each segment. Segments need to be sorted and contiguous. See also\nSparseUnsortedSegmentMean that doesn't have this requirement.\n\nThis op is basically Gather and SortedSegmentMean fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nMean computes the element-wise mean of the input slices. Operation doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same length as INDICES and values in the range 0..K-1 and in increasing order that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SparseSortedSegmentMeanGradient", + "support_level": "default" + }, + { + "name": "SparseSortedSegmentSum", + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Sum' to each segment. Segments need to be sorted and contiguous. See also\nSparseUnsortedSegmentSum that doesn't have this requirement.\n\nThis op is basically Gather and SortedSegmentSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. 
Other dimensions are inherited from the input tensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same length as INDICES and values in the range 0..K-1 and in increasing order that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SparseSortedSegmentSumGradient", + "support_level": "default" + }, + { + "name": "SparseSortedSegmentWeightedSum", + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'WeightedSum' to each segment. Segments need to be sorted and contiguous. See also\nSparseUnsortedSegmentWeightedSum that doesn't have this requirement.\n\nThis op is basically Gather and SortedSegmentWeightedSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "attributes": [ + { + "description": "Produce also gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same length as INDICES and values in the range 0..K-1 and in increasing order that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SparseSortedSegmentWeightedSumGradient", + "support_level": "default" + }, + { + "name": "SparseStorm", + "description": "\n\nThis operator implement the STORM (https://arxiv.org/abs/1905.10018)\noptimization algorithm. 
Given inputs (param, moment, grad_sq_sum, grad,\nindices, lr), computes the dense STORM update on (param, moment[indices],\ngrad_sq_sum, grad, lr), and returns (new_param, new_moment, new_grad_sq_sum)\nas in the dense case.\n", + "attributes": [ + { + "description": "Momentum hyperparameter, c in the original paper.", + "name": "momentum", + "option": "optional" + }, + { + "description": "denominator in adaptive learning rate, w in the original paper.", + "name": "beta", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated.", + "name": "param" + }, + { + "description": "Moment history.", + "name": "moment" + }, + { + "description": "Sum of observed squared gradients.", + "name": "grad_sq_sum" + }, + { + "description": "Gradients computed.", + "name": "grad" + }, + { + "description": "Sparse indices.", + "name": "indices" + }, + { + "description": "Learning rate, k in the original paper.", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters.", + "name": "output_param" + }, + { + "description": "Updated moment.", + "name": "output_moment" + }, + { + "description": "Updated sum of squared gradients.", + "name": "output_grad_sq_sum" + } + ], + "support_level": "default" + }, + { + "name": "SparseToDense", + "description": "\nConvert sparse representations to dense with given indices.\n\nTransforms a sparse representation of map represented as `indices`\nvector and `values` tensor into a compacted tensor where the first dimension\nis determined by the first dimension of the 3rd input if it is given or the\nmax index. Missing values are filled with zeros.\n\nThe op supports duplicated indices and performs summation over corresponding\nvalues. This behavior is useful for converting GradientSlices into dense\nrepresentation.\n\nAfter running this op:\n\n output[indices[i], :] += values[i] // sum over all indices[i] equal to the index\n output[j, ...] = 0 if j not in indices\n", + "inputs": [ + { + "description": "1-D int32/int64 tensor of concatenated ids of data", + "name": "indices" + }, + { + "description": "Data tensor, first dimension has to match `indices`, basic numeric types are supported", + "name": "values" + }, + { + "description": "Optional: if provided, the first dimension of output is the first dimension of this tensor.", + "name": "data_to_infer_dim" + } + ], + "outputs": [ + { + "description": "Output tensor of the same type as `values` of shape `[len(lengths), len(mask)] + shape(default_value)` (if `lengths` is not provided the first dimension is omitted)", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SparseToDenseMask", + "description": "\nConvert sparse representations to dense with given indices.\n\nTransforms a sparse representation of map represented as `indices`\nvector and `values` tensor into a compacted tensor where the first dimension\ncorresponds to each id provided in the mask argument. Missing values are filled\nwith the value of `default_value`. After running this op:\n\n output[j, :] = values[i] // where mask[j] == indices[i]\n output[j, ...] 
= default_value // when mask[j] doesn't appear in indices\n\nIf `lengths` is provided and not empty, an extra \"batch\" dimension is prepended\nto the output.\n\n`values` and `default_value` can have additional matching dimensions\n(the operation is performed on the entire subtensor in this case).\n\nFor example, if `lengths` is supplied and `values` is a 1-D vector of floats\nand `default_value` is a float scalar, the output is going to be a float\nmatrix of size `len(lengths) X len(mask)`.\n", + "attributes": [ + { + "description": "list(int) argument with desired ids on the 'dense' output dimension", + "name": "mask", + "option": "optional" + }, + { + "description": "bool whether to return presence mask, false by default", + "name": "return_presence_mask", + "option": "optional" + }, + { + "description": "int argument representing the maximum number of invalid row ids that can be skipped before returning an error. 50 by default", + "name": "max_skipped_indices", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1-D int32/int64 tensor of concatenated ids of data", + "name": "indices" + }, + { + "description": "Data tensor, first dimension has to match `indices`", + "name": "values" + }, + { + "description": "Default value for the output if the id is not present in `indices`. Must have the same type as `values` and the same shape, but without the first dimension", + "name": "default_value" + }, + { + "description": "Optional lengths to represent a batch of `indices` and `values`.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Output tensor of the same type as `values` of shape `[len(lengths), len(mask)] + shape(default_value)` (if `lengths` is not provided the first dimension is omitted)", + "name": "output" + }, + { + "description": "Bool tensor of shape `[len(lengths), len(mask)]` (if `lengths` is not provided the first dimension is omitted). True when a value for given id was present, false otherwise.", + "name": "presence_mask" + } + ], + "support_level": "default" + }, + { + "name": "SparseToDenseMaskGradient", + "description": "\nThe output is the gradient of the input value from SparseToDenseMask. The\ngradient for default_value has not been implemented.\n", + "support_level": "default" + }, + { + "name": "SparseUnsortedSegmentMean", + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Mean' to each segment. Segments ids can appear in arbitrary order (unlike in\nSparseSortedSegmentMean).\n\nThis op is basically Gather and UnsortedSegmentMean fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nIf `num_segments` argument is passed it would be used as a first dimension for\nthe output. Otherwise, it'd be dynamically calculated from as the max value of\nSEGMENT_IDS plus one. Other output dimensions are inherited from the input\ntensor.\n\nMean computes the element-wise mean of the input slices. 
Operation doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Integer vector with the same length as INDICES that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SparseUnsortedSegmentMeanGradient", + "support_level": "default" + }, + { + "name": "SparseUnsortedSegmentSum", + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Sum' to each segment. Segment ids can appear in arbitrary order (unlike in\nSparseSortedSegmentSum).\n\nThis op is basically Gather and UnsortedSegmentSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nIf the `num_segments` argument is passed, it is used as the first dimension of\nthe output. Otherwise, it is dynamically calculated as the max value of\nSEGMENT_IDS plus one. Other output dimensions are inherited from the input\ntensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Integer vector with the same length as INDICES that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SparseUnsortedSegmentSumGradient", + "support_level": "default" + }, + { + "name": "SparseUnsortedSegmentWeightedSum", + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'WeightedSum' to each segment. Segment ids can appear in arbitrary order (unlike in\nSparseSortedSegmentWeightedSum).\n\nThis op is basically Gather and UnsortedSegmentWeightedSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nIf the `num_segments` argument is passed, it is used as the first dimension of\nthe output. Otherwise, it is dynamically calculated as the max value of\nSEGMENT_IDS plus one.
Other output dimensions are inherited from the input\ntensor.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "attributes": [ + { + "description": "Produce also gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Integer vector with the same length as INDICES that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "SparseUnsortedSegmentWeightedSumGradient", + "support_level": "default" + }, + { + "name": "SparseWngrad", + "description": "\n\nThis operator implement the optimization algorithm\nin https://arxiv.org/abs/1803.02865 by Wu, Ward and Bottou.\nGiven inputs (param, seq_b, indices, grad, lr), runs the dense WnGrad\nupdate on (param, grad, seq_b, lr), and returns (new_param,\nnew_seq_b) as in the dense case.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "seq_b history", + "name": "seq_b" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated seq_b", + "name": "output_seq_b" + } + ], + "support_level": "default" + }, + { + "name": "SpatialBN", + "category": "Normalization", + "description": "\nApplies spatial batch normalization to the input tensor as described in the original paper, [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167). Be aware, this operator has two different output sets, depending on the value of *is_test*. According to the paper, the primary operation of spatial batch normalization is:\n\n$$Y = \\frac{X - \\mu_x}{\\sqrt{\\sigma^2_{x} + \\epsilon}}*\\gamma + b$$\n\nIn the equation, $\\mu_x$ is the *mean*, $X$ is the input data, $\\sigma^2_{x}$ is the *var*, $\\epsilon$ is *epsilon*, $\\gamma$ is the *scale*, $b$ is the *bias*, and $Y$ is the output data. The *momentum* arg also affects this calculation in the computation of the running mean and variance. 
The influence of *momentum* is as follows:\n\n$$running\\_mean = running\\_mean * momentum + mean * (1 - momentum)$$\n\n$$running\\_var = running\\_var * momentum + var * (1 - momentum)$$\n\nOutput when is_test = 0 (train mode): *Y, mean, var, saved_mean, saved_var*\n\nOutput when is_test = 1 (test mode): *Y*\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/spatial_batch_norm_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/spatial_batch_norm_op.h\n\n", + "attributes": [ + { + "default": 0, + "description": "If set to nonzero, run spatial batch normalization in test mode.", + "name": "is_test", + "type": "int64" + }, + { + "default": 1e-05, + "description": "The epsilon value to use to avoid division by zero.", + "name": "epsilon", + "option": "optional", + "type": "float32" + }, + { + "default": "NCHW", + "description": "Specifies the order of the input data blob, where $N$ is batch size, $C$ is number of channels, $H$ is spatial height, and $W$ is spatial width. The only other valid option is \"NHWC\".", + "name": "order", + "option": "optional", + "type": "string" + }, + { + "default": 0.9, + "description": "Factor used in computing the running mean and variance. e.g., running_mean = running_mean x momentum + mean x (1 - momentum)", + "name": "momentum", + "option": "optional", + "type": "float32" + }, + { + "default": 1, + "description": "Specifies the number of batches to apply normalization on. Requires specifying the optional sums and sumsq inputs that provide statistics across multiple batches from which mean and variance can be determined.", + "name": "num_batches", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "description": "The scale as a 1-dimensional tensor of size $C$ to be applied to the output.", + "name": "scale" + }, + { + "description": "The bias as a 1-dimensional tensor of size $C$ to be applied to the output.", + "name": "bias" + }, + { + "description": "The running mean (training) or the estimated mean (testing) as a 1-dimensional tensor of size $C$.", + "name": "mean" + }, + { + "description": "The running variance (training) or the estimated variance (testing) as a 1-dimensional tensor of size $C$.", + "name": "var" + }, + { + "description": "The input 4-dimensional tensor of shape $NCHW$ or $NHWC$ depending on the order parameter.", + "name": "X" + }, + { + "description": "*(optional)* Per-channel sums of elements to be used to determine the mean and variance for this batch.", + "name": "sums" + }, + { + "description": "*(optional)* Per-channel sum of elements squared per channel to be used to determine the variance for this batch.", + "name": "sumsq" + } + ], + "outputs": [ + { + "description": "The output 4-dimensional tensor of the same shape as $X$.", + "name": "Y" + }, + { + "description": "The running mean after the spatial BN operator. Must be in-place with the input *mean*. Should not be used for testing.", + "name": "mean" + }, + { + "description": "The running variance after the spatial BN operator. Must be in-place with the input *var*. Should not be used for testing.", + "name": "var" + }, + { + "description": "Saved mean used during training to speed up gradient computation. Should not be used for testing.", + "name": "saved_mean" + }, + { + "description": "Saved variance used during training to speed up gradient computation. 
Should not be used for testing.", + "name": "saved_var" + } + ], + "support_level": "default" + }, + { + "name": "SpatialBNGradient", + "support_level": "default" + }, + { + "name": "SpatialSoftmaxWithLoss", + "description": "\nCombined Spatial Softmax and Cross-Entropy loss operator.\nSimilar to SoftmaxWithLoss, this operator computes the spatial softmax\nnormalized values for each layer in the batch of the given input, after which\ncross-entropy loss is computed. This operator is numerically more stable than\nseparate Softmax and CrossEntropy ops. The inputs are a 2-D tensor\n(Tensor) of size (batch_size x input_feature_dimensions) and tensor of\nlabels (ground truth).\nOutput is tensor with the probability for each label in a pixel for each example\n(N x D x W x H) and averaged loss (scalar).\nFor spatial softmax, weighting is by x,y position of the input.\n", + "inputs": [ + { + "description": "Unscaled log probabilities", + "name": "logits" + }, + { + "description": "Ground truth", + "name": "labels" + }, + { + "description": "Optional blob to be used to weight the samples for the loss. With spatial set, weighting is by x,y of the input", + "name": "weight_tensor" + } + ], + "outputs": [ + { + "description": "Tensor with softmax cross entropy loss", + "name": "softmax" + }, + { + "description": "Average loss", + "name": "loss" + } + ], + "support_level": "default" + }, + { + "name": "SpatialSoftmaxWithLossGradient", + "support_level": "default" + }, + { + "name": "Split", + "description": "\nSplit an `input` tensor into a list of tensors, along the axis specified by the `axis` dimension. The lengths of the split can be specified using argument `split` or optional second input blob to the operator. Otherwise, the tensor is split to equal sized parts.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/concat_split_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Split\",\n [\"input\"],\n [\"output_0\",\"output_1\",\"output_2\"],\n split=(3,2,4),\n axis=0\n)\n\nworkspace.FeedBlob(\"input\", np.random.randint(10, size=(9)))\nprint(\"input:\", workspace.FetchBlob(\"input\"))\nworkspace.RunOperatorOnce(op)\nprint(\"output_0:\", workspace.FetchBlob(\"output_0\"))\nprint(\"output_1:\", workspace.FetchBlob(\"output_1\"))\nprint(\"output_2:\", workspace.FetchBlob(\"output_2\"))\n\n```\n\n**Result**\n\n```\n\ninput: [2 2 6 6 6 0 5 7 4]\noutput_0: [2 2 6]\noutput_1: [6 6]\noutput_2: [0 5 7 4]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "(*int*): axis to split on", + "name": "axis", + "option": "optional" + }, + { + "description": "Pass non-zero integer to remove the axis specified in `axis` to all input tensors.", + "name": "add_axis", + "option": "optional", + "type": "int64" + }, + { + "description": "(*Tuple(int)*): length of each output", + "name": "split", + "option": "optional" + }, + { + "description": "(*string*): order of dimensions of input and output blobs; either \"NCHW\" or \"NHWC\"", + "name": "order", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor*): tensor to split", + "name": "input" + }, + { + "description": "(*Tensor``*): [OPTIONAL] list of output lengths (see also arg `split`)", + "name": "split" + } + ], + "outputs": [ + { + "description": "(*Tensor*): output tensor", + "name": "[output_0, output_1, ...]" + } + ], + "support_level": "default" + }, + { + "name": "SplitByLengths", + "description": "\nSplit a tensor into a list of tensors, given a lengths input, along the specified\n'axis'. If `K` outputs are provided, the op assumes `len(lengths) % K == 0`.\nThe `input` will be split into `K` parts. Each part of length\n`sum(lengths[i*k:i*k+k))`\n\n
\n\n Example 1 \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"SplitByLengths\",\n [\"input\", \"lengths\"],\n [\"output_0\",\"output_1\",\"output_2\"],\n axis=0\n)\n\nworkspace.FeedBlob(\"input\", np.random.randint(10, size=(9)))\nworkspace.FeedBlob(\"lengths\", np.array([3,2,4], dtype=np.int32))\nprint(\"input:\", workspace.FetchBlob(\"input\"))\nprint(\"lengths:\", workspace.FetchBlob(\"lengths\"))\nworkspace.RunOperatorOnce(op)\nprint(\"output_0:\", workspace.FetchBlob(\"output_0\"))\nprint(\"output_1:\", workspace.FetchBlob(\"output_1\"))\nprint(\"output_2:\", workspace.FetchBlob(\"output_2\"))\n\n```\n\n**Result**\n\n```\n\ninput: [2 2 6 6 6 0 5 7 4]\nlengths: [3 2 4]\noutput_0: [2 2 6]\noutput_1: [6 6]\noutput_2: [0 5 7 4]\n\n```\n\n Example 2 \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"SplitByLengths\",\n [\"input\", \"lengths\"],\n [\"output_0\",\"output_1\",\"output_2\"],\n axis=0,\n use_scaling_lengths=true,\n)\n\nworkspace.FeedBlob(\"input\", np.random.randint(10, size=(9)))\nworkspace.FeedBlob(\"lengths\", np.array([1,1,1], dtype=np.int32))\nprint(\"input:\", workspace.FetchBlob(\"input\"))\nprint(\"lengths:\", workspace.FetchBlob(\"lengths\"))\nprint(\"output_0:\", workspace.FetchBlob(\"output_0\"))\nprint(\"output_1:\", workspace.FetchBlob(\"output_1\"))\nprint(\"output_2:\", workspace.FetchBlob(\"output_2\"))\n\n```\n\n**Result**\n\n```\n\ninput: [2 2 6 6 6 0 5 7 4]\nlengths: [1 1 1]\noutput_0: [2 2 6]\noutput_1: [6 6 6]\noutput_2: [5 7 4]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "Which axis to split on", + "name": "axis", + "option": "optional" + }, + { + "description": "Either NHWC or NCWH, will split on C axis, defaults to NCHW", + "name": "order", + "option": "optional" + }, + { + "description": "(*bool*): Enables automatic scaling of the lengths values. When enabled will automatically find a value K >= 1, such that sum(lengths) * K == len(input).", + "name": "use_scaling_lengths", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The tensor to split", + "name": "input" + }, + { + "description": "The tensor `l_i` indicates the logic block of input.", + "name": "legnths" + } + ], + "support_level": "default" + }, + { + "name": "Sqr", + "description": "\nPerforms element-wise squaring ($x^2$) of input tensor.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sqr_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sqr\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[4. 6. 2.]\n [0. 1. 6.]\n [9. 2. 7.]]\nY:\n[[16. 36. 4.]\n [ 0. 1. 36.]\n [81. 4. 49.]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "Sqrt", + "description": "\nPerforms element-wise square-root ($\\sqrt{x}$) of input tensor $X$.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sqrt_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sqrt\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[8. 3. 3.]\n [4. 0. 0.]\n [1. 2. 5.]]\nY:\n[[2.8284268 1.7320508 1.7320508 ]\n [1.9999999 0. 0. ]\n [0.99999994 1.4142134 2.236068 ]]\n\n```\n\n
\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "SquaredL2Distance", + "description": "\nGiven two input float tensors X, Y, and produces one output float tensor\nof the L2 difference between X and Y that is computed as ||(X - Y)^2 / 2||.\n", + "inputs": [ + { + "description": "1D or 2D input tensor", + "name": "X" + }, + { + "description": "1D or 2D input tensor (must have the same shape as X)", + "name": "Y" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Z" + } + ], + "support_level": "default" + }, + { + "name": "SquaredL2DistanceGradient", + "support_level": "default" + }, + { + "name": "SquareRootDivide", + "description": "\nGiven DATA tensor with first dimension N and SCALE vector of the same size N\nproduces an output tensor with same dimensions as DATA. Which consists of DATA\nslices. i-th slice is divided by sqrt(SCALE[i]) elementwise. If SCALE[i] == 0\noutput slice is identical to the input one (no scaling)\n\nExample:\n\n Data = [\n [2.0, 4.0],\n [9.0, 12.0]\n ]\n\n SCALE = [4, 9]\n\n OUTPUT = [\n [1.0, 2.0],\n [3.0, 4.0]\n ]\n\n", + "support_level": "default" + }, + { + "name": "Squeeze", + "category": "Transform", + "description": "\nThe *Squeeze* op removes single-dimensional entries from the shape of the input tensor *data,* and produces a single output tensor *squeezed*. The op also takes an argument *dims* with a list of dimensions to squeeze. If the same blob is provided as input and output, the operation is copy-free. This is the exact inverse operation of *ExpandDims* given the same *dims* argument.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Squeeze\",\n [\"data\"],\n [\"squeezed\"],\n dims=[0,1],\n)\n\nworkspace.FeedBlob(\"data\", np.zeros((1,1,100,100)).astype(np.float32))\nprint(\"data.shape:\", workspace.FetchBlob(\"data\").shape)\n\nworkspace.RunOperatorOnce(op)\nprint(\"squeezed.shape:\", workspace.FetchBlob(\"squeezed\").shape)\n\n```\n\n**Result**\n\n```\n\ndata.shape: (1, 1, 100, 100)\nsqueezed.shape: (100, 100)\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "List of dimensions of *data* to squeeze out.", + "name": "dims", + "option": "optional", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "Input tensor of data to be operated on.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reshaped tensor with same data as input.", + "name": "squeezed" + } + ], + "support_level": "default" + }, + { + "name": "StatRegistryCreate", + "description": "\nCreate a StatRegistry object that will contain a map of performance counters\nkeyed by name. A StatRegistry is used to gather and retrieve performance\ncounts throughout the caffe2 codebase.\n", + "outputs": [ + { + "description": "A Blob pointing to the newly created StatRegistry.", + "name": "handle" + } + ], + "support_level": "default" + }, + { + "name": "StatRegistryExport", + "attributes": [ + { + "description": "(default true) Whether to atomically reset the counters afterwards.", + "name": "reset", + "option": "optional" + } + ], + "inputs": [ + { + "description": "If provided, export values from given StatRegistry.Otherwise, export values from the global singleton StatRegistry.", + "name": "handle" + } + ], + "outputs": [ + { + "description": "1D string tensor with exported key names", + "name": "keys" + }, + { + "description": "1D int64 tensor with exported values", + "name": "values" + }, + { + "description": "The unix timestamp at counter retrieval.", + "name": "timestamps" + } + ], + "support_level": "default" + }, + { + "name": "StatRegistryUpdate", + "description": "\nUpdate the given StatRegistry, or the global StatRegistry,\nwith the values of counters for the given keys.\n", + "inputs": [ + { + "description": "1D string tensor with the key names to update.", + "name": "keys" + }, + { + "description": "1D int64 tensor with the values to update.", + "name": "values" + }, + { + "description": "If provided, update the given StatRegistry. Otherwise, update the global singleton.", + "name": "handle" + } + ], + "support_level": "default" + }, + { + "name": "StdDevPut", + "description": "\n Consume a value and pushes it to the global stat registry as an standard deviation.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_put_ops.cc\n\n ", + "attributes": [ + { + "description": "(*str*): name of the stat. If not present, then uses name of input blob", + "name": "name", + "option": "optional" + }, + { + "description": "(*int64_t*): number to multiply input values by (used when inputting floats, as stats can only receive integers", + "name": "magnitude_expand", + "option": "optional" + }, + { + "description": "(*boolean*): whether or not to clamp inputs to the max inputs allowed", + "name": "bound", + "option": "optional" + }, + { + "description": "(*float*): Optionally provide a default value for receiving empty tensors", + "name": "default_value", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): A scalar tensor, representing any numeric value", + "name": "value" + } + ], + "support_level": "default" + }, + { + "name": "StopGradient", + "description": "\nStopGradient is a helper operator that does no actual numerical computation,\nand in the gradient computation phase stops the gradient from being computed\nthrough it.\n", + "support_level": "default" + }, + { + "name": "StoreAdd", + "description": "\nAdd a value to a remote counter. If the key is not set, the store\ninitializes it to 0 and then performs the add operation. 
The operation\nreturns the resulting counter value.\n", + "attributes": [ + { + "description": "key of the counter (required)", + "name": "blob_name", + "option": "optional" + }, + { + "description": "value that is added (optional, default: 1)", + "name": "add_value", + "option": "optional" + } + ], + "inputs": [ + { + "description": "unique_ptr", + "name": "handler" + } + ], + "outputs": [ + { + "description": "the current value of the counter", + "name": "value" + } + ], + "support_level": "default" + }, + { + "name": "StoreGet", + "description": "\nGet a blob from a store. The key is the output blob's name. The key\ncan be overridden by specifying the 'blob_name' argument.\n", + "attributes": [ + { + "description": "alternative key for the blob (optional)", + "name": "blob_name", + "option": "optional" + } + ], + "inputs": [ + { + "description": "unique_ptr", + "name": "handler" + } + ], + "outputs": [ + { + "description": "data blob", + "name": "data" + } + ], + "support_level": "default" + }, + { + "name": "StoreSet", + "description": "\nSet a blob in a store. The key is the input blob's name and the value\nis the data in that blob. The key can be overridden by specifying the\n'blob_name' argument.\n", + "attributes": [ + { + "description": "alternative key for the blob (optional)", + "name": "blob_name", + "option": "optional" + } + ], + "inputs": [ + { + "description": "unique_ptr", + "name": "handler" + }, + { + "description": "data blob", + "name": "data" + } + ], + "support_level": "default" + }, + { + "name": "StoreWait", + "description": "\nWait for the specified blob names to be set. The blob names can be passed\neither as an input blob with blob names or as an argument.\n", + "attributes": [ + { + "description": "names of the blobs to wait for (optional)", + "name": "blob_names", + "option": "optional" + } + ], + "inputs": [ + { + "description": "unique_ptr", + "name": "handler" + }, + { + "description": "names of the blobs to wait for (optional)", + "name": "names" + } + ], + "support_level": "default" + }, + { + "name": "Storm", + "description": "\n\nComputes the STORM (https://arxiv.org/abs/1905.10018) update for an input\ngradient and accumulated history of gradients. Concretely, given inputs\n(param, moment, grad_sq_sum, grad, lr), computes:\n\n new_grad_sq_sum = grad_sq_sum + norm(grad)^2\n effective_lr = lr / (beta + new_grad_sq_sum)^1/3\n alpha = momentum * square(effective_lr)\n new_moment = grad + (1 - alpha) * (moment - grad)\n new_param = param + effective_lr * new_moment\n\nand returns (new_param, new_moment, new_grad_sq_sum).\n\nNote that due to caffe2 limitation, it is difficult to re-calculate gradient\nin the previous iteration using the current example. 
We simplied calculation\nfor new_moment by using the gradient from the current iteration.\n\n", + "attributes": [ + { + "description": "Momentum hyperparameter, c in the original paper.", + "name": "momentum", + "option": "optional" + }, + { + "description": "denominator in adaptive learning rate, w in the original paper.", + "name": "beta", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated.", + "name": "param" + }, + { + "description": "Moment history.", + "name": "moment" + }, + { + "description": "Sum of observed squared gradients.", + "name": "grad_sq_sum" + }, + { + "description": "Gradients computed.", + "name": "grad" + }, + { + "description": "Learning rate, k in the original paper.", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters.", + "name": "output_param" + }, + { + "description": "Updated moment.", + "name": "output_moment" + }, + { + "description": "Updated sum of squared gradients.", + "name": "output_grad_sq_sum" + } + ], + "support_level": "default" + }, + { + "name": "StringEndsWith", + "description": "\nPerforms the ends-with check on each string in the input tensor.\nReturns tensor of boolean of the same dimension of input.\n", + "attributes": [ + { + "description": "The suffix to check input strings against.", + "name": "suffix", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Tensor of std::string.", + "name": "strings" + } + ], + "outputs": [ + { + "description": "Tensor of bools of same shape as input.", + "name": "bools" + } + ], + "support_level": "default" + }, + { + "name": "StringEquals", + "description": "\nPerforms equality check on each string in the input tensor.\nReturns tensor of booleans of the same dimension as input.\n", + "attributes": [ + { + "description": "The text to check input strings equality against.", + "name": "text", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Tensor of std::string.", + "name": "strings" + } + ], + "outputs": [ + { + "description": "Tensor of bools of same shape as input.", + "name": "bools" + } + ], + "support_level": "default" + }, + { + "name": "StringIndexCreate", + "description": "\nCreates a dictionary that maps string keys to consecutive integers\nfrom 1 to max_elements. Zero is reserved for unknown keys.\n", + "attributes": [ + { + "description": "Max number of elements, including the zero entry.", + "name": "max_elements", + "option": "optional" + } + ], + "outputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + } + ], + "support_level": "default" + }, + { + "name": "StringJoin", + "description": "\nTakes a 1-D or a 2-D tensor as input and joins elements in each row with the\nprovided delimiter. Output is a 1-D tensor of size equal to the first dimension\nof the input. Each element in the output tensor is a string of concatenated\nelements corresponding to each row in the input tensor. 
For 1-D input, each\nelement is treated as a row.\n", + "attributes": [ + { + "description": "Delimiter for join (Default: \",\").", + "name": "delimiter", + "option": "optional" + }, + { + "description": "Axis for the join (either 0 or 1)", + "name": "axis", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1-D or 2-D tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "1-D tensor of strings created by joining row elements from the input tensor.", + "name": "strings" + } + ], + "support_level": "default" + }, + { + "name": "StringPrefix", + "description": "\nComputes the element-wise string prefix of the string tensor.\nInput strings that are shorter than prefix length will be returned unchanged.\nNOTE: Prefix is computed on number of bytes, which may lead to wrong behavior\nand potentially invalid strings for variable-length encodings such as utf-8.\n", + "attributes": [ + { + "description": "Maximum size of the prefix, in bytes.", + "name": "length", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Tensor of std::string.", + "name": "strings" + } + ], + "outputs": [ + { + "description": "Tensor of std::string containing prefixes for each input.", + "name": "prefixes" + } + ], + "support_level": "default" + }, + { + "name": "StringStartsWith", + "description": "\nPerforms the starts-with check on each string in the input tensor.\nReturns tensor of boolean of the same dimension of input.\n", + "attributes": [ + { + "description": "The prefix to check input strings against.", + "name": "prefix", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Tensor of std::string.", + "name": "strings" + } + ], + "outputs": [ + { + "description": "Tensor of bools of same shape as input.", + "name": "bools" + } + ], + "support_level": "default" + }, + { + "name": "StringSuffix", + "description": "\nComputes the element-wise string suffix of the string tensor.\nInput strings that are shorter than suffix length will be returned unchanged.\nNOTE: Prefix is computed on number of bytes, which may lead to wrong behavior\nand potentially invalid strings for variable-length encodings such as utf-8.\n", + "attributes": [ + { + "description": "Maximum size of the suffix, in bytes.", + "name": "length", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Tensor of std::string.", + "name": "strings" + } + ], + "outputs": [ + { + "description": "Tensor of std::string containing suffixes for each output.", + "name": "suffixes" + } + ], + "support_level": "default" + }, + { + "name": "StumpFunc", + "description": "\nConverts each input element into either high_ or low_value\nbased on the given threshold.\n", + "inputs": [ + { + "description": "tensor of float", + "name": "X" + } + ], + "outputs": [ + { + "description": "tensor of float", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "StumpFuncIndex", + "description": "\nSplit the elements and return the indices based on the given threshold.\n", + "inputs": [ + { + "description": "tensor of float", + "name": "X" + } + ], + "outputs": [ + { + "description": "tensor of int64 indices for elements below/equal threshold", + "name": "Index_Low" + }, + { + "description": "tensor of int64 indices for elements above threshold", + "name": "Index_High" + } + ], + "support_level": "default" + }, + { + "name": "Sub", + "description": "\nPerforms element-wise binary subtraction (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be 
broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sub\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[10,12],[4,14]]))\nworkspace.FeedBlob(\"B\", np.array([[5,16],[1,19]]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[10 12]\n [ 4 14]]\nB:\n[[ 5 16]\n [ 1 19]]\nC:\n[[ 5 -4]\n [ 3 -5]]\n\n```\n\n
\n\n\n", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions and type as A.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "SubGradient", + "support_level": "default" + }, + { + "name": "Sum", + "description": "\nElement-wise sum of each of the input tensors. The first input tensor can be used\nin-place as the output tensor, in which case the sum will be done in place and\nresults will be accumulated the first input tensor. All inputs and outputs must\nhave the same shape and data type.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_sum_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sum\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[1,2],[3,4]]).astype(np.float32))\nworkspace.FeedBlob(\"B\", np.array([[5,6],[7,8]]).astype(np.float32))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"A\"))\n\n```\n\n**Result**\n\n```\n\nA: [[1. 2.]\n [3. 4.]]\nB: [[5. 6.]\n [7. 8.]]\nC: [[1. 2.]\n [3. 4.]]\n\n```\n\n
\n\n
\n\n Example 2 \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sum\",\n [\"A\", \"B\"],\n [\"A\"], // inplace\n)\n\nworkspace.FeedBlob(\"A\", np.array([[1,2,5],[8,3,4]]).astype(np.float32))\nworkspace.FeedBlob(\"B\", np.array([[9,5,6],[6,7,8]]).astype(np.float32))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"A after Sum:\", workspace.FetchBlob(\"A\"))\n\n```\n\n**Result**\n\n```\n\nA: [[1. 2. 5.]\n [8. 3. 4.]]\nB: [[9. 5. 6.]\n [6. 7. 8.]]\nA after Sum: [[10. 7. 11.]\n [14. 10. 12.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First tensor to be added element-wise.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second tensor to be added element-wise.", + "name": "B" + }, + { + "description": "First of the input tensors. Can be inplace.", + "name": "data_0" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Sum of A and B.", + "name": "C" + }, + { + "description": "Output tensor. Same dimension as inputs.", + "name": "sum" + } + ], + "support_level": "default" + }, + { + "name": "SumElements", + "description": "\nSums the elements of the input tensor. Tensor type must be float32.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nsum_op = core.CreateOperator(\n \"SumElements\",\n [\"X\"],\n [\"Y\"]\n)\n\navg_op = core.CreateOperator(\n \"SumElements\",\n [\"X\"],\n [\"Y\"],\n average=True\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(3,3)).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(sum_op)\nprint(\"Y (sum_op):\", workspace.FetchBlob(\"Y\"))\nworkspace.RunOperatorOnce(avg_op)\nprint(\"Y (avg_op):\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[7. 2. 5.]\n [9. 4. 2.]\n [1. 2. 5.]]\nY (sum_op): 37.0\nY (avg_op): 4.111111\n\n```\n\n
\n\n ", + "attributes": [ + { + "description": "(*bool*): set to True to compute the average of the elements rather than the sum", + "name": "average", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): blob pointing to an instance of a counter", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): Scalar tensor containing the sum (or average)", + "name": "sum" + } + ], + "support_level": "default" + }, + { + "name": "SumElementsGradient", + "support_level": "default" + }, + { + "name": "SumElementsInt", + "description": "Sums the integer elements of the input tensor.", + "inputs": [ + { + "description": "Tensor to sum up", + "name": "X" + } + ], + "outputs": [ + { + "description": "Scalar sum", + "name": "sum" + } + ], + "support_level": "default" + }, + { + "name": "SumInt", + "support_level": "default" + }, + { + "name": "Summarize", + "description": "\nSummarize computes four statistics of the input tensor (Tensor)- min,\nmax, mean and standard deviation. The output will be written to a 1-D tensor of\nsize 4 if an output tensor is provided. Else, if the argument 'to_file' is\ngreater than 0, the values are written to a log file in the root folder.\n", + "attributes": [ + { + "description": "(int, default 0) flag to indicate if the summarized statistics have to be written to a log file.", + "name": "to_file", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The input data as Tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "1-D tensor (Tensor) of size 4 containing min, max, mean and standard deviation", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "SumReduceLike", + "description": "\nSumReduceLike operator takes 2 tensors as input. It performs reduce sum to the\nfirst input so that the output looks like the second one.\nIt assumes that the first input\nhas more dimensions than the second, and the dimensions of the second input is\nthe contiguous subset of the dimensions of the first.\nFor example, the following tensor shapes are supported:\n\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 2, 5), shape(B) = (2), with axis=0\n ", + "attributes": [ + { + "description": "If set, defines the starting dimension for reduction. Args `axis` and `axis_str` cannot be used simultaneously.", + "name": "axis", + "option": "optional" + }, + { + "description": "If set, it could only be N or C or H or W. `order` arg should also be provided. It defines the reduction dimensions on NCHW or NHWC. Args `axis` and `axis_str` cannot be used simultaneously.", + "name": "axis_str", + "option": "optional" + }, + { + "description": "Either NHWC or HCWH", + "name": "order", + "option": "optional" + } + ], + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "Result, has same dimensions and type as B", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "SumRelu", + "inputs": [ + { + "description": "First of the input tensors. Can be inplace.", + "name": "data_0" + } + ], + "outputs": [ + { + "description": "Output tensor. 
Same dimension as inputs.", + "name": "sum" + } + ], + "support_level": "default" + }, + { + "name": "SumSqrElements", + "description": "Sums the squares elements of the input tensor.", + "attributes": [ + { + "description": "whether to average or not", + "name": "average", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Tensor to sum up", + "name": "X" + } + ], + "outputs": [ + { + "description": "Scalar sum of squares", + "name": "sum" + } + ], + "support_level": "default" + }, + { + "name": "SwapBestPath", + "description": "\nGiven a sequence of indices and a matrix, enforce that these indices have the\nbest columnwise scores\nscore\n", + "inputs": [ + { + "description": "N*D predictions matrix", + "name": "predictions" + }, + { + "description": "N*1 vector holds the best path indices ", + "name": "bestPath" + } + ], + "outputs": [ + { + "description": "N*D updated predictions matrix", + "name": "new_predictions" + } + ], + "support_level": "default" + }, + { + "name": "Swish", + "description": "\nSwish takes one input data (Tensor) and produces one output data\n(Tensor) where the swish function, y = x / (1 + exp(-x)), is applied to the\ntensor elementwise.\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "SwishGradient", + "description": "\nSwishGradient takes X, Y and dY and uses this to update dX according to the\nchain rule and derivatives of the swish function.\n", + "support_level": "default" + }, + { + "name": "Tan", + "description": "\nCalculates the tangent of the given input tensor, element-wise.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The tangent of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "TanGradient", + "support_level": "default" + }, + { + "name": "Tanh", + "description": "\nCalculates the hyperbolic tangent of the given input tensor element-wise. This\noperation can be done in an in-place fashion too, by providing the same input\nand output blobs.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/tanh_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Tanh\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 2.032603 -2.3556721 -0.14955314]\n [ 0.39309832 -1.1020128 -0.92951244]\n [-0.62815386 0.21342885 1.4002231 ]]\n\nX:\n [[ 0.9662601 -0.982175 -0.14844811]\n [ 0.3740282 -0.8012209 -0.73036647]\n [-0.55677974 0.21024609 0.8853999 ]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "1-D input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The hyperbolic tangent values of the input tensor, computed element-wise", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "TanhGradient", + "support_level": "default" + }, + { + "name": "TensorProtosDBInput", + "description": "\nTensorProtosDBInput is a simple input operator that basically reads things\nfrom a db where each key-value pair stores an index as key, and a TensorProtos\nobject as value. These TensorProtos objects should have the same size, and they\nwill be grouped into batches of the given size. The DB Reader is provided as\ninput to the operator and it returns as many output tensors as the size of the\nTensorProtos object. Each output will simply be a tensor containing a batch of\ndata with size specified by the 'batch_size' argument containing data from the\ncorresponding index in the TensorProtos objects in the DB.\n", + "attributes": [ + { + "description": "(int, default 0) the number of samples in a batch. The default value of 0 means that the operator will attempt to insert the entire data in a single output blob.", + "name": "batch_size", + "option": "optional" + } + ], + "inputs": [ + { + "description": "A pre-initialized DB reader. Typically, this is obtained by calling CreateDB operator with a db_name and a db_type. The resulting output blob is a DB Reader tensor", + "name": "data" + } + ], + "outputs": [ + { + "description": "The output tensor in which the batches of data are returned. The number of output tensors is equal to the size of (number of TensorProto's in) the TensorProtos objects stored in the DB as values. Each output tensor will be of size specified by the 'batch_size' argument of the operator", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "TensorVectorSize", + "description": "Get the size of the input vector", + "inputs": [ + { + "description": "std::unique_ptr >", + "name": "tensor vector" + } + ], + "outputs": [ + { + "description": "int32_t size", + "name": "size" + } + ], + "support_level": "default" + }, + { + "name": "TextFileReaderRead", + "description": "Read a batch of rows from the given text file reader instance. Expects the number of fields to be equal to the number of outputs. Each output is a 1D tensor containing the values for the given field for each row. 
When end of file is reached, returns empty tensors.", + "attributes": [ + { + "description": "Maximum number of rows to read.", + "name": "batch_size", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Pointer to an existing TextFileReaderInstance.", + "name": "handler" + } + ], + "support_level": "default" + }, + { + "name": "ThresholdedRelu", + "description": "\nThresholdedRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = x for x > alpha, y = 0\notherwise, is applied to the tensor elementwise.\n", + "attributes": [ + { + "description": "(float) defaults to 1.0.", + "name": "alpha", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D input tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "ThresholdedReluGradient", + "description": "\nThresholdedReluGradient takes both Y and dY and uses this to update dX\naccording to the chain rule and derivatives of the rectified linear function.\n", + "support_level": "default" + }, + { + "name": "ThrowChildThreadException", + "support_level": "default" + }, + { + "name": "ThrowException", + "support_level": "default" + }, + { + "name": "Tile", + "description": "\nConstructs a tensor by tiling a given tensor along a specified axis. This operation creates a new tensor by replicating the input tensor a number of times specified by the `tiles` argument along the `axis` dimension. The output tensor's `axis` dimension has $(X.dims(axis) * tiles)$ elements.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/tile_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Tile\",\n [\"X\", \"tiles\", \"axis\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(5,5)))\nworkspace.FeedBlob(\"tiles\", np.array([5]).astype(np.int32))\nworkspace.FeedBlob(\"axis\", np.array([1]).astype(np.int32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[9 1 7 1 3]\n [2 3 6 2 5]\n [0 9 2 6 4]\n [5 8 1 5 9]\n [2 0 1 3 7]]\nY:\n[[9 1 7 1 3 9 1 7 1 3 9 1 7 1 3 9 1 7 1 3 9 1 7 1 3]\n [2 3 6 2 5 2 3 6 2 5 2 3 6 2 5 2 3 6 2 5 2 3 6 2 5]\n [0 9 2 6 4 0 9 2 6 4 0 9 2 6 4 0 9 2 6 4 0 9 2 6 4]\n [5 8 1 5 9 5 8 1 5 9 5 8 1 5 9 5 8 1 5 9 5 8 1 5 9]\n [2 0 1 3 7 2 0 1 3 7 2 0 1 3 7 2 0 1 3 7 2 0 1 3 7]]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "(*int*): number of replicas", + "name": "tiles", + "option": "optional" + }, + { + "description": "(*int*): axis to replicate along", + "name": "axis", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): [OPTIONAL] number of replicas (overrides `tiles` argument)", + "name": "tiles" + }, + { + "description": "(*Tensor``*): [OPTIONAL] axis to replicate along (overrides `axis` argument)", + "name": "axis" + } + ], + "outputs": [ + { + "description": "(*Tensor*): output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "TileGradient", + "support_level": "default" + }, + { + "name": "TimerBegin", + "description": "\nStart a wallclock timer, returning a scalar tensor containing a pointer to it. The timer is stopped by calling **TimerEnd**.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_ops.cc\n\n ", + "attributes": [ + { + "description": "(*str*): name of the timer object; if not set use output name", + "name": "counter_name", + "option": "optional" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): pointer to a timer object", + "name": "timer" + } + ], + "support_level": "default" + }, + { + "name": "TimerEnd", + "description": "\nStop a timer started with **TimerBegin**. Publishes a CAFFE_EVENT.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_ops.cc\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): pointer to a timer object; obtained from **TimerBegin** op", + "name": "timer" + } + ], + "support_level": "default" + }, + { + "name": "TimerGet", + "description": "\nQueries the current time of a timer object in nanoseconds.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_ops.cc\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): pointer to a timer object; obtained from **TimerBegin** op", + "name": "timer" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): scalar containing time in nanoseconds", + "name": "nanos" + } + ], + "support_level": "default" + }, + { + "name": "TimerGetAndEnd", + "description": "\nQueries the current time of a timer in nanos, stops the timer publishing a CAFFE_EVENT.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\ntimerbegin_op = core.CreateOperator(\n \"TimerBegin\",\n [],\n [\"timer\"]\n)\n\ntimerget_op = core.CreateOperator(\n \"TimerGet\",\n [\"timer\"],\n [\"nanos\"]\n)\n\ntimerend_op = core.CreateOperator(\n \"TimerEnd\",\n [\"timer\"],\n []\n)\n\ntimergetandend_op = core.CreateOperator(\n \"TimerGetAndEnd\",\n [\"timer\"],\n [\"nanos\"]\n)\n\n# Test TimerBegin/TimerGet/TimerEnd\nworkspace.RunOperatorOnce(timerbegin_op)\nprint(\"timer:\", workspace.FetchBlob(\"timer\"))\nworkspace.RunOperatorOnce(timerget_op)\nprint(\"nanos:\", workspace.FetchBlob(\"nanos\"))\nworkspace.RunOperatorOnce(timerend_op)\n\n\n# Test TimerBegin/TimerGetAndEnd\nworkspace.RunOperatorOnce(timerbegin_op)\nprint(\"timer:\", workspace.FetchBlob(\"timer\"))\nworkspace.RunOperatorOnce(timergetandend_op)\nprint(\"nanos:\", workspace.FetchBlob(\"nanos\"))\n\n```\n\n**Result**\n\n```\n\ntimer: b'timer, a C++ native class of type caffe2::TimerInstance*.'\nnanos: 361140\ntimer: b'timer, a C++ native class of type caffe2::TimerInstance*.'\nnanos: [252250]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): pointer to a timer object; obtained from **TimerBegin** op", + "name": "timer" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): scalar tensor containing time in nanoseconds", + "name": "nanos" + } + ], + "support_level": "default" + }, + { + "name": "TopK", + "description": "\nRetrieve the top-K elements of the last dimension.\nGiven an input tensor of shape $(a_1, a_2, ..., a_n, r)$. `k` can be passed as an integer argument or a 1D tensor containing a single integer.\nReturns up to three outputs:\n\n1. Value tensor of shape $(a_1, a_2, ..., a_n, k)$ which contains the values of the top k elements along the last dimension\n2. Index tensor of shape $(a_1, a_2, ..., a_n, k)$ which contains the indices of the top k elements (original indices from the input tensor).\n3. [OPTIONAL] Flattened index tensor of shape $(a_1 * a_2 * ... * a_n * k,)$.\n\nGiven two equivalent values, this operator uses the indices along the last dimension as a tiebreaker. That is, the element with the lower index will appear first.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/top_k.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"TopK\",\n [\"X\"],\n [\"Values\", \"Indices\", \"Flattened_indices\"],\n k=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(3,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Values:\", workspace.FetchBlob(\"Values\"))\nprint(\"Indices:\", workspace.FetchBlob(\"Indices\"))\nprint(\"Flattened_indices:\", workspace.FetchBlob(\"Flattened_indices\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[6. 7. 0.]\n [8. 7. 7.]\n [1. 5. 6.]]\n\n [[0. 6. 1.]\n [2. 8. 4.]\n [1. 2. 9.]]\n\n [[4. 3. 7.]\n [0. 1. 7.]\n [0. 1. 8.]]]\nValues:\n[[[7. 6.]\n [8. 7.]\n [6. 5.]]\n\n [[6. 1.]\n [8. 4.]\n [9. 2.]]\n\n [[7. 4.]\n [7. 1.]\n [8. 1.]]]\nIndices:\n[[[1 0]\n [0 1]\n [2 1]]\n\n [[1 2]\n [1 2]\n [2 1]]\n\n [[2 0]\n [2 1]\n [2 1]]]\nFlattened_indices: [ 1 0 3 4 8 7 10 11 13 14 17 16 20 18 23 22 26 25]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): input tensor of shape $(a_1, a_2, ..., a_n, r)$", + "name": "X" + }, + { + "description": "(*int*): number of top elements to retrieve", + "name": "k" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): output tensor of shape $(a_1, a_2, ..., a_n, k)$", + "name": "Values" + }, + { + "description": "(*Tensor``*): tensor of indices of shape $(a_1, a_2, ..., a_n, k)$; indices values refer to each element's index in the last dimension of the `X` input tensor", + "name": "Indices" + }, + { + "description": "(*Tensor``*): tensor of indices of shape $(a_1 * a_2 * ... * a_n * k,)$; indices values refer to each element's index in the flattened input tensor `X`", + "name": "Flattened_indices" + } + ], + "support_level": "default" + }, + { + "name": "TopKGradient", + "support_level": "default" + }, + { + "name": "Transpose", + "category": "Transform", + "description": "\nTranspose the input tensor by permuting the axes of the input according\nto the `axes` argument. Similar to numpy's\n[transpose](https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html)\nfunction.\n\nFor example, when axes=(1, 0, 2), given an input tensor of shape\n(1, 2, 3), the output shape will be (2, 1, 3).\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/transpose_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Transpose\",\n [\"X\"],\n [\"Y\"],\n axes=(0,3,1,2)\n)\n\nx = np.random.rand(1,32,32,3)\nworkspace.FeedBlob(\"X\", x)\nprint(\"X.shape (NHWC order):\", workspace.FetchBlob(\"X\").shape)\nworkspace.RunOperatorOnce(op)\nprint(\"Y.shape (NCHW order):\", workspace.FetchBlob(\"Y\").shape)\n```\n\n**Result**\n\n```\nX.shape (NHWC order): (1, 32, 32, 3)\nY.shape (NCHW order): (1, 3, 32, 32)\n```\n\n
\n\n", + "attributes": [ + { + "description": "Order to permute axes of input tensor. Reverses the dimensions by default.", + "name": "axes", + "option": "optional" + } + ], + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Transposed output.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "TrimDataset", + "description": "\nTrim the given dataset inplace, given the dataset blobs and the field specs.\nTrimming happens such that the dataset will contain the largest possible number\nof records that is a multiple of the 'multiple_of' argument.\n", + "attributes": [ + { + "description": "List of strings representing the string names in the formatspecified in the doc for CreateTreeCursor.", + "name": "fields", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "TT", + "description": "\nThe TT-layer serves as a low-rank decomposition of a fully connected layer. The\ninputs are the same as to a fully connected layer, but the number of parameters\nare greatly reduced and forward computation time can be drastically reduced\nespecially for layers with large weight matrices. The multiplication is computed\nas a product of the input vector with each of the cores that make up the TT\nlayer. Given the input sizes (inp_sizes), output sizes(out_sizes), and the ranks\nof each of the cores (tt_ranks), the ith core will have size:\n\n inp_sizes[i] * tt_ranks[i] * tt_ranks[i + 1] * out_sizes[i].\n\nThe complexity of the computation is dictated by the sizes of inp_sizes,\nout_sizes, and tt_ranks, where there is the trade off between accuracy of the\nlow-rank decomposition and the speed of the computation.\n", + "attributes": [ + { + "description": "(int[]) Input sizes of cores. Indicates the input size of the individual cores; the size of the input vector X must match the product of the inp_sizes array.", + "name": "inp_sizes", + "option": "optional" + }, + { + "description": "(int[]) Output sizes of cores. Indicates the output size of the individual cores; the size of the output vector Y must match the product of the out_sizes array.", + "name": "out_sizes", + "option": "optional" + }, + { + "description": "(int[]) Ranks of cores. Indicates the ranks of the individual cores; lower rank means larger compression, faster computation but reduce accuracy.", + "name": "tt_ranks", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor from previous layer with size (M x K), where M is the batch size and K is the input size.", + "name": "X" + }, + { + "description": "1D blob containing the bias vector", + "name": "b" + }, + { + "description": "1D blob containing each individual cores with sizes specified above.", + "name": "cores" + } + ], + "outputs": [ + { + "description": "Output tensor from previous layer with size (M x N), where M is the batch size and N is the output size.", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "TTLinearGradient", + "support_level": "default" + }, + { + "name": "TTSparseLengthsSum", + "description": "\nThis operator introduce a new, parameter efficient embedding layer, termed TT embedding, which\ncan be plugged in into any model and trained end-to-end. The benefits of our compressed TT layer\nare twofold. 
Firstly, instead of storing a huge embedding matrix, it stores a sequence of much smaller\n2-dimensional and 3-dimensional tensors, necessary for reconstructing the required embeddings,\nwhich allows compressing the model significantly at the cost of a negligible performance drop.\nSecondly, the overall number of parameters can be relatively small (and constant) during the whole\ntraining stage, which makes it possible to use larger batches or to train efficiently with limited resources.\n", + "attributes": [ + { + "description": "vector: factorization of vocab size", + "name": "factor_i", + "option": "optional" + }, + { + "description": "vector: factorization of emb size", + "name": "factor_j", + "option": "optional" + }, + { + "description": "int[] Ranks of cores", + "name": "ranks", + "option": "optional" + }, + { + "description": "int: the size of each embedding entry", + "name": "emb_size", + "option": "optional" + } + ], + "inputs": [ + { + "description": "tensor core 0", + "name": "core0" + }, + { + "description": "tensor core 1", + "name": "core1" + }, + { + "description": "tensor core 2", + "name": "core2" + }, + { + "description": "index for embedding", + "name": "index" + }, + { + "description": "segment lengths", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Aggregated tensor", + "name": "OUTPUT" + }, + { + "description": "intermediate mm result from core0 for backward path", + "name": "core0_output" + }, + { + "description": "intermediate mm result from core1 for backward path", + "name": "core1_output" + }, + { + "description": "the index for each core", + "name": "indices" + } + ], + "support_level": "default" + }, + { + "name": "TTSparseLengthsSumGradient", + "support_level": "default" + }, + { + "name": "UniformFill", + "description": "\nFill the output tensor with float samples from uniform distribution [`min`, `max`].\n\n- The range can be defined either by arguments or input blobs. `min` and `max` are inclusive.\n - If the range is given by input blobs, you also need to give the shape as input.\n - When the range is given as arguments, this operator enforces min <= max. When the range is given as inputs, the constraint is not enforced.\n - When the range is given as inputs and max < min, the first dimension of the output is set to 0. This behavior is allowed so that dynamically sampling indices into a dynamically sized tensor is possible.\n- The shape of the output can be given as argument or input.\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n
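For the TT entry above, the core-size formula is easiest to see with concrete numbers. A worked sketch follows; the sizes below are hypothetical, chosen only for illustration:

```
# Hypothetical TT decomposition of a 16 x 64 fully connected layer.
inp_sizes = [4, 4]    # product = 16 = input size K
out_sizes = [8, 8]    # product = 64 = output size N
tt_ranks = [1, 3, 1]  # boundary ranks are 1

core_sizes = [
    inp_sizes[i] * tt_ranks[i] * tt_ranks[i + 1] * out_sizes[i]
    for i in range(len(inp_sizes))
]
print(core_sizes)       # [96, 96]
print(sum(core_sizes))  # 192 TT parameters vs. 16 * 64 = 1024 dense weights
```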
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop_1 = core.CreateOperator(\n \"UniformFill\",\n [],\n [\"output\"],\n min=5.5,\n max=10.5,\n shape=(3,3)\n)\n\nop_2 = core.CreateOperator(\n \"UniformFill\",\n [\"shape\", \"min\", \"max\"],\n [\"output\"],\n input_as_shape=1\n)\n\n# Test arg-based op\nworkspace.RunOperatorOnce(op_1)\nprint(\"output (op_1):\\n\", workspace.FetchBlob(\"output\"))\n\n# Test input-based op\nworkspace.ResetWorkspace()\nworkspace.FeedBlob(\"shape\", np.array([5,5]))\nworkspace.FeedBlob(\"min\", np.array(13.8, dtype=np.float32))\nworkspace.FeedBlob(\"max\", np.array(19.3, dtype=np.float32))\nworkspace.RunOperatorOnce(op_2)\nprint(\"output (op_2):\\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\noutput (op_1):\n [[8.894862 8.225005 6.7890406]\n [9.588293 7.1072135 7.7234955]\n [8.210596 6.0202913 9.665462 ]]\noutput (op_2):\n [[18.965155 15.603871 15.038921 17.14872 18.134571]\n [18.84237 17.845276 19.214737 16.970337 15.494069]\n [18.754795 16.724329 15.311974 16.962536 18.60965 ]\n [15.186268 15.264773 18.73341 19.077969 14.237255]\n [15.917589 15.844325 16.248466 17.006554 17.502048]]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "(*float*): minimum value, inclusive", + "name": "min", + "option": "optional" + }, + { + "description": "(*float*): maximum value, inclusive", + "name": "max", + "option": "optional" + }, + { + "description": "(*Tuple(int)*): shape of the output, do not set when `input_as_shape`=1", + "name": "shape", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to use the first input as shape; `shape` input must be in CPU context", + "name": "input_as_shape", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): 1-D tensor of the shape of the output, must be used with `input_as_shape` argument", + "name": "shape" + }, + { + "description": "(*Tensor``*): scalar tensor containing minimum value, inclusive", + "name": "min" + }, + { + "description": "(*Tensor``*): scalar tensor containing maximum value, inclusive", + "name": "max" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): filled output tensor", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "UniformIntFill", + "description": "\nFill the output tensor with int32 samples from uniform distribution [`min`, `max`].\n\n- The range can be defined either by arguments or input blobs. `min` and `max` are inclusive.\n - If the range is given by input blobs, you also need to give the shape as input.\n - When the range is given as arguments, this operator enforces min <= max. When the range is given as inputs, the constraint is not enforced.\n - When the range is given as inputs and max < min, the first dimension of the output is set to 0. This behavior is allowed so that dynamically sampling indices into a dynamically sized tensor is possible.\n- The shape of the output can be given as argument or input.\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop_1 = core.CreateOperator(\n \"UniformIntFill\",\n [],\n [\"output\"],\n min=5,\n max=10,\n shape=(3,3)\n)\n\nop_2 = core.CreateOperator(\n \"UniformIntFill\",\n [\"shape\", \"min\", \"max\"],\n [\"output\"],\n input_as_shape=1\n)\n\n# Test arg-based op\nworkspace.RunOperatorOnce(op_1)\nprint(\"output (op_1):\\n\", workspace.FetchBlob(\"output\"))\n\n# Test input-based op\nworkspace.ResetWorkspace()\nworkspace.FeedBlob(\"shape\", np.array([5,5]))\nworkspace.FeedBlob(\"min\", np.array(13, dtype=np.int32))\nworkspace.FeedBlob(\"max\", np.array(19, dtype=np.int32))\nworkspace.RunOperatorOnce(op_2)\nprint(\"output (op_2):\\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\noutput (op_1):\n [[ 6 10 7]\n [ 5 10 6]\n [ 7 5 10]]\noutput (op_2):\n [[19 13 15 13 13]\n [14 17 14 15 15]\n [17 14 19 13 13]\n [17 18 16 13 18]\n [14 15 16 18 16]]\n\n```\n\n
\n\n ", + "attributes": [ + { + "description": "(*int*): minimum value, inclusive", + "name": "min", + "option": "optional" + }, + { + "description": "(*int*): maximum value, inclusive", + "name": "max", + "option": "optional" + }, + { + "description": "(*Tuple(int)*): shape of the output, do not set when `input_as_shape`=1", + "name": "shape", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to use the first input as shape; `shape` input must be in CPU context", + "name": "input_as_shape", + "option": "optional" + } + ], + "inputs": [ + { + "description": "(*Tensor``*): 1-D tensor of the shape of the output, must be used with `input_as_shape` argument", + "name": "shape" + }, + { + "description": "(*Tensor``*): scalar tensor containing minimum value, inclusive", + "name": "min" + }, + { + "description": "(*Tensor``*): scalar tensor containing maximum value, inclusive", + "name": "max" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): filled output tensor", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Unique", + "description": "\nDeduplicates input indices vector and optionally produces reverse remapping.\nThere's no guarantees on the ordering of the output indices.\n", + "inputs": [ + { + "description": "1D tensor of int32 or int64 indices.", + "name": "indices" + } + ], + "outputs": [ + { + "description": "1D tensor of deduped entries.", + "name": "unique_indices" + }, + { + "description": "(optional) mapping from `indices` to `unique_indices`. This has the same shape as `indices`. Its elements are the indices into `unique_indices` such that `Gather(['unique_indices', 'remapping'])` yields `indices`.", + "name": "remapping" + } + ], + "support_level": "default" + }, + { + "name": "UniqueUniformFill", + "description": "\nFill the output tensor with uniform samples between min and max (inclusive).\nIf the second input is given, its elements will be excluded from uniform\nsampling. Using the second input will require you to provide shape via the first\ninput.\n", + "attributes": [ + { + "description": "Minimum value, inclusive", + "name": "min", + "option": "optional" + }, + { + "description": "Maximum value, inclusive", + "name": "max", + "option": "optional" + }, + { + "description": "The data type for the elements of the output tensor.Strictly must be one of the types from DataType enum in TensorProto.This only supports INT32 and INT64 now. If not set, assume INT32", + "name": "dtype", + "option": "optional" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob. Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor to provide shape information", + "name": "input" + }, + { + "description": "(optional) Avoid elements in this tensor. 
Elements must be unique.", + "name": "avoid" + } + ], + "outputs": [ + { + "description": "Output tensor of unique uniform samples", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "UnPackRecords", + "description": "\nGiven a packed dataset (packed by the PackRecordsOp) and the `fields` argument\ndescribing the dataset's schema, return the original dataset format. Number of\nreturned tensors is equal to the number of fields in the `fields` argument.\n\nThe first input is the packed tensor to be unpacked. Optionally, you can provide\nprototype tensors to give the expected shapes of the output tensors. This is\nhelpful when you expect to unpack an empty tensor, e.g., the output of a sampling\nprocess.\n", + "attributes": [ + { + "description": "List of strings representing the string names in the format specified in the doc for CreateTreeCursor.", + "name": "fields", + "option": "optional" + } + ], + "inputs": [ + { + "description": "The tensor to be unpacked", + "name": "packed_tensor" + } + ], + "support_level": "default" + }, + { + "name": "UnpackRNNSequence", + "description": "\nThis is the reverse operator for PackRNNSequence. It maps the packed values\nback to sequence values based on the length blob. Each number from the length blob\nrepresents the corresponding values that have been grouped. The dimension\nfor each pack is the same as the maximum number from the length blob (padding\nwith zero is used for smaller length values). The overall output\ndimension is: M * D, where M is the sum of lengths, and D is the dimension of\neach feature value. The following example shows the input and output of\nthis operator:\n\n\nGiven:\n values = [\n [v1, v3, v6, v7],\n [v2, v4, 0, v8],\n [0, v5, 0, 0 ],\n ]\n lengths = [2, 3, 1, 2]\n\n\nOutput:\n output = [v1, v2, v3, v4, v5, v6, v7, v8];\n\n\nOne application of this operator is to transfer data from the RNN format\nback to sequence values. 
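A runnable version of the worked v1..v8 example above, written in the same style as the other entries (illustrative only; D = 1 here, so the packed `values` matrix is 3 x 4):

```
workspace.ResetWorkspace()

op = core.CreateOperator(
    "UnpackRNNSequence",
    ["values", "lengths"],
    ["output"]
)

# Same layout as the symbolic example, with concrete numbers.
values = np.array([[1, 3, 6, 7],
                   [2, 4, 0, 8],
                   [0, 5, 0, 0]], dtype=np.float32)
workspace.FeedBlob("values", values)
workspace.FeedBlob("lengths", np.array([2, 3, 1, 2], dtype=np.int32))
workspace.RunOperatorOnce(op)
print("output:", workspace.FetchBlob("output"))  # expect [1. 2. 3. 4. 5. 6. 7. 8.]
```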
Note that the gradient operator of\nUnpackRNNSequence is PackRNNSequence.\n", + "inputs": [ + { + "description": "Data tensor, contains the packed features", + "name": "values" + }, + { + "description": "lengths with each number representing the pack size.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Output tensor before packing", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "UnpackSegments", + "description": "Map N+1 dim tensor to N dim based on length blob", + "attributes": [ + { + "description": "The pre-defined max_length for the packed segments", + "name": "max_length", + "option": "optional" + } + ], + "inputs": [ + { + "description": "1-d int/long tensor contains the length in each of the input.", + "name": "lengths" + }, + { + "description": "N+1 dim Tensor.", + "name": "tensor" + } + ], + "outputs": [ + { + "description": "N dim Tensor", + "name": "packed_tensor" + } + ], + "support_level": "default" + }, + { + "name": "UnsafeCoalesce", + "description": "\nCoalesce the N inputs into N outputs and a single coalesced output blob.\nThis allows operations that operate over multiple small kernels (e.g.\nbiases in a deep CNN) to be coalesced into a single larger operation,\namortizing the kernel launch overhead, synchronization costs for\ndistributed computation, etc.\nThe operator:\n- computes the total size of the coalesced blob by summing the input sizes\n- allocates the coalesced output blob as the total size\n- copies the input vectors into the coalesced blob, at the correct offset.\n- aliases each Output(i) to point into the coalesced blob, at the corresponding offset for Input(i).\nThis is 'unsafe' as the output vectors are aliased, so use with\ncaution.\n", + "support_level": "default" + }, + { + "name": "UnsortedSegmentMean", + "description": "\nApplies 'Mean' to each segment of the input tensor. Segment ids can appear in\narbitrary order (unlike in SortedSegmentMean).\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nIf the `num_segments` argument is passed, it is used as the first dimension of\nthe output. Otherwise, it is dynamically calculated as the max value of\nSEGMENT_IDS plus one. Other output dimensions are inherited from the input\ntensor.\n\nMean computes the element-wise mean of the input slices. Operation doesn't change the shape of the individual blocks.\n ", + "attributes": [ + { + "description": "Optional int argument specifying the number of output segments and thus the first dimension of the output", + "name": "num_segments", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector with the same length as the first dimension of DATA that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has its first dimension equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "UnsortedSegmentMeanGradient", + "support_level": "default" + }, + { + "name": "UnsortedSegmentSum", + "description": "\nApplies 'Sum' to each segment of the input tensor. 
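(The segment semantics shared by the UnsortedSegment* entries are easy to pin down in numpy; a reference sketch of the 'Sum' case, not the caffe2 kernel:)

```
import numpy as np

DATA = np.array([[1., 2.], [3., 4.], [5., 6.], [7., 8.]])
SEGMENT_IDS = np.array([1, 0, 1, 0])  # arbitrary order is allowed

num_segments = SEGMENT_IDS.max() + 1
OUTPUT = np.zeros((num_segments,) + DATA.shape[1:])
for i, seg in enumerate(SEGMENT_IDS):
    OUTPUT[seg] += DATA[i]  # 'Sum' aggregation per segment
print(OUTPUT)  # [[10. 12.] [ 6.  8.]]
```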
Segment ids can appear in\narbitrary order (unlike in SortedSegmentSum).\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nIf the `num_segments` argument is passed, it is used as the first dimension of\nthe output. Otherwise, it is dynamically calculated as the max value of\nSEGMENT_IDS plus one. Other output dimensions are inherited from the input\ntensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "attributes": [ + { + "description": "Optional int argument specifying the number of output segments and thus the first dimension of the output", + "name": "num_segments", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector with the same length as the first dimension of DATA that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has its first dimension equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "UnsortedSegmentSumGradient", + "support_level": "default" + }, + { + "name": "UnsortedSegmentWeightedSum", + "description": "\nApplies 'WeightedSum' to each segment of the input tensor. Segment ids can appear in\narbitrary order (unlike in SortedSegmentWeightedSum).\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nIf the `num_segments` argument is passed, it is used as the first dimension of\nthe output. Otherwise, it is dynamically calculated as the max value of\nSEGMENT_IDS plus one. Other output dimensions are inherited from the input\ntensor.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "attributes": [ + { + "description": "Optional int argument specifying the number of output segments and thus the first dimension of the output", + "name": "num_segments", + "option": "optional" + }, + { + "description": "Also produce the gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Integer vector with the same length as the first dimension of DATA that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has its first dimension equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + }, + { + "name": "UnsortedSegmentWeightedSumGradient", + "support_level": "default" + }, + { + "name": "UpsampleBilinear", + "description": "\nResizes the spatial dimensions of the input using bilinear\ninterpolation. 
The `width_scale` and `height_scale` arguments\ncontrol the size of the output, which is given by:\noutput_width = floor(input_width * width_scale)\noutput_height = floor(input_height * height_scale)\n", + "attributes": [ + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X" + }, + { + "description": "1D, 2-element, Scales tensor, [height_scale, width_scale]", + "name": "scales" + } + ], + "outputs": [ + { + "description": "Output tensor", + "name": "Y" + } + ], + "support_level": "default" + }, + { + "name": "UpsampleBilinearGradient", + "attributes": [ + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "VariableLengthSequencePadding", + "description": "\nSuper special-case operator. Used to pad a tensor to mimic pytorch's\npad_packed_sequence.\n\nGiven an input tensor INPUT of size NxBxM and an input tensor LENS\nof size B, where\n\nN = maximum sequence length\nB = batch size\nM = hidden size\n\nset each element of INPUT to zero if it is past the end of the\ncorresponding sequence (i.e. if i >= LENS[j] for an index (i,j,k)).\n\n", + "support_level": "default" + }, + { + "name": "ViterbiPath", + "description": "\nGiven a predictions matrix and a transitions matrix, get the path with the best\nscore\n", + "inputs": [ + { + "description": "N*D predictions matrix", + "name": "predictions" + }, + { + "description": "D*D transitions matrix", + "name": "transitions" + } + ], + "outputs": [ + { + "description": "N*1 vector holds the best path indices", + "name": "viterbi_path" + } + ], + "support_level": "default" + }, + { + "name": "WallClockTime", + "description": "Time since epoch in nanoseconds.", + "outputs": [ + { + "description": "The time in nanoseconds.", + "name": "time" + } + ], + "support_level": "default" + }, + { + "name": "WeightedMultiSampling", + "description": "\nThe operator performs sampling based on the input sampling weights.\nThe weights are cumulative probabilities (a CDF), and thus sorted. The output is\na 1-D tensor (Tensor). If two inputs are given, the second input\nis used to provide the shape of the output sample tensor. Otherwise, we use\nargument `num_samples` to determine the number of samples to generate.\n", + "attributes": [ + { + "description": "number of samples to sample from the input data", + "name": "num_samples", + "option": "optional" + } + ], + "inputs": [ + { + "description": "An optional 1-D Tensor. Input cumulative sampling probability (such as [0.2, 0.5, 0.8, 1.5]). All weights must be non-negative numbers. Note that the last value of CDF is not necessarily 1. 
If the last value is not 1, all values in sampling_cdf will be scaled by this number.", + "name": "sampling_cdf" + }, + { + "description": "Tensor whose shape will be applied to output.", + "name": "shape_tensor (optional)" + } + ], + "outputs": [ + { + "description": "The output tensor contains indices sampled from the distribution given by the weight vector in the input tensor. The output is a 1-D Tensor of size determined by the argument `num_samples` or the second input tensor.", + "name": "sampled_indexes" + } + ], + "support_level": "default" + }, + { + "name": "WeightedSample", + "description": "\nThe operator performs sampling based on the input sampling weights for\neach batch. All weights must be non-negative numbers.\nThe input is a 2-D tensor (Tensor) of size (batch_size x weights_dim).\nFor each batch, an index is randomly sampled from the distribution given by\nthe weights of the corresponding batch.\nThe output is a 1-D tensor (Tensor) of size (batch_size x 1) and\ncontains the index(es) of the sampled output.\n", + "inputs": [ + { + "description": "A 2-D Tensor of size (batch_size x weights_dim). All weights must be non-negative numbers.", + "name": "sampling_weights" + }, + { + "description": "An optional 2-D Tensor of size (batch_size x weights_dim). Its values correspond to the sampling weights.", + "name": "sampling_values" + } + ], + "outputs": [ + { + "description": "The output tensor contains index(es) sampled from the distribution given by the weight vector(s) in the input tensor. The output is a 1-D Tensor of size (batch_size x 1)", + "name": "sampled_indexes" + }, + { + "description": "The output tensor contains value(s) selected by the sampled index(es). It is a 1-D Tensor of size (batch_size x 1)", + "name": "sampled_values" + } + ], + "support_level": "default" + }, + { + "name": "WeightedSampleDequeueBlobs", + "description": "\nDequeue the blobs from multiple queues. When one of the queues is closed and empty,\nthe output status will be set to true, which can be used as an exit criterion for\nan execution step.\nThe 1st input is the queue and the last output is the status. The rest are\ndata blobs.\n", + "attributes": [ + { + "description": "Weights for sampling from multiple queues", + "name": "weights", + "option": "optional" + }, + { + "description": "The index of the blob (among the output blob list) that will be used to store the index of the table chosen to read the current batch.", + "name": "table_idx_blob", + "option": "optional" + } + ], + "support_level": "default" + }, + { + "name": "WeightedSigmoidCrossEntropyWithLogits", + "description": "\nGiven three matrices: logits, targets, weights, all of the same shape\n(batch_size, num_classes), computes the weighted sigmoid cross entropy between\nlogits and targets. 
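For the WeightedSample entry above, a minimal usage sketch in the style of the other entries (illustrative only; the blob names are taken from the metadata):

```
workspace.ResetWorkspace()

op = core.CreateOperator(
    "WeightedSample",
    ["sampling_weights"],
    ["sampled_indexes"]
)

# Two batches; each row is an unnormalized distribution over 3 choices.
weights = np.array([[0.0, 0.0, 1.0],
                    [0.5, 0.5, 0.0]], dtype=np.float32)
workspace.FeedBlob("sampling_weights", weights)
workspace.RunOperatorOnce(op)
print("sampled_indexes:", workspace.FetchBlob("sampled_indexes"))
# Row 0 always samples index 2; row 1 samples index 0 or 1.
```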
Specifically, at each position r,c, this computes\nweights[r, c] * crossentropy(sigmoid(logits[r, c]), targets[r, c]), and then\naverages over each row.\nReturns a tensor of shape (batch_size,) of losses for each example.\n", + "inputs": [ + { + "description": "matrix of logits for each example and class.", + "name": "logits" + }, + { + "description": "matrix of targets, same shape as logits.", + "name": "targets" + }, + { + "description": "matrix of weights, same shape as logits.", + "name": "weights" + } + ], + "outputs": [ + { + "description": "Vector with the total xentropy for each example.", + "name": "xentropy" + } + ], + "support_level": "default" + }, + { + "name": "WeightedSigmoidCrossEntropyWithLogitsGradient", + "support_level": "default" + }, + { + "name": "WeightedSum", + "description": "\nElement-wise weighted sum of several data, weight tensor pairs.\nInput should be in the form X_0, weight_0, X_1, weight_1, ... where X_i all\nhave the same shape, and weight_i are size 1 tensors that specify the weight\nof each vector. Note that if one wants to do in-place computation, it can\nonly be done with X_0 as the output, but not any other X_i.\n", + "inputs": [ + { + "description": "Weight of the first input in the sum.", + "name": "weight_0" + } + ], + "outputs": [ + { + "description": "Result containing weighted elem-wise sum of inputs.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "WeightedSumGradient", + "support_level": "default" + }, + { + "name": "WeightScale", + "description": "\nEvery `stepsize` iterations, multiply the weights by a constant `scale`:\n nw = w * scale\n", + "attributes": [ + { + "description": "Number of iterations between weight scalings", + "name": "stepsize", + "option": "optional" + }, + { + "description": "After iter passes this bound, do not perform the weight rescaling", + "name": "upper_bound_iter", + "option": "optional" + }, + { + "description": "The multiplicative factor applied to weights.", + "name": "scale", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Current weights", + "name": "w" + }, + { + "description": "Training Iteration", + "name": "iter" + } + ], + "outputs": [ + { + "description": "Updated weights", + "name": "nw" + } + ], + "support_level": "default" + }, + { + "name": "Where", + "description": "\nOperator Where takes three input data (Tensor, Tensor, Tensor) and\nproduces one output data (Tensor) where z = c ? x : y is applied elementwise.\n", + "inputs": [ + { + "description": "input tensor containing booleans", + "name": "C" + }, + { + "description": "input tensor", + "name": "X" + }, + { + "description": "input tensor", + "name": "Y" + } + ], + "outputs": [ + { + "description": "output tensor", + "name": "Z" + } + ], + "support_level": "default" + }, + { + "name": "While", + "description": "\n'While' control operator; its first input is a scalar boolean blob that stores the loop's\ncondition value. Accepts 'loop_net' (required) and 'cond_net' (optional) arguments for\nthe loop's body and condition subnets respectively. If the condition subnet is specified,\nit is executed before the first and after each iteration. 
Subnets are executed in\nthe same workspace as 'While'.\n ", + "attributes": [ + { + "description": "Net executed on each iteration", + "name": "loop_net", + "option": "optional" + }, + { + "description": "Net to (re)compute condition value", + "name": "cond_net", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Scalar boolean condition", + "name": "condition" + } + ], + "support_level": "default" + }, + { + "name": "Wngrad", + "description": "\n\nComputes the WnGrad update for an input gradient and accumulated\nhistory. This operator implements the optimization algorithm\nin https://arxiv.org/abs/1803.02865 by Wu, Ward and Bottou.\nConcretely, given inputs (param, grad, seq_b, learning_rate),\ncomputes\n\n new_seq_b = seq_b + 1 / seq_b * norm(grad)^2\n effective_lr = learning_rate / (new_seq_b + epsilon)\n update = learning_rate * grad / (new_seq_b + epsilon)\n new_param = param + update\nand returns (new_param, new_seq_b).\n\nOptionally returns effective_lr and update as well.\n\n", + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Seq_b history", + "name": "seq_b" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated seq_b", + "name": "output_seq_b" + }, + { + "description": "(optional) Effective learning rate", + "name": "output_effective_lr" + }, + { + "description": "(optional) Actual update that is applied.", + "name": "output_update" + } + ], + "support_level": "default" + }, + { + "name": "XavierFill", + "description": "\nThis op fills an output tensor with values sampled from a uniform distribution with the range determined by the desired shape of the output. Rather than specifying the range of values manually, the novelty of Xavier Fill is that it automatically scales the range of the distribution it draws from based on the size of the desired output tensor. For more information, check out the paper [Understanding the difficulty of training deep feedforward neural networks](http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf). The output tensor shape is specified by the *shape* argument. However, if *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set.\n\n*Note: Do not set the shape argument and pass in an input at the same time.*\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n
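Going back to the Wngrad entry above, an illustrative numpy recomputation of the update formulas as written there (a sketch of the math, not the caffe2 kernel; the input values are arbitrary):

```
import numpy as np

param = np.array([1.0, -2.0])
grad = np.array([0.1, -0.2])
seq_b, lr, epsilon = 1.0, 0.5, 1e-5

new_seq_b = seq_b + np.linalg.norm(grad) ** 2 / seq_b
update = lr * grad / (new_seq_b + epsilon)
new_param = param + update  # sign follows the formula in the description
print(new_seq_b, new_param)
```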
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"XavierFill\",\n [],\n [\"out\"],\n shape=[3,3],\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Out:\\n\", workspace.FetchBlob(\"out\"))\n\n```\n\n**Result**\n\n```\n\nOut:\n [[-0.8412168 0.33207083 -0.88418937]\n [ 0.43059897 -0.8340702 0.07781601]\n [ 0.93261135 -0.24542928 -0.3980782 ]]\n\n```\n\n
\n\n", + "attributes": [ + { + "description": "Desired shape of the *output* tensor.", + "name": "shape", + "option": "optional", + "type": "int64[]" + }, + { + "description": "The additional dimensions appended at the end of the *shape* indicated by the input blob. Cannot set the *extra_shape* argument when there is no input blob.", + "name": "extra_shape", + "option": "optional", + "type": "int64[]" + }, + { + "default": false, + "description": "set to *True* to use the *input* as shape. First, input must be in CPU context.", + "name": "input_as_shape", + "option": "optional", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "(Optional) 1D tensor specifying the shape of the output. Must be used with *input_as_shape=True*", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output tensor of random values drawn from an automatically scaled uniform distribution, based on the size of the output tensor. If the shape argument is set, this is the shape specified by the shape argument, and if the *input* exists and *input_as_shape=True*, it is the shape specified by the *input* tensor.", + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Xor", + "description": "\nPerforms element-wise logical operation **xor** (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Xor\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", (np.random.rand(3, 3) > 0.5))\nworkspace.FeedBlob(\"B\", (np.random.rand(3, 3) > 0.5))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[ True True True]\n [False False True]\n [False True False]]\nB:\n[[False False False]\n [ True True True]\n [False False False]]\nC:\n[[ True True True]\n [ True True False]\n [False True False]]\n\n```\n\n
\n\n ", + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "inputs": [ + { + "description": "*(type: Tensor``)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor of booleans. Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + }, + { + "name": "YellowFin", + "description": "\n\nComputes the YellowFin update (https://arxiv.org/abs/1706.03471) and performs\nmomentum SGD optimization step. lr and mu are not being shared between\nparameters. curv_win, g_avg, g2_avg and scalars_memory are just auxiliary\nmemory for computing moving averages (see the publication). Takes arguments\nbeta: coefficient for moving averages,\ncurv_win_width: timeframe when average squared gradient is being stored,\nepsilon: for numerical purposes,\nnesterov and zero_debias for debias of moving average.\n\n", + "attributes": [ + { + "description": "Default 0.999", + "name": "beta", + "option": "optional" + }, + { + "description": "Default 20", + "name": "curv_win_width", + "option": "optional" + }, + { + "description": "Default 1e-6", + "name": "epsilon", + "option": "optional" + }, + { + "description": "Default false", + "name": "nesterov", + "option": "optional" + }, + { + "description": "Default true", + "name": "zero_debias", + "option": "optional" + } + ], + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Momentum", + "name": "moment" + }, + { + "description": "Learning rate", + "name": "lr" + }, + { + "description": "Momentum coefficient", + "name": "mu" + }, + { + "description": "Memory for latest curvature ranges", + "name": "curv_win" + }, + { + "description": "Moving average of gradient", + "name": "g_avg" + }, + { + "description": "Moving average of squared gradient", + "name": "g2_avg" + }, + { + "description": "Memory for stateful scalars", + "name": "scalars_memory" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "Iteration number", + "name": "iter" + } + ], + "outputs": [ + { + "description": "Parameters to be updated", + "name": "output_param" + }, + { + "description": "Momentum", + "name": "output_moment" + }, + { + "description": "Output learning rate", + "name": "output_lr" + }, + { + "description": "Output momentum coefficient", + "name": "output_mu" + }, + { + "description": "Output memory for latest curvature ranges", + "name": "output_curv_win" + }, + { + "description": "Output moving average of gradient", + "name": "output_g_avg" + }, + { + "description": "Output moving average of squared gradient", + "name": "output_g2_avg" + }, + { + "description": "Output memory for stateful scalars", + "name": "output_scalars_memory" + } + ], + "support_level": "default" + }, + { + "name": "ZeroGradient", + "description": "\nZeroGradient operators doesn't produce any output blobs. 
One can use\nthis operator to produce a 0 gradient for the input blob.\n", + "support_level": "default" + }, + { + "name": "DecayAdagrad", + "description": "\n\nComputes the DecayAdagrad update for an\ninput gradient and momentum parameters. Concretely, given inputs\n(param, m1, m2, c, grad, lr, iters),\n\n t = iters + 1\n m1_o = (beta1 * m1) + (1 - beta1) * grad\n m2_o = m2 + np.square(grad)\n c = 1.0 or (1 - power(beta1, t))\n grad_o = m1_o / c / (sqrt(m2_o) + epsilon)\n param_o = param + lr * (grad_o + weight_decay * param)\n\nand returns (param_o, m1_o, m2_o)\n\n", + "attributes": [ + { + "name": "beta1", + "description": "Default 0.9", + "option": "optional" + }, + { + "name": "beta2", + "description": "Default 0.999", + "option": "optional" + }, + { + "name": "epsilon", + "description": "Default 1e-5", + "option": "optional" + }, + { + "name": "weight_decay", + "description": "Default 0.0", + "option": "optional" + }, + { + "name": "bias_correction_first", + "description": "Default True", + "option": "optional" + } + ], + "inputs": [ + { + "name": "param", + "description": "Parameters to be updated" + }, + { + "name": "moment_1", + "description": "First moment history" + }, + { + "name": "moment_2", + "description": "Second moment history" + }, + { + "name": "grad", + "description": "Gradient computed" + }, + { + "name": "lr", + "description": "learning rate" + }, + { + "name": "iter", + "description": "iteration number" + } + ], + "outputs": [ + { + "name": "output_param", + "description": "Updated parameters" + }, + { + "name": "output_moment_1", + "description": "Updated first moment" + }, + { + "name": "output_moment_2", + "description": "Updated second moment" + } + ], + "support_level": "default" + }, + { + "name": "Log1p", + "description": "\nCalculates Log1p of the given input tensor element-wise. This\noperation can be done in an in-place fashion too, by providing the same input\nand output blobs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/log1p_op.cc\n", + "inputs": [ + { + "name": "input", + "description": "Input data blob to be operated on." + } + ], + "outputs": [ + { + "name": "output", + "description": "Output data blob with same shape as input" + } + ], + "support_level": "default" + }, + { + "name": "Log1pGradient", + "support_level": "default" + }, + { + "name": "SparseItemwiseDropoutWithReplacement", + "description": "\n\n`SparseItemwiseDropoutWithReplacement` takes a 1-d input tensor and a lengths tensor.\nValues in the Lengths tensor represent how many input elements constitute each\nexample in a given batch. Each input value in an example's tensor can be\nreplaced with the replacement value with the probability given by the `ratio`\nargument.\n\n
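For the DecayAdagrad entry above, an illustrative numpy recomputation of its update formulas (a sketch of the math, not the caffe2 kernel; note that beta2 does not appear in the m2 update as written):

```
import numpy as np

param = np.array([1.0, 2.0])
m1, m2 = np.zeros(2), np.zeros(2)
grad = np.array([0.1, -0.3])
lr, iters = 0.01, 0
beta1, epsilon, weight_decay = 0.9, 1e-5, 0.0

t = iters + 1
m1_o = beta1 * m1 + (1 - beta1) * grad
m2_o = m2 + np.square(grad)
c = 1 - beta1 ** t  # with bias_correction_first; otherwise c = 1.0
grad_o = m1_o / c / (np.sqrt(m2_o) + epsilon)
param_o = param + lr * (grad_o + weight_decay * param)
print(param_o, m1_o, m2_o)
```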
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"SparseItemwiseDropoutWithReplacement\",\n [\"X\", \"Lengths\"],\n [\"Y\", \"OutputLengths\"],\n ratio=0.5,\n replacement_value=-1\n)\n\nworkspace.FeedBlob(\"X\", np.array([1, 2, 3, 4, 5]).astype(np.int64))\nworkspace.FeedBlob(\"Lengths\", np.array([2, 3]).astype(np.int32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Lengths:\", workspace.FetchBlob(\"Lengths\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"OutputLengths:\", workspace.FetchBlob(\"OutputLengths\"))\n```\n\n**Result**\n\n```\nX: [1, 2, 3, 4, 5]\nLengths: [2, 3]\nY: [1, 2, -1]\nOutputLengths: [2, 1]\n```\n\n
\n\n", + "attributes": [ + { + "name": "ratio", + "type": "float32", + "default": 0.0, + "description": "Probability of an element to be replaced.", + "option": "optional" + }, + { + "name": "replacement_value", + "type": "int64", + "default": 0, + "description": "Value elements are replaced with.", + "option": "optional" + } + ], + "inputs": [ + { + "name": "X", + "description": "*(type: Tensor``)* Input data tensor." + }, + { + "name": "Lengths", + "description": "*(type: Tensor``)* Lengths tensor for input." + } + ], + "outputs": [ + { + "name": "Y", + "description": "*(type: Tensor``)* Output tensor." + }, + { + "name": "OutputLengths", + "description": "*(type: Tensor``)* Output tensor." + } + ], + "support_level": "default" + }, + { + "name": "EstimateAllBlobSizes", + "description": "\nReturns two outputs: a 1D tensor of strings containing the names\nof each blob in the active workspace, and a 1D tensor of integers containing the\nestimated serialized size of each blob (in bytes).\n", + "attributes": [ + { + "name": "include_shared", + "description": "(bool, default true) Whether to include blobs inherited from parent workspaces.", + "option": "optional" + }, + { + "name": "options", + "description": "(string, default empty) A BlobSerializationOptions message specifying options for how specific blobs should be serialized.", + "option": "optional" + } + ], + "outputs": [ + { + "name": "blob_names", + "description": "1D tensor of strings containing blob names." + }, + { + "name": "blob_sizes", + "description": "1D tensor of int64_t containing blob sizes." + } + ], + "support_level": "default" + }, + { + "name": "SmartDecaySparseAdam", + "description": "\n\n Computes the Adam Update for the sparse case.\n Given inputs (param, moment1, moment2, indices, grad, lr, iter), runs the dense\n Adam on (param, moment1[indices], momemnt2[indices], lr, iter) and returns\n (new_param, new_moment1, new_moment2) as in dense case.\n Adam can be customized as Rectified Adam (RAdam) by setting enableRAdam = true.\n\n ", + "attributes": [ + { + "name": "beta1", + "description": "Default 0.9", + "option": "optional" + }, + { + "name": "beta2", + "description": "Default 0.999", + "option": "optional" + }, + { + "name": "epsilon", + "description": "Default 1e-5", + "option": "optional" + } + ], + "inputs": [ + { + "name": "param", + "description": "Parameters to be updated" + }, + { + "name": "moment_1", + "description": "First moment history" + }, + { + "name": "moment_2", + "description": "Second moment history" + }, + { + "name": "last_seen", + "description": "Minibatch index when each weight was last seen" + }, + { + "name": "indices", + "description": "Sparse indices" + }, + { + "name": "grad", + "description": "Gradient computed" + }, + { + "name": "lr", + "description": "learning rate" + }, + { + "name": "iter", + "description": "iteration number" + } + ], + "outputs": [ + { + "name": "output_param", + "description": "Updated parameters" + }, + { + "name": "output_moment_1", + "description": "Updated first moment" + }, + { + "name": "output_moment_2", + "description": "Updated second moment" + }, + { + "name": "output_last_seen", + "description": "Updated minibatch index when each weight was last seen" + } + ], + "support_level": "default" + } +] diff --git a/caffe2-proto.js b/caffe2-proto.js new file mode 100644 index 00000000000..27df8991cb6 --- /dev/null +++ b/caffe2-proto.js @@ -0,0 +1,1773 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('caffe2'); + +$root.caffe2 = 
{}; + +$root.caffe2.TensorProto = class TensorProto { + + constructor() { + this.dims = []; + this.float_data = []; + this.int32_data = []; + this.string_data = []; + this.double_data = []; + this.int64_data = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.TensorProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dims = reader.array(message.dims, () => reader.int64(), tag); + break; + case 2: + message.data_type = reader.int32(); + break; + case 15: + message.data_format = reader.uint32(); + break; + case 3: + message.float_data = reader.floats(message.float_data, tag); + break; + case 4: + message.int32_data = reader.array(message.int32_data, () => reader.int32(), tag); + break; + case 5: + message.byte_data = reader.bytes(); + break; + case 6: + message.string_data.push(reader.bytes()); + break; + case 9: + message.double_data = reader.doubles(message.double_data, tag); + break; + case 10: + message.int64_data = reader.array(message.int64_data, () => reader.int64(), tag); + break; + case 13: + message.raw_data = reader.bytes(); + break; + case 7: + message.name = reader.string(); + break; + case 8: + message.device_detail = $root.caffe2.DeviceOption.decode(reader, reader.uint32()); + break; + case 11: + message.segment = $root.caffe2.TensorProto.Segment.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.TensorProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dims": + reader.array(message.dims, () => reader.int64()); + break; + case "data_type": + message.data_type = reader.enum($root.caffe2.TensorProto.DataType); + break; + case "data_format": + message.data_format = reader.uint32(); + break; + case "float_data": + reader.array(message.float_data, () => reader.float()); + break; + case "int32_data": + reader.array(message.int32_data, () => reader.int32()); + break; + case "byte_data": + message.byte_data = reader.bytes(); + break; + case "string_data": + reader.array(message.string_data, () => reader.bytes()); + break; + case "double_data": + reader.array(message.double_data, () => reader.double()); + break; + case "int64_data": + reader.array(message.int64_data, () => reader.int64()); + break; + case "raw_data": + message.raw_data = reader.bytes(); + break; + case "name": + message.name = reader.string(); + break; + case "device_detail": + message.device_detail = $root.caffe2.DeviceOption.decodeText(reader); + break; + case "segment": + message.segment = $root.caffe2.TensorProto.Segment.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.TensorProto.prototype.data_type = 1; +$root.caffe2.TensorProto.prototype.data_format = 0; +$root.caffe2.TensorProto.prototype.byte_data = new Uint8Array([]); +$root.caffe2.TensorProto.prototype.raw_data = new Uint8Array([]); +$root.caffe2.TensorProto.prototype.name = ""; +$root.caffe2.TensorProto.prototype.device_detail = null; +$root.caffe2.TensorProto.prototype.segment = null; + +$root.caffe2.TensorProto.DataType = { + "UNDEFINED": 0, + "FLOAT": 1, + "INT32": 2, + "BYTE": 3, + "STRING": 4, + "BOOL": 5, + "UINT8": 6, + "INT8": 7, + "UINT16": 8, + "INT16": 9, + "INT64": 10, + "FLOAT16": 12, + "DOUBLE": 13, + 
"ZERO_COLLISION_HASH": 14, + "REBATCHING_BUFFER": 15 +}; + +$root.caffe2.TensorProto.SerializationFormat = { + "FMT_PROTOBUF": 0, + "FMT_BFLOAT16": 1 +}; + +$root.caffe2.TensorProto.Segment = class Segment { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe2.TensorProto.Segment(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.begin = reader.int64(); + break; + case 2: + message.end = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'begin')) { + throw new Error("Excepted 'begin'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'end')) { + throw new Error("Excepted 'end'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.TensorProto.Segment(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "begin": + message.begin = reader.int64(); + break; + case "end": + message.end = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "begin")) { + throw new Error("Excepted 'begin'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "end")) { + throw new Error("Excepted 'end'."); + } + return message; + } +}; + +$root.caffe2.TensorProto.Segment.prototype.begin = protobuf.Int64.create(0); +$root.caffe2.TensorProto.Segment.prototype.end = protobuf.Int64.create(0); + +$root.caffe2.QTensorProto = class QTensorProto { + + constructor() { + this.dims = []; + this.data = []; + this.scales = []; + this.biases = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.QTensorProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dims = reader.array(message.dims, () => reader.int64(), tag); + break; + case 2: + message.precision = reader.int32(); + break; + case 3: + message.scale = reader.double(); + break; + case 4: + message.bias = reader.double(); + break; + case 5: + message.is_signed = reader.bool(); + break; + case 6: + message.data = reader.array(message.data, () => reader.int32(), tag); + break; + case 7: + message.name = reader.string(); + break; + case 8: + message.data_type = reader.int32(); + break; + case 9: + message.scales = reader.doubles(message.scales, tag); + break; + case 10: + message.biases = reader.doubles(message.biases, tag); + break; + case 11: + message.axis = reader.int32(); + break; + case 12: + message.is_multiparam = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'precision')) { + throw new Error("Expected 'precision'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'scale')) { + throw new Error("Expected 'scale'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'bias')) { + throw new Error("Expected 'bias'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'is_signed')) { + throw new Error("Expected 'is_signed'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.QTensorProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dims": + reader.array(message.dims, () => reader.int64()); + break; + case "precision": + message.precision = reader.int32(); + break; + case "scale": + message.scale = reader.double(); + break; + case "bias": + message.bias = reader.double(); + break; + case "is_signed": + message.is_signed = reader.bool(); + break; + case "data": + reader.array(message.data, () => reader.int32()); + break; + case "name": + message.name = reader.string(); + break; + case "data_type": + message.data_type = reader.enum($root.caffe2.TensorProto.DataType); + break; + case "scales": + reader.array(message.scales, () => reader.double()); + break; + case "biases": + reader.array(message.biases, () => reader.double()); + break; + case "axis": + message.axis = reader.int32(); + break; + case "is_multiparam": + message.is_multiparam = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "precision")) { + throw new Error("Expected 'precision'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "scale")) { + throw new Error("Expected 'scale'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "bias")) { + throw new Error("Expected 'bias'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "is_signed")) { + throw new Error("Expected 'is_signed'."); + } + return message; + } +}; + +$root.caffe2.QTensorProto.prototype.precision = 0; +$root.caffe2.QTensorProto.prototype.scale = 0; +$root.caffe2.QTensorProto.prototype.bias = 0; +$root.caffe2.QTensorProto.prototype.is_signed = false; +$root.caffe2.QTensorProto.prototype.name = ""; +$root.caffe2.QTensorProto.prototype.data_type = 2; +$root.caffe2.QTensorProto.prototype.axis = 0; +$root.caffe2.QTensorProto.prototype.is_multiparam = false; + +$root.caffe2.TensorProtos = class TensorProtos { + + constructor() { + this.protos = []; + } + + static decode(reader, length) { + const message 
= new $root.caffe2.TensorProtos(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.protos.push($root.caffe2.TensorProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.TensorProtos(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "protos": + message.protos.push($root.caffe2.TensorProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.TensorShape = class TensorShape { + + constructor() { + this.dims = []; + this.unknown_dims = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.TensorShape(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dims = reader.array(message.dims, () => reader.int64(), tag); + break; + case 2: + message.data_type = reader.int32(); + break; + case 3: + message.unknown_dims = reader.array(message.unknown_dims, () => reader.int32(), tag); + break; + case 4: + message.unknown_shape = reader.bool(); + break; + case 5: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.TensorShape(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dims": + reader.array(message.dims, () => reader.int64()); + break; + case "data_type": + message.data_type = reader.enum($root.caffe2.TensorProto.DataType); + break; + case "unknown_dims": + reader.array(message.unknown_dims, () => reader.int32()); + break; + case "unknown_shape": + message.unknown_shape = reader.bool(); + break; + case "name": + message.name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.TensorShape.prototype.data_type = 1; +$root.caffe2.TensorShape.prototype.unknown_shape = false; +$root.caffe2.TensorShape.prototype.name = ""; + +$root.caffe2.TensorShapes = class TensorShapes { + + constructor() { + this.shapes = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.TensorShapes(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shapes.push($root.caffe2.TensorShape.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.TensorShapes(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shapes": + message.shapes.push($root.caffe2.TensorShape.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.TensorBoundShape = class TensorBoundShape { + + constructor() { + this.dim_type = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.TensorBoundShape(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.caffe2.TensorShape.decode(reader, reader.uint32()); + break; + case 2: + message.dim_type = reader.array(message.dim_type, () => reader.int32(), tag); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.shape_is_final = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.TensorBoundShape(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.caffe2.TensorShape.decodeText(reader); + break; + case "dim_type": + reader.array(message.dim_type, () => reader.enum($root.caffe2.TensorBoundShape.DimType)); + break; + case "name": + message.name = reader.string(); + break; + case "shape_is_final": + message.shape_is_final = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.TensorBoundShape.prototype.shape = null; +$root.caffe2.TensorBoundShape.prototype.name = ""; +$root.caffe2.TensorBoundShape.prototype.shape_is_final = false; + +$root.caffe2.TensorBoundShape.DimType = { + "UNKNOWN": 0, + "CONSTANT": 1, + "BATCH": 2, + "BATCH_OF_FEATURE_MAX": 3, + "BATCH_OF_FEATURE_MAX_DEFAULT": 4, + "FEATURE_MAX": 5, + "FEATURE_MAX_DEFAULT": 6 +}; + +$root.caffe2.TensorBoundShapes = class TensorBoundShapes { + + constructor() { + this.shapes = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.TensorBoundShapes(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shapes.push($root.caffe2.TensorBoundShape.decode(reader, reader.uint32())); + break; + case 2: + message.max_batch_size = reader.int64(); + break; + case 3: + message.max_feature_len = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.TensorBoundShapes(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shapes": + message.shapes.push($root.caffe2.TensorBoundShape.decodeText(reader)); + break; + case "max_batch_size": + message.max_batch_size = reader.int64(); + break; + case "max_feature_len": + message.max_feature_len = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.TensorBoundShapes.prototype.max_batch_size = protobuf.Int64.create(0); +$root.caffe2.TensorBoundShapes.prototype.max_feature_len = protobuf.Int64.create(0); + +$root.caffe2.AOTConfig = class AOTConfig { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe2.AOTConfig(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.max_batch_size = reader.int64(); + break; + case 2: + message.max_seq_size = reader.int64(); + break; + case 3: + message.in_batch_broadcast = reader.bool(); + break; + case 4: + message.onnxifi_blacklist_ops = reader.string(); + break; + case 5: + message.onnxifi_min_ops = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'max_batch_size')) { + throw new Error("Expected 'max_batch_size'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'max_seq_size')) { + throw new Error("Expected 'max_seq_size'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'in_batch_broadcast')) { + throw new Error("Expected 'in_batch_broadcast'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.AOTConfig(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "max_batch_size": + message.max_batch_size = reader.int64(); + break; + case "max_seq_size": + message.max_seq_size = reader.int64(); + break; + case "in_batch_broadcast": + message.in_batch_broadcast = reader.bool(); + break; + case "onnxifi_blacklist_ops": + message.onnxifi_blacklist_ops = reader.string(); + break; + case "onnxifi_min_ops": + message.onnxifi_min_ops = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "max_batch_size")) { + throw new Error("Expected 'max_batch_size'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "max_seq_size")) { + throw new Error("Expected 'max_seq_size'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "in_batch_broadcast")) { + throw new Error("Expected 'in_batch_broadcast'."); + } + return message; + } +}; + +$root.caffe2.AOTConfig.prototype.max_batch_size = protobuf.Int64.create(0); +$root.caffe2.AOTConfig.prototype.max_seq_size = protobuf.Int64.create(0); +$root.caffe2.AOTConfig.prototype.in_batch_broadcast = false; +$root.caffe2.AOTConfig.prototype.onnxifi_blacklist_ops = ""; +$root.caffe2.AOTConfig.prototype.onnxifi_min_ops = 0; + +$root.caffe2.Argument = class Argument { + + constructor() { + this.floats = []; + this.ints = []; + this.strings = []; + this.tensors = []; + this.nets = []; + this.qtensors = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.Argument(); + const end = length !== undefined ? 
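+            // Argument is caffe2's attribute container: singular f/i/s/t/n plus the
+            // repeated floats/ints/strings/tensors/nets/qtensors variants, so the
+            // case numbers below follow caffe2.proto rather than being sequential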
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.f = reader.float(); + break; + case 3: + message.i = reader.int64(); + break; + case 4: + message.s = reader.bytes(); + break; + case 10: + message.t = $root.caffe2.TensorProto.decode(reader, reader.uint32()); + break; + case 8: + message.n = $root.caffe2.NetDef.decode(reader, reader.uint32()); + break; + case 5: + message.floats = reader.floats(message.floats, tag); + break; + case 6: + message.ints = reader.array(message.ints, () => reader.int64(), tag); + break; + case 7: + message.strings.push(reader.bytes()); + break; + case 11: + message.tensors.push($root.caffe2.TensorProto.decode(reader, reader.uint32())); + break; + case 9: + message.nets.push($root.caffe2.NetDef.decode(reader, reader.uint32())); + break; + case 12: + message.qtensors.push($root.caffe2.QTensorProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.Argument(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "f": + message.f = reader.float(); + break; + case "i": + message.i = reader.int64(); + break; + case "s": + message.s = reader.bytes(); + break; + case "t": + message.t = $root.caffe2.TensorProto.decodeText(reader); + break; + case "n": + message.n = $root.caffe2.NetDef.decodeText(reader); + break; + case "floats": + reader.array(message.floats, () => reader.float()); + break; + case "ints": + reader.array(message.ints, () => reader.int64()); + break; + case "strings": + reader.array(message.strings, () => reader.bytes()); + break; + case "tensors": + message.tensors.push($root.caffe2.TensorProto.decodeText(reader)); + break; + case "nets": + message.nets.push($root.caffe2.NetDef.decodeText(reader)); + break; + case "qtensors": + message.qtensors.push($root.caffe2.QTensorProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.Argument.prototype.name = ""; +$root.caffe2.Argument.prototype.f = 0; +$root.caffe2.Argument.prototype.i = protobuf.Int64.create(0); +$root.caffe2.Argument.prototype.s = new Uint8Array([]); +$root.caffe2.Argument.prototype.t = null; +$root.caffe2.Argument.prototype.n = null; + +$root.caffe2.DeviceTypeProto = { + "PROTO_CPU": 0, + "PROTO_CUDA": 1, + "PROTO_MKLDNN": 2, + "PROTO_OPENGL": 3, + "PROTO_OPENCL": 4, + "PROTO_IDEEP": 5, + "PROTO_HIP": 6, + "PROTO_FPGA": 7, + "PROTO_ORT": 8, + "PROTO_XLA": 9, + "PROTO_MPS": 10, + "PROTO_COMPILE_TIME_MAX_DEVICE_TYPES": 11 +}; + +$root.caffe2.DeviceOption = class DeviceOption { + + constructor() { + this.extra_info = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.DeviceOption(); + const end = length !== undefined ? 
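+            // device_type is decoded as a plain int32; its values correspond to the
+            // DeviceTypeProto enum declared above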
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.device_type = reader.int32(); + break; + case 2: + message.device_id = reader.int32(); + break; + case 3: + message.random_seed = reader.uint32(); + break; + case 4: + message.node_name = reader.string(); + break; + case 5: + message.numa_node_id = reader.int32(); + break; + case 6: + message.extra_info.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.DeviceOption(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "device_type": + message.device_type = reader.int32(); + break; + case "device_id": + message.device_id = reader.int32(); + break; + case "random_seed": + message.random_seed = reader.uint32(); + break; + case "node_name": + message.node_name = reader.string(); + break; + case "numa_node_id": + message.numa_node_id = reader.int32(); + break; + case "extra_info": + reader.array(message.extra_info, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.DeviceOption.prototype.device_type = 0; +$root.caffe2.DeviceOption.prototype.device_id = 0; +$root.caffe2.DeviceOption.prototype.random_seed = 0; +$root.caffe2.DeviceOption.prototype.node_name = ""; +$root.caffe2.DeviceOption.prototype.numa_node_id = 0; + +$root.caffe2.OperatorDef = class OperatorDef { + + constructor() { + this.input = []; + this.output = []; + this.arg = []; + this.control_input = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.OperatorDef(); + const end = length !== undefined ? 
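+            // control_input (field 8) names blobs the operator must wait for without
+            // reading them as data inputs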
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.input.push(reader.string()); + break; + case 2: + message.output.push(reader.string()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.type = reader.string(); + break; + case 5: + message.arg.push($root.caffe2.Argument.decode(reader, reader.uint32())); + break; + case 6: + message.device_option = $root.caffe2.DeviceOption.decode(reader, reader.uint32()); + break; + case 7: + message.engine = reader.string(); + break; + case 8: + message.control_input.push(reader.string()); + break; + case 9: + message.is_gradient_op = reader.bool(); + break; + case 10: + message.debug_info = reader.string(); + break; + case 11: + message.domain = reader.string(); + break; + case 12: + message.op_version = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.OperatorDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "input": + reader.array(message.input, () => reader.string()); + break; + case "output": + reader.array(message.output, () => reader.string()); + break; + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "arg": + message.arg.push($root.caffe2.Argument.decodeText(reader)); + break; + case "device_option": + message.device_option = $root.caffe2.DeviceOption.decodeText(reader); + break; + case "engine": + message.engine = reader.string(); + break; + case "control_input": + reader.array(message.control_input, () => reader.string()); + break; + case "is_gradient_op": + message.is_gradient_op = reader.bool(); + break; + case "debug_info": + message.debug_info = reader.string(); + break; + case "domain": + message.domain = reader.string(); + break; + case "op_version": + message.op_version = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.OperatorDef.prototype.name = ""; +$root.caffe2.OperatorDef.prototype.type = ""; +$root.caffe2.OperatorDef.prototype.device_option = null; +$root.caffe2.OperatorDef.prototype.engine = ""; +$root.caffe2.OperatorDef.prototype.is_gradient_op = false; +$root.caffe2.OperatorDef.prototype.debug_info = ""; +$root.caffe2.OperatorDef.prototype.domain = ""; +$root.caffe2.OperatorDef.prototype.op_version = protobuf.Int64.create(0); + +$root.caffe2.MapFieldEntry = class MapFieldEntry { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe2.MapFieldEntry(); + const end = length !== undefined ? 
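+            // MapFieldEntry is a plain key/value pair; BackendOptions below carries a
+            // repeated list of these entries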
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.val = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'key')) { + throw new Error("Expected 'key'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'val')) { + throw new Error("Expected 'val'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.MapFieldEntry(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.string(); + break; + case "val": + message.val = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "key")) { + throw new Error("Expected 'key'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "val")) { + throw new Error("Expected 'val'."); + } + return message; + } +}; + +$root.caffe2.MapFieldEntry.prototype.key = ""; +$root.caffe2.MapFieldEntry.prototype.val = ""; + +$root.caffe2.BackendOptions = class BackendOptions { + + constructor() { + this.option = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.BackendOptions(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backend_name = reader.string(); + break; + case 2: + message.option.push($root.caffe2.MapFieldEntry.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'backend_name')) { + throw new Error("Expected 'backend_name'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.BackendOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "backend_name": + message.backend_name = reader.string(); + break; + case "option": + message.option.push($root.caffe2.MapFieldEntry.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "backend_name")) { + throw new Error("Expected 'backend_name'."); + } + return message; + } +}; + +$root.caffe2.BackendOptions.prototype.backend_name = ""; + +$root.caffe2.PartitionInfo = class PartitionInfo { + + constructor() { + this.device_id = []; + this.backend_options = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.PartitionInfo(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.device_id = reader.array(message.device_id, () => reader.int32(), tag); + break; + case 3: + message.extra_info = reader.string(); + break; + case 4: + message.backend_options.push($root.caffe2.BackendOptions.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'name')) { + throw new Error("Expected 'name'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.PartitionInfo(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "device_id": + reader.array(message.device_id, () => reader.int32()); + break; + case "extra_info": + message.extra_info = reader.string(); + break; + case "backend_options": + message.backend_options.push($root.caffe2.BackendOptions.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "name")) { + throw new Error("Expected 'name'."); + } + return message; + } +}; + +$root.caffe2.PartitionInfo.prototype.name = ""; +$root.caffe2.PartitionInfo.prototype.extra_info = ""; + +$root.caffe2.NetDef = class NetDef { + + constructor() { + this.op = []; + this.arg = []; + this.external_input = []; + this.external_output = []; + this.partition_info = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.NetDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.op.push($root.caffe2.OperatorDef.decode(reader, reader.uint32())); + break; + case 3: + message.type = reader.string(); + break; + case 4: + message.num_workers = reader.int32(); + break; + case 5: + message.device_option = $root.caffe2.DeviceOption.decode(reader, reader.uint32()); + break; + case 6: + message.arg.push($root.caffe2.Argument.decode(reader, reader.uint32())); + break; + case 7: + message.external_input.push(reader.string()); + break; + case 8: + message.external_output.push(reader.string()); + break; + case 9: + message.partition_info.push($root.caffe2.PartitionInfo.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.NetDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "op": + message.op.push($root.caffe2.OperatorDef.decodeText(reader)); + break; + case "type": + message.type = reader.string(); + break; + case "num_workers": + message.num_workers = reader.int32(); + break; + case "device_option": + message.device_option = $root.caffe2.DeviceOption.decodeText(reader); + break; + case "arg": + message.arg.push($root.caffe2.Argument.decodeText(reader)); + break; + case "external_input": + reader.array(message.external_input, () => reader.string()); + break; + case "external_output": + reader.array(message.external_output, () => reader.string()); + break; + case "partition_info": + 
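+                    // repeated field: each text-format block appends one entry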
message.partition_info.push($root.caffe2.PartitionInfo.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.NetDef.prototype.name = ""; +$root.caffe2.NetDef.prototype.type = ""; +$root.caffe2.NetDef.prototype.num_workers = 0; +$root.caffe2.NetDef.prototype.device_option = null; + +$root.caffe2.ExecutionStep = class ExecutionStep { + + constructor() { + this.substep = []; + this.network = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.ExecutionStep(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.substep.push($root.caffe2.ExecutionStep.decode(reader, reader.uint32())); + break; + case 3: + message.network.push(reader.string()); + break; + case 4: + message.num_iter = reader.int64(); + break; + case 5: + message.criteria_network = reader.string(); + break; + case 7: + message.report_net = reader.string(); + break; + case 8: + message.report_interval = reader.int32(); + break; + case 11: + message.run_every_ms = reader.int64(); + break; + case 6: + message.concurrent_substeps = reader.bool(); + break; + case 9: + message.should_stop_blob = reader.string(); + break; + case 10: + message.only_once = reader.bool(); + break; + case 12: + message.create_workspace = reader.bool(); + break; + case 13: + message.num_concurrent_instances = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.ExecutionStep(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "substep": + message.substep.push($root.caffe2.ExecutionStep.decodeText(reader)); + break; + case "network": + reader.array(message.network, () => reader.string()); + break; + case "num_iter": + message.num_iter = reader.int64(); + break; + case "criteria_network": + message.criteria_network = reader.string(); + break; + case "report_net": + message.report_net = reader.string(); + break; + case "report_interval": + message.report_interval = reader.int32(); + break; + case "run_every_ms": + message.run_every_ms = reader.int64(); + break; + case "concurrent_substeps": + message.concurrent_substeps = reader.bool(); + break; + case "should_stop_blob": + message.should_stop_blob = reader.string(); + break; + case "only_once": + message.only_once = reader.bool(); + break; + case "create_workspace": + message.create_workspace = reader.bool(); + break; + case "num_concurrent_instances": + message.num_concurrent_instances = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.ExecutionStep.prototype.name = ""; +$root.caffe2.ExecutionStep.prototype.num_iter = protobuf.Int64.create(0); +$root.caffe2.ExecutionStep.prototype.criteria_network = ""; +$root.caffe2.ExecutionStep.prototype.report_net = ""; +$root.caffe2.ExecutionStep.prototype.report_interval = 0; +$root.caffe2.ExecutionStep.prototype.run_every_ms = protobuf.Int64.create(0); +$root.caffe2.ExecutionStep.prototype.concurrent_substeps = false; +$root.caffe2.ExecutionStep.prototype.should_stop_blob = ""; +$root.caffe2.ExecutionStep.prototype.only_once = false; +$root.caffe2.ExecutionStep.prototype.create_workspace = 
false; +$root.caffe2.ExecutionStep.prototype.num_concurrent_instances = 0; + +$root.caffe2.PlanDef = class PlanDef { + + constructor() { + this.network = []; + this.execution_step = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.PlanDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.network.push($root.caffe2.NetDef.decode(reader, reader.uint32())); + break; + case 3: + message.execution_step.push($root.caffe2.ExecutionStep.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.PlanDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "network": + message.network.push($root.caffe2.NetDef.decodeText(reader)); + break; + case "execution_step": + message.execution_step.push($root.caffe2.ExecutionStep.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.PlanDef.prototype.name = ""; + +$root.caffe2.BlobProto = class BlobProto { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe2.BlobProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.tensor = $root.caffe2.TensorProto.decode(reader, reader.uint32()); + break; + case 4: + message.content = reader.bytes(); + break; + case 5: + message.qtensor = $root.caffe2.QTensorProto.decode(reader, reader.uint32()); + break; + case 6: + message.content_num_chunks = reader.int32(); + break; + case 7: + message.content_chunk_id = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.BlobProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "tensor": + message.tensor = $root.caffe2.TensorProto.decodeText(reader); + break; + case "content": + message.content = reader.bytes(); + break; + case "qtensor": + message.qtensor = $root.caffe2.QTensorProto.decodeText(reader); + break; + case "content_num_chunks": + message.content_num_chunks = reader.int32(); + break; + case "content_chunk_id": + message.content_chunk_id = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.BlobProto.prototype.name = ""; +$root.caffe2.BlobProto.prototype.type = ""; +$root.caffe2.BlobProto.prototype.tensor = null; +$root.caffe2.BlobProto.prototype.content = new Uint8Array([]); +$root.caffe2.BlobProto.prototype.qtensor = null; +$root.caffe2.BlobProto.prototype.content_num_chunks = 0; +$root.caffe2.BlobProto.prototype.content_chunk_id = 0; + +$root.caffe2.DBReaderProto = class DBReaderProto { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe2.DBReaderProto(); + const end = length 
!== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.source = reader.string(); + break; + case 3: + message.db_type = reader.string(); + break; + case 4: + message.key = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.DBReaderProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "source": + message.source = reader.string(); + break; + case "db_type": + message.db_type = reader.string(); + break; + case "key": + message.key = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.DBReaderProto.prototype.name = ""; +$root.caffe2.DBReaderProto.prototype.source = ""; +$root.caffe2.DBReaderProto.prototype.db_type = ""; +$root.caffe2.DBReaderProto.prototype.key = ""; + +$root.caffe2.BlobSerializationOptions = class BlobSerializationOptions { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.caffe2.BlobSerializationOptions(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.blob_name_regex = reader.string(); + break; + case 2: + message.chunk_size = reader.int64(); + break; + case 3: + message.float_format = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.BlobSerializationOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "blob_name_regex": + message.blob_name_regex = reader.string(); + break; + case "chunk_size": + message.chunk_size = reader.int64(); + break; + case "float_format": + message.float_format = reader.enum($root.caffe2.BlobSerializationOptions.FloatFormat); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.caffe2.BlobSerializationOptions.prototype.blob_name_regex = ""; +$root.caffe2.BlobSerializationOptions.prototype.chunk_size = protobuf.Int64.create(0); +$root.caffe2.BlobSerializationOptions.prototype.float_format = 0; + +$root.caffe2.BlobSerializationOptions.FloatFormat = { + "FLOAT_DEFAULT": 0, + "FLOAT_PROTOBUF": 1, + "FLOAT_BFLOAT16": 2 +}; + +$root.caffe2.SerializationOptions = class SerializationOptions { + + constructor() { + this.options = []; + } + + static decode(reader, length) { + const message = new $root.caffe2.SerializationOptions(); + const end = length !== undefined ? 
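+            // each options entry pairs a blob_name_regex with the serialization
+            // settings to apply to matching blobs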
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.options.push($root.caffe2.BlobSerializationOptions.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.caffe2.SerializationOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "options": + message.options.push($root.caffe2.BlobSerializationOptions.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; diff --git a/caffe2.js b/caffe2.js new file mode 100644 index 00000000000..64062a622f2 --- /dev/null +++ b/caffe2.js @@ -0,0 +1,503 @@ + +import * as protobuf from './protobuf.js'; + +const caffe2 = {}; + +caffe2.ModelFactory = class { + + match(context) { + const identifier = context.identifier.toLowerCase(); + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'pb') { + const tags = context.tags('pb'); + if (tags.size > 0 && + Array.from(tags.keys()).every((tag) => tag <= 9) && + Array.from(tags.values()).every((type) => type <= 4)) { + if (tags.size === 1 && tags.get(2) === 2 && identifier.endsWith('saved_model.pb')) { + return undefined; + } + const schema = [[1,2],[2,2],[3,2],[4,0],[5,2],[6,2],[7,2],[8,2],[9,2]]; + if (schema.every(([key, value]) => !tags.has(key) || tags.get(key) === value)) { + const stream = context.stream; + if (stream.length > 3) { + const buffer = stream.peek(Math.min(stream.length, 67)); + const [signature, size] = buffer; + if (signature == 0x0A) { + if (size < 64 && + buffer.length > 2 + size + 1 && + buffer.slice(2, 2 + size).every((c) => c >= 32 && c <= 127) && + buffer[2 + size] == 0x12) { + return 'caffe2.pb'; + } + } + if (signature == 0x12) { + return 'caffe2.pb'; + } + } + } + } + } + if (extension === 'pbtxt' || extension === 'prototxt') { + const tags = context.tags('pbtxt'); + if (tags.has('op') && !tags.has('op.attr') && !tags.has('op.graph_op_name') && !tags.has('op.endpoint')) { + return 'caffe2.pbtxt'; + } + } + return undefined; + } + + async open(context, target) { + await context.require('./caffe2-proto'); + const metadata = await context.metadata('caffe2-metadata.json'); + const identifier = context.identifier; + const parts = identifier.split('.'); + const extension = parts.pop().toLowerCase(); + const base = parts.join('.'); + switch (target) { + case 'caffe2.pbtxt': { + const openText = (predictBuffer, initBuffer, initTextFormat) => { + let predict_net = null; + let init_net = null; + try { + caffe2.proto = protobuf.get('caffe2').caffe2; + const reader = protobuf.TextReader.open(predictBuffer); + reader.field = function(tag, message) { + if (message instanceof caffe2.proto.DeviceOption) { + message[tag] = this.read(); + return; + } + throw new Error(`Unknown field '${tag}' ${this.location()}`); + }; + predict_net = caffe2.proto.NetDef.decodeText(reader); + } catch (error) { + const message = error && error.message ? 
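+                        // normalize the caught value: prefer its message, else stringify it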
error.message : error.toString(); + throw new caffe2.Error(`File text format is not caffe2.NetDef (${message.replace(/\.$/, '')}).`); + } + try { + caffe2.proto = protobuf.get('caffe2').caffe2; + if (initBuffer) { + if (initTextFormat) { + const reader = protobuf.TextReader.open(initBuffer); + init_net = caffe2.proto.NetDef.decodeText(reader); + } else { + const reader = protobuf.BinaryReader.open(initBuffer); + init_net = caffe2.proto.NetDef.decode(reader); + } + } + } catch (error) { + // continue regardless of error + } + return new caffe2.Model(metadata, predict_net, init_net); + }; + if (base.toLowerCase().endsWith('init_net') || base.toLowerCase().startsWith('init_net')) { + try { + const name = identifier.replace('init_net', 'predict_net'); + const content = await context.fetch(name); + const buffer = content.stream.peek(); + return openText(buffer, context.stream.peek(), true); + } catch (error) { + return openText(context.stream.peek(), null, true); + } + } + if (base.toLowerCase().endsWith('predict_net') || base.toLowerCase().startsWith('predict_net')) { + try { + const name = identifier.replace('predict_net', 'init_net').replace(/\.pbtxt/, '.pb'); + const content = await context.fetch(name); + const buffer = content.stream.peek(); + return openText(context.stream.peek(), buffer, false); + } catch (error) { + try { + const name = identifier.replace('predict_net', 'init_net'); + const content = await context.fetch(name); + const buffer = content.stream.read(); + return openText(context.stream.peek(), buffer, true); + } catch (error) { + return openText(context.stream.peek(), null, true); + } + } + } + try { + const name = `${base}_init.pb`; + const content = await context.fetch(name); + const buffer = content.stream.read(); + return openText(context.stream.peek(), buffer, false); + } catch (error) { + return openText(context.stream.peek(), null, false); + } + } + case 'caffe2.pb': { + const openBinary = (predictBuffer, initBuffer) => { + let predict_net = null; + let init_net = null; + try { + caffe2.proto = protobuf.get('caffe2').caffe2; + const reader = protobuf.BinaryReader.open(predictBuffer); + predict_net = caffe2.proto.NetDef.decode(reader); + } catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new caffe2.Error(`File format is not caffe2.NetDef (${message.replace(/\.$/, '')}).`); + } + try { + if (initBuffer) { + caffe2.proto = protobuf.get('caffe2').caffe2; + const reader = protobuf.BinaryReader.open(initBuffer); + init_net = caffe2.proto.NetDef.decode(reader); + } + } catch (error) { + // continue regardless of error + } + return new caffe2.Model(metadata, predict_net, init_net); + }; + if (base.toLowerCase().endsWith('init_net')) { + try { + const name = `${base.replace(/init_net$/, '')}predict_net.${extension}`; + const content = await context.fetch(name); + const buffer = content.stream.peek(); + return openBinary(buffer, context.stream.peek()); + } catch (error) { + return openBinary(context.stream.peek(), null); + } + } + if (base.toLowerCase().endsWith('_init')) { + try { + const name = `${base.replace(/_init$/, '')}.${extension}`; + const content = await context.fetch(name); + const buffer = content.stream.peek(); + return openBinary(buffer, context.stream.peek()); + } catch (error) { + return openBinary(context.stream.peek(), null); + } + } + if (base.toLowerCase().endsWith('predict_net') || base.toLowerCase().startsWith('predict_net')) { + try { + const name = identifier.replace('predict_net', 'init_net'); + const content = await context.fetch(name); + const buffer = content.stream.peek(); + return openBinary(context.stream.peek(), buffer); + } catch (error) { + return openBinary(context.stream.peek(), null); + } + } + try { + const file = `${base}_init.${extension}`; + const content = await context.fetch(file, null); + const buffer = content.stream.peek(); + return openBinary(context.stream.peek(), buffer); + } catch (error) { + return openBinary(context.stream.peek(), null); + } + } + default: { + throw new caffe2.Error(`Unsupported Caffe2 format '${target}'.`); + } + } + } +}; + +caffe2.Model = class { + + constructor(metadata, predict_net, init_net) { + this.format = 'Caffe2'; + this.domain = predict_net.domain || null; + const graph = new caffe2.Graph(metadata, predict_net, init_net); + this.graphs = [ graph ]; + } +}; + +caffe2.Graph = class { + + constructor(metadata, netDef, init) { + this.name = netDef.name || ''; + this.type = netDef.type || ''; + this.nodes = []; + const initializers = new Set(); + const tensors = new Map(); + for (const name of netDef.external_input) { + tensors.set(name, new caffe2.Tensor(name, {})); + } + if (init) { + const dataTypes = new Map([ + [ 'GivenTensorFill', 'float32' ], + [ 'GivenTensorDoubleFill', 'float64' ], + [ 'GivenTensorBoolFill', 'boolean' ], + [ 'GivenTensorByteStringToUInt8Fill', 'uint8' ], + [ 'GivenTensorInt16Fill', 'int16' ], + [ 'GivenTensorSInt16Fill', 'int16' ], + [ 'GivenTensorIntFill', 'int32' ], + [ 'GivenTensorInt64Fill', 'int64' ], + [ 'GivenTensorStringFill', 'string' ], + [ 'Int8GivenIntTensorFill', 'int32' ], + [ 'Int8GivenTensorFill', 'int8' ], + [ 'XavierFill', null ], + [ 'ConstantFill', null ] + ]); + for (const op of init.op) { + if (op.output && op.output.length == 1) { + /* eslint-disable prefer-destructuring */ + const name = op.output[0]; + /* eslint-enable prefer-destructuring */ + const tensor = {}; + for (const arg of op.arg) { + tensor[arg.name] = arg; + } + if (!dataTypes.has(op.type)) { + throw new caffe2.Error(`Unsupported init op '${op.type}'.`); + } + tensor.dataType = dataTypes.get(op.type); + if (tensor.values && tensor.values.floats && (tensor.values.floats.length !== 1 || tensor.values.floats[0] !== 0)) { + initializers.add(name); + } + 
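+                    // register every init output as a tensor; only fills holding more
+                    // than a single zero float were flagged as initializers above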
tensors.set(name, new caffe2.Tensor(name, tensor)); + } + } + } + const scope = {}; + let index = 0; + for (const op of netDef.op) { + op.input = op.input.map((input) => scope[input] ? scope[input] : input); + op.output = op.output.map((output) => { + if (scope[output]) { + const next = `${output}\n${index}`; // custom argument id + scope[output] = next; + return next; + } + scope[output] = output; + return output; + }); + index++; + } + const values = new Map(); + values.map = (name, type, tensor) => { + if (!values.has(name)) { + values.set(name, new caffe2.Value(name, type || null, tensor || null)); + } else if (type || tensor) { + throw new caffe2.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + for (const op of netDef.op) { + let index = 0; + for (const name of op.input) { + if (index > 0 && tensors.has(name)) { + if (!values.has(name)) { + values.set(name, new caffe2.Value(name, null, tensors.get(name))); + } + initializers.add(name); + } + index++; + } + } + for (const op of netDef.op) { + for (const name of op.output) { + if (tensors.has(name)) { + initializers.add(name); + } + } + } + let lastNode = null; + let lastOutput = null; + for (const op of netDef.op) { + const node = new caffe2.Node(metadata, op, values); + if (op.input.length == 1 && + op.output.length >= 1 && + op.input[0].split('\n').shift() == op.output[0].split('\n').shift() && + lastNode && + lastOutput == op.input[0].split('\n').shift()) { + lastNode.chain.push(node); + } else { + this.nodes.push(node); + lastNode = null; + lastOutput = null; + if (op.output.length == 1) { + lastNode = node; + lastOutput = op.output[0].split('\n').shift(); + } + } + } + this.inputs = []; + for (const input of netDef.external_input) { + if (netDef.external_input.length > 1 && initializers.has(input)) { + continue; + } + const argument = new caffe2.Argument(input, [ values.map(input) ]); + this.inputs.push(argument); + } + this.outputs = []; + for (const output of netDef.external_output) { + const argument = new caffe2.Argument(output, [ values.map(output) ]); + this.outputs.push(argument); + } + } +}; + +caffe2.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +caffe2.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new caffe2.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type ? type : initializer && initializer.type ? initializer.type : null; + this.quantization = initializer && initializer.quantization ? initializer.quantization : null; + this.initializer = initializer || null; + } +}; + +caffe2.Node = class { + + constructor(metadata, op, values) { + this.name = op.name || ''; + this.device = op.engine || ''; + this.metadata = metadata; + this.chain = []; + this.type = metadata.type(op.type); + this.attributes = op.arg.map((arg) => new caffe2.Attribute(metadata, this.type.name, arg)); + const inputs = op.input; + const outputs = op.output; + this.inputs = []; + let inputIndex = 0; + if (this.type && this.type.inputs) { + for (const inputDef of this.type.inputs) { + if (inputIndex < inputs.length || inputDef.option != 'optional') { + const inputCount = (inputDef.option == 'variadic') ? 
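+                        // a variadic input consumes all remaining ids; otherwise exactly one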
(inputs.length - inputIndex) : 1; + const inputArguments = inputs.slice(inputIndex, inputIndex + inputCount).filter((id) => id != '' || inputDef.option != 'optional').map((id) => values.map(id)); + this.inputs.push(new caffe2.Argument(inputDef.name, inputArguments)); + inputIndex += inputCount; + } + } + } else { + this.inputs.push(...inputs.slice(inputIndex).map((input, index) => { + const inputName = ((inputIndex + index) == 0) ? 'input' : (inputIndex + index).toString(); + return new caffe2.Argument(inputName, [ values.map(input) ]); + })); + } + this.outputs = []; + let outputIndex = 0; + if (this.type && this.type.outputs) { + for (const outputDef of this.type.outputs) { + if (outputIndex < outputs.length || outputDef.option != 'optional') { + const outputCount = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1; + const outputArguments = outputs.slice(outputIndex, outputIndex + outputCount).map((id) => values.map(id)); + this.outputs.push(new caffe2.Argument(outputDef.name, outputArguments)); + outputIndex += outputCount; + } + } + } else { + this.outputs.push(...outputs.slice(outputIndex).map((output, index) => { + const outputName = ((outputIndex + index) == 0) ? 'output' : (outputIndex + index).toString(); + return new caffe2.Argument(outputName, [ values.map(output) ]); + })); + } + } +}; + +caffe2.Attribute = class { + + constructor(metadata, type, arg) { + this.name = arg.name; + if (arg.floats && arg.floats.length > 0) { + this.value = arg.floats; + } else if (arg.ints && arg.ints.length > 0) { + this.value = arg.ints; + } else if (arg.nets && arg.nets.length > 0) { + this.value = arg.nets.map((net) => new caffe2.Graph(metadata, net, null)); + this.type = 'graph[]'; + } else if (arg.n) { + this.value = new caffe2.Graph(metadata, arg.n, null); + this.type = 'graph'; + } else { + this.value = arg.i; + } + metadata = metadata.attribute(type, arg.name); + if (metadata) { + if (Object.prototype.hasOwnProperty.call(metadata, 'type')) { + this.type = metadata.type; + if (this.type == 'boolean') { + this.value = this.value !== 0 && this.value.toString() !== '0' ? true : false; + } + } + if (metadata.visible === false) { + this.visible = false; + } else if (metadata.default !== undefined) { + if (this.value == metadata.default || (this.value && this.value.toString() == metadata.default.toString())) { + this.visible = false; + } + } + } + } +}; + +caffe2.Tensor = class { + + constructor(name, tensor) { + this.name = name; + const shape = tensor.shape && tensor.shape.ints ? tensor.shape.ints : null; + this.type = new caffe2.TensorType(tensor.dataType, new caffe2.TensorShape(shape)); + this.values = null; + this.category = 'Initializer'; + this.encoding = '|'; + if (tensor.Y_scale !== undefined || tensor.Y_zero_point !== undefined) { + this.quantization = { + type: 'linear', + scale: [ tensor.Y_scale ? tensor.Y_scale.f : 0 ], + offset: [ tensor.Y_zero_point ? 
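+                    // Y_zero_point is an int64 Argument; narrow it to a number for display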
tensor.Y_zero_point.i.toNumber() : 0 ] + }; + } + if (tensor.values) { + switch (this.type.dataType) { + case 'float32': this.values = tensor.values.floats; break; + case 'boolean': this.values = tensor.values.ints; break; + case 'int8': this.values = new Int8Array(tensor.values.s); break; + case 'int32': this.values = tensor.values.ints; break; + default: break; + } + } + } +}; + +caffe2.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType || '?'; + this.shape = shape; + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +caffe2.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + return this.dimensions ? (`[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`) : ''; + } +}; + +caffe2.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Caffe2 model.'; + } +}; + +export const ModelFactory = caffe2.ModelFactory; diff --git a/cambricon.js b/cambricon.js new file mode 100644 index 00000000000..f7dbc78c7af --- /dev/null +++ b/cambricon.js @@ -0,0 +1,32 @@ + +const cambricon = {}; + +cambricon.ModelFactory = class { + + match(context) { + const stream = context.stream; + if (stream) { + const buffer = stream.peek(Math.min(20, stream.length)); + const text = Array.from(buffer).map((c) => String.fromCharCode(c)).join(''); + if (text.startsWith('\x7fMEF') || text.startsWith('cambricon_offline')) { + return 'cambricon'; + } + } + return undefined; + } + + async open(/* context, match */) { + throw new cambricon.Error("File contains undocumented Cambricon data."); + } +}; + +cambricon.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Cambricon model.'; + } +}; + +export const ModelFactory = cambricon.ModelFactory; + diff --git a/circle-metadata.json b/circle-metadata.json new file mode 100644 index 00000000000..084ef0660ca --- /dev/null +++ b/circle-metadata.json @@ -0,0 +1,1197 @@ +[ + { + "name": "Add", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "pot_scale_int16", "type": "boolean", "default": true } + ], + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ] + }, + { + "name": "ArgMax", + "attributes": [ + { "name": "output_type", "type": "TensorType", "default": "FLOAT32" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "dimension", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "ArgMin", + "attributes": [ + { "name": "output_type", "type": "TensorType", "default": "FLOAT32" } + ] + }, + { + "name": "AveragePool2D", + "category": "Pool", + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "stride_w", "type": "int32" }, + { "name": "stride_h", "type": "int32" }, + { "name": "filter_width", "type": "int32" }, + { "name": "filter_height", "type": "int32" } + ], + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "BatchMatMul", + "attributes": [ + { "name": "adjoint_lhs", "type": "boolean", "default": false }, + { "name": "adjoint_rhs", "type": "boolean", "default": false }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", 
"default": false } + ] + }, + { + "name": "BidirectionalSequenceLSTM", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "cell_clip", "type": "float32", "default": 0 }, + { "name": "proj_clip", "type": "float32", "default": 0 }, + { "name": "merge_outputs", "type": "boolean", "default": false }, + { "name": "time_major", "type": "boolean", "default": true }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + ] + }, + { + "name": "BidirectionalSequenceRNN", + "attributes": [ + { "name": "time_major", "type": "boolean", "default": false }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "merge_outputs", "type": "boolean", "default": false }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + ] + }, + { + "name": "Bucketize", + "attributes": [ + { "name": "boundaries", "type": "float32[]", "default": 0 } + ] + }, + { + "name": "Call", + "attributes": [ + { "name": "subgraph", "type": "uint32", "default": 0 } + ] + }, + { + "name": "CallOnce", + "attributes": [ + { "name": "init_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "Cast", + "attributes": [ + { "name": "in_data_type", "type": "TensorType", "default": "FLOAT32" }, + { "name": "out_data_type", "type": "TensorType", "default": "FLOAT32" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ConcatEmbeddings", + "attributes": [ + { "name": "num_channels", "type": "int32", "default": 0 }, + { "name": "num_columns_per_channel", "type": "int32[]", "default": 0 }, + { "name": "embedding_dim_per_channel", "type": "int32[]", "default": 0 } + ] + }, + { + "name": "Concatenation", + "category": "Tensor", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ], + "inputs": [ + { "name": "inputs", "list": true } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Conv2D", + "category": "Layer", + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME", "description": "`SAME`|`VALID`" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE", "description": "`NONE`|`RELU`|`RELU6`" }, + { "name": "stride_w", "type": "int32", "default": 0, "description": "stride of the filter window" }, + { "name": "stride_h", "type": "int32", "default": 0, "description": "stride of the filter window" }, + { "name": "dilation_w_factor", "type": "int32", "default": 1 }, + { "name": "dilation_h_factor", "type": "int32", "default": 1 }, + { "name": "quantized_bias_type", "type": "TensorType", "default": "FLOAT32" } + ], + "inputs": [ + { "name": "input", "description": "4D tensor" }, + { "name": "filter" }, + { "name": "bias", "description": "(optional)" } + ], + "outputs": [ + { "name": "output", "description": "result of 2D convolution of the input tensor" } + ] + }, + { + "name": "Cumsum", + "attributes": [ + { "name": "exclusive", "type": "boolean", "default": false }, + { "name": "reverse", "type": "boolean", "default": false } + ] + }, + { + "name": "Densify", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "DepthToSpace", + "attributes": [ + { "name": "block_size", "type": "int32", "default": 0 } + ] + }, + { + "name": 
"DepthwiseConv2D", + "category": "Layer", + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "stride_w", "type": "int32", "default": 0 }, + { "name": "stride_h", "type": "int32", "default": 0 }, + { "name": "depth_multiplier", "type": "int32", "default": 0 }, + { "name": "dilation_w_factor", "type": "int32", "default": 1 }, + { "name": "dilation_h_factor", "type": "int32", "default": 1 } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "weights", "type": "T" }, + { "name": "bias", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Dequantize", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Div", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ], + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ] + }, + { + "name": "EmbeddingLookupSparse", + "attributes": [ + { "name": "combiner", "type": "CombinerType", "default": "SUM" } + ] + }, + { + "name": "Exp", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ExpandDims", + "inputs": [ + { "name": "input" }, + { "name": "axis_param" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ExtractFeatures", + "inputs": [ + { "name": "ngrams" } + ], + "outputs": [ + { "name": "features" }, + { "name": "weights" } + ] + }, + { + "name": "FakeQuant", + "attributes": [ + { "name": "min", "type": "float32", "default": 0 }, + { "name": "max", "type": "float32", "default": 0 }, + { "name": "num_bits", "type": "int32", "default": 0 }, + { "name": "narrow_range", "type": "boolean", "default": false } + ] + }, + { + "name": "Fill", + "inputs": [ + { "name": "dims", "type": "T" }, + { "name": "value", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "FullyConnected", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "weights_format", "type": "FullyConnectedOptionsWeightsFormat", "default": "DEFAULT" }, + { "name": "keep_num_dims", "type": "boolean", "default": false }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false }, + { "name": "quantized_bias_type", "type": "TensorType", "default": "FLOAT32" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "weights", "type": "T" }, + { "name": "bias", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Gather", + "category": "Transform", + "attributes": [ + { "name": "axis", "default": 0, "type": "int32" }, + { "name": "batch_dims", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "positions" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Gelu", + "category": "Activation", + "attributes": [ + { "name": "approximate", "type": "boolean", "default": false } + ] + }, + { + "name": "HardSwish", + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Hashtable", + "attributes": [ + { "name": "table_id", "type": "int32", "default": 0 }, + { "name": "key_dtype", "type": "TensorType", "default": "FLOAT32" }, + 
{ "name": "value_dtype", "type": "TensorType", "default": "FLOAT32" } + ] + }, + { + "name": "HashtableLookup", + "inputs": [ + { "name": "key" }, + { "name": "keys" }, + { "name": "values" } + ], + "outputs": [ + { "name": "value" }, + { "name": "hits" } + ] + }, + { + "name": "If", + "attributes": [ + { "name": "then_subgraph_index", "type": "int32", "default": 0 }, + { "name": "else_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "InstanceNorm", + "attributes": [ + { "name": "epsilon", "type": "float32", "default": 0 }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ] + }, + { + "name": "LeakyRelu", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 } + ] + }, + { + "name": "LocalResponseNormalization", + "category": "Normalization", + "attributes": [ + { "name": "radius", "type": "int32", "default": 0 }, + { "name": "bias", "type": "float32", "default": 0 }, + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LogicalOr", + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ] + }, + { + "name": "Logistic", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LogSoftmax", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "LSHProjection", + "attributes": [ + { "name": "type", "type": "LSHProjectionType", "default": "UNKNOWN" } + ], + "inputs": [ + { "name": "hash" }, + { "name": "input" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LSTM", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "cell_clip", "type": "float32", "default": 0 }, + { "name": "proj_clip", "type": "float32", "default": 0 }, + { "name": "kernel_type", "type": "LSTMKernelType", "default": "FULL" }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input", "type": "T", "description": "Input tensor." 
}, + { "name": "input_input_weights", "type": "T", "option": "optional", "description": "Input to input weights tensor.", "visible": false }, + { "name": "input_forget_weights", "type": "T", "description": "Input to forget weights tensor.", "visible": false }, + { "name": "input_cell_weights", "type": "T", "description": "Input to cell weights tensor.", "visible": false }, + { "name": "input_output_weights", "type": "T", "description": "Input to output weights tensor.", "visible": false }, + { "name": "recurrent_input_weights", "type": "T", "option": "optional", "description": "Recurrent to input weights tensor.", "visible": false }, + { "name": "recurrent_forget_weights", "type": "T", "description": "Recurrent to forget weights tensor.", "visible": false }, + { "name": "recurrent_cell_weights", "type": "T", "description": "Recurrent to cell weights tensor.", "visible": false }, + { "name": "recurrent_output_weights", "type": "T", "description": "Recurrent to output weights tensor.", "visible": false }, + { "name": "cell_input_weights", "type": "T", "option": "optional", "description": "Cell to input weights tensor.", "visible": false }, + { "name": "cell_forget_weights", "type": "T", "option": "optional", "description": "Cell to forget weights tensor.", "visible": false }, + { "name": "cell_output_weights", "type": "T", "option": "optional", "description": "Cell to output weights tensor.", "visible": false }, + { "name": "input_gate_bias", "type": "T", "option": "optional", "description": "Input gate bias tensor.", "visible": false }, + { "name": "forget_gate_bias", "type": "T", "description": "Forget gate bias tensor.", "visible": false }, + { "name": "cell_gate_bias", "type": "T", "description": "Cell gate bias tensor.", "visible": false }, + { "name": "output_gate_bias", "type": "T", "description": "Output gate bias tensor.", "visible": false }, + { "name": "projection_weights", "type": "T", "option": "optional", "description": "Projection weights tensor.", "visible": false }, + { "name": "projection_bias", "type": "T", "option": "optional", "description": "Projection bias tensor.", "visible": false } + ], + "outputs": [ + { "name": "scratch", "type": "T" }, + { "name": "output_state", "type": "T" }, + { "name": "cell_state", "type": "T" }, + { "name": "output", "type": "T" } + ] + }, + { + "name": "Maximum", + "inputs": [ + { "name": "input1" }, + { "name": "input2" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "MaxPool2D", + "category": "Pool", + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "stride_w", "type": "int32" }, + { "name": "stride_h", "type": "int32" }, + { "name": "filter_width", "type": "int32" }, + { "name": "filter_height", "type": "int32" } + ], + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Mean", + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Minimum", + "inputs": [ + { "name": "input1" }, + { "name": "input2" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "MirrorPad", + "attributes": [ + { "name": "mode", "type": "MirrorPadMode", "default": "REFLECT" } + ] + }, + { + "name": "Mul", + "attributes": [ + { "name": 
"fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ], + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ] + }, + { + "name": "Normalize", + "category": "Normalization", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "NotEqual", + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "OneHot", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "Pack", + "attributes": [ + { "name": "values_count", "type": "int32", "default": 0 }, + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "Pad", + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "paddings" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Predict", + "inputs": [ + { "name": "hashes" }, + { "name": "keys" }, + { "name": "labels" }, + { "name": "weights" } + ], + "outputs": [ + { "name": "label" }, + { "name": "weight" } + ] + }, + { + "name": "Prelu", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "slope", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Quantize", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Range", + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ], + "inputs": [ + { "name": "start", "type": "T" }, + { "name": "limit", "type": "T" }, + { "name": "delta", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "ReduceMax", + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "ReduceMin", + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "ReduceWindow", + "attributes": [ + { "name": "reduce_function", "type": "ReduceWindowFunction", "default": "UNSUPPORTED" } + ] + }, + { + "name": "Relu", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Relu6", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "attributes": [ + { "name": "new_shape", "type": "int32[]", "default": 0 } + ], + "inputs": [ + { "name": "data", "type": "T" }, + { "name": "shape", "type": "T" } + ], + "outputs": [ + { "name": "reshaped", "type": "T" } + ] + }, + { + "name": "ResizeBilinear", + "attributes": [ + { "name": "align_corners", "default": false, "type": "boolean" }, + { "name": "new_height", "type": "int32", "default": 0 }, + { "name": "new_width", "type": "int32", "default": 0 }, + { "name": "half_pixel_centers", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "size" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ResizeNearestNeighbor", + "attributes": [ + { "name": "align_corners", "type": "boolean", "default": false }, + { "name": 
"half_pixel_centers", "type": "boolean", "default": false } + ] + }, + { + "name": "ReverseSequence", + "attributes": [ + { "name": "seq_dim", "type": "int32", "default": 0 }, + { "name": "batch_dim", "type": "int32", "default": 0 } + ] + }, + { + "name": "RNN", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "X", "type": "T" }, + { "name": "W", "type": "T" }, + { "name": "R", "type": "T" }, + { "name": "b", "type": "T" } + ], + "outputs": [ + { "name": "hidden", "type": "T" }, + { "name": "output", "type": "T" } + ] + }, + { + "name": "Rsqrt", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Shape", + "attributes": [ + { "name": "out_type", "type": "TensorType", "default": "FLOAT32" } + ] + }, + { + "name": "Sin", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SkipGram", + "inputs": [ + { "name": "inputs" } + ], + "outputs": [ + { "name": "ngrams" } + ], + "attributes": [ + { "name": "ngram_size", "type": "int32", "default": 0 }, + { "name": "max_skip_size", "type": "int32", "default": 0 }, + { "name": "include_all_ngrams", "type": "boolean", "default": false } + ] + }, + { + "name": "Slice", + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "begin" }, + { "name": "size" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Softmax", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "beta", "type": "float32", "default": 0 } + ] + }, + { + "name": "SpaceToDepth", + "attributes": [ + { "name": "block_size", "type": "int32", "default": 0 } + ] + }, + { + "name": "SparseToDense", + "attributes": [ + { "name": "validate_indices", "type": "boolean", "default": false } + ] + }, + { + "name": "Split", + "category": "Tensor", + "inputs": [ + { "name": "axis" }, + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "num_splits", "type": "int32", "default": 0 } + ] + }, + { + "name": "SplitV", + "attributes": [ + { "name": "num_splits", "type": "int32", "default": 0 } + ] + }, + { + "name": "Squeeze", + "category": "Transform", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "squeeze_dims", "type": "int32[]", "default": 0 } + ] + }, + { + "name": "StablehloBroadcastInDim", + "attributes": [ + { "name": "broadcast_dimensions", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "StablehloCompare", + "attributes": [ + { "name": "comparison_direction", "type": "StablehloComparisonDirection", "default": "STABLEHLO_COMPARISON_DIRECTION_EQ" }, + { "name": "compare_type", "type": "StablehloComparisonType", "default": "STABLEHLO_COMPARISON_TYPE_NOTYPE" } + ] + }, + { + "name": "StablehloConcatenate", + "attributes": [ + { "name": "dimension", "type": "int64", "default": 0 } + ] + }, + { + "name": "StablehloConvolution", + "attributes": [ + { "name": "window_strides", "type": "int64[]", "default": 0 }, + { "name": "padding", "type": "int64[]", "default": 0 }, + { "name": "lhs_dilation", "type": "int64[]", "default": 0 }, + { "name": "rhs_dilation", "type": "int64[]", "default": 0 }, + { "name": 
"window_reversal", "type": "boolean", "default": false }, + { "name": "input_batch_dimension", "type": "int64", "default": 0 }, + { "name": "input_feature_dimension", "type": "int64", "default": 0 }, + { "name": "input_spatial_dimensions", "type": "int64[]", "default": 0 }, + { "name": "kernel_input_feature_dimension", "type": "int64", "default": 0 }, + { "name": "kernel_output_feature_dimension", "type": "int64", "default": 0 }, + { "name": "kernel_spatial_dimensions", "type": "int64[]", "default": 0 }, + { "name": "output_batch_dimension", "type": "int64", "default": 0 }, + { "name": "output_feature_dimension", "type": "int64", "default": 0 }, + { "name": "output_spatial_dimensions", "type": "int64[]", "default": 0 }, + { "name": "feature_group_count", "type": "int64", "default": 0 }, + { "name": "batch_group_count", "type": "int64", "default": 0 }, + { "name": "precision_config", "type": "StablehloPrecisionConfig[]", "default": "DEFAULT" } + ] + }, + { + "name": "StablehloCustomCall", + "attributes": [ + { "name": "call_target_name", "type": "string", "default": null }, + { "name": "has_side_effect", "type": "boolean", "default": false }, + { "name": "backend_config", "type": "string", "default": null }, + { "name": "api_version", "type": "int32", "default": 0 }, + { "name": "called_computations", "type": "int32[]", "default": 0 }, + { "name": "custom_attributes", "type": "uint8[]", "default": 0 } + ] + }, + { + "name": "StablehloDotGeneral", + "attributes": [ + { "name": "lhs_batching_dimensions", "type": "int64[]", "default": 0 }, + { "name": "rhs_batching_dimensions", "type": "int64[]", "default": 0 }, + { "name": "lhs_contracting_dimensions", "type": "int64[]", "default": 0 }, + { "name": "rhs_contracting_dimensions", "type": "int64[]", "default": 0 }, + { "name": "precision_config", "type": "StablehloPrecisionConfig[]", "default": "DEFAULT" } + ] + }, + { + "name": "StablehloDynamicSlice", + "attributes": [ + { "name": "slice_sizes", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "StablehloGather", + "attributes": [ + { "name": "offset_dims", "type": "int64[]", "default": 0 }, + { "name": "collapsed_slice_dims", "type": "int64[]", "default": 0 }, + { "name": "start_index_map", "type": "int64[]", "default": 0 }, + { "name": "index_vector_dim", "type": "int64", "default": 0 }, + { "name": "slice_sizes", "type": "int64[]", "default": 0 }, + { "name": "indices_are_sorted", "type": "boolean", "default": false } + ] + }, + { + "name": "StablehloIota", + "attributes": [ + { "name": "iota_dimension", "type": "int64", "default": 0 } + ] + }, + { + "name": "StablehloPad", + "attributes": [ + { "name": "edge_padding_low", "type": "int64[]", "default": 0 }, + { "name": "edge_padding_high", "type": "int64[]", "default": 0 }, + { "name": "interior_padding", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "StablehloReduce", + "attributes": [ + { "name": "dimensions", "type": "int64[]", "default": 0 }, + { "name": "body_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "StablehloReduceWindow", + "attributes": [ + { "name": "window_dimensions", "type": "int64[]", "default": 0 }, + { "name": "window_strides", "type": "int64[]", "default": 0 }, + { "name": "base_dilations", "type": "int64[]", "default": 0 }, + { "name": "window_dilations", "type": "int64[]", "default": 0 }, + { "name": "padding", "type": "int64[]", "default": 0 }, + { "name": "body_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "StablehloRngBitGenerator", + 
"attributes": [ + { "name": "algorithm", "type": "RngAlgorithm", "default": "DEFAULT" } + ] + }, + { + "name": "StablehloScatter", + "attributes": [ + { "name": "indices_are_sorted", "type": "boolean", "default": false }, + { "name": "update_window_dims", "type": "int64[]", "default": 0 }, + { "name": "inserted_window_dims", "type": "int64[]", "default": 0 }, + { "name": "scatter_dims_to_operand_dims", "type": "int64[]", "default": 0 }, + { "name": "index_vector_dim", "type": "int64", "default": 0 }, + { "name": "unique_indices", "type": "boolean", "default": false }, + { "name": "update_computation_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "StablehloSlice", + "attributes": [ + { "name": "start_indices", "type": "int64[]", "default": 0 }, + { "name": "limit_indices", "type": "int64[]", "default": 0 }, + { "name": "strides", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "StablehloSort", + "attributes": [ + { "name": "dimension", "type": "int64", "default": 0 }, + { "name": "is_stable", "type": "boolean", "default": false }, + { "name": "comparator_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "StablehloTranspose", + "attributes": [ + { "name": "permutation", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "StablehloWhile", + "attributes": [ + { "name": "cond_subgraph_index", "type": "int32", "default": 0 }, + { "name": "body_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "StridedSlice", + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "begin" }, + { "name": "end" }, + { "name": "strides" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "begin_mask", "type": "int32", "default": 0 }, + { "name": "end_mask", "type": "int32", "default": 0 }, + { "name": "ellipsis_mask", "type": "int32", "default": 0 }, + { "name": "new_axis_mask", "type": "int32", "default": 0 }, + { "name": "shrink_axis_mask", "type": "int32", "default": 0 }, + { "name": "offset", "type": "boolean", "default": false } + ] + }, + { + "name": "Sub", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "pot_scale_int16", "type": "boolean", "default": true } + ], + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ] + }, + { + "name": "Sum", + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "SVDF", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "rank", "type": "int32", "default": 0 }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "feature", "type": "T" }, + { "name": "time", "type": "T" }, + { "name": "bias", "type": "T" }, + { "name": "state", "type": "T" } + ], + "outputs": [ + { "name": "state_out", "type": "T" }, + { "name": "output", "type": "T" } + ] + }, + { + "name": "Tanh", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Tile", + "inputs": [ + { "name": "input" }, + { "name": "multipliers" } + ], + "outputs": [ + { "name": "output" } + ] 
+ }, + { + "name": "Transpose", + "category": "Transform", + "inputs": [ + { "name": "input" }, + { "name": "perm" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "TransposeConv", + "category": "Layer", + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "stride_w", "type": "int32", "default": 0 }, + { "name": "stride_h", "type": "int32", "default": 0 }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "quantized_bias_type", "type": "TensorType", "default": "FLOAT32" } + ], + "inputs": [ + { "name": "output_shape" }, + { "name": "weights" }, + { "name": "input" }, + { "name": "bias", "option": "optional" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "UnidirectionalSequenceLSTM", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "cell_clip", "type": "float32", "default": 0 }, + { "name": "proj_clip", "type": "float32", "default": 0 }, + { "name": "time_major", "type": "boolean", "default": false }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false }, + { "name": "diagonal_recurrent_tensors", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input", "type": "T", "description": "Input tensor." }, + { "name": "input_input_weights", "type": "T", "option": "optional", "description": "Input to input weights tensor.", "visible": false }, + { "name": "input_forget_weights", "type": "T", "description": "Input to forget weights tensor.", "visible": false }, + { "name": "input_cell_weights", "type": "T", "description": "Input to cell weights tensor.", "visible": false }, + { "name": "input_output_weights", "type": "T", "description": "Input to output weights tensor.", "visible": false }, + { "name": "recurrent_input_weights", "type": "T", "option": "optional", "description": "Recurrent to input weights tensor.", "visible": false }, + { "name": "recurrent_forget_weights", "type": "T", "description": "Recurrent to forget weights tensor.", "visible": false }, + { "name": "recurrent_cell_weights", "type": "T", "description": "Recurrent to cell weights tensor.", "visible": false }, + { "name": "recurrent_output_weights", "type": "T", "description": "Recurrent to output weights tensor.", "visible": false }, + { "name": "cell_input_weights", "type": "T", "option": "optional", "description": "Cell to input weights tensor.", "visible": false }, + { "name": "cell_forget_weights", "type": "T", "option": "optional", "description": "Cell to forget weights tensor.", "visible": false }, + { "name": "cell_output_weights", "type": "T", "option": "optional", "description": "Cell to output weights tensor.", "visible": false }, + { "name": "input_gate_bias", "type": "T", "option": "optional", "description": "Input gate bias tensor.", "visible": false }, + { "name": "forget_gate_bias", "type": "T", "description": "Forget gate bias tensor.", "visible": false }, + { "name": "cell_gate_bias", "type": "T", "description": "Cell gate bias tensor.", "visible": false }, + { "name": "output_gate_bias", "type": "T", "description": "Output gate bias tensor.", "visible": false }, + { "name": "projection_weights", "type": "T", "option": "optional", "description": "Projection weights tensor.", "visible": false }, + { "name": "projection_bias", "type": "T", "option": "optional", "description": "Projection bias tensor.", "visible": false }, + { "name":
"output_state_in", "type": "T", "option": "optional", "visible": false }, + { "name": "cell_state_in", "type": "T", "option": "optional", "visible": false }, + { "name": "activation", "type": "T", "option": "optional", "visible": false }, + { "name": "cell_clip", "type": "T", "option": "optional", "visible": false }, + { "name": "proj_clip", "type": "T", "option": "optional", "visible": false }, + { "name": "time_major", "type": "T", "option": "optional", "visible": false }, + { "name": "input_layer_norm_weights", "type": "T", "option": "optional", "visible": false }, + { "name": "forget_layer_norm_weights", "type": "T", "option": "optional", "visible": false }, + { "name": "cell_layer_norm_weights", "type": "T", "option": "optional", "visible": false }, + { "name": "output_layer_norm_weights", "type": "T", "option": "optional", "visible": false } + ] + }, + { + "name": "Unique", + "attributes": [ + { "name": "idx_out_type", "type": "TensorType", "default": "INT32" } + ] + }, + { + "name": "Unpack", + "attributes": [ + { "name": "num", "type": "int32", "default": 0 }, + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "UnsortedSegmentProd", + "attributes": [ + { "name": "num_segments", "type": "int32", "default": 0 } + ] + }, + { + "name": "VarHandle", + "attributes": [ + { "name": "container", "type": "string", "default": null }, + { "name": "shared_name", "type": "string", "default": null } + ] + }, + { + "name": "While", + "attributes": [ + { "name": "cond_subgraph_index", "type": "int32", "default": 0 }, + { "name": "body_subgraph_index", "type": "int32", "default": 0 } + ] + } +] \ No newline at end of file diff --git a/circle-schema.js b/circle-schema.js new file mode 100644 index 00000000000..59c3476697b --- /dev/null +++ b/circle-schema.js @@ -0,0 +1,3462 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const $root = flatbuffers.get('circle'); + +$root.circle = $root.circle || {}; + +$root.circle.TensorType = { + FLOAT32: 0, + FLOAT16: 1, + INT32: 2, + UINT8: 3, + INT64: 4, + STRING: 5, + BOOL: 6, + INT16: 7, + COMPLEX64: 8, + INT8: 9, + FLOAT64: 10, + COMPLEX128: 11, + UINT64: 12, + RESOURCE: 13, + VARIANT: 14, + UINT32: 15, + UINT16: 16, + INT4: 17 +}; + +$root.circle.CustomQuantization = class CustomQuantization { + + static decode(reader, position) { + const $ = new $root.circle.CustomQuantization(); + $.custom = reader.typedArray(position, 4, Uint8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.CustomQuantization(); + $.custom = reader.typedArray(json.custom, Uint8Array); + return $; + } +}; + +$root.circle.QuantizationDetails = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.circle.CustomQuantization.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'CustomQuantization': return $root.circle.CustomQuantization.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.circle.QuantizationParameters = class QuantizationParameters { + + static decode(reader, position) { + const $ = new $root.circle.QuantizationParameters(); + $.min = reader.typedArray(position, 4, Float32Array); + $.max = reader.typedArray(position, 6, Float32Array); + $.scale = reader.typedArray(position, 8, Float32Array); + $.zero_point = reader.int64s_(position, 10); + $.details = reader.union(position, 12, $root.circle.QuantizationDetails.decode); + $.quantized_dimension = 
reader.int32_(position, 16, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.QuantizationParameters(); + $.min = reader.typedArray(json.min, Float32Array); + $.max = reader.typedArray(json.max, Float32Array); + $.scale = reader.typedArray(json.scale, Float32Array); + $.zero_point = reader.array(json.zero_point); + $.details = $root.circle.QuantizationDetails.decodeText(reader, json.details, json.details_type); + $.quantized_dimension = reader.value(json.quantized_dimension, 0); + return $; + } +}; + +$root.circle.DimensionType = { + DENSE: 0, + SPARSE_CSR: 1 +}; + +$root.circle.Int32Vector = class Int32Vector { + + static decode(reader, position) { + const $ = new $root.circle.Int32Vector(); + $.values = reader.typedArray(position, 4, Int32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.Int32Vector(); + $.values = reader.typedArray(json.values, Int32Array); + return $; + } +}; + +$root.circle.Uint16Vector = class Uint16Vector { + + static decode(reader, position) { + const $ = new $root.circle.Uint16Vector(); + $.values = reader.typedArray(position, 4, Uint16Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.Uint16Vector(); + $.values = reader.typedArray(json.values, Uint16Array); + return $; + } +}; + +$root.circle.Uint8Vector = class Uint8Vector { + + static decode(reader, position) { + const $ = new $root.circle.Uint8Vector(); + $.values = reader.typedArray(position, 4, Uint8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.Uint8Vector(); + $.values = reader.typedArray(json.values, Uint8Array); + return $; + } +}; + +$root.circle.SparseIndexVector = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.circle.Int32Vector.decode(reader, position); + case 2: return $root.circle.Uint16Vector.decode(reader, position); + case 3: return $root.circle.Uint8Vector.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'Int32Vector': return $root.circle.Int32Vector.decodeText(reader, json); + case 'Uint16Vector': return $root.circle.Uint16Vector.decodeText(reader, json); + case 'Uint8Vector': return $root.circle.Uint8Vector.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.circle.DimensionMetadata = class DimensionMetadata { + + static decode(reader, position) { + const $ = new $root.circle.DimensionMetadata(); + $.format = reader.int8_(position, 4, 0); + $.dense_size = reader.int32_(position, 6, 0); + $.array_segments = reader.union(position, 8, $root.circle.SparseIndexVector.decode); + $.array_indices = reader.union(position, 12, $root.circle.SparseIndexVector.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.DimensionMetadata(); + $.format = $root.circle.DimensionType[json.format]; + $.dense_size = reader.value(json.dense_size, 0); + $.array_segments = $root.circle.SparseIndexVector.decodeText(reader, json.array_segments, json.array_segments_type); + $.array_indices = $root.circle.SparseIndexVector.decodeText(reader, json.array_indices, json.array_indices_type); + return $; + } +}; + +$root.circle.SparsityParameters = class SparsityParameters { + + static decode(reader, position) { + const $ = new $root.circle.SparsityParameters(); + $.traversal_order = reader.typedArray(position, 4, Int32Array); + $.block_map = reader.typedArray(position, 6, 
Int32Array); + $.dim_metadata = reader.tableArray(position, 8, $root.circle.DimensionMetadata.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SparsityParameters(); + $.traversal_order = reader.typedArray(json.traversal_order, Int32Array); + $.block_map = reader.typedArray(json.block_map, Int32Array); + $.dim_metadata = reader.objectArray(json.dim_metadata, $root.circle.DimensionMetadata.decodeText); + return $; + } +}; + +$root.circle.VariantSubType = class VariantSubType { + + static decode(reader, position) { + const $ = new $root.circle.VariantSubType(); + $.shape = reader.typedArray(position, 4, Int32Array); + $.type = reader.int8_(position, 6, 0); + $.has_rank = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.VariantSubType(); + $.shape = reader.typedArray(json.shape, Int32Array); + $.type = $root.circle.TensorType[json.type]; + $.has_rank = reader.value(json.has_rank, false); + return $; + } +}; + +$root.circle.Tensor = class Tensor { + + static decode(reader, position) { + const $ = new $root.circle.Tensor(); + $.shape = reader.typedArray(position, 4, Int32Array); + $.type = reader.int8_(position, 6, 0); + $.buffer = reader.uint32_(position, 8, 0); + $.name = reader.string_(position, 10, null); + $.quantization = reader.table(position, 12, $root.circle.QuantizationParameters.decode); + $.is_variable = reader.bool_(position, 14, false); + $.sparsity = reader.table(position, 16, $root.circle.SparsityParameters.decode); + $.shape_signature = reader.typedArray(position, 18, Int32Array); + $.has_rank = reader.bool_(position, 20, false); + $.variant_tensors = reader.tableArray(position, 22, $root.circle.VariantSubType.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.Tensor(); + $.shape = reader.typedArray(json.shape, Int32Array); + $.type = $root.circle.TensorType[json.type]; + $.buffer = reader.value(json.buffer, 0); + $.name = reader.value(json.name, null); + $.quantization = reader.object(json.quantization, $root.circle.QuantizationParameters.decodeText); + $.is_variable = reader.value(json.is_variable, false); + $.sparsity = reader.object(json.sparsity, $root.circle.SparsityParameters.decodeText); + $.shape_signature = reader.typedArray(json.shape_signature, Int32Array); + $.has_rank = reader.value(json.has_rank, false); + $.variant_tensors = reader.objectArray(json.variant_tensors, $root.circle.VariantSubType.decodeText); + return $; + } +}; + +$root.circle.BuiltinOperator = { + BCQ_GATHER: -4, + BCQ_FULLY_CONNECTED: -3, + INSTANCE_NORM: -2, + ADD: 0, + AVERAGE_POOL_2D: 1, + CONCATENATION: 2, + CONV_2D: 3, + DEPTHWISE_CONV_2D: 4, + DEPTH_TO_SPACE: 5, + DEQUANTIZE: 6, + EMBEDDING_LOOKUP: 7, + FLOOR: 8, + FULLY_CONNECTED: 9, + HASHTABLE_LOOKUP: 10, + L2_NORMALIZATION: 11, + L2_POOL_2D: 12, + LOCAL_RESPONSE_NORMALIZATION: 13, + LOGISTIC: 14, + LSH_PROJECTION: 15, + LSTM: 16, + MAX_POOL_2D: 17, + MUL: 18, + RELU: 19, + RELU_N1_TO_1: 20, + RELU6: 21, + RESHAPE: 22, + RESIZE_BILINEAR: 23, + RNN: 24, + SOFTMAX: 25, + SPACE_TO_DEPTH: 26, + SVDF: 27, + TANH: 28, + CONCAT_EMBEDDINGS: 29, + SKIP_GRAM: 30, + CALL: 31, + CUSTOM: 32, + EMBEDDING_LOOKUP_SPARSE: 33, + PAD: 34, + UNIDIRECTIONAL_SEQUENCE_RNN: 35, + GATHER: 36, + BATCH_TO_SPACE_ND: 37, + SPACE_TO_BATCH_ND: 38, + TRANSPOSE: 39, + MEAN: 40, + SUB: 41, + DIV: 42, + SQUEEZE: 43, + UNIDIRECTIONAL_SEQUENCE_LSTM: 44, + STRIDED_SLICE: 45, + BIDIRECTIONAL_SEQUENCE_RNN: 46, + EXP: 47, + 
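// the codes from here on continue the TensorFlow Lite BuiltinOperator numbering; + // the negative codes at the top of this table appear to be Circle-specific schema + // extensions (BCQ and instance normalization) rather than upstream TFLite operators. +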
TOPK_V2: 48, + SPLIT: 49, + LOG_SOFTMAX: 50, + DELEGATE: 51, + BIDIRECTIONAL_SEQUENCE_LSTM: 52, + CAST: 53, + PRELU: 54, + MAXIMUM: 55, + ARG_MAX: 56, + MINIMUM: 57, + LESS: 58, + NEG: 59, + PADV2: 60, + GREATER: 61, + GREATER_EQUAL: 62, + LESS_EQUAL: 63, + SELECT: 64, + SLICE: 65, + SIN: 66, + TRANSPOSE_CONV: 67, + SPARSE_TO_DENSE: 68, + TILE: 69, + EXPAND_DIMS: 70, + EQUAL: 71, + NOT_EQUAL: 72, + LOG: 73, + SUM: 74, + SQRT: 75, + RSQRT: 76, + SHAPE: 77, + POW: 78, + ARG_MIN: 79, + FAKE_QUANT: 80, + REDUCE_PROD: 81, + REDUCE_MAX: 82, + PACK: 83, + LOGICAL_OR: 84, + ONE_HOT: 85, + LOGICAL_AND: 86, + LOGICAL_NOT: 87, + UNPACK: 88, + REDUCE_MIN: 89, + FLOOR_DIV: 90, + REDUCE_ANY: 91, + SQUARE: 92, + ZEROS_LIKE: 93, + FILL: 94, + FLOOR_MOD: 95, + RANGE: 96, + RESIZE_NEAREST_NEIGHBOR: 97, + LEAKY_RELU: 98, + SQUARED_DIFFERENCE: 99, + MIRROR_PAD: 100, + ABS: 101, + SPLIT_V: 102, + UNIQUE: 103, + CEIL: 104, + REVERSE_V2: 105, + ADD_N: 106, + GATHER_ND: 107, + COS: 108, + WHERE: 109, + RANK: 110, + ELU: 111, + REVERSE_SEQUENCE: 112, + MATRIX_DIAG: 113, + QUANTIZE: 114, + MATRIX_SET_DIAG: 115, + ROUND: 116, + HARD_SWISH: 117, + IF: 118, + WHILE: 119, + NON_MAX_SUPPRESSION_V4: 120, + NON_MAX_SUPPRESSION_V5: 121, + SCATTER_ND: 122, + SELECT_V2: 123, + DENSIFY: 124, + SEGMENT_SUM: 125, + BATCH_MATMUL: 126, + PLACEHOLDER_FOR_GREATER_OP_CODES: 127, + CUMSUM: 128, + CALL_ONCE: 129, + BROADCAST_TO: 130, + RFFT2D: 131, + CONV_3D: 132, + IMAG: 133, + REAL: 134, + COMPLEX_ABS: 135, + HASHTABLE: 136, + HASHTABLE_FIND: 137, + HASHTABLE_IMPORT: 138, + HASHTABLE_SIZE: 139, + REDUCE_ALL: 140, + CONV_3D_TRANSPOSE: 141, + VAR_HANDLE: 142, + READ_VARIABLE: 143, + ASSIGN_VARIABLE: 144, + BROADCAST_ARGS: 145, + RANDOM_STANDARD_NORMAL: 146, + BUCKETIZE: 147, + RANDOM_UNIFORM: 148, + MULTINOMIAL: 149, + GELU: 150, + DYNAMIC_UPDATE_SLICE: 151, + RELU_0_TO_1: 152, + UNSORTED_SEGMENT_PROD: 153, + UNSORTED_SEGMENT_MAX: 154, + UNSORTED_SEGMENT_SUM: 155, + ATAN2: 156, + UNSORTED_SEGMENT_MIN: 157, + SIGN: 158, + BITCAST: 159, + BITWISE_XOR: 160, + RIGHT_SHIFT: 161, + STABLEHLO_LOGISTIC: 162, + STABLEHLO_ADD: 163, + STABLEHLO_DIVIDE: 164, + STABLEHLO_MULTIPLY: 165, + STABLEHLO_MAXIMUM: 166, + STABLEHLO_RESHAPE: 167, + STABLEHLO_CLAMP: 168, + STABLEHLO_CONCATENATE: 169, + STABLEHLO_BROADCAST_IN_DIM: 170, + STABLEHLO_CONVOLUTION: 171, + STABLEHLO_SLICE: 172, + STABLEHLO_CUSTOM_CALL: 173, + STABLEHLO_REDUCE: 174, + STABLEHLO_ABS: 175, + STABLEHLO_AND: 176, + STABLEHLO_COSINE: 177, + STABLEHLO_EXPONENTIAL: 178, + STABLEHLO_FLOOR: 179, + STABLEHLO_LOG: 180, + STABLEHLO_MINIMUM: 181, + STABLEHLO_NEGATE: 182, + STABLEHLO_OR: 183, + STABLEHLO_POWER: 184, + STABLEHLO_REMAINDER: 185, + STABLEHLO_RSQRT: 186, + STABLEHLO_SELECT: 187, + STABLEHLO_SUBTRACT: 188, + STABLEHLO_TANH: 189, + STABLEHLO_SCATTER: 190, + STABLEHLO_COMPARE: 191, + STABLEHLO_CONVERT: 192, + STABLEHLO_DYNAMIC_SLICE: 193, + STABLEHLO_DYNAMIC_UPDATE_SLICE: 194, + STABLEHLO_PAD: 195, + STABLEHLO_IOTA: 196, + STABLEHLO_DOT_GENERAL: 197, + STABLEHLO_REDUCE_WINDOW: 198, + STABLEHLO_SORT: 199, + STABLEHLO_WHILE: 200, + STABLEHLO_GATHER: 201, + STABLEHLO_TRANSPOSE: 202, + DILATE: 203, + STABLEHLO_RNG_BIT_GENERATOR: 204, + REDUCE_WINDOW: 205 +}; + +$root.circle.BuiltinOptions = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.circle.Conv2DOptions.decode(reader, position); + case 2: return $root.circle.DepthwiseConv2DOptions.decode(reader, position); + case 3: return $root.circle.ConcatEmbeddingsOptions.decode(reader, position); + case 
4: return $root.circle.LSHProjectionOptions.decode(reader, position); + case 5: return $root.circle.Pool2DOptions.decode(reader, position); + case 6: return $root.circle.SVDFOptions.decode(reader, position); + case 7: return $root.circle.RNNOptions.decode(reader, position); + case 8: return $root.circle.FullyConnectedOptions.decode(reader, position); + case 9: return $root.circle.SoftmaxOptions.decode(reader, position); + case 10: return $root.circle.ConcatenationOptions.decode(reader, position); + case 11: return $root.circle.AddOptions.decode(reader, position); + case 12: return $root.circle.L2NormOptions.decode(reader, position); + case 13: return $root.circle.LocalResponseNormalizationOptions.decode(reader, position); + case 14: return $root.circle.LSTMOptions.decode(reader, position); + case 15: return $root.circle.ResizeBilinearOptions.decode(reader, position); + case 16: return $root.circle.CallOptions.decode(reader, position); + case 17: return $root.circle.ReshapeOptions.decode(reader, position); + case 18: return $root.circle.SkipGramOptions.decode(reader, position); + case 19: return $root.circle.SpaceToDepthOptions.decode(reader, position); + case 20: return $root.circle.EmbeddingLookupSparseOptions.decode(reader, position); + case 21: return $root.circle.MulOptions.decode(reader, position); + case 22: return $root.circle.PadOptions.decode(reader, position); + case 23: return $root.circle.GatherOptions.decode(reader, position); + case 24: return $root.circle.BatchToSpaceNDOptions.decode(reader, position); + case 25: return $root.circle.SpaceToBatchNDOptions.decode(reader, position); + case 26: return $root.circle.TransposeOptions.decode(reader, position); + case 27: return $root.circle.ReducerOptions.decode(reader, position); + case 28: return $root.circle.SubOptions.decode(reader, position); + case 29: return $root.circle.DivOptions.decode(reader, position); + case 30: return $root.circle.SqueezeOptions.decode(reader, position); + case 31: return $root.circle.SequenceRNNOptions.decode(reader, position); + case 32: return $root.circle.StridedSliceOptions.decode(reader, position); + case 33: return $root.circle.ExpOptions.decode(reader, position); + case 34: return $root.circle.TopKV2Options.decode(reader, position); + case 35: return $root.circle.SplitOptions.decode(reader, position); + case 36: return $root.circle.LogSoftmaxOptions.decode(reader, position); + case 37: return $root.circle.CastOptions.decode(reader, position); + case 38: return $root.circle.DequantizeOptions.decode(reader, position); + case 39: return $root.circle.MaximumMinimumOptions.decode(reader, position); + case 40: return $root.circle.ArgMaxOptions.decode(reader, position); + case 41: return $root.circle.LessOptions.decode(reader, position); + case 42: return $root.circle.NegOptions.decode(reader, position); + case 43: return $root.circle.PadV2Options.decode(reader, position); + case 44: return $root.circle.GreaterOptions.decode(reader, position); + case 45: return $root.circle.GreaterEqualOptions.decode(reader, position); + case 46: return $root.circle.LessEqualOptions.decode(reader, position); + case 47: return $root.circle.SelectOptions.decode(reader, position); + case 48: return $root.circle.SliceOptions.decode(reader, position); + case 49: return $root.circle.TransposeConvOptions.decode(reader, position); + case 50: return $root.circle.SparseToDenseOptions.decode(reader, position); + case 51: return $root.circle.TileOptions.decode(reader, position); + case 52: return 
$root.circle.ExpandDimsOptions.decode(reader, position); + case 53: return $root.circle.EqualOptions.decode(reader, position); + case 54: return $root.circle.NotEqualOptions.decode(reader, position); + case 55: return $root.circle.ShapeOptions.decode(reader, position); + case 56: return $root.circle.PowOptions.decode(reader, position); + case 57: return $root.circle.ArgMinOptions.decode(reader, position); + case 58: return $root.circle.FakeQuantOptions.decode(reader, position); + case 59: return $root.circle.PackOptions.decode(reader, position); + case 60: return $root.circle.LogicalOrOptions.decode(reader, position); + case 61: return $root.circle.OneHotOptions.decode(reader, position); + case 62: return $root.circle.LogicalAndOptions.decode(reader, position); + case 63: return $root.circle.LogicalNotOptions.decode(reader, position); + case 64: return $root.circle.UnpackOptions.decode(reader, position); + case 65: return $root.circle.FloorDivOptions.decode(reader, position); + case 66: return $root.circle.SquareOptions.decode(reader, position); + case 67: return $root.circle.ZerosLikeOptions.decode(reader, position); + case 68: return $root.circle.FillOptions.decode(reader, position); + case 69: return $root.circle.BidirectionalSequenceLSTMOptions.decode(reader, position); + case 70: return $root.circle.BidirectionalSequenceRNNOptions.decode(reader, position); + case 71: return $root.circle.UnidirectionalSequenceLSTMOptions.decode(reader, position); + case 72: return $root.circle.FloorModOptions.decode(reader, position); + case 73: return $root.circle.RangeOptions.decode(reader, position); + case 74: return $root.circle.ResizeNearestNeighborOptions.decode(reader, position); + case 75: return $root.circle.LeakyReluOptions.decode(reader, position); + case 76: return $root.circle.SquaredDifferenceOptions.decode(reader, position); + case 77: return $root.circle.MirrorPadOptions.decode(reader, position); + case 78: return $root.circle.AbsOptions.decode(reader, position); + case 79: return $root.circle.SplitVOptions.decode(reader, position); + case 80: return $root.circle.UniqueOptions.decode(reader, position); + case 81: return $root.circle.ReverseV2Options.decode(reader, position); + case 82: return $root.circle.AddNOptions.decode(reader, position); + case 83: return $root.circle.GatherNdOptions.decode(reader, position); + case 84: return $root.circle.CosOptions.decode(reader, position); + case 85: return $root.circle.WhereOptions.decode(reader, position); + case 86: return $root.circle.RankOptions.decode(reader, position); + case 87: return $root.circle.ReverseSequenceOptions.decode(reader, position); + case 88: return $root.circle.MatrixDiagOptions.decode(reader, position); + case 89: return $root.circle.QuantizeOptions.decode(reader, position); + case 90: return $root.circle.MatrixSetDiagOptions.decode(reader, position); + case 91: return $root.circle.HardSwishOptions.decode(reader, position); + case 92: return $root.circle.IfOptions.decode(reader, position); + case 93: return $root.circle.WhileOptions.decode(reader, position); + case 94: return $root.circle.DepthToSpaceOptions.decode(reader, position); + case 95: return $root.circle.NonMaxSuppressionV4Options.decode(reader, position); + case 96: return $root.circle.NonMaxSuppressionV5Options.decode(reader, position); + case 97: return $root.circle.ScatterNdOptions.decode(reader, position); + case 98: return $root.circle.SelectV2Options.decode(reader, position); + case 99: return $root.circle.DensifyOptions.decode(reader, position); + case 
100: return $root.circle.SegmentSumOptions.decode(reader, position); + case 101: return $root.circle.BatchMatMulOptions.decode(reader, position); + case 102: return $root.circle.CumsumOptions.decode(reader, position); + case 103: return $root.circle.CallOnceOptions.decode(reader, position); + case 104: return $root.circle.BroadcastToOptions.decode(reader, position); + case 105: return $root.circle.Rfft2dOptions.decode(reader, position); + case 106: return $root.circle.Conv3DOptions.decode(reader, position); + case 107: return $root.circle.HashtableOptions.decode(reader, position); + case 108: return $root.circle.HashtableFindOptions.decode(reader, position); + case 109: return $root.circle.HashtableImportOptions.decode(reader, position); + case 110: return $root.circle.HashtableSizeOptions.decode(reader, position); + case 111: return $root.circle.VarHandleOptions.decode(reader, position); + case 112: return $root.circle.ReadVariableOptions.decode(reader, position); + case 113: return $root.circle.AssignVariableOptions.decode(reader, position); + case 114: return $root.circle.RandomOptions.decode(reader, position); + case 115: return $root.circle.BucketizeOptions.decode(reader, position); + case 116: return $root.circle.GeluOptions.decode(reader, position); + case 117: return $root.circle.DynamicUpdateSliceOptions.decode(reader, position); + case 118: return $root.circle.UnsortedSegmentProdOptions.decode(reader, position); + case 119: return $root.circle.UnsortedSegmentMaxOptions.decode(reader, position); + case 120: return $root.circle.UnsortedSegmentMinOptions.decode(reader, position); + case 121: return $root.circle.UnsortedSegmentSumOptions.decode(reader, position); + case 122: return $root.circle.ATan2Options.decode(reader, position); + case 123: return $root.circle.SignOptions.decode(reader, position); + case 124: return $root.circle.BitcastOptions.decode(reader, position); + case 125: return $root.circle.BitwiseXorOptions.decode(reader, position); + case 126: return $root.circle.RightShiftOptions.decode(reader, position); + case 252: return $root.circle.BCQGatherOptions.decode(reader, position); + case 253: return $root.circle.BCQFullyConnectedOptions.decode(reader, position); + case 254: return $root.circle.InstanceNormOptions.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'Conv2DOptions': return $root.circle.Conv2DOptions.decodeText(reader, json); + case 'DepthwiseConv2DOptions': return $root.circle.DepthwiseConv2DOptions.decodeText(reader, json); + case 'ConcatEmbeddingsOptions': return $root.circle.ConcatEmbeddingsOptions.decodeText(reader, json); + case 'LSHProjectionOptions': return $root.circle.LSHProjectionOptions.decodeText(reader, json); + case 'Pool2DOptions': return $root.circle.Pool2DOptions.decodeText(reader, json); + case 'SVDFOptions': return $root.circle.SVDFOptions.decodeText(reader, json); + case 'RNNOptions': return $root.circle.RNNOptions.decodeText(reader, json); + case 'FullyConnectedOptions': return $root.circle.FullyConnectedOptions.decodeText(reader, json); + case 'SoftmaxOptions': return $root.circle.SoftmaxOptions.decodeText(reader, json); + case 'ConcatenationOptions': return $root.circle.ConcatenationOptions.decodeText(reader, json); + case 'AddOptions': return $root.circle.AddOptions.decodeText(reader, json); + case 'L2NormOptions': return $root.circle.L2NormOptions.decodeText(reader, json); + case 'LocalResponseNormalizationOptions': return 
$root.circle.LocalResponseNormalizationOptions.decodeText(reader, json); + case 'LSTMOptions': return $root.circle.LSTMOptions.decodeText(reader, json); + case 'ResizeBilinearOptions': return $root.circle.ResizeBilinearOptions.decodeText(reader, json); + case 'CallOptions': return $root.circle.CallOptions.decodeText(reader, json); + case 'ReshapeOptions': return $root.circle.ReshapeOptions.decodeText(reader, json); + case 'SkipGramOptions': return $root.circle.SkipGramOptions.decodeText(reader, json); + case 'SpaceToDepthOptions': return $root.circle.SpaceToDepthOptions.decodeText(reader, json); + case 'EmbeddingLookupSparseOptions': return $root.circle.EmbeddingLookupSparseOptions.decodeText(reader, json); + case 'MulOptions': return $root.circle.MulOptions.decodeText(reader, json); + case 'PadOptions': return $root.circle.PadOptions.decodeText(reader, json); + case 'GatherOptions': return $root.circle.GatherOptions.decodeText(reader, json); + case 'BatchToSpaceNDOptions': return $root.circle.BatchToSpaceNDOptions.decodeText(reader, json); + case 'SpaceToBatchNDOptions': return $root.circle.SpaceToBatchNDOptions.decodeText(reader, json); + case 'TransposeOptions': return $root.circle.TransposeOptions.decodeText(reader, json); + case 'ReducerOptions': return $root.circle.ReducerOptions.decodeText(reader, json); + case 'SubOptions': return $root.circle.SubOptions.decodeText(reader, json); + case 'DivOptions': return $root.circle.DivOptions.decodeText(reader, json); + case 'SqueezeOptions': return $root.circle.SqueezeOptions.decodeText(reader, json); + case 'SequenceRNNOptions': return $root.circle.SequenceRNNOptions.decodeText(reader, json); + case 'StridedSliceOptions': return $root.circle.StridedSliceOptions.decodeText(reader, json); + case 'ExpOptions': return $root.circle.ExpOptions.decodeText(reader, json); + case 'TopKV2Options': return $root.circle.TopKV2Options.decodeText(reader, json); + case 'SplitOptions': return $root.circle.SplitOptions.decodeText(reader, json); + case 'LogSoftmaxOptions': return $root.circle.LogSoftmaxOptions.decodeText(reader, json); + case 'CastOptions': return $root.circle.CastOptions.decodeText(reader, json); + case 'DequantizeOptions': return $root.circle.DequantizeOptions.decodeText(reader, json); + case 'MaximumMinimumOptions': return $root.circle.MaximumMinimumOptions.decodeText(reader, json); + case 'ArgMaxOptions': return $root.circle.ArgMaxOptions.decodeText(reader, json); + case 'LessOptions': return $root.circle.LessOptions.decodeText(reader, json); + case 'NegOptions': return $root.circle.NegOptions.decodeText(reader, json); + case 'PadV2Options': return $root.circle.PadV2Options.decodeText(reader, json); + case 'GreaterOptions': return $root.circle.GreaterOptions.decodeText(reader, json); + case 'GreaterEqualOptions': return $root.circle.GreaterEqualOptions.decodeText(reader, json); + case 'LessEqualOptions': return $root.circle.LessEqualOptions.decodeText(reader, json); + case 'SelectOptions': return $root.circle.SelectOptions.decodeText(reader, json); + case 'SliceOptions': return $root.circle.SliceOptions.decodeText(reader, json); + case 'TransposeConvOptions': return $root.circle.TransposeConvOptions.decodeText(reader, json); + case 'SparseToDenseOptions': return $root.circle.SparseToDenseOptions.decodeText(reader, json); + case 'TileOptions': return $root.circle.TileOptions.decodeText(reader, json); + case 'ExpandDimsOptions': return $root.circle.ExpandDimsOptions.decodeText(reader, json); + case 'EqualOptions': return 
$root.circle.EqualOptions.decodeText(reader, json); + case 'NotEqualOptions': return $root.circle.NotEqualOptions.decodeText(reader, json); + case 'ShapeOptions': return $root.circle.ShapeOptions.decodeText(reader, json); + case 'PowOptions': return $root.circle.PowOptions.decodeText(reader, json); + case 'ArgMinOptions': return $root.circle.ArgMinOptions.decodeText(reader, json); + case 'FakeQuantOptions': return $root.circle.FakeQuantOptions.decodeText(reader, json); + case 'PackOptions': return $root.circle.PackOptions.decodeText(reader, json); + case 'LogicalOrOptions': return $root.circle.LogicalOrOptions.decodeText(reader, json); + case 'OneHotOptions': return $root.circle.OneHotOptions.decodeText(reader, json); + case 'LogicalAndOptions': return $root.circle.LogicalAndOptions.decodeText(reader, json); + case 'LogicalNotOptions': return $root.circle.LogicalNotOptions.decodeText(reader, json); + case 'UnpackOptions': return $root.circle.UnpackOptions.decodeText(reader, json); + case 'FloorDivOptions': return $root.circle.FloorDivOptions.decodeText(reader, json); + case 'SquareOptions': return $root.circle.SquareOptions.decodeText(reader, json); + case 'ZerosLikeOptions': return $root.circle.ZerosLikeOptions.decodeText(reader, json); + case 'FillOptions': return $root.circle.FillOptions.decodeText(reader, json); + case 'BidirectionalSequenceLSTMOptions': return $root.circle.BidirectionalSequenceLSTMOptions.decodeText(reader, json); + case 'BidirectionalSequenceRNNOptions': return $root.circle.BidirectionalSequenceRNNOptions.decodeText(reader, json); + case 'UnidirectionalSequenceLSTMOptions': return $root.circle.UnidirectionalSequenceLSTMOptions.decodeText(reader, json); + case 'FloorModOptions': return $root.circle.FloorModOptions.decodeText(reader, json); + case 'RangeOptions': return $root.circle.RangeOptions.decodeText(reader, json); + case 'ResizeNearestNeighborOptions': return $root.circle.ResizeNearestNeighborOptions.decodeText(reader, json); + case 'LeakyReluOptions': return $root.circle.LeakyReluOptions.decodeText(reader, json); + case 'SquaredDifferenceOptions': return $root.circle.SquaredDifferenceOptions.decodeText(reader, json); + case 'MirrorPadOptions': return $root.circle.MirrorPadOptions.decodeText(reader, json); + case 'AbsOptions': return $root.circle.AbsOptions.decodeText(reader, json); + case 'SplitVOptions': return $root.circle.SplitVOptions.decodeText(reader, json); + case 'UniqueOptions': return $root.circle.UniqueOptions.decodeText(reader, json); + case 'ReverseV2Options': return $root.circle.ReverseV2Options.decodeText(reader, json); + case 'AddNOptions': return $root.circle.AddNOptions.decodeText(reader, json); + case 'GatherNdOptions': return $root.circle.GatherNdOptions.decodeText(reader, json); + case 'CosOptions': return $root.circle.CosOptions.decodeText(reader, json); + case 'WhereOptions': return $root.circle.WhereOptions.decodeText(reader, json); + case 'RankOptions': return $root.circle.RankOptions.decodeText(reader, json); + case 'ReverseSequenceOptions': return $root.circle.ReverseSequenceOptions.decodeText(reader, json); + case 'MatrixDiagOptions': return $root.circle.MatrixDiagOptions.decodeText(reader, json); + case 'QuantizeOptions': return $root.circle.QuantizeOptions.decodeText(reader, json); + case 'MatrixSetDiagOptions': return $root.circle.MatrixSetDiagOptions.decodeText(reader, json); + case 'HardSwishOptions': return $root.circle.HardSwishOptions.decodeText(reader, json); + case 'IfOptions': return 
$root.circle.IfOptions.decodeText(reader, json); + case 'WhileOptions': return $root.circle.WhileOptions.decodeText(reader, json); + case 'DepthToSpaceOptions': return $root.circle.DepthToSpaceOptions.decodeText(reader, json); + case 'NonMaxSuppressionV4Options': return $root.circle.NonMaxSuppressionV4Options.decodeText(reader, json); + case 'NonMaxSuppressionV5Options': return $root.circle.NonMaxSuppressionV5Options.decodeText(reader, json); + case 'ScatterNdOptions': return $root.circle.ScatterNdOptions.decodeText(reader, json); + case 'SelectV2Options': return $root.circle.SelectV2Options.decodeText(reader, json); + case 'DensifyOptions': return $root.circle.DensifyOptions.decodeText(reader, json); + case 'SegmentSumOptions': return $root.circle.SegmentSumOptions.decodeText(reader, json); + case 'BatchMatMulOptions': return $root.circle.BatchMatMulOptions.decodeText(reader, json); + case 'CumsumOptions': return $root.circle.CumsumOptions.decodeText(reader, json); + case 'CallOnceOptions': return $root.circle.CallOnceOptions.decodeText(reader, json); + case 'BroadcastToOptions': return $root.circle.BroadcastToOptions.decodeText(reader, json); + case 'Rfft2dOptions': return $root.circle.Rfft2dOptions.decodeText(reader, json); + case 'Conv3DOptions': return $root.circle.Conv3DOptions.decodeText(reader, json); + case 'HashtableOptions': return $root.circle.HashtableOptions.decodeText(reader, json); + case 'HashtableFindOptions': return $root.circle.HashtableFindOptions.decodeText(reader, json); + case 'HashtableImportOptions': return $root.circle.HashtableImportOptions.decodeText(reader, json); + case 'HashtableSizeOptions': return $root.circle.HashtableSizeOptions.decodeText(reader, json); + case 'VarHandleOptions': return $root.circle.VarHandleOptions.decodeText(reader, json); + case 'ReadVariableOptions': return $root.circle.ReadVariableOptions.decodeText(reader, json); + case 'AssignVariableOptions': return $root.circle.AssignVariableOptions.decodeText(reader, json); + case 'RandomOptions': return $root.circle.RandomOptions.decodeText(reader, json); + case 'BucketizeOptions': return $root.circle.BucketizeOptions.decodeText(reader, json); + case 'GeluOptions': return $root.circle.GeluOptions.decodeText(reader, json); + case 'DynamicUpdateSliceOptions': return $root.circle.DynamicUpdateSliceOptions.decodeText(reader, json); + case 'UnsortedSegmentProdOptions': return $root.circle.UnsortedSegmentProdOptions.decodeText(reader, json); + case 'UnsortedSegmentMaxOptions': return $root.circle.UnsortedSegmentMaxOptions.decodeText(reader, json); + case 'UnsortedSegmentMinOptions': return $root.circle.UnsortedSegmentMinOptions.decodeText(reader, json); + case 'UnsortedSegmentSumOptions': return $root.circle.UnsortedSegmentSumOptions.decodeText(reader, json); + case 'ATan2Options': return $root.circle.ATan2Options.decodeText(reader, json); + case 'SignOptions': return $root.circle.SignOptions.decodeText(reader, json); + case 'BitcastOptions': return $root.circle.BitcastOptions.decodeText(reader, json); + case 'BitwiseXorOptions': return $root.circle.BitwiseXorOptions.decodeText(reader, json); + case 'RightShiftOptions': return $root.circle.RightShiftOptions.decodeText(reader, json); + case 'BCQGatherOptions': return $root.circle.BCQGatherOptions.decodeText(reader, json); + case 'BCQFullyConnectedOptions': return $root.circle.BCQFullyConnectedOptions.decodeText(reader, json); + case 'InstanceNormOptions': return $root.circle.InstanceNormOptions.decodeText(reader, json); + default: return undefined; 
+ } + } +}; + +$root.circle.BuiltinOptions2 = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.circle.StablehloConcatenateOptions.decode(reader, position); + case 2: return $root.circle.StablehloBroadcastInDimOptions.decode(reader, position); + case 3: return $root.circle.StablehloSliceOptions.decode(reader, position); + case 4: return $root.circle.StablehloConvolutionOptions.decode(reader, position); + case 5: return $root.circle.StablehloCustomCallOptions.decode(reader, position); + case 6: return $root.circle.StablehloReduceOptions.decode(reader, position); + case 7: return $root.circle.StablehloScatterOptions.decode(reader, position); + case 8: return $root.circle.StablehloCompareOptions.decode(reader, position); + case 9: return $root.circle.StablehloDynamicSliceOptions.decode(reader, position); + case 10: return $root.circle.StablehloPadOptions.decode(reader, position); + case 11: return $root.circle.StablehloIotaOptions.decode(reader, position); + case 12: return $root.circle.StablehloDotGeneralOptions.decode(reader, position); + case 13: return $root.circle.StablehloReduceWindowOptions.decode(reader, position); + case 14: return $root.circle.StablehloSortOptions.decode(reader, position); + case 15: return $root.circle.StablehloWhileOptions.decode(reader, position); + case 16: return $root.circle.StablehloGatherOptions.decode(reader, position); + case 17: return $root.circle.StablehloTransposeOptions.decode(reader, position); + case 18: return $root.circle.DilateOptions.decode(reader, position); + case 19: return $root.circle.StablehloRngBitGeneratorOptions.decode(reader, position); + case 20: return $root.circle.ReduceWindowOptions.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'StablehloConcatenateOptions': return $root.circle.StablehloConcatenateOptions.decodeText(reader, json); + case 'StablehloBroadcastInDimOptions': return $root.circle.StablehloBroadcastInDimOptions.decodeText(reader, json); + case 'StablehloSliceOptions': return $root.circle.StablehloSliceOptions.decodeText(reader, json); + case 'StablehloConvolutionOptions': return $root.circle.StablehloConvolutionOptions.decodeText(reader, json); + case 'StablehloCustomCallOptions': return $root.circle.StablehloCustomCallOptions.decodeText(reader, json); + case 'StablehloReduceOptions': return $root.circle.StablehloReduceOptions.decodeText(reader, json); + case 'StablehloScatterOptions': return $root.circle.StablehloScatterOptions.decodeText(reader, json); + case 'StablehloCompareOptions': return $root.circle.StablehloCompareOptions.decodeText(reader, json); + case 'StablehloDynamicSliceOptions': return $root.circle.StablehloDynamicSliceOptions.decodeText(reader, json); + case 'StablehloPadOptions': return $root.circle.StablehloPadOptions.decodeText(reader, json); + case 'StablehloIotaOptions': return $root.circle.StablehloIotaOptions.decodeText(reader, json); + case 'StablehloDotGeneralOptions': return $root.circle.StablehloDotGeneralOptions.decodeText(reader, json); + case 'StablehloReduceWindowOptions': return $root.circle.StablehloReduceWindowOptions.decodeText(reader, json); + case 'StablehloSortOptions': return $root.circle.StablehloSortOptions.decodeText(reader, json); + case 'StablehloWhileOptions': return $root.circle.StablehloWhileOptions.decodeText(reader, json); + case 'StablehloGatherOptions': return $root.circle.StablehloGatherOptions.decodeText(reader, json); + case 
'StablehloTransposeOptions': return $root.circle.StablehloTransposeOptions.decodeText(reader, json); + case 'DilateOptions': return $root.circle.DilateOptions.decodeText(reader, json); + case 'StablehloRngBitGeneratorOptions': return $root.circle.StablehloRngBitGeneratorOptions.decodeText(reader, json); + case 'ReduceWindowOptions': return $root.circle.ReduceWindowOptions.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.circle.StablehloGatherOptions = class StablehloGatherOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloGatherOptions(); + $.offset_dims = reader.int64s_(position, 4); + $.collapsed_slice_dims = reader.int64s_(position, 6); + $.start_index_map = reader.int64s_(position, 8); + $.index_vector_dim = reader.int64_(position, 10, 0); + $.slice_sizes = reader.int64s_(position, 12); + $.indices_are_sorted = reader.bool_(position, 14, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloGatherOptions(); + $.offset_dims = reader.array(json.offset_dims); + $.collapsed_slice_dims = reader.array(json.collapsed_slice_dims); + $.start_index_map = reader.array(json.start_index_map); + $.index_vector_dim = reader.value(json.index_vector_dim, 0); + $.slice_sizes = reader.array(json.slice_sizes); + $.indices_are_sorted = reader.value(json.indices_are_sorted, false); + return $; + } +}; + +$root.circle.StablehloTransposeOptions = class StablehloTransposeOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloTransposeOptions(); + $.permutation = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloTransposeOptions(); + $.permutation = reader.array(json.permutation); + return $; + } +}; + +$root.circle.StablehloPrecisionConfig = { + DEFAULT: 0, + HIGH: 1, + HIGHEST: 2 +}; + +$root.circle.StablehloDotGeneralOptions = class StablehloDotGeneralOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloDotGeneralOptions(); + $.lhs_batching_dimensions = reader.int64s_(position, 4); + $.rhs_batching_dimensions = reader.int64s_(position, 6); + $.lhs_contracting_dimensions = reader.int64s_(position, 8); + $.rhs_contracting_dimensions = reader.int64s_(position, 10); + $.precision_config = reader.typedArray(position, 12, Uint32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloDotGeneralOptions(); + $.lhs_batching_dimensions = reader.array(json.lhs_batching_dimensions); + $.rhs_batching_dimensions = reader.array(json.rhs_batching_dimensions); + $.lhs_contracting_dimensions = reader.array(json.lhs_contracting_dimensions); + $.rhs_contracting_dimensions = reader.array(json.rhs_contracting_dimensions); + $.precision_config = reader.objectArray(json.precision_config, $root.circle.StablehloPrecisionConfig.decodeText); + return $; + } +}; + +$root.circle.StablehloReduceWindowOptions = class StablehloReduceWindowOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloReduceWindowOptions(); + $.window_dimensions = reader.int64s_(position, 4); + $.window_strides = reader.int64s_(position, 6); + $.base_dilations = reader.int64s_(position, 8); + $.window_dilations = reader.int64s_(position, 10); + $.padding = reader.int64s_(position, 12); + $.body_subgraph_index = reader.int32_(position, 14, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloReduceWindowOptions(); 
+ $.window_dimensions = reader.array(json.window_dimensions); + $.window_strides = reader.array(json.window_strides); + $.base_dilations = reader.array(json.base_dilations); + $.window_dilations = reader.array(json.window_dilations); + $.padding = reader.array(json.padding); + $.body_subgraph_index = reader.value(json.body_subgraph_index, 0); + return $; + } +}; + +$root.circle.StablehloWhileOptions = class StablehloWhileOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloWhileOptions(); + $.cond_subgraph_index = reader.int32_(position, 4, 0); + $.body_subgraph_index = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloWhileOptions(); + $.cond_subgraph_index = reader.value(json.cond_subgraph_index, 0); + $.body_subgraph_index = reader.value(json.body_subgraph_index, 0); + return $; + } +}; + +$root.circle.StablehloSortOptions = class StablehloSortOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloSortOptions(); + $.dimension = reader.int64_(position, 4, 0); + $.is_stable = reader.bool_(position, 6, false); + $.comparator_subgraph_index = reader.int32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloSortOptions(); + $.dimension = reader.value(json.dimension, 0); + $.is_stable = reader.value(json.is_stable, false); + $.comparator_subgraph_index = reader.value(json.comparator_subgraph_index, 0); + return $; + } +}; + +$root.circle.StablehloConcatenateOptions = class StablehloConcatenateOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloConcatenateOptions(); + $.dimension = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloConcatenateOptions(); + $.dimension = reader.value(json.dimension, 0); + return $; + } +}; + +$root.circle.StablehloBroadcastInDimOptions = class StablehloBroadcastInDimOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloBroadcastInDimOptions(); + $.broadcast_dimensions = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloBroadcastInDimOptions(); + $.broadcast_dimensions = reader.array(json.broadcast_dimensions); + return $; + } +}; + +$root.circle.StablehloComparisonDirection = { + STABLEHLO_COMPARISON_DIRECTION_EQ: 0, + STABLEHLO_COMPARISON_DIRECTION_NE: 1, + STABLEHLO_COMPARISON_DIRECTION_GE: 2, + STABLEHLO_COMPARISON_DIRECTION_GT: 3, + STABLEHLO_COMPARISON_DIRECTION_LE: 4, + STABLEHLO_COMPARISON_DIRECTION_LT: 5 +}; + +$root.circle.StablehloComparisonType = { + STABLEHLO_COMPARISON_TYPE_NOTYPE: 0, + STABLEHLO_COMPARISON_TYPE_FLOAT: 1, + STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER: 2, + STABLEHLO_COMPARISON_TYPE_SIGNED: 3, + STABLEHLO_COMPARISON_TYPE_UNSIGNED: 4 +}; + +$root.circle.StablehloCompareOptions = class StablehloCompareOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloCompareOptions(); + $.comparison_direction = reader.uint32_(position, 4, 0); + $.compare_type = reader.uint32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloCompareOptions(); + $.comparison_direction = $root.circle.StablehloComparisonDirection[json.comparison_direction]; + $.compare_type = $root.circle.StablehloComparisonType[json.compare_type]; + return $; + } +}; + 
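// Editor's sketch (not part of the generated schema): how the text path above
// resolves enums. decodeText indexes the enum objects directly, so the JSON
// form carries enum *names* while the decoded object carries their numeric
// values. The reader below is a hypothetical stand-in for the runtime text
// reader; StablehloCompareOptions.decodeText never actually calls it, but the
// signature expects one.
const sketchTextReader = { value: (v, d) => (v === undefined ? d : v), array: (v) => v };
const sketchCompare = $root.circle.StablehloCompareOptions.decodeText(sketchTextReader, {
    comparison_direction: 'STABLEHLO_COMPARISON_DIRECTION_GE',
    compare_type: 'STABLEHLO_COMPARISON_TYPE_SIGNED'
});
// sketchCompare.comparison_direction === 2 and sketchCompare.compare_type === 3.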
+$root.circle.StablehloDynamicSliceOptions = class StablehloDynamicSliceOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloDynamicSliceOptions(); + $.slice_sizes = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloDynamicSliceOptions(); + $.slice_sizes = reader.array(json.slice_sizes); + return $; + } +}; + +$root.circle.StablehloPadOptions = class StablehloPadOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloPadOptions(); + $.edge_padding_low = reader.int64s_(position, 4); + $.edge_padding_high = reader.int64s_(position, 6); + $.interior_padding = reader.int64s_(position, 8); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloPadOptions(); + $.edge_padding_low = reader.array(json.edge_padding_low); + $.edge_padding_high = reader.array(json.edge_padding_high); + $.interior_padding = reader.array(json.interior_padding); + return $; + } +}; + +$root.circle.StablehloIotaOptions = class StablehloIotaOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloIotaOptions(); + $.iota_dimension = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloIotaOptions(); + $.iota_dimension = reader.value(json.iota_dimension, 0); + return $; + } +}; + +$root.circle.StablehloCustomCallOptions = class StablehloCustomCallOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloCustomCallOptions(); + $.call_target_name = reader.string_(position, 4, null); + $.has_side_effect = reader.bool_(position, 6, false); + $.backend_config = reader.string_(position, 8, null); + $.api_version = reader.int32_(position, 10, 0); + $.called_computations = reader.typedArray(position, 12, Int32Array); + $.custom_attributes = reader.typedArray(position, 14, Uint8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloCustomCallOptions(); + $.call_target_name = reader.value(json.call_target_name, null); + $.has_side_effect = reader.value(json.has_side_effect, false); + $.backend_config = reader.value(json.backend_config, null); + $.api_version = reader.value(json.api_version, 0); + $.called_computations = reader.typedArray(json.called_computations, Int32Array); + $.custom_attributes = reader.typedArray(json.custom_attributes, Uint8Array); + return $; + } +}; + +$root.circle.StablehloReduceOptions = class StablehloReduceOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloReduceOptions(); + $.dimensions = reader.int64s_(position, 4); + $.body_subgraph_index = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloReduceOptions(); + $.dimensions = reader.array(json.dimensions); + $.body_subgraph_index = reader.value(json.body_subgraph_index, 0); + return $; + } +}; + +$root.circle.StablehloSliceOptions = class StablehloSliceOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloSliceOptions(); + $.start_indices = reader.int64s_(position, 4); + $.limit_indices = reader.int64s_(position, 6); + $.strides = reader.int64s_(position, 8); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloSliceOptions(); + $.start_indices = reader.array(json.start_indices); + $.limit_indices = reader.array(json.limit_indices); + $.strides 
= reader.array(json.strides); + return $; + } +}; + +$root.circle.StablehloConvolutionOptions = class StablehloConvolutionOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloConvolutionOptions(); + $.window_strides = reader.int64s_(position, 4); + $.padding = reader.int64s_(position, 6); + $.lhs_dilation = reader.int64s_(position, 8); + $.rhs_dilation = reader.int64s_(position, 10); + $.window_reversal = reader.bools_(position, 12); + $.input_batch_dimension = reader.int64_(position, 14, 0); + $.input_feature_dimension = reader.int64_(position, 16, 0); + $.input_spatial_dimensions = reader.int64s_(position, 18); + $.kernel_input_feature_dimension = reader.int64_(position, 20, 0); + $.kernel_output_feature_dimension = reader.int64_(position, 22, 0); + $.kernel_spatial_dimensions = reader.int64s_(position, 24); + $.output_batch_dimension = reader.int64_(position, 26, 0); + $.output_feature_dimension = reader.int64_(position, 28, 0); + $.output_spatial_dimensions = reader.int64s_(position, 30); + $.feature_group_count = reader.int64_(position, 32, 0); + $.batch_group_count = reader.int64_(position, 34, 0); + $.precision_config = reader.typedArray(position, 36, Uint32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloConvolutionOptions(); + $.window_strides = reader.array(json.window_strides); + $.padding = reader.array(json.padding); + $.lhs_dilation = reader.array(json.lhs_dilation); + $.rhs_dilation = reader.array(json.rhs_dilation); + $.window_reversal = reader.array(json.window_reversal); + $.input_batch_dimension = reader.value(json.input_batch_dimension, 0); + $.input_feature_dimension = reader.value(json.input_feature_dimension, 0); + $.input_spatial_dimensions = reader.array(json.input_spatial_dimensions); + $.kernel_input_feature_dimension = reader.value(json.kernel_input_feature_dimension, 0); + $.kernel_output_feature_dimension = reader.value(json.kernel_output_feature_dimension, 0); + $.kernel_spatial_dimensions = reader.array(json.kernel_spatial_dimensions); + $.output_batch_dimension = reader.value(json.output_batch_dimension, 0); + $.output_feature_dimension = reader.value(json.output_feature_dimension, 0); + $.output_spatial_dimensions = reader.array(json.output_spatial_dimensions); + $.feature_group_count = reader.value(json.feature_group_count, 0); + $.batch_group_count = reader.value(json.batch_group_count, 0); + $.precision_config = reader.objectArray(json.precision_config, $root.circle.StablehloPrecisionConfig.decodeText); + return $; + } +}; + +$root.circle.StablehloScatterOptions = class StablehloScatterOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloScatterOptions(); + $.indices_are_sorted = reader.bool_(position, 4, false); + $.update_window_dims = reader.int64s_(position, 6); + $.inserted_window_dims = reader.int64s_(position, 8); + $.scatter_dims_to_operand_dims = reader.int64s_(position, 10); + $.index_vector_dim = reader.int64_(position, 12, 0); + $.unique_indices = reader.bool_(position, 14, false); + $.update_computation_subgraph_index = reader.int32_(position, 16, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloScatterOptions(); + $.indices_are_sorted = reader.value(json.indices_are_sorted, false); + $.update_window_dims = reader.array(json.update_window_dims); + $.inserted_window_dims = reader.array(json.inserted_window_dims); + $.scatter_dims_to_operand_dims = 
reader.array(json.scatter_dims_to_operand_dims); + $.index_vector_dim = reader.value(json.index_vector_dim, 0); + $.unique_indices = reader.value(json.unique_indices, false); + $.update_computation_subgraph_index = reader.value(json.update_computation_subgraph_index, 0); + return $; + } +}; + +$root.circle.RngAlgorithm = { + DEFAULT: 0, + PHILOX: 1, + THREEFRY: 2 +}; + +$root.circle.StablehloRngBitGeneratorOptions = class StablehloRngBitGeneratorOptions { + + static decode(reader, position) { + const $ = new $root.circle.StablehloRngBitGeneratorOptions(); + $.algorithm = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StablehloRngBitGeneratorOptions(); + $.algorithm = $root.circle.RngAlgorithm[json.algorithm]; + return $; + } +}; + +$root.circle.Padding = { + SAME: 0, + VALID: 1 +}; + +$root.circle.ActivationFunctionType = { + NONE: 0, + RELU: 1, + RELU_N1_TO_1: 2, + RELU6: 3, + TANH: 4, + SIGN_BIT: 5 +}; + +$root.circle.Conv2DOptions = class Conv2DOptions { + + static decode(reader, position) { + const $ = new $root.circle.Conv2DOptions(); + $.padding = reader.int8_(position, 4, 0); + $.stride_w = reader.int32_(position, 6, 0); + $.stride_h = reader.int32_(position, 8, 0); + $.fused_activation_function = reader.int8_(position, 10, 0); + $.dilation_w_factor = reader.int32_(position, 12, 1); + $.dilation_h_factor = reader.int32_(position, 14, 1); + $.quantized_bias_type = reader.int8_(position, 16, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.Conv2DOptions(); + $.padding = $root.circle.Padding[json.padding]; + $.stride_w = reader.value(json.stride_w, 0); + $.stride_h = reader.value(json.stride_h, 0); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.dilation_w_factor = reader.value(json.dilation_w_factor, 1); + $.dilation_h_factor = reader.value(json.dilation_h_factor, 1); + $.quantized_bias_type = $root.circle.TensorType[json.quantized_bias_type]; + return $; + } +}; + +$root.circle.Conv3DOptions = class Conv3DOptions { + + static decode(reader, position) { + const $ = new $root.circle.Conv3DOptions(); + $.padding = reader.int8_(position, 4, 0); + $.stride_d = reader.int32_(position, 6, 0); + $.stride_w = reader.int32_(position, 8, 0); + $.stride_h = reader.int32_(position, 10, 0); + $.fused_activation_function = reader.int8_(position, 12, 0); + $.dilation_d_factor = reader.int32_(position, 14, 1); + $.dilation_w_factor = reader.int32_(position, 16, 1); + $.dilation_h_factor = reader.int32_(position, 18, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.Conv3DOptions(); + $.padding = $root.circle.Padding[json.padding]; + $.stride_d = reader.value(json.stride_d, 0); + $.stride_w = reader.value(json.stride_w, 0); + $.stride_h = reader.value(json.stride_h, 0); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.dilation_d_factor = reader.value(json.dilation_d_factor, 1); + $.dilation_w_factor = reader.value(json.dilation_w_factor, 1); + $.dilation_h_factor = reader.value(json.dilation_h_factor, 1); + return $; + } +}; + +$root.circle.Pool2DOptions = class Pool2DOptions { + + static decode(reader, position) { + const $ = new $root.circle.Pool2DOptions(); + $.padding = reader.int8_(position, 4, 0); + $.stride_w = reader.int32_(position, 6, 0); + $.stride_h = reader.int32_(position, 8, 0); + $.filter_width = reader.int32_(position, 10, 0); + 
$.filter_height = reader.int32_(position, 12, 0); + $.fused_activation_function = reader.int8_(position, 14, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.Pool2DOptions(); + $.padding = $root.circle.Padding[json.padding]; + $.stride_w = reader.value(json.stride_w, 0); + $.stride_h = reader.value(json.stride_h, 0); + $.filter_width = reader.value(json.filter_width, 0); + $.filter_height = reader.value(json.filter_height, 0); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.circle.DepthwiseConv2DOptions = class DepthwiseConv2DOptions { + + static decode(reader, position) { + const $ = new $root.circle.DepthwiseConv2DOptions(); + $.padding = reader.int8_(position, 4, 0); + $.stride_w = reader.int32_(position, 6, 0); + $.stride_h = reader.int32_(position, 8, 0); + $.depth_multiplier = reader.int32_(position, 10, 0); + $.fused_activation_function = reader.int8_(position, 12, 0); + $.dilation_w_factor = reader.int32_(position, 14, 1); + $.dilation_h_factor = reader.int32_(position, 16, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.DepthwiseConv2DOptions(); + $.padding = $root.circle.Padding[json.padding]; + $.stride_w = reader.value(json.stride_w, 0); + $.stride_h = reader.value(json.stride_h, 0); + $.depth_multiplier = reader.value(json.depth_multiplier, 0); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.dilation_w_factor = reader.value(json.dilation_w_factor, 1); + $.dilation_h_factor = reader.value(json.dilation_h_factor, 1); + return $; + } +}; + +$root.circle.ConcatEmbeddingsOptions = class ConcatEmbeddingsOptions { + + static decode(reader, position) { + const $ = new $root.circle.ConcatEmbeddingsOptions(); + $.num_channels = reader.int32_(position, 4, 0); + $.num_columns_per_channel = reader.typedArray(position, 6, Int32Array); + $.embedding_dim_per_channel = reader.typedArray(position, 8, Int32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.ConcatEmbeddingsOptions(); + $.num_channels = reader.value(json.num_channels, 0); + $.num_columns_per_channel = reader.typedArray(json.num_columns_per_channel, Int32Array); + $.embedding_dim_per_channel = reader.typedArray(json.embedding_dim_per_channel, Int32Array); + return $; + } +}; + +$root.circle.LSHProjectionType = { + UNKNOWN: 0, + SPARSE: 1, + DENSE: 2 +}; + +$root.circle.LSHProjectionOptions = class LSHProjectionOptions { + + static decode(reader, position) { + const $ = new $root.circle.LSHProjectionOptions(); + $.type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.LSHProjectionOptions(); + $.type = $root.circle.LSHProjectionType[json.type]; + return $; + } +}; + +$root.circle.SVDFOptions = class SVDFOptions { + + static decode(reader, position) { + const $ = new $root.circle.SVDFOptions(); + $.rank = reader.int32_(position, 4, 0); + $.fused_activation_function = reader.int8_(position, 6, 0); + $.asymmetric_quantize_inputs = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SVDFOptions(); + $.rank = reader.value(json.rank, 0); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + 
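// Editor's sketch (not part of the generated schema): every binary decoder
// follows the same contract -- read each field at a fixed vtable offset
// (4, 6, 8, ...) and fall back to the schema default when the field is
// absent. A hypothetical reader that reports every field as missing makes
// those defaults visible: Conv2D dilation factors default to 1 while the
// strides default to 0.
const absentFieldReader = {
    int8_: (position, offset, defaultValue) => defaultValue,
    int32_: (position, offset, defaultValue) => defaultValue
};
const sketchConv = $root.circle.Conv2DOptions.decode(absentFieldReader, 0);
// sketchConv.padding === 0 ($root.circle.Padding.SAME),
// sketchConv.stride_w === 0, sketchConv.dilation_w_factor === 1.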
+$root.circle.RNNOptions = class RNNOptions { + + static decode(reader, position) { + const $ = new $root.circle.RNNOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.asymmetric_quantize_inputs = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.RNNOptions(); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.circle.SequenceRNNOptions = class SequenceRNNOptions { + + static decode(reader, position) { + const $ = new $root.circle.SequenceRNNOptions(); + $.time_major = reader.bool_(position, 4, false); + $.fused_activation_function = reader.int8_(position, 6, 0); + $.asymmetric_quantize_inputs = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SequenceRNNOptions(); + $.time_major = reader.value(json.time_major, false); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.circle.BidirectionalSequenceRNNOptions = class BidirectionalSequenceRNNOptions { + + static decode(reader, position) { + const $ = new $root.circle.BidirectionalSequenceRNNOptions(); + $.time_major = reader.bool_(position, 4, false); + $.fused_activation_function = reader.int8_(position, 6, 0); + $.merge_outputs = reader.bool_(position, 8, false); + $.asymmetric_quantize_inputs = reader.bool_(position, 10, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.BidirectionalSequenceRNNOptions(); + $.time_major = reader.value(json.time_major, false); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.merge_outputs = reader.value(json.merge_outputs, false); + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.circle.FullyConnectedOptionsWeightsFormat = { + DEFAULT: 0, + SHUFFLED4x16INT8: 1, + SHUFFLED16x1FLOAT32: 127 +}; + +$root.circle.FullyConnectedOptions = class FullyConnectedOptions { + + static decode(reader, position) { + const $ = new $root.circle.FullyConnectedOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.weights_format = reader.int8_(position, 6, 0); + $.keep_num_dims = reader.bool_(position, 8, false); + $.asymmetric_quantize_inputs = reader.bool_(position, 10, false); + $.quantized_bias_type = reader.int8_(position, 12, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.FullyConnectedOptions(); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.weights_format = $root.circle.FullyConnectedOptionsWeightsFormat[json.weights_format]; + $.keep_num_dims = reader.value(json.keep_num_dims, false); + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + $.quantized_bias_type = $root.circle.TensorType[json.quantized_bias_type]; + return $; + } +}; + +$root.circle.SoftmaxOptions = class SoftmaxOptions { + + static decode(reader, position) { + const $ = new $root.circle.SoftmaxOptions(); + $.beta = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SoftmaxOptions(); + $.beta = 
reader.value(json.beta, 0); + return $; + } +}; + +$root.circle.ConcatenationOptions = class ConcatenationOptions { + + static decode(reader, position) { + const $ = new $root.circle.ConcatenationOptions(); + $.axis = reader.int32_(position, 4, 0); + $.fused_activation_function = reader.int8_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.ConcatenationOptions(); + $.axis = reader.value(json.axis, 0); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.circle.AddOptions = class AddOptions { + + static decode(reader, position) { + const $ = new $root.circle.AddOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.pot_scale_int16 = reader.bool_(position, 6, true); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.AddOptions(); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.pot_scale_int16 = reader.value(json.pot_scale_int16, true); + return $; + } +}; + +$root.circle.MulOptions = class MulOptions { + + static decode(reader, position) { + const $ = new $root.circle.MulOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.MulOptions(); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.circle.L2NormOptions = class L2NormOptions { + + static decode(reader, position) { + const $ = new $root.circle.L2NormOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.L2NormOptions(); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.circle.LocalResponseNormalizationOptions = class LocalResponseNormalizationOptions { + + static decode(reader, position) { + const $ = new $root.circle.LocalResponseNormalizationOptions(); + $.radius = reader.int32_(position, 4, 0); + $.bias = reader.float32_(position, 6, 0); + $.alpha = reader.float32_(position, 8, 0); + $.beta = reader.float32_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.LocalResponseNormalizationOptions(); + $.radius = reader.value(json.radius, 0); + $.bias = reader.value(json.bias, 0); + $.alpha = reader.value(json.alpha, 0); + $.beta = reader.value(json.beta, 0); + return $; + } +}; + +$root.circle.LSTMKernelType = { + FULL: 0, + BASIC: 1 +}; + +$root.circle.LSTMOptions = class LSTMOptions { + + static decode(reader, position) { + const $ = new $root.circle.LSTMOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.cell_clip = reader.float32_(position, 6, 0); + $.proj_clip = reader.float32_(position, 8, 0); + $.kernel_type = reader.int8_(position, 10, 0); + $.asymmetric_quantize_inputs = reader.bool_(position, 12, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.LSTMOptions(); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.cell_clip = reader.value(json.cell_clip, 0); + $.proj_clip = reader.value(json.proj_clip, 0); + $.kernel_type = $root.circle.LSTMKernelType[json.kernel_type]; + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + 
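// Editor's sketch (not part of the generated schema): tables that mix scalars
// and enums combine both text-decoding styles -- scalars go through
// reader.value with the schema default, enums through the enum objects. The
// one-method reader below is a hypothetical minimal implementation of that
// contract, applied to LSTMOptions.
const sketchValueReader = { value: (v, d) => (v === undefined ? d : v) };
const sketchLstm = $root.circle.LSTMOptions.decodeText(sketchValueReader, {
    fused_activation_function: 'TANH',
    cell_clip: 1.5,
    kernel_type: 'BASIC'
});
// sketchLstm.fused_activation_function === 4, sketchLstm.cell_clip === 1.5,
// sketchLstm.proj_clip === 0 (schema default), sketchLstm.kernel_type === 1.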
+$root.circle.UnidirectionalSequenceLSTMOptions = class UnidirectionalSequenceLSTMOptions { + + static decode(reader, position) { + const $ = new $root.circle.UnidirectionalSequenceLSTMOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.cell_clip = reader.float32_(position, 6, 0); + $.proj_clip = reader.float32_(position, 8, 0); + $.time_major = reader.bool_(position, 10, false); + $.asymmetric_quantize_inputs = reader.bool_(position, 12, false); + $.diagonal_recurrent_tensors = reader.bool_(position, 14, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.UnidirectionalSequenceLSTMOptions(); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.cell_clip = reader.value(json.cell_clip, 0); + $.proj_clip = reader.value(json.proj_clip, 0); + $.time_major = reader.value(json.time_major, false); + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + $.diagonal_recurrent_tensors = reader.value(json.diagonal_recurrent_tensors, false); + return $; + } +}; + +$root.circle.BidirectionalSequenceLSTMOptions = class BidirectionalSequenceLSTMOptions { + + static decode(reader, position) { + const $ = new $root.circle.BidirectionalSequenceLSTMOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.cell_clip = reader.float32_(position, 6, 0); + $.proj_clip = reader.float32_(position, 8, 0); + $.merge_outputs = reader.bool_(position, 10, false); + $.time_major = reader.bool_(position, 12, true); + $.asymmetric_quantize_inputs = reader.bool_(position, 14, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.BidirectionalSequenceLSTMOptions(); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.cell_clip = reader.value(json.cell_clip, 0); + $.proj_clip = reader.value(json.proj_clip, 0); + $.merge_outputs = reader.value(json.merge_outputs, false); + $.time_major = reader.value(json.time_major, true); + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.circle.ResizeBilinearOptions = class ResizeBilinearOptions { + + static decode(reader, position) { + const $ = new $root.circle.ResizeBilinearOptions(); + $.new_height = reader.int32_(position, 4, 0); + $.new_width = reader.int32_(position, 6, 0); + $.align_corners = reader.bool_(position, 8, false); + $.half_pixel_centers = reader.bool_(position, 10, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.ResizeBilinearOptions(); + $.new_height = reader.value(json.new_height, 0); + $.new_width = reader.value(json.new_width, 0); + $.align_corners = reader.value(json.align_corners, false); + $.half_pixel_centers = reader.value(json.half_pixel_centers, false); + return $; + } +}; + +$root.circle.ResizeNearestNeighborOptions = class ResizeNearestNeighborOptions { + + static decode(reader, position) { + const $ = new $root.circle.ResizeNearestNeighborOptions(); + $.align_corners = reader.bool_(position, 4, false); + $.half_pixel_centers = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.ResizeNearestNeighborOptions(); + $.align_corners = reader.value(json.align_corners, false); + $.half_pixel_centers = reader.value(json.half_pixel_centers, false); + return $; + } +}; + +$root.circle.CallOptions = class CallOptions { + + static decode(reader, 
position) { + const $ = new $root.circle.CallOptions(); + $.subgraph = reader.uint32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.CallOptions(); + $.subgraph = reader.value(json.subgraph, 0); + return $; + } +}; + +$root.circle.PadOptions = class PadOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.PadOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.PadOptions(); + return $; + } +}; + +$root.circle.PadV2Options = class PadV2Options { + + static decode(/* reader, position */) { + const $ = new $root.circle.PadV2Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.PadV2Options(); + return $; + } +}; + +$root.circle.ReshapeOptions = class ReshapeOptions { + + static decode(reader, position) { + const $ = new $root.circle.ReshapeOptions(); + $.new_shape = reader.typedArray(position, 4, Int32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.ReshapeOptions(); + $.new_shape = reader.typedArray(json.new_shape, Int32Array); + return $; + } +}; + +$root.circle.SpaceToBatchNDOptions = class SpaceToBatchNDOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.SpaceToBatchNDOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.SpaceToBatchNDOptions(); + return $; + } +}; + +$root.circle.BatchToSpaceNDOptions = class BatchToSpaceNDOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.BatchToSpaceNDOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.BatchToSpaceNDOptions(); + return $; + } +}; + +$root.circle.SkipGramOptions = class SkipGramOptions { + + static decode(reader, position) { + const $ = new $root.circle.SkipGramOptions(); + $.ngram_size = reader.int32_(position, 4, 0); + $.max_skip_size = reader.int32_(position, 6, 0); + $.include_all_ngrams = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SkipGramOptions(); + $.ngram_size = reader.value(json.ngram_size, 0); + $.max_skip_size = reader.value(json.max_skip_size, 0); + $.include_all_ngrams = reader.value(json.include_all_ngrams, false); + return $; + } +}; + +$root.circle.SpaceToDepthOptions = class SpaceToDepthOptions { + + static decode(reader, position) { + const $ = new $root.circle.SpaceToDepthOptions(); + $.block_size = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SpaceToDepthOptions(); + $.block_size = reader.value(json.block_size, 0); + return $; + } +}; + +$root.circle.DepthToSpaceOptions = class DepthToSpaceOptions { + + static decode(reader, position) { + const $ = new $root.circle.DepthToSpaceOptions(); + $.block_size = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.DepthToSpaceOptions(); + $.block_size = reader.value(json.block_size, 0); + return $; + } +}; + +$root.circle.SubOptions = class SubOptions { + + static decode(reader, position) { + const $ = new $root.circle.SubOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.pot_scale_int16 = reader.bool_(position, 6, true); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SubOptions(); + $.fused_activation_function = 
$root.circle.ActivationFunctionType[json.fused_activation_function]; + $.pot_scale_int16 = reader.value(json.pot_scale_int16, true); + return $; + } +}; + +$root.circle.DivOptions = class DivOptions { + + static decode(reader, position) { + const $ = new $root.circle.DivOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.DivOptions(); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.circle.TopKV2Options = class TopKV2Options { + + static decode(/* reader, position */) { + const $ = new $root.circle.TopKV2Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.TopKV2Options(); + return $; + } +}; + +$root.circle.CombinerType = { + SUM: 0, + MEAN: 1, + SQRTN: 2 +}; + +$root.circle.EmbeddingLookupSparseOptions = class EmbeddingLookupSparseOptions { + + static decode(reader, position) { + const $ = new $root.circle.EmbeddingLookupSparseOptions(); + $.combiner = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.EmbeddingLookupSparseOptions(); + $.combiner = $root.circle.CombinerType[json.combiner]; + return $; + } +}; + +$root.circle.GatherOptions = class GatherOptions { + + static decode(reader, position) { + const $ = new $root.circle.GatherOptions(); + $.axis = reader.int32_(position, 4, 0); + $.batch_dims = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.GatherOptions(); + $.axis = reader.value(json.axis, 0); + $.batch_dims = reader.value(json.batch_dims, 0); + return $; + } +}; + +$root.circle.TransposeOptions = class TransposeOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.TransposeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.TransposeOptions(); + return $; + } +}; + +$root.circle.ExpOptions = class ExpOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.ExpOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.ExpOptions(); + return $; + } +}; + +$root.circle.CosOptions = class CosOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.CosOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.CosOptions(); + return $; + } +}; + +$root.circle.ReducerOptions = class ReducerOptions { + + static decode(reader, position) { + const $ = new $root.circle.ReducerOptions(); + $.keep_dims = reader.bool_(position, 4, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.ReducerOptions(); + $.keep_dims = reader.value(json.keep_dims, false); + return $; + } +}; + +$root.circle.SqueezeOptions = class SqueezeOptions { + + static decode(reader, position) { + const $ = new $root.circle.SqueezeOptions(); + $.squeeze_dims = reader.typedArray(position, 4, Int32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SqueezeOptions(); + $.squeeze_dims = reader.typedArray(json.squeeze_dims, Int32Array); + return $; + } +}; + +$root.circle.SplitOptions = class SplitOptions { + + static decode(reader, position) { + const $ = new $root.circle.SplitOptions(); + $.num_splits = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + 
const $ = new $root.circle.SplitOptions(); + $.num_splits = reader.value(json.num_splits, 0); + return $; + } +}; + +$root.circle.SplitVOptions = class SplitVOptions { + + static decode(reader, position) { + const $ = new $root.circle.SplitVOptions(); + $.num_splits = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SplitVOptions(); + $.num_splits = reader.value(json.num_splits, 0); + return $; + } +}; + +$root.circle.StridedSliceOptions = class StridedSliceOptions { + + static decode(reader, position) { + const $ = new $root.circle.StridedSliceOptions(); + $.begin_mask = reader.int32_(position, 4, 0); + $.end_mask = reader.int32_(position, 6, 0); + $.ellipsis_mask = reader.int32_(position, 8, 0); + $.new_axis_mask = reader.int32_(position, 10, 0); + $.shrink_axis_mask = reader.int32_(position, 12, 0); + $.offset = reader.bool_(position, 14, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.StridedSliceOptions(); + $.begin_mask = reader.value(json.begin_mask, 0); + $.end_mask = reader.value(json.end_mask, 0); + $.ellipsis_mask = reader.value(json.ellipsis_mask, 0); + $.new_axis_mask = reader.value(json.new_axis_mask, 0); + $.shrink_axis_mask = reader.value(json.shrink_axis_mask, 0); + $.offset = reader.value(json.offset, false); + return $; + } +}; + +$root.circle.LogSoftmaxOptions = class LogSoftmaxOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.LogSoftmaxOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.LogSoftmaxOptions(); + return $; + } +}; + +$root.circle.CastOptions = class CastOptions { + + static decode(reader, position) { + const $ = new $root.circle.CastOptions(); + $.in_data_type = reader.int8_(position, 4, 0); + $.out_data_type = reader.int8_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.CastOptions(); + $.in_data_type = $root.circle.TensorType[json.in_data_type]; + $.out_data_type = $root.circle.TensorType[json.out_data_type]; + return $; + } +}; + +$root.circle.DequantizeOptions = class DequantizeOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.DequantizeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.DequantizeOptions(); + return $; + } +}; + +$root.circle.MaximumMinimumOptions = class MaximumMinimumOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.MaximumMinimumOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.MaximumMinimumOptions(); + return $; + } +}; + +$root.circle.TileOptions = class TileOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.TileOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.TileOptions(); + return $; + } +}; + +$root.circle.ArgMaxOptions = class ArgMaxOptions { + + static decode(reader, position) { + const $ = new $root.circle.ArgMaxOptions(); + $.output_type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.ArgMaxOptions(); + $.output_type = $root.circle.TensorType[json.output_type]; + return $; + } +}; + +$root.circle.ArgMinOptions = class ArgMinOptions { + + static decode(reader, position) { + const $ = new $root.circle.ArgMinOptions(); + $.output_type = reader.int8_(position, 4, 0); + return $; + 
} + + static decodeText(reader, json) { + const $ = new $root.circle.ArgMinOptions(); + $.output_type = $root.circle.TensorType[json.output_type]; + return $; + } +}; + +$root.circle.GreaterOptions = class GreaterOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.GreaterOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.GreaterOptions(); + return $; + } +}; + +$root.circle.GreaterEqualOptions = class GreaterEqualOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.GreaterEqualOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.GreaterEqualOptions(); + return $; + } +}; + +$root.circle.LessOptions = class LessOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.LessOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.LessOptions(); + return $; + } +}; + +$root.circle.LessEqualOptions = class LessEqualOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.LessEqualOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.LessEqualOptions(); + return $; + } +}; + +$root.circle.NegOptions = class NegOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.NegOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.NegOptions(); + return $; + } +}; + +$root.circle.SelectOptions = class SelectOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.SelectOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.SelectOptions(); + return $; + } +}; + +$root.circle.SliceOptions = class SliceOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.SliceOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.SliceOptions(); + return $; + } +}; + +$root.circle.TransposeConvOptions = class TransposeConvOptions { + + static decode(reader, position) { + const $ = new $root.circle.TransposeConvOptions(); + $.padding = reader.int8_(position, 4, 0); + $.stride_w = reader.int32_(position, 6, 0); + $.stride_h = reader.int32_(position, 8, 0); + $.fused_activation_function = reader.int8_(position, 10, 0); + $.quantized_bias_type = reader.int8_(position, 12, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.TransposeConvOptions(); + $.padding = $root.circle.Padding[json.padding]; + $.stride_w = reader.value(json.stride_w, 0); + $.stride_h = reader.value(json.stride_h, 0); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + $.quantized_bias_type = $root.circle.TensorType[json.quantized_bias_type]; + return $; + } +}; + +$root.circle.ExpandDimsOptions = class ExpandDimsOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.ExpandDimsOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.ExpandDimsOptions(); + return $; + } +}; + +$root.circle.SparseToDenseOptions = class SparseToDenseOptions { + + static decode(reader, position) { + const $ = new $root.circle.SparseToDenseOptions(); + $.validate_indices = reader.bool_(position, 4, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SparseToDenseOptions(); + 
$.validate_indices = reader.value(json.validate_indices, false); + return $; + } +}; + +$root.circle.EqualOptions = class EqualOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.EqualOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.EqualOptions(); + return $; + } +}; + +$root.circle.NotEqualOptions = class NotEqualOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.NotEqualOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.NotEqualOptions(); + return $; + } +}; + +$root.circle.ShapeOptions = class ShapeOptions { + + static decode(reader, position) { + const $ = new $root.circle.ShapeOptions(); + $.out_type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.ShapeOptions(); + $.out_type = $root.circle.TensorType[json.out_type]; + return $; + } +}; + +$root.circle.RankOptions = class RankOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.RankOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.RankOptions(); + return $; + } +}; + +$root.circle.PowOptions = class PowOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.PowOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.PowOptions(); + return $; + } +}; + +$root.circle.FakeQuantOptions = class FakeQuantOptions { + + static decode(reader, position) { + const $ = new $root.circle.FakeQuantOptions(); + $.min = reader.float32_(position, 4, 0); + $.max = reader.float32_(position, 6, 0); + $.num_bits = reader.int32_(position, 8, 0); + $.narrow_range = reader.bool_(position, 10, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.FakeQuantOptions(); + $.min = reader.value(json.min, 0); + $.max = reader.value(json.max, 0); + $.num_bits = reader.value(json.num_bits, 0); + $.narrow_range = reader.value(json.narrow_range, false); + return $; + } +}; + +$root.circle.PackOptions = class PackOptions { + + static decode(reader, position) { + const $ = new $root.circle.PackOptions(); + $.values_count = reader.int32_(position, 4, 0); + $.axis = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.PackOptions(); + $.values_count = reader.value(json.values_count, 0); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.circle.LogicalOrOptions = class LogicalOrOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.LogicalOrOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.LogicalOrOptions(); + return $; + } +}; + +$root.circle.OneHotOptions = class OneHotOptions { + + static decode(reader, position) { + const $ = new $root.circle.OneHotOptions(); + $.axis = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.OneHotOptions(); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.circle.AbsOptions = class AbsOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.AbsOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.AbsOptions(); + return $; + } +}; + +$root.circle.HardSwishOptions = class HardSwishOptions { + + static decode(/* reader, position 
*/) { + const $ = new $root.circle.HardSwishOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.HardSwishOptions(); + return $; + } +}; + +$root.circle.LogicalAndOptions = class LogicalAndOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.LogicalAndOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.LogicalAndOptions(); + return $; + } +}; + +$root.circle.LogicalNotOptions = class LogicalNotOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.LogicalNotOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.LogicalNotOptions(); + return $; + } +}; + +$root.circle.UnpackOptions = class UnpackOptions { + + static decode(reader, position) { + const $ = new $root.circle.UnpackOptions(); + $.num = reader.int32_(position, 4, 0); + $.axis = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.UnpackOptions(); + $.num = reader.value(json.num, 0); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.circle.FloorDivOptions = class FloorDivOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.FloorDivOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.FloorDivOptions(); + return $; + } +}; + +$root.circle.SquareOptions = class SquareOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.SquareOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.SquareOptions(); + return $; + } +}; + +$root.circle.ZerosLikeOptions = class ZerosLikeOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.ZerosLikeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.ZerosLikeOptions(); + return $; + } +}; + +$root.circle.FillOptions = class FillOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.FillOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.FillOptions(); + return $; + } +}; + +$root.circle.FloorModOptions = class FloorModOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.FloorModOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.FloorModOptions(); + return $; + } +}; + +$root.circle.RangeOptions = class RangeOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.RangeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.RangeOptions(); + return $; + } +}; + +$root.circle.LeakyReluOptions = class LeakyReluOptions { + + static decode(reader, position) { + const $ = new $root.circle.LeakyReluOptions(); + $.alpha = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.LeakyReluOptions(); + $.alpha = reader.value(json.alpha, 0); + return $; + } +}; + +$root.circle.SquaredDifferenceOptions = class SquaredDifferenceOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.SquaredDifferenceOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.SquaredDifferenceOptions(); + return $; + } +}; + +$root.circle.MirrorPadMode = { + REFLECT: 0, + SYMMETRIC: 1 +}; + 
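// A minimal usage sketch of how an enum table pairs with its options class:
// binary decode() keeps the raw int8 (0 = REFLECT, 1 = SYMMETRIC), while
// decodeText() resolves the JSON enum name through the table, roughly:
//
//   const options = $root.circle.MirrorPadOptions.decodeText(reader, { mode: 'SYMMETRIC' });
//   options.mode === $root.circle.MirrorPadMode.SYMMETRIC; // 1
//
// (reader here is assumed to be the flatbuffers TextReader that circle.js below
// passes into Model.createText.)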
+$root.circle.MirrorPadOptions = class MirrorPadOptions { + + static decode(reader, position) { + const $ = new $root.circle.MirrorPadOptions(); + $.mode = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.MirrorPadOptions(); + $.mode = $root.circle.MirrorPadMode[json.mode]; + return $; + } +}; + +$root.circle.UniqueOptions = class UniqueOptions { + + static decode(reader, position) { + const $ = new $root.circle.UniqueOptions(); + $.idx_out_type = reader.int8_(position, 4, 2); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.UniqueOptions(); + $.idx_out_type = $root.circle.TensorType[json.idx_out_type]; + return $; + } +}; + +$root.circle.ReverseV2Options = class ReverseV2Options { + + static decode(/* reader, position */) { + const $ = new $root.circle.ReverseV2Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.ReverseV2Options(); + return $; + } +}; + +$root.circle.AddNOptions = class AddNOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.AddNOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.AddNOptions(); + return $; + } +}; + +$root.circle.GatherNdOptions = class GatherNdOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.GatherNdOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.GatherNdOptions(); + return $; + } +}; + +$root.circle.WhereOptions = class WhereOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.WhereOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.WhereOptions(); + return $; + } +}; + +$root.circle.ReverseSequenceOptions = class ReverseSequenceOptions { + + static decode(reader, position) { + const $ = new $root.circle.ReverseSequenceOptions(); + $.seq_dim = reader.int32_(position, 4, 0); + $.batch_dim = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.ReverseSequenceOptions(); + $.seq_dim = reader.value(json.seq_dim, 0); + $.batch_dim = reader.value(json.batch_dim, 0); + return $; + } +}; + +$root.circle.MatrixDiagOptions = class MatrixDiagOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.MatrixDiagOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.MatrixDiagOptions(); + return $; + } +}; + +$root.circle.QuantizeOptions = class QuantizeOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.QuantizeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.QuantizeOptions(); + return $; + } +}; + +$root.circle.MatrixSetDiagOptions = class MatrixSetDiagOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.MatrixSetDiagOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.MatrixSetDiagOptions(); + return $; + } +}; + +$root.circle.IfOptions = class IfOptions { + + static decode(reader, position) { + const $ = new $root.circle.IfOptions(); + $.then_subgraph_index = reader.int32_(position, 4, 0); + $.else_subgraph_index = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.IfOptions(); + $.then_subgraph_index = 
reader.value(json.then_subgraph_index, 0); + $.else_subgraph_index = reader.value(json.else_subgraph_index, 0); + return $; + } +}; + +$root.circle.CallOnceOptions = class CallOnceOptions { + + static decode(reader, position) { + const $ = new $root.circle.CallOnceOptions(); + $.init_subgraph_index = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.CallOnceOptions(); + $.init_subgraph_index = reader.value(json.init_subgraph_index, 0); + return $; + } +}; + +$root.circle.WhileOptions = class WhileOptions { + + static decode(reader, position) { + const $ = new $root.circle.WhileOptions(); + $.cond_subgraph_index = reader.int32_(position, 4, 0); + $.body_subgraph_index = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.WhileOptions(); + $.cond_subgraph_index = reader.value(json.cond_subgraph_index, 0); + $.body_subgraph_index = reader.value(json.body_subgraph_index, 0); + return $; + } +}; + +$root.circle.NonMaxSuppressionV4Options = class NonMaxSuppressionV4Options { + + static decode(/* reader, position */) { + const $ = new $root.circle.NonMaxSuppressionV4Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.NonMaxSuppressionV4Options(); + return $; + } +}; + +$root.circle.NonMaxSuppressionV5Options = class NonMaxSuppressionV5Options { + + static decode(/* reader, position */) { + const $ = new $root.circle.NonMaxSuppressionV5Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.NonMaxSuppressionV5Options(); + return $; + } +}; + +$root.circle.ScatterNdOptions = class ScatterNdOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.ScatterNdOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.ScatterNdOptions(); + return $; + } +}; + +$root.circle.SelectV2Options = class SelectV2Options { + + static decode(/* reader, position */) { + const $ = new $root.circle.SelectV2Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.SelectV2Options(); + return $; + } +}; + +$root.circle.DensifyOptions = class DensifyOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.DensifyOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.DensifyOptions(); + return $; + } +}; + +$root.circle.SegmentSumOptions = class SegmentSumOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.SegmentSumOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.SegmentSumOptions(); + return $; + } +}; + +$root.circle.BatchMatMulOptions = class BatchMatMulOptions { + + static decode(reader, position) { + const $ = new $root.circle.BatchMatMulOptions(); + $.adjoint_lhs = reader.bool_(position, 4, false); + $.adjoint_rhs = reader.bool_(position, 6, false); + $.asymmetric_quantize_inputs = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.BatchMatMulOptions(); + $.adjoint_lhs = reader.value(json.adjoint_lhs, false); + $.adjoint_rhs = reader.value(json.adjoint_rhs, false); + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.circle.CumsumOptions = class CumsumOptions { + + static decode(reader, position) { + const $ = new 
$root.circle.CumsumOptions(); + $.exclusive = reader.bool_(position, 4, false); + $.reverse = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.CumsumOptions(); + $.exclusive = reader.value(json.exclusive, false); + $.reverse = reader.value(json.reverse, false); + return $; + } +}; + +$root.circle.BroadcastToOptions = class BroadcastToOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.BroadcastToOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.BroadcastToOptions(); + return $; + } +}; + +$root.circle.Rfft2dOptions = class Rfft2dOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.Rfft2dOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.Rfft2dOptions(); + return $; + } +}; + +$root.circle.HashtableOptions = class HashtableOptions { + + static decode(reader, position) { + const $ = new $root.circle.HashtableOptions(); + $.table_id = reader.int32_(position, 4, 0); + $.key_dtype = reader.int8_(position, 6, 0); + $.value_dtype = reader.int8_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.HashtableOptions(); + $.table_id = reader.value(json.table_id, 0); + $.key_dtype = $root.circle.TensorType[json.key_dtype]; + $.value_dtype = $root.circle.TensorType[json.value_dtype]; + return $; + } +}; + +$root.circle.HashtableFindOptions = class HashtableFindOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.HashtableFindOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.HashtableFindOptions(); + return $; + } +}; + +$root.circle.HashtableImportOptions = class HashtableImportOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.HashtableImportOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.HashtableImportOptions(); + return $; + } +}; + +$root.circle.HashtableSizeOptions = class HashtableSizeOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.HashtableSizeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.HashtableSizeOptions(); + return $; + } +}; + +$root.circle.VarHandleOptions = class VarHandleOptions { + + static decode(reader, position) { + const $ = new $root.circle.VarHandleOptions(); + $.container = reader.string_(position, 4, null); + $.shared_name = reader.string_(position, 6, null); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.VarHandleOptions(); + $.container = reader.value(json.container, null); + $.shared_name = reader.value(json.shared_name, null); + return $; + } +}; + +$root.circle.ReadVariableOptions = class ReadVariableOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.ReadVariableOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.ReadVariableOptions(); + return $; + } +}; + +$root.circle.AssignVariableOptions = class AssignVariableOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.AssignVariableOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.AssignVariableOptions(); + return $; + } +}; + +$root.circle.RandomOptions = class RandomOptions { + + static decode(reader, position) 
{ + const $ = new $root.circle.RandomOptions(); + $.seed = reader.int64_(position, 4, 0); + $.seed2 = reader.int64_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.RandomOptions(); + $.seed = reader.value(json.seed, 0); + $.seed2 = reader.value(json.seed2, 0); + return $; + } +}; + +$root.circle.BucketizeOptions = class BucketizeOptions { + + static decode(reader, position) { + const $ = new $root.circle.BucketizeOptions(); + $.boundaries = reader.typedArray(position, 4, Float32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.BucketizeOptions(); + $.boundaries = reader.typedArray(json.boundaries, Float32Array); + return $; + } +}; + +$root.circle.GeluOptions = class GeluOptions { + + static decode(reader, position) { + const $ = new $root.circle.GeluOptions(); + $.approximate = reader.bool_(position, 4, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.GeluOptions(); + $.approximate = reader.value(json.approximate, false); + return $; + } +}; + +$root.circle.DynamicUpdateSliceOptions = class DynamicUpdateSliceOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.DynamicUpdateSliceOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.DynamicUpdateSliceOptions(); + return $; + } +}; + +$root.circle.UnsortedSegmentProdOptions = class UnsortedSegmentProdOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.UnsortedSegmentProdOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.UnsortedSegmentProdOptions(); + return $; + } +}; + +$root.circle.UnsortedSegmentMaxOptions = class UnsortedSegmentMaxOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.UnsortedSegmentMaxOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.UnsortedSegmentMaxOptions(); + return $; + } +}; + +$root.circle.UnsortedSegmentSumOptions = class UnsortedSegmentSumOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.UnsortedSegmentSumOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.UnsortedSegmentSumOptions(); + return $; + } +}; + +$root.circle.ATan2Options = class ATan2Options { + + static decode(/* reader, position */) { + const $ = new $root.circle.ATan2Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.ATan2Options(); + return $; + } +}; + +$root.circle.UnsortedSegmentMinOptions = class UnsortedSegmentMinOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.UnsortedSegmentMinOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.UnsortedSegmentMinOptions(); + return $; + } +}; + +$root.circle.SignOptions = class SignOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.SignOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.SignOptions(); + return $; + } +}; + +$root.circle.BitcastOptions = class BitcastOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.BitcastOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.BitcastOptions(); + return $; + } +}; + +$root.circle.BitwiseXorOptions = class BitwiseXorOptions { + + 
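// Zero-field option tables such as this one still get decode()/decodeText() stubs
// so that the BuiltinOptions union dispatch in Operator.decode below can treat
// every variant uniformly, without special-casing empty tables.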
static decode(/* reader, position */) { + const $ = new $root.circle.BitwiseXorOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.BitwiseXorOptions(); + return $; + } +}; + +$root.circle.RightShiftOptions = class RightShiftOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.RightShiftOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.RightShiftOptions(); + return $; + } +}; + +$root.circle.DilateOptions = class DilateOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.DilateOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.DilateOptions(); + return $; + } +}; + +$root.circle.ReduceWindowFunction = { + UNSUPPORTED: 0, + ADD: 1, + MUL: 2, + MINIMUM: 3, + MAXIMUM: 4, + ALL: 5, + ANY: 6 +}; + +$root.circle.ReduceWindowOptions = class ReduceWindowOptions { + + static decode(reader, position) { + const $ = new $root.circle.ReduceWindowOptions(); + $.reduce_function = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.ReduceWindowOptions(); + $.reduce_function = $root.circle.ReduceWindowFunction[json.reduce_function]; + return $; + } +}; + +$root.circle.BCQGatherOptions = class BCQGatherOptions { + + static decode(reader, position) { + const $ = new $root.circle.BCQGatherOptions(); + $.input_hidden_size = reader.int32_(position, 4, 0); + $.axis = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.BCQGatherOptions(); + $.input_hidden_size = reader.value(json.input_hidden_size, 0); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.circle.BCQFullyConnectedOptions = class BCQFullyConnectedOptions { + + static decode(reader, position) { + const $ = new $root.circle.BCQFullyConnectedOptions(); + $.weights_hidden_size = reader.int32_(position, 4, 0); + $.fused_activation_function = reader.int8_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.BCQFullyConnectedOptions(); + $.weights_hidden_size = reader.value(json.weights_hidden_size, 0); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.circle.InstanceNormOptions = class InstanceNormOptions { + + static decode(reader, position) { + const $ = new $root.circle.InstanceNormOptions(); + $.epsilon = reader.float32_(position, 4, 0); + $.fused_activation_function = reader.int8_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.InstanceNormOptions(); + $.epsilon = reader.value(json.epsilon, 0); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.circle.OperatorCode = class OperatorCode { + + static decode(reader, position) { + const $ = new $root.circle.OperatorCode(); + $.deprecated_builtin_code = reader.int8_(position, 4, 0); + $.custom_code = reader.string_(position, 6, null); + $.version = reader.int32_(position, 8, 1); + $.builtin_code = reader.int32_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.OperatorCode(); + $.deprecated_builtin_code = reader.value(json.deprecated_builtin_code, 0); + $.custom_code = reader.value(json.custom_code, null); + $.version = reader.value(json.version, 1); + 
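// Per the TFLite/Circle schema convention, builtin_code (int32) supersedes the
// int8 deprecated_builtin_code above, which could not represent operator codes
// beyond 127; in the text path the JSON enum name, e.g. 'CONV_2D', resolves to
// its numeric value below.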
$.builtin_code = $root.circle.BuiltinOperator[json.builtin_code]; + return $; + } +}; + +$root.circle.CustomOptionsFormat = { + FLEXBUFFERS: 0 +}; + +$root.circle.DataFormat = { + CHANNELS_LAST: 0, + CHANNELS_FIRST: 1 +}; + +$root.circle.Operator = class Operator { + + static decode(reader, position) { + const $ = new $root.circle.Operator(); + $.opcode_index = reader.uint32_(position, 4, 0); + $.inputs = reader.typedArray(position, 6, Int32Array); + $.outputs = reader.typedArray(position, 8, Int32Array); + $.builtin_options = reader.union(position, 10, $root.circle.BuiltinOptions.decode); + $.custom_options = reader.typedArray(position, 14, Uint8Array); + $.custom_options_format = reader.int8_(position, 16, 0); + $.mutating_variable_inputs = reader.bools_(position, 18); + $.intermediates = reader.typedArray(position, 20, Int32Array); + $.large_custom_options_offset = reader.uint64_(position, 22, 0); + $.large_custom_options_size = reader.uint64_(position, 24, 0); + $.builtin_options_2 = reader.union(position, 26, $root.circle.BuiltinOptions2.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.Operator(); + $.opcode_index = reader.value(json.opcode_index, 0); + $.inputs = reader.typedArray(json.inputs, Int32Array); + $.outputs = reader.typedArray(json.outputs, Int32Array); + $.builtin_options = $root.circle.BuiltinOptions.decodeText(reader, json.builtin_options, json.builtin_options_type); + $.custom_options = reader.typedArray(json.custom_options, Uint8Array); + $.custom_options_format = $root.circle.CustomOptionsFormat[json.custom_options_format]; + $.mutating_variable_inputs = reader.array(json.mutating_variable_inputs); + $.intermediates = reader.typedArray(json.intermediates, Int32Array); + $.large_custom_options_offset = reader.value(json.large_custom_options_offset, 0); + $.large_custom_options_size = reader.value(json.large_custom_options_size, 0); + $.builtin_options_2 = $root.circle.BuiltinOptions2.decodeText(reader, json.builtin_options_2, json.builtin_options_2_type); + return $; + } +}; + +$root.circle.SubGraph = class SubGraph { + + static decode(reader, position) { + const $ = new $root.circle.SubGraph(); + $.tensors = reader.tableArray(position, 4, $root.circle.Tensor.decode); + $.inputs = reader.typedArray(position, 6, Int32Array); + $.outputs = reader.typedArray(position, 8, Int32Array); + $.operators = reader.tableArray(position, 10, $root.circle.Operator.decode); + $.name = reader.string_(position, 12, null); + $.deprecated_data_format = reader.int8_(position, 14, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SubGraph(); + $.tensors = reader.objectArray(json.tensors, $root.circle.Tensor.decodeText); + $.inputs = reader.typedArray(json.inputs, Int32Array); + $.outputs = reader.typedArray(json.outputs, Int32Array); + $.operators = reader.objectArray(json.operators, $root.circle.Operator.decodeText); + $.name = reader.value(json.name, null); + $.deprecated_data_format = $root.circle.DataFormat[json.deprecated_data_format]; + return $; + } +}; + +$root.circle.Buffer = class Buffer { + + static decode(reader, position) { + const $ = new $root.circle.Buffer(); + $.data = reader.typedArray(position, 4, Uint8Array); + $.offset = reader.uint64_(position, 6, 0); + $.size = reader.uint64_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.Buffer(); + $.data = reader.typedArray(json.data, Uint8Array); + $.offset = reader.value(json.offset, 0); + $.size = 
reader.value(json.size, 0); + return $; + } +}; + +$root.circle.Metadata = class Metadata { + + static decode(reader, position) { + const $ = new $root.circle.Metadata(); + $.name = reader.string_(position, 4, null); + $.buffer = reader.uint32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.Metadata(); + $.name = reader.value(json.name, null); + $.buffer = reader.value(json.buffer, 0); + return $; + } +}; + +$root.circle.TensorMap = class TensorMap { + + static decode(reader, position) { + const $ = new $root.circle.TensorMap(); + $.name = reader.string_(position, 4, null); + $.tensor_index = reader.uint32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.TensorMap(); + $.name = reader.value(json.name, null); + $.tensor_index = reader.value(json.tensor_index, 0); + return $; + } +}; + +$root.circle.SignatureDef = class SignatureDef { + + static decode(reader, position) { + const $ = new $root.circle.SignatureDef(); + $.inputs = reader.tableArray(position, 4, $root.circle.TensorMap.decode); + $.outputs = reader.tableArray(position, 6, $root.circle.TensorMap.decode); + $.signature_key = reader.string_(position, 8, null); + $.deprecated_tag = reader.string_(position, 10, null); + $.subgraph_index = reader.uint32_(position, 12, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.SignatureDef(); + $.inputs = reader.objectArray(json.inputs, $root.circle.TensorMap.decodeText); + $.outputs = reader.objectArray(json.outputs, $root.circle.TensorMap.decodeText); + $.signature_key = reader.value(json.signature_key, null); + $.deprecated_tag = reader.value(json.deprecated_tag, null); + $.subgraph_index = reader.value(json.subgraph_index, 0); + return $; + } +}; + +$root.circle.Model = class Model { + + static identifier(reader) { + return reader.identifier === 'CIR0'; + } + + static create(reader) { + return $root.circle.Model.decode(reader, reader.root); + } + + static createText(reader) { + return $root.circle.Model.decodeText(reader, reader.root); + } + + static decode(reader, position) { + const $ = new $root.circle.Model(); + $.version = reader.uint32_(position, 4, 0); + $.operator_codes = reader.tableArray(position, 6, $root.circle.OperatorCode.decode); + $.subgraphs = reader.tableArray(position, 8, $root.circle.SubGraph.decode); + $.description = reader.string_(position, 10, null); + $.buffers = reader.tableArray(position, 12, $root.circle.Buffer.decode); + $.metadata_buffer = reader.typedArray(position, 14, Int32Array); + $.metadata = reader.tableArray(position, 16, $root.circle.Metadata.decode); + $.signature_defs = reader.tableArray(position, 18, $root.circle.SignatureDef.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.Model(); + $.version = reader.value(json.version, 0); + $.operator_codes = reader.objectArray(json.operator_codes, $root.circle.OperatorCode.decodeText); + $.subgraphs = reader.objectArray(json.subgraphs, $root.circle.SubGraph.decodeText); + $.description = reader.value(json.description, null); + $.buffers = reader.objectArray(json.buffers, $root.circle.Buffer.decodeText); + $.metadata_buffer = reader.typedArray(json.metadata_buffer, Int32Array); + $.metadata = reader.objectArray(json.metadata, $root.circle.Metadata.decodeText); + $.signature_defs = reader.objectArray(json.signature_defs, $root.circle.SignatureDef.decodeText); + return $; + } +}; diff --git a/circle.js b/circle.js new file mode 
100644 index 00000000000..23c326cef04 --- /dev/null +++ b/circle.js @@ -0,0 +1,629 @@ + +import * as flatbuffers from '../source/flatbuffers.js'; +import * as flexbuffers from '../source/flexbuffers.js'; +import * as zip from '../source/zip.js'; + +const circle = {}; + +circle.ModelFactory = class { + + match(context) { + const tags = context.tags('flatbuffers'); + if (tags.get('file_identifier') === 'CIR0') { + return 'circle.flatbuffers'; + } + const obj = context.peek('json'); + if (obj && obj.subgraphs && obj.operator_codes) { + return 'circle.flatbuffers.json'; + } + return undefined; + } + + async open(context, target) { + await context.require('./circle-schema'); + circle.schema = flatbuffers.get('circle').circle; + let model = null; + const attachments = new Map(); + switch (target) { + case 'circle.flatbuffers.json': { + try { + const obj = context.peek('json'); + const reader = new flatbuffers.TextReader(obj); + model = circle.schema.Model.createText(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new circle.Error(`File text format is not circle.Model (${message.replace(/\.$/, '')}).`); + } + break; + } + case 'circle.flatbuffers': { + const stream = context.stream; + try { + const reader = flatbuffers.BinaryReader.open(stream); + model = circle.schema.Model.create(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new circle.Error(`File format is not circle.Model (${message.replace(/\.$/, '')}).`); + } + try { + const archive = zip.Archive.open(stream); + if (archive) { + for (const [name, value] of archive.entries) { + attachments.set(name, value); + } + } + } catch (error) { + // continue regardless of error + } + break; + } + default: { + throw new circle.Error(`Unsupported Circle format '${target}'.`); + } + } + const metadata = await context.metadata('circle-metadata.json'); + return new circle.Model(metadata, model); + } +}; + +circle.Model = class { + + constructor(metadata, model) { + this._graphs = []; + this._format = 'Circle'; + this._format = `${this._format} v${model.version}`; + this._description = model.description || ''; + this._metadata = new Map(); + const builtinOperators = new Map(); + const upperCase = new Set([ '2D', 'LSH', 'SVDF', 'RNN', 'L2', 'LSTM' ]); + for (const key of Object.keys(circle.schema.BuiltinOperator)) { + const value = key === 'BATCH_MATMUL' ? 'BATCH_MAT_MUL' : key; + const name = value.split('_').map((s) => (s.length < 1 || upperCase.has(s)) ? s : s[0] + s.substring(1).toLowerCase()).join(''); + const index = circle.schema.BuiltinOperator[key]; + builtinOperators.set(index, name); + } + const operators = model.operator_codes.map((operator) => { + const code = operator.builtin_code || 0; + const version = operator.version; + const custom = code === circle.schema.BuiltinOperator.CUSTOM; + const name = custom ? operator.custom_code ? operator.custom_code : 'Custom' : builtinOperators.has(code) ? builtinOperators.get(code) : code.toString(); + return custom ? 
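// (the split/capitalize mapping above turns enum keys into type names, e.g.
// 'DEPTHWISE_CONV_2D' -> 'DepthwiseConv2D' and, via the BATCH_MAT_MUL rewrite,
// 'BATCH_MATMUL' -> 'BatchMatMul')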
{ name: name, version: version, custom: true } : { name: name, version: version }; + }); + let modelMetadata = null; + for (const metadata of model.metadata) { + const buffer = model.buffers[metadata.buffer]; + if (buffer) { + switch (metadata.name) { + case 'min_runtime_version': { + const data = buffer.data || new Uint8Array(0); + this._runtime = new TextDecoder().decode(data); + break; + } + case 'TFLITE_METADATA': { + const data = buffer.data || new Uint8Array(0); + const reader = flatbuffers.BinaryReader.open(data); + if (circle.schema.ModelMetadata.identifier(reader)) { + modelMetadata = circle.schema.ModelMetadata.create(reader); + if (modelMetadata.name) { + this._name = modelMetadata.name; + } + if (modelMetadata.version) { + this._version = modelMetadata.version; + } + if (modelMetadata.description) { + this._description = this._description ? [ this._description, modelMetadata.description].join(' ') : modelMetadata.description; + } + if (modelMetadata.author) { + this._metadata.set('author', modelMetadata.author); + } + if (modelMetadata.license) { + this._metadata.set('license', modelMetadata.license); + } + } + break; + } + default: { + break; + } + } + } + } + const subgraphs = model.subgraphs; + const subgraphsMetadata = modelMetadata ? modelMetadata.subgraph_metadata : null; + for (let i = 0; i < subgraphs.length; i++) { + const subgraph = subgraphs[i]; + const name = subgraphs.length > 1 ? i.toString() : ''; + const subgraphMetadata = subgraphsMetadata && i < subgraphsMetadata.length ? subgraphsMetadata[i] : null; + this._graphs.push(new circle.Graph(metadata, subgraph, subgraphMetadata, name, operators, model)); + } + } + + get format() { + return this._format; + } + + get runtime() { + return this._runtime; + } + + get name() { + return this._name; + } + + get version() { + return this._version; + } + + get description() { + return this._description; + } + + get metadata() { + return this._metadata; + } + + get graphs() { + return this._graphs; + } +}; + +circle.Graph = class { + + constructor(metadata, subgraph, subgraphMetadata, name, operators, model) { + this._nodes = []; + this._inputs = []; + this._outputs = []; + this._name = subgraph.name || name; + const tensors = new Map(); + const args = (index) => { + if (index === -1) { + return null; + } + if (!tensors.has(index)) { + if (index < subgraph.tensors.length) { + const tensor = subgraph.tensors[index]; + const buffer = model.buffers[tensor.buffer]; + const is_variable = tensor.is_variable; + const data = buffer ? buffer.data : null; + const initializer = (data && data.length > 0) || is_variable ? new circle.Tensor(index, tensor, buffer, is_variable) : null; + tensors.set(index, new circle.Value(index, tensor, initializer)); + } else { + tensors.set(index, new circle.Value(index, { name: '' }, null)); + } + } + return tensors.get(index); + }; + for (let i = 0; i < subgraph.operators.length; i++) { + const node = subgraph.operators[i]; + const index = node.opcode_index; + const operator = index < operators.length ? 
operators[index] : { name: `(${index})` }; + this._nodes.push(new circle.Node(metadata, node, operator, i.toString(), args)); + } + const applyTensorMetadata = (argument, tensorMetadata) => { + if (tensorMetadata) { + const description = tensorMetadata.description; + if (description) { + argument.description = description; + } + const content = tensorMetadata.content; + if (argument.type && content) { + let denotation = null; + const contentProperties = content.content_properties; + if (contentProperties instanceof circle.schema.FeatureProperties) { + denotation = 'Feature'; + } else if (contentProperties instanceof circle.schema.ImageProperties) { + denotation = 'Image'; + switch (contentProperties.color_space) { + case 0: denotation += '(Unknown)'; break; + case 1: denotation += '(RGB)'; break; + case 2: denotation += '(Grayscale)'; break; + default: throw new circle.Error(`Unsupported image color space '${contentProperties.color_space}'.`); + } + } else if (contentProperties instanceof circle.schema.BoundingBoxProperties) { + denotation = 'BoundingBox'; + } else if (contentProperties instanceof circle.schema.AudioProperties) { + denotation = `Audio(${contentProperties.sample_rate},${contentProperties.channels})`; + } + if (denotation) { + argument.type.denotation = denotation; + } + } + } + }; + const inputs = subgraph.inputs; + for (let i = 0; i < inputs.length; i++) { + const input = inputs[i]; + const value = args(input); + if (subgraphMetadata && i < subgraphMetadata.input_tensor_metadata.length) { + applyTensorMetadata(value, subgraphMetadata.input_tensor_metadata[i]); + } + this._inputs.push(new circle.Argument(value ? value.name : '?', true, value ? [ value ] : [])); + } + const outputs = subgraph.outputs; + for (let i = 0; i < outputs.length; i++) { + const output = outputs[i]; + const value = args(output); + if (subgraphMetadata && i < subgraphMetadata.output_tensor_metadata.length) { + applyTensorMetadata(value, subgraphMetadata.output_tensor_metadata[i]); + } + this._outputs.push(new circle.Argument(value ? value.name : '?', true, value ? [ value ] : [])); + } + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +circle.Node = class Node { + + constructor(metadata, node, type, location, args) { + this._location = location; + this._type = type.custom ? { name: type.name, category: 'custom' } : metadata.type(type.name); + this._inputs = []; + this._outputs = []; + this._attributes = []; + if (node) { + let inputs = []; + let outputs = []; + inputs = Array.from(node.inputs || new Int32Array(0)); + outputs = Array.from(node.outputs || new Int32Array(0)); + let inputIndex = 0; + while (inputIndex < inputs.length) { + let count = 1; + let inputName = null; + let inputVisible = true; + const inputArguments = []; + if (this._type && this._type.inputs && inputIndex < this._type.inputs.length) { + const input = this._type.inputs[inputIndex]; + inputName = input.name; + if (input.option == 'variadic') { + count = inputs.length - inputIndex; + } + if (input && input.visible === false) { + inputVisible = false; + } + } + const inputArray = inputs.slice(inputIndex, inputIndex + count); + for (const index of inputArray) { + const value = args(index); + if (value) { + inputArguments.push(value); + } + } + inputIndex += count; + inputName = inputName ?
inputName : inputIndex.toString(); + this._inputs.push(new circle.Argument(inputName, inputVisible, inputArguments)); + } + for (let k = 0; k < outputs.length; k++) { + const index = outputs[k]; + const outputArguments = []; + const value = args(index); + if (value) { + outputArguments.push(value); + } + let outputName = k.toString(); + if (this._type && this._type.outputs && k < this._type.outputs.length) { + const output = this._type.outputs[k]; + if (output && output.name) { + outputName = output.name; + } + } + this._outputs.push(new circle.Argument(outputName, true, outputArguments)); + } + if (type.custom && node.custom_options.length > 0) { + let decoded = false; + if (node.custom_options_format === circle.schema.CustomOptionsFormat.FLEXBUFFERS) { + try { + const reader = flexbuffers.BinaryReader.open(node.custom_options); + if (reader) { + const custom_options = reader.read(); + if (Array.isArray(custom_options)) { + const attribute = new circle.Attribute(null, 'custom_options', custom_options); + this._attributes.push(attribute); + decoded = true; + } else if (custom_options) { + for (const [key, value] of Object.entries(custom_options)) { + const schema = metadata.attribute(type.name, key); + const attribute = new circle.Attribute(schema, key, value); + this._attributes.push(attribute); + } + decoded = true; + } + } + } catch (err) { + // continue regardless of error + } + } + if (!decoded) { + const schema = metadata.attribute(type.name, 'custom'); + this._attributes.push(new circle.Attribute(schema, 'custom', Array.from(node.custom_options))); + } + } + const options = node.builtin_options; + if (options) { + for (const [name, value] of Object.entries(options)) { + if (name === 'fused_activation_function' && value !== 0) { + const activationFunctionMap = { 1: 'Relu', 2: 'ReluN1To1', 3: 'Relu6', 4: 'Tanh', 5: 'SignBit' }; + if (!activationFunctionMap[value]) { + throw new circle.Error(`Unsupported activation function index '${JSON.stringify(value)}'.`); + } + const type = activationFunctionMap[value]; + this._chain = [ new circle.Node(metadata, null, { name: type }, null, []) ]; + } + const schema = metadata.attribute(type.name, name); + this._attributes.push(new circle.Attribute(schema, name, value)); + } + } + } + } + + get type() { + return this._type; + } + + get name() { + return ''; + } + + get location() { + return this._location; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get chain() { + return this._chain; + } + + get attributes() { + return this._attributes; + } +}; + +circle.Attribute = class { + + constructor(metadata, name, value) { + this._name = name; + this._value = ArrayBuffer.isView(value) ? Array.from(value) : value; + this._type = metadata && metadata.type ? metadata.type : null; + if (this._name === 'fused_activation_function') { + this._visible = false; + } + if (this._type) { + this._value = circle.Utility.enum(this._type, this._value); + } + if (metadata) { + if (metadata.visible === false) { + this._visible = false; + } else if (metadata.default !== undefined) { + value = this._value; + if (typeof value === 'function') { + value = value(); + } + if (value === metadata.default) { + this._visible = false; + } + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ?
false : true; + } +}; + +circle.Argument = class { + + constructor(name, visible, value) { + this._name = name; + this._visible = visible; + this._value = value; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get value() { + return this._value; + } +}; + +circle.Value = class { + + constructor(index, tensor, initializer) { + const name = tensor.name || ''; + this.name = `${name}\n${index}`; + this.location = index.toString(); + this.type = tensor.type !== undefined && tensor.shape !== undefined ? new circle.TensorType(tensor) : null; + this.initializer = initializer; + const quantization = tensor.quantization; + if (quantization && (quantization.scale.length > 0 || quantization.zero_point.length > 0 || quantization.min.length > 0 || quantization.max.length)) { + this.quantization = { + type: 'linear', + dimension: quantization.quantized_dimension, + scale: quantization.scale, + offset: quantization.zero_point.map((value) => value.toNumber()), + min: quantization.min, + max: quantization.max + }; + } + } +}; + +circle.Tensor = class { + + constructor(index, tensor, buffer, is_variable) { + this._location = index.toString(); + this._type = new circle.TensorType(tensor); + this._is_variable = is_variable; + this._name = tensor.name; + this._data = buffer.data.slice(0); + } + + get category() { + return this._is_variable ? 'Variable' : ''; + } + + get name() { + return this._name; + } + + get location() { + return this._location; + } + + get type() { + return this._type; + } + + get encoding() { + switch (this._type.dataType) { + case 'string': return '|'; + default: return '<'; + } + } + + get values() { + switch (this._type.dataType) { + case 'string': { + let offset = 0; + const data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + const count = data.getInt32(0, true); + offset += 4; + const offsetTable = []; + for (let j = 0; j < count; j++) { + offsetTable.push(data.getInt32(offset, true)); + offset += 4; + } + offsetTable.push(this._data.length); + const stringTable = []; + const utf8Decoder = new TextDecoder('utf-8'); + for (let k = 0; k < count; k++) { + const textArray = this._data.subarray(offsetTable[k], offsetTable[k + 1]); + stringTable.push(utf8Decoder.decode(textArray)); + } + return stringTable; + } + default: return this._data; + } + } +}; + +circle.TensorType = class { + + constructor(tensor) { + this._dataType = circle.Utility.dataType(tensor.type); + this._shape = new circle.TensorShape(Array.from(tensor.shape || [])); + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + set denotation(value) { + this._denotation = value; + } + + get denotation() { + return this._denotation; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +circle.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (!this._dimensions || this._dimensions.length == 0) { + return ''; + } + return `[${this._dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } +}; + +circle.Utility = class { + + static dataType(type) { + if (!circle.Utility._tensorTypeMap) { + circle.Utility._tensorTypeMap = new Map(Object.entries(circle.schema.TensorType).map(([key, value]) => [ value, key.toLowerCase() ])); + circle.Utility._tensorTypeMap.set(6, 'boolean'); + } + return circle.Utility._tensorTypeMap.has(type) ? 
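// (the map inverts circle.schema.TensorType -- FLOAT32 = 0 becomes 'float32' --
// and entry 6 is overridden so TensorType.BOOL reads as 'boolean' rather than 'bool')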
circle.Utility._tensorTypeMap.get(type) : '?'; + } + + static enum(name, value) { + const type = name && circle.schema ? circle.schema[name] : undefined; + if (type) { + circle.Utility._enums = circle.Utility._enums || new Map(); + if (!circle.Utility._enums.has(name)) { + const entries = new Map(Object.entries(type).map(([key, value]) => [ value, key ])); + circle.Utility._enums.set(name, entries); + } + const map = circle.Utility._enums.get(name); + if (map.has(value)) { + return map.get(value); + } + } + return value; + } +}; + +circle.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Circle model.'; + } +}; + +export const ModelFactory = circle.ModelFactory; diff --git a/cntk-metadata.json b/cntk-metadata.json new file mode 100644 index 00000000000..44e012f79a7 --- /dev/null +++ b/cntk-metadata.json @@ -0,0 +1,795 @@ +[ + { + "name": "Abs", + "identifier": 8 + }, + { + "name": "Acos", + "identifier": 82 + }, + { + "name": "Asin", + "identifier": 81 + }, + { + "name": "Asinh", + "identifier": 86 + }, + { + "name": "Assign", + "identifier": 73 + }, + { + "name": "Atan", + "identifier": 96 + }, + { + "name": "Atanh", + "identifier": 85 + }, + { + "name": "AveragePooling", + "category": "Pool", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "BatchNormalization", + "identifier": 40, + "category": "Normalization", + "attributes": [ + { "name": "disableRegularization", "default": false }, + { "name": "useCuDNNEngine", "visible": false }, + { "name": "useCntkEngine", "visible": false }, + { "name": "runCountUntied", "visible": false }, + { "name": "epsilon", "default": 0.00001 }, + { "name": "normalizationTimeConstant", "default": 0 }, + { "name": "blendTimeConstant", "default": 0 }, + { "name": "imageLayoutKind", "type": "ImageLayoutKind", "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "scale" }, + { "name": "bias" }, + { "name": "mean" }, + { "name": "variance" }, + { "name": "count" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Block", + "identifier": 57 + }, + { + "name": "Cast", + "identifier": 91 + }, + { + "name": "ClassificationError", + "identifier": 36, + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Clip", + "identifier": 41 + }, + { + "name": "Combine", + "identifier": 44, + "category": "Tensor", + "inputs": [ + { "name": "inputs", "type": "Tensor[]" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ConstantOp", + "identifier": 89 + }, + { + "name": "Convolution", + "identifier": 33, + "category": "Layer", + "attributes": [ + { "name": "transpose", "default": false }, + { "name": "maxTempMemSizeInSamples", "default": 0 }, + { "name": "dilation", "default": [ 1, null ] }, + { "name": "outputShape", "default": 0 }, + { "name": "sharing", "default": [ true, null ] }, + { "name": "strides", "default": [ 1, null ] }, + { "name": "includePad", "default": false }, + { "name": "ceilOutDim", "default": false }, + { "name": "autoPadding", "default": [ true, null ] }, + { "name": "lowerPad", "default": [ 0, null ] }, + { "name": "upperPad", "default": [ 0, null ] }, + { "name": "convolution2D", "visible": false }, + { "name": "poolKind", "type": "PoolKind", "default": "None" }, + { "name": "imageLayoutKind", "type": "ImageLayoutKind", "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "W" }, + { "name": "b" } + ], + "outputs":
[ + { "name": "output" } + ] + }, + { + "name": "ConvolutionSequenceShape", + "identifier": 97 + }, + { + "name": "Cos", + "identifier": 55 + }, + { + "name": "CosDistance", + "identifier": 53 + }, + { + "name": "CosDistanceWithNegativeSamples", + "identifier": 67 + }, + { + "name": "Cosh", + "identifier": 78 + }, + { + "name": "Crop", + "identifier": 84, + "category": "Data" + }, + { + "name": "CrossEntropyWithSoftmax", + "identifier": 35, + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "CustomProxyOp", + "identifier": 93 + }, + { + "name": "Dropout", + "identifier": 15, + "category": "Dropout", + "attributes": [ + { "name": "rngSeed", "visible": false }, + { "name": "rngOffset", "visible": false } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "EditDistanceError", + "identifier": 61 + }, + { + "name": "ElementTimes", + "identifier": 21, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "ELU", + "identifier": 65 + }, + { + "name": "Equal", + "identifier": 22, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "Exp", + "identifier": 4, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "EyeLikeOp", + "identifier": 92 + }, + { + "name": "Floor", + "identifier": 7, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ForwardBackward", + "identifier": 66 + }, + { + "name": "FutureValue", + "identifier": 38, + "attributes": [ + { "name": "offset", "type": "uint32", "default": 1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "initialState" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Gather", + "identifier": 74, + "category": "Transform" + }, + { + "name": "GatherPacked", + "identifier": 29, + "inputs": [ + { "name": "index" }, + { "name": "source" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Greater", + "identifier": 26, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "GreaterEqual", + "identifier": 27, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "Hardmax", + "identifier": 11, + "category": "Activation" + }, + { + "name": "InvStdDev", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LabelsToGraph", + "identifier": 63 + }, + { + "name": "LambdaRank", + "identifier": 59 + }, + { + "name": "LatticeSequenceWithSoftmax", + "identifier": 90 + }, + { + "name": "Less", + "identifier": 24, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "LessEqual", + "identifier": 25, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "Log", + "identifier": 5, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Logistic", + "identifier": 48 + }, + { + "name": "LogPlus", + "identifier": 52 + }, + { + "name": "LogSoftmax", + "identifier": 51 + }, + { + "name": "MaxPooling", + "category": "Pool", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Mean", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Minus", + 
"identifier": 20, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "NDCG", + "identifier": 60 + }, + { + "name": "Negate", + "identifier": 0, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "NoOp", + "identifier": 62, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "NotEqual", + "identifier": 23, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "OneHot", + "identifier": 68 + }, + { + "name": "OptimizedRNNStack", + "identifier": 49 + }, + { + "name": "PackedIndex", + "identifier": 28, + "inputs": [ + { "name": "source" }, + { "name": "index" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Pad", + "identifier": 83, + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Pass", + "identifier": 56 + }, + { + "name": "PastValue", + "identifier": 37, + "attributes": [ + { "name": "offset", "type": "uint32", "default": 1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "initialState" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Plus", + "identifier": 19, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "Pooling", + "identifier": 17, + "category": "Pool", + "attributes": [ + { "name": "transpose", "default": false }, + { "name": "includePad", "default": false }, + { "name": "ceilOutDim", "default": false }, + { "name": "autoPadding", "default": [ false, null ] }, + { "name": "sharing", "default": [ true, null ] }, + { "name": "strides", "default": [ 1, null ] }, + { "name": "lowerPad", "default": [ 0, null ] }, + { "name": "upperPad", "default": [ 0, null ] }, + { "name": "outputShape", "default": 0 }, + { "name": "maxTempMemSizeInSamples", "default": 0 }, + { "name": "poolingType", "type": "PoolingType", "default": "Max" }, + { "name": "poolKind", "type": "PoolKind", "default": "None" }, + { "name": "imageLayoutKind", "type": "ImageLayoutKind", "visible": false } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Pow", + "identifier": 69 + }, + { + "name": "RandomDistribution", + "identifier": 76 + }, + { + "name": "RandomSample", + "identifier": 45 + }, + { + "name": "RandomSampleInclusionFrequency", + "identifier": 46 + }, + { + "name": "Reciprocal", + "identifier": 9, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ReconcileDynamicAxis", + "identifier": 50 + }, + { + "name": "RectifiedLinear", + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ReduceElements", + "identifier": 39, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ReLU", + "identifier": 3, + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Reshape", + "identifier": 16, + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ROIPooling", + "identifier": 47, + "category": "Pool", + "attributes": [ + { "name": "spatialScale", "default": 0.0625 }, + { "name": "poolKind", "type": "PoolKind", "default": "None" } + ], + "inputs": [ + { 
"name": "inputs" }, + { "name": "ROIs" } + ], + "outputs": [ + { "name": "outputs" } + ] + }, + { + "name": "ScatterPacked", + "identifier": 30 + }, + { + "name": "Select", + "identifier": 42 + }, + { + "name": "Sigmoid", + "identifier": 1, + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Sin", + "identifier": 54 + }, + { + "name": "Sinh", + "identifier": 77 + }, + { + "name": "Slice", + "identifier": 14, + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "begin" }, + { "name": "end" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Softmax", + "identifier": 10, + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Splice", + "identifier": 43, + "category": "Tensor", + "inputs": [ + { "name": "inputs", "type": "Tensor[]" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Sqrt", + "identifier": 6, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SquaredError", + "identifier": 34 + }, + { + "name": "Squeeze", + "identifier": 88, + "category": "Transform" + }, + { + "name": "StableSigmoid", + "identifier": 75, + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "StopGradient", + "identifier": 64 + }, + { + "name": "StraightThrough", + "identifier": 94 + }, + { + "name": "SumAll", + "identifier": 18 + }, + { + "name": "Tan", + "identifier": 95 + }, + { + "name": "Tanh", + "identifier": 2, + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Times", + "identifier": 31, + "attributes": [ + { "name": "outputRank", "default": 1 }, + { "name": "inferInputRankToMap", "visible": false, "default": -1 } + ], + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "ToBatch", + "identifier": 80 + }, + { + "name": "TopK", + "identifier": 87, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ToSequence", + "identifier": 70 + }, + { + "name": "ToSequenceLike", + "identifier": 71 + }, + { + "name": "TransposeAxes", + "identifier": 12, + "category": "Activation" + }, + { + "name": "TransposeDimensions", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "TransposeTimes", + "identifier": 32 + }, + { + "name": "UnpackBatch", + "identifier": 79 + }, + { + "name": "UnpackSequence", + "identifier": 72 + }, + { + "name": "Unpooling", + "identifier": 58 + }, + { + "name": "Where", + "identifier": 13, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } +] \ No newline at end of file diff --git a/cntk-proto.js b/cntk-proto.js new file mode 100644 index 00000000000..88f404bb4df --- /dev/null +++ b/cntk-proto.js @@ -0,0 +1,362 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('cntk'); + +$root.CNTK = {}; + +$root.CNTK.proto = {}; + +$root.CNTK.proto.NDShape = class NDShape { + + constructor() { + this.shape_dim = []; + } + + static decode(reader, length) { + const message = new $root.CNTK.proto.NDShape(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape_dim = reader.array(message.shape_dim, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.CNTK.proto.Axis = class Axis { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CNTK.proto.Axis(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.static_axis_idx = reader.int32(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.is_ordered_dynamic_axis = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.CNTK.proto.Axis.prototype.static_axis_idx = 0; +$root.CNTK.proto.Axis.prototype.name = ""; +$root.CNTK.proto.Axis.prototype.is_ordered_dynamic_axis = false; + +$root.CNTK.proto.NDArrayView = class NDArrayView { + + constructor() { + } + + get values() { + $root.CNTK.proto.NDArrayView.valuesSet = $root.CNTK.proto.NDArrayView.valuesSet || new Set([ "float_values", "double_values", "bytes_value", "sint32_values"]); + return Object.keys(this).find((key) => $root.CNTK.proto.NDArrayView.valuesSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CNTK.proto.NDArrayView(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.data_type = reader.int32(); + break; + case 2: + message.storage_format = reader.int32(); + break; + case 3: + message.shape = $root.CNTK.proto.NDShape.decode(reader, reader.uint32()); + break; + case 4: + message.float_values = $root.CNTK.proto.NDArrayView.FloatValues.decode(reader, reader.uint32()); + break; + case 5: + message.double_values = $root.CNTK.proto.NDArrayView.DoubleValues.decode(reader, reader.uint32()); + break; + case 6: + message.bytes_value = $root.CNTK.proto.NDArrayView.BytesValue.decode(reader, reader.uint32()); + break; + case 7: + message.sint32_values = $root.CNTK.proto.NDArrayView.IntValues.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.CNTK.proto.NDArrayView.prototype.data_type = 0; +$root.CNTK.proto.NDArrayView.prototype.storage_format = 0; +$root.CNTK.proto.NDArrayView.prototype.shape = null; + +$root.CNTK.proto.NDArrayView.DataType = { + "Unknown": 0, + "Float": 1, + "Double": 2, + "Float16": 4, + "Int8": 5, + "Int16": 6 +}; + +$root.CNTK.proto.NDArrayView.StorageFormat = { + "Dense": 0, + "SparseCSC": 1, + "SparseBlockCol": 2 +}; + +$root.CNTK.proto.NDArrayView.FloatValues = class FloatValues { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.CNTK.proto.NDArrayView.FloatValues(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.floats(message.value, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.CNTK.proto.NDArrayView.DoubleValues = class DoubleValues { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.CNTK.proto.NDArrayView.DoubleValues(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.doubles(message.value, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.CNTK.proto.NDArrayView.BytesValue = class BytesValue { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CNTK.proto.NDArrayView.BytesValue(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.CNTK.proto.NDArrayView.BytesValue.prototype.value = new Uint8Array([]); + +$root.CNTK.proto.NDArrayView.IntValues = class IntValues { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.CNTK.proto.NDArrayView.IntValues(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.array(message.value, () => reader.sint32(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.CNTK.proto.Vector = class Vector { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.CNTK.proto.Vector(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value.push($root.CNTK.proto.DictionaryValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.CNTK.proto.Dictionary = class Dictionary { + + constructor() { + this.data = {}; + } + + static decode(reader, length) { + const message = new $root.CNTK.proto.Dictionary(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.uint64(); + break; + case 2: + reader.entry(message.data, () => reader.string(), () => $root.CNTK.proto.DictionaryValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.CNTK.proto.Dictionary.prototype.version = protobuf.Uint64.create(0); + +$root.CNTK.proto.DictionaryValue = class DictionaryValue { + + constructor() { + } + + get value() { + $root.CNTK.proto.DictionaryValue.valueSet = $root.CNTK.proto.DictionaryValue.valueSet || new Set([ "bool_value", "int_value", "size_t_value", "float_value", "double_value", "string_value", "nd_shape_value", "axis_value", "vector_value", "dictionary_value", "nd_array_view_value"]); + return Object.keys(this).find((key) => $root.CNTK.proto.DictionaryValue.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CNTK.proto.DictionaryValue(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.uint64(); + break; + case 2: + message.value_type = reader.int32(); + break; + case 3: + message.bool_value = reader.bool(); + break; + case 4: + message.int_value = reader.int32(); + break; + case 5: + message.size_t_value = reader.uint64(); + break; + case 6: + message.float_value = reader.float(); + break; + case 7: + message.double_value = reader.double(); + break; + case 8: + message.string_value = reader.string(); + break; + case 9: + message.nd_shape_value = $root.CNTK.proto.NDShape.decode(reader, reader.uint32()); + break; + case 10: + message.axis_value = $root.CNTK.proto.Axis.decode(reader, reader.uint32()); + break; + case 11: + message.vector_value = $root.CNTK.proto.Vector.decode(reader, reader.uint32()); + break; + case 12: + message.dictionary_value = $root.CNTK.proto.Dictionary.decode(reader, reader.uint32()); + break; + case 13: + message.nd_array_view_value = $root.CNTK.proto.NDArrayView.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.CNTK.proto.DictionaryValue.prototype.version = protobuf.Uint64.create(0); +$root.CNTK.proto.DictionaryValue.prototype.value_type = 0; + +$root.CNTK.proto.DictionaryValue.Type = { + "None": 0, + "Bool": 1, + "Int": 2, + "SizeT": 3, + "Float": 4, + "Double": 5, + "String": 6, + "NDShape": 7, + "Axis": 8, + "Vector": 9, + "Dictionary": 10, + "NDArrayView": 11 +}; diff --git a/cntk.js b/cntk.js new file mode 100644 index 00000000000..cce94c7f0ed --- /dev/null +++ b/cntk.js @@ -0,0 +1,1074 @@ + +import * as base from './base.js'; +import * as protobuf from './protobuf.js'; + +const cntk = {}; + +cntk.ModelFactory = class { + + match(context) { + const stream = context.stream; + // CNTK v1 + const signature = [ 0x42, 0x00, 0x43, 0x00, 0x4e, 0x00, 0x00, 0x00 ]; + if (stream && signature.length <= stream.length && stream.peek(signature.length).every((value, index) => value === signature[index])) { + return { name: 'cntk.v1', value: stream }; + } + // CNTK v2 + const tags = context.tags('pb'); + if (tags.get(1) === 0 && tags.get(2) === 2) { + return { name: 'cntk.v2', value: stream }; + } + return undefined; + } + + async open(context, target) { + const metadata = await 
context.metadata('cntk-metadata.json'); + switch (target.name) { + case 'cntk.v1': { + let obj = null; + try { + const stream = target.value; + const buffer = stream.peek(); + obj = new cntk.ComputationNetwork(buffer); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new cntk.Error(`File format is not CNTK v1 (${message.replace(/\.$/, '')}).`); + } + return new cntk.Model(metadata, 1, obj); + } + case 'cntk.v2': { + await context.require('./cntk-proto'); + let obj = null; + try { + cntk.proto = protobuf.get('cntk').CNTK.proto; + cntk.proto.PoolingType = { 0: 'Max', 1: 'Average' }; + const stream = target.value; + const reader = protobuf.BinaryReader.open(stream); + const dictionary = cntk.proto.Dictionary.decode(reader); + obj = cntk.ModelFactory._convertDictionary(dictionary); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new cntk.Error(`File format is not cntk.Dictionary (${message.replace(/\.$/, '')}).`); + } + return new cntk.Model(metadata, 2, obj); + } + default: { + throw new cntk.Error(`Unsupported CNTK format '${target.name}'.`); + } + } + } + + static _convertDictionary(dictionary) { + const target = {}; + for (const key of Object.keys(dictionary.data).filter((key) => key != 'version')) { + target[key] = cntk.ModelFactory._convertDictionaryValue(dictionary.data[key]); + } + return target; + } + + static _convertDictionaryValue(dictionaryValue) { + switch (dictionaryValue.value_type) { + case cntk.proto.DictionaryValue.Type.Bool: + return dictionaryValue.bool_value; + case cntk.proto.DictionaryValue.Type.Int: + return dictionaryValue.int_value; + case cntk.proto.DictionaryValue.Type.SizeT: + return dictionaryValue.size_t_value; + case cntk.proto.DictionaryValue.Type.Float: + return dictionaryValue.float_value; + case cntk.proto.DictionaryValue.Type.Double: + return dictionaryValue.double_value; + case cntk.proto.DictionaryValue.Type.String: + return dictionaryValue.string_value; + case cntk.proto.DictionaryValue.Type.Vector: + return cntk.ModelFactory._convertVectorValue(dictionaryValue.vector_value); + case cntk.proto.DictionaryValue.Type.NDShape: + return dictionaryValue.nd_shape_value; + case cntk.proto.DictionaryValue.Type.Axis: + return dictionaryValue.axis_value; + case cntk.proto.DictionaryValue.Type.Dictionary: + return cntk.ModelFactory._convertDictionary(dictionaryValue.dictionary_value); + case cntk.proto.DictionaryValue.Type.NDArrayView: + return dictionaryValue.nd_array_view_value; + default: + throw new cntk.Error(`Unsupported dictionary value type '${dictionaryValue.value_type}'.`); + } + } + + static _convertVectorValue(vectorValue) { + return vectorValue.value.map((item) => { + return cntk.ModelFactory._convertDictionaryValue(item); + }); + } +}; + +cntk.Model = class { + + constructor(metadata, version, obj) { + switch (version) { + case 1: + this.format = `CNTK v1${obj.version ?
(`.${obj.version}`) : ''}`; + break; + case 2: + this.format = 'CNTK v2'; + break; + default: + throw new cntk.Error(`Unsupported CNTK version '${version}'.`); + } + this.graphs = [ new cntk.Graph(metadata, version, obj) ]; + } +}; + +cntk.Graph = class { + + constructor(metadata, version, obj) { + metadata = new cntk.GraphMetadata(metadata); + this.inputs = []; + this.outputs = []; + this.nodes = []; + const values = new Map(); + values.map = (name, version, obj) => { + if (obj && values.has(name)) { + throw new cntk.Error(`Duplicate value '${name}'.`); + } + if (!values.has(name)) { + switch (version) { + case 1: + values.set(name, new cntk.Value(version, obj ? obj : { name: name })); + break; + case 2: + values.set(name, new cntk.Value(version, obj ? obj : { uid: name })); + break; + default: + throw new cntk.Error(`Unsupported CNTK version '${version}'.`); + } + } + return values.get(name); + }; + switch (version) { + case 1: { + for (const name of Object.keys(obj.nodes)) { + const node = obj.nodes[name]; + switch (node.__type__) { + case 'InputValue': { + const argument = new cntk.Argument(node.name, [ values.map(node.name, version, node) ]); + this.inputs.push(argument); + break; + } + case 'LearnableParameter': { + values.map(node.name, version, node); + break; + } + default: + break; + } + } + for (const name of Object.keys(obj.nodes)) { + const node = obj.nodes[name]; + if (node.__type__ != 'InputValue' && node.__type__ != 'LearnableParameter') { + this.nodes.push(new cntk.Node(metadata, version, node, values)); + } + } + if (obj.output) { + for (const output of obj.output) { + const argument = new cntk.Argument(output, [ values.map(output, version) ]); + this.outputs.push(argument); + } + } + break; + } + case 2: { + const map = new Map(obj.primitive_functions.map((node) => [ node.uid, node ])); + for (const input of obj.inputs) { + const value = values.map(input.uid, version, input); + // VariableKind { 0: 'input', 1: 'output', 2: 'parameter', 3: 'constant', 4: 'placeholder' } + if (input.kind == 0) { + const inputName = input.name || input.uid; + this.inputs.push(new cntk.Argument(inputName, [ value ])); + } + } + for (const block of obj.primitive_functions) { + if (block.op == 57 && block.block_function_composite) { + const list = [ block.block_function_composite.root ]; + const output = map.get(block.block_function_composite.root); + const keys = block.block_function_composite_arguments_map_keys; + const args = block.block_function_composite_arguments_map_values; + block.inputs = args; + if (!Array.isArray(keys) || !Array.isArray(args) || keys.length !== args.length) { + throw new cntk.Error('Invalid block function composite arguments.'); + } + const inputs = keys.map((key) => new cntk.Argument(key, [ values.map(key, version) ])); + const outputs = [ new cntk.Argument('output', [ values.map(`${output.uid}_Output_0`, version) ]) ]; + const nodes = []; + while (list.length > 0) { + const name = list.shift(); + if (map.has(name)) { + const node = map.get(name); + nodes.push(new cntk.Node(metadata, version, node, values)); + map.delete(name); + for (let i = 0; i < node.inputs.length; i++) { + const parts = node.inputs[i].split('_'); + if (parts.length >= 3) { + parts.pop(); + if (parts.pop() == 'Output') { + list.push(parts.join('_')); + } + } + } + } + } + const func = new cntk.Function(block.block_function_op_name, nodes, inputs, outputs); + metadata.add(block.uid, func); + } + } + for (const node of map.values()) { + this.nodes.push(new cntk.Node(metadata, version, node, 
values)); + } + break; + } + default: { + throw new cntk.Error(`Unsupported graph version '${version}'.`); + } + } + } +}; + +cntk.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +cntk.Value = class { + + constructor(version, obj) { + switch (version) { + case 1: + switch (obj.__type__) { + case 'InputValue': + this.name = obj.name; + this.type = new cntk.TensorType(version, obj.precision, obj.sampleLayout); + this.initializer = null; + break; + case 'LearnableParameter': + this.name = obj.name; + this.initializer = new cntk.Tensor(version, obj); + this.type = this.initializer.type; + break; + default: + this.name = obj.name; + this.type = null; + this.initializer = null; + break; + } + break; + case 2: + if (obj.value) { + this.name = obj.name || obj.uid; + this.type = null; + this.initializer = new cntk.Tensor(version, obj); + } else { + this.name = obj.uid; + if (obj.data_type && obj.shape) { + this.type = new cntk.TensorType(version, obj.data_type, obj.shape); + } + this.initializer = null; + } + break; + default: + throw new cntk.Error(`Unsupported CNTK version '${version}'.`); + } + } +}; + +cntk.Node = class { + + constructor(metadata, version, obj, values) { + this.attributes = []; + this.inputs = []; + this.outputs = []; + let inputs = []; + let outputs = []; + switch (version) { + case 1: { + const type = obj.__type__; + this.type = metadata.type(type) || { name: type }; + this.name = obj.name; + for (const [name, value] of Object.entries(obj)) { + if (name != '__type__' && name != 'name' && name != 'inputs' && name != 'precision') { + const attribute = new cntk.Attribute(metadata.attribute(type, name), name, value); + this.attributes.push(attribute); + } + } + inputs = obj.inputs.map((input) => values.map(input, version)); + outputs = [ values.map(this.name, version) ]; + break; + } + case 2: { + this.name = obj.name || obj.uid || null; + const output = obj.uid; + if (obj.op == 57) { + this.type = metadata.type(obj.uid) || { name: obj.uid }; + } else if (Object.prototype.hasOwnProperty.call(obj, 'op')) { + // cntk/Source/CNTKv2LibraryDll/API/Internals/PrimitiveOpType.h + this.type = metadata.type(obj.op.toNumber()); + } else { + const type = obj.type; + this.type = metadata.type(type) || { name: type }; + if (obj.user_defined_state) { + for (const [name, value] of Object.entries(obj.user_defined_state)) { + const attribute = new cntk.Attribute(metadata.attribute(type, name), name, value); + this.attributes.push(attribute); + } + } + } + if (obj.attributes) { + for (const [name, value] of Object.entries(obj.attributes)) { + const attribute = new cntk.Attribute(metadata.attribute(this.type, name), name, value); + this.attributes.push(attribute); + } + } + inputs = obj.inputs.map((input) => values.map(input, version)); + outputs.push(values.map(`${output}_Output_0`, version)); + break; + } + default: { + throw new cntk.Error(`Unsupported CNTK version '${version}'.`); + } + } + let inputIndex = 0; + if (this.type && this.type.inputs) { + for (const inputSchema of this.type.inputs) { + if (inputIndex < inputs.length || inputSchema.option != 'optional') { + const inputCount = inputSchema.type === 'Tensor[]' ? 
(inputs.length - inputIndex) : 1; + const inputArguments = []; + for (const inputArgument of inputs.slice(inputIndex, inputIndex + inputCount)) { + if (inputArgument.name != '' || inputSchema.option != 'optional') { + inputArguments.push(inputArgument); + } + } + this.inputs.push(new cntk.Argument(inputSchema.name, inputArguments)); + inputIndex += inputCount; + } + } + } + this.inputs.push(...inputs.slice(inputIndex).map((argument, index) => { + return new cntk.Argument((inputIndex + index).toString(), [ argument ]); + })); + + let outputIndex = 0; + if (this.type && this.type.outputs) { + for (const outputSchema of this.type.outputs) { + if (outputIndex < outputs.length || !outputSchema.optional) { + const outputCount = outputSchema.type === 'Tensor[]' ? (outputs.length - outputIndex) : 1; + this.outputs.push(new cntk.Argument(outputSchema.name, outputs.slice(outputIndex, outputIndex + outputCount))); + outputIndex += outputCount; + } + } + } + this.outputs.push(...outputs.slice(outputIndex).map((argument) => { + return new cntk.Argument(outputIndex.toString(), [ argument ]); + })); + } +}; + +cntk.Attribute = class { + + constructor(metadata, name, value) { + this.name = name; + this.value = value; + this.type = null; + if (this.value && this.value.__type__ === 'shape') { + this.value = new cntk.TensorShape(1, value); + this.type = 'shape'; + } + if (cntk.proto && this.value instanceof cntk.proto.NDShape) { + this.value = new cntk.TensorShape(2, value); + this.type = 'shape'; + } + if (cntk.proto && this.value instanceof cntk.proto.Axis) { + const axis = { __type__: 'Axis' }; + for (const key of Object.keys(value).filter((key) => key !== 'name')) { + axis[key] = value[key]; + } + this.value = axis; + } + if (metadata) { + if (metadata.type) { + this.type = metadata.type; + const type = cntk[this.type] || cntk.proto[this.type]; + if (type && type[this.value]) { + this.value = type[this.value]; + } + } + if (metadata.visible === false) { + this.visible = false; + } else if (Object.prototype.hasOwnProperty.call(metadata, 'default')) { + let defaultValue = metadata.default; + value = this.value; + if (typeof value == 'function') { + value = value(); + } + if (this.type == 'shape') { + value = value.dimensions; + } + if (value == defaultValue) { + this.visible = false; + } else if (Array.isArray(value) && Array.isArray(defaultValue)) { + defaultValue = defaultValue.slice(0, defaultValue.length); + if (defaultValue.length > 1 && defaultValue[defaultValue.length - 1] == null) { + defaultValue.pop(); + while (defaultValue.length < value.length) { + defaultValue.push(defaultValue[defaultValue.length - 1]); + } + } + if (value.every((item, index) => item == defaultValue[index])) { + this.visible = false; + } + } + } + } + } +}; + +cntk.Tensor = class { + + constructor(version, tensor) { + this.encoding = '|'; + this.values = null; + switch (version) { + case 1: { + if (tensor.__type__ == 'LearnableParameter') { + this.name = tensor.name || null; + this.type = new cntk.TensorType(version, tensor.precision, tensor.sampleLayout); + } + break; + } + case 2: { + this.name = tensor.name || tensor.uid || null; + this.type = new cntk.TensorType(version, tensor.data_type, tensor.shape); + const value = tensor.value; + if (this.type.dataType === 'float32' && value && value.float_values && value.float_values.value && value.float_values.value.length > 0) { + this.values = value.float_values.value; + } + break; + } + default: + throw new cntk.Error(`Unsupported CNTK version '${version}'.`); + } + } +}; + 
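+// Note: cntk.Attribute above hides an attribute when its value matches the
+// metadata default. Array defaults that end in null (for example "strides":
+// [ 1, null ] in cntk-metadata.json) are expanded by repeating the last
+// concrete value to the length of the actual value, so strides of [ 1, 1, 1 ]
+// still compare equal to the default and the attribute stays hidden.
+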
+cntk.TensorType = class { + + constructor(version, dataType, shape) { + this.dataType = '?'; + switch (version) { + case 1: + switch (dataType) { + case 'float': this.dataType = 'float32'; break; + case 'double': this.dataType = 'float64'; break; + case 'half': this.dataType = 'float16'; break; + case '': this.dataType = 'float32'; break; + default: throw new cntk.Error(`Unsupported tensor data type '${dataType}'.`); + } + this.shape = new cntk.TensorShape(version, shape); + break; + case 2: + dataType = dataType.toNumber(); + switch (dataType) { + case 1: this.dataType = 'float32'; break; + default: throw new cntk.Error(`Unsupported tensor data type '${dataType}'.`); + } + this.shape = new cntk.TensorShape(version, shape); + break; + default: + throw new cntk.Error(`Unsupported CNTK version '${version}'.`); + } + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +cntk.TensorShape = class { + + constructor(version, shape) { + switch (version) { + case 1: + this.dimensions = shape.dims; + break; + case 2: + this.dimensions = shape.shape_dim.map((dimension) => dimension.toNumber()); + break; + default: + throw new cntk.Error(`Unsupported CNTK version '${version}'.`); + } + } + + toString() { + return (this.dimensions && this.dimensions.length) ? (`[${this.dimensions.join(',')}]`) : ''; + } +}; + +cntk.Function = class { + + constructor(name, nodes, inputs, outputs) { + this.type = 'function'; + this.name = name; + this.inputs = inputs; + this.outputs = outputs; + this.nodes = nodes; + switch (this.name) { + case 'PReLU': + case 'Softmax': + this.category = 'Activation'; + break; + case 'Dropout': + this.category = 'Dropout'; + break; + case 'Convolution': + case 'ConvolutionTranspose': + case 'Dense': + case 'linear': + case 'LSTM': + this.category = 'Layer'; + break; + case 'BatchNormalization': + case 'lrn': + this.category = 'Normalization'; + break; + case 'AveragePooling': + case 'MaxPooling': + this.category = 'Pool'; + break; + default: + this.category = null; + break; + } + } +}; + +cntk.GraphMetadata = class { + + constructor(metadata) { + this._metadata = metadata; + this._functions = new Map(); + this._attributes = new Map(); + } + + add(name, func) { + if (this._functions.has(name)) { + throw new cntk.Error(`Duplicate function identifier '${func.name}'.`); + } + this._functions.set(name, func); + } + + name(code) { + // cntk/Source/CNTKv2LibraryDll/API/Internals/PrimitiveOpType.h + return this._metadata.name(code); + } + + type(name) { + if (this._functions.has(name)) { + return this._functions.get(name); + } + return this._metadata.type(name); + } + + attribute(type, name) { + const key = `${type}:${name}`; + if (!this._attributes.has(key)) { + const metadata = this.type(type); + if (metadata && metadata.attributes && metadata.attributes.length > 0) { + for (const attribute of metadata.attributes) { + this._attributes.set(`${type}:${attribute.name}`, attribute); + } + } + if (!this._attributes.has(key)) { + this._attributes.set(key, null); + } + } + return this._attributes.get(key); + } +}; + +cntk.ComputationNetwork = class { + + constructor(buffer) { + const reader = new base.BinaryReader(buffer); + reader.match = function(text) { + const position = this.position; + for (let i = 0; i < text.length; i++) { + if (this.uint16() != text.charCodeAt(i)) { + this.seek(position); + return false; + } + } + if (this.uint16() != 0) { + this.seek(position); + return false; + } + return true; + }; + reader.assert = function(text) { + if (!this.match(text)) { + 
throw new cntk.Error(`Invalid '${text}' signature.`); + } + }; + reader.string = function() { + const content = []; + let c = this.uint16(); + while (c != 0) { + content.push(String.fromCharCode(c)); + c = this.uint16(); + } + return content.join(''); + }; + reader.strings = function() { + const count = this.uint64(); + const array = new Array(count); + for (let i = 0; i < count; i++) { + array[i] = this.string(); + } + return array; + }; + reader.booleans = function() { + const count = this.uint64(); + const array = new Array(count); + for (let i = 0; i < count; i++) { + array[i] = this.boolean(); + } + return array; + }; + reader.matrix = function () { + const type = this.byte(); + switch (type) { + case 100: { + // dense: only the header is retained, the element payload is skipped + this.assert('BMAT'); + const elsize = this.uint64(); + const value = {}; + value.name = this.string(); + value.format = this.uint32(); + value.rows = this.uint64(); + value.columns = this.uint64(); + this.read(elsize * value.rows * value.columns); + this.assert('EMAT'); + return value; + } + case 115: // sparse + throw new cntk.Error('Matrix sparse type not implemented.'); + default: + throw new cntk.Error(`Matrix type '${type}' not implemented.`); + } + }; + reader.shape = function(acceptLegacyFormat) { + const dims = []; + const rank = this.uint32(); + let dim0 = 0; + if (rank > 0) { + dim0 = this.uint32(); + } + if (!acceptLegacyFormat || dim0 !== 0) { + if (rank > 0) { + dims.push(dim0); + } + for (let i = 1; i < rank; i++) { + dims.push(this.uint32()); + } + } else { + const dim = this.uint32(); + dims.push(this.uint32()); + dims.push(rank); + dims.push(dim); + } + return { __type__: 'shape', dims: dims }; + }; + const shape = (dims) => { + return { __type__: 'shape', dims: dims }; + }; + // CNTK v1 files wrap each section in begin/end markers ('BCN'..'ECN', 'BVersion'..'EVersion', ...) stored as zero-terminated UTF-16 strings. + reader.assert('BCN'); + reader.assert('BVersion'); + this.version = reader.uint64(); + reader.assert('EVersion'); + const numNodes = reader.uint64(); + reader.assert('BNodeList'); + const op = {}; + op.Minus = function() {}; + op.Plus = function() {}; + op.GreaterEqual = function() {}; + op.Equal = function() {}; + op.NotEqual = function() {}; + op.Exp = function() {}; + op.Log = function() {}; + op.Reciprocal = function() {}; + op.ElementTimes = function() {}; + op.ClassificationError = function() {}; + op.RectifiedLinear = function() {}; + op.InputValue = function(reader, version) { + this.rows = reader.uint64(); + this.cols = reader.uint64(); + this.sampleLayout = reader.shape(true); + this.dynamicAxisNodeName = ''; + if (version >= 8) { + const nrAxes = reader.uint32(); + if (nrAxes == 1) { + this.dynamicAxisNodeName = reader.string(); + } + } + this.learningRateMultiplier = 0; + if (version >= 10) { + this.learningRateMultiplier = reader.float32(); + } + }; + op.LearnableParameter = function(reader, version) { + if (version >= 3) { + this.learningRateMultiplier = reader.float32(); + this.sampleLayout = reader.shape(false); + } else { + throw new cntk.Error('LearnableParameter reader not implemented.'); + } + this.value = reader.matrix(); + }; + op.CrossEntropyWithSoftmax = function(reader) { + this.evalMode = reader.uint32(); + if (this.evalMode > 2) { + this.evalMode = 0; + reader.skip(-4); + } + }; + op.Times = function(reader, version) { + this.outputRank = (version >= 3) ? reader.uint64() : 1; + this.inferInputRankToMap = (version >= 12) ? reader.int32() : -1; + }; + op.Dropout = function(reader, version) { + if (version >= 16) { + this.rngSeed = (version == 16) ?
reader.uint32() : reader.uint64(); + this.rngOffset = reader.uint64(); + } + }; + op.ConvolutionBase = function(reader, version) { + if (version >= 5) { + this.kernelShape = reader.shape(false); + this.mapCount = reader.shape(false); + this.strides = reader.shape(false); + this.sharing = reader.booleans(); + this.autoPadding = reader.booleans(); + this.lowerPad = reader.shape(false); + this.upperPad = reader.shape(false); + this.poolKind = reader.int32(); + this.imageLayoutKind = reader.int32(); + this.maxTempMemSizeInSamples = reader.uint64(); + } + if (version >= 9) { + this.transpose = reader.boolean(); + } + if (version >= 20) { + this.outputShape = reader.shape(false); + } + if (version >= 21) { + this.ceilOutDim = reader.boolean(); + } + if (version >= 23) { + this.includePad = reader.boolean(); + } + }; + op.Convolution = function(reader, version) { + op.ConvolutionBase.apply(this, [ reader, version ]); + if (version < 5) { + this.kernelShape = shape([ reader.uint64(), reader.uint64(), 1 ]); + this.strides = shape([ reader.uint64(), reader.uint64(), 1 ]); + this.mapCount = shape([ reader.uint32() ]); + this.imageLayoutKind = reader.int32(); + this.autoPadding = [ reader.boolean() ]; + this.maxTempMemSizeInSamples = reader.uint64(); + this.poolKind = 'None'; + this.convolution2D = true; + this.sharing = [ true ]; + this.lowerPad = shape([ 0 ]); + this.upperPad = shape([ 0 ]); + } else { + this.convolution2D = reader.boolean(); + if (version >= 18) { + this.dilation = reader.shape(); + } else { + this.dilation = shape([ 1 ]); + } + } + }; + op.Pooling = function(reader, version) { + op.ConvolutionBase.apply(this, [ reader, version ]); + }; + op.PoolingBase = function(reader) { + this.imageLayoutKind = reader.int32(); + this.windowWidth = reader.uint32(); + this.windowHeight = reader.uint64(); + this.horizontalSubsample = reader.uint64(); + this.verticalSubsample = reader.uint64(); + }; + op.MaxPooling = function(reader, version) { + op.PoolingBase.apply(this, [ reader, version ]); + }; + op.ROIPooling = function(reader, version) { + this.roiOutputShape = reader.shape(false); + this.poolKind = (version < 26) ? 'Max' : reader.int32(); + this.spatialScale = (version < 26) ? 0.0625 : reader.float64(); + }; + op.Reshape = function(reader) { + this.beginDimParameter = reader.uint32(); + this.endDimParameter = reader.uint32(); + this.replacementSampleLayout = reader.shape(false); + }; + op.ReduceElements = function(reader, version) { + let num_axes = 1; + if (version >= 27) { + num_axes = reader.uint32(); + } + this.axes = []; + for (let i = 0; i < num_axes; i++) { + this.axes.push(reader.uint32()); + } + this.operation = reader.string(); + if (version >= 24) { + this.keepDimensions = reader.boolean(); + } + }; + op.BatchNormalization = function(reader, version) { + let mbCount = 0; + if (version >= 6) { + this.spatial = reader.boolean(); + this.normalizationTimeConstant = reader.float64(); + this.blendTimeConstant = reader.float64(); + this.imageLayoutKind = reader.int32(); + if (version >= 13) { + if (version != 19) { + this.runCountUntied = reader.uint64(); + } else { + this.runCountUntied = reader.boolean() ? 
0 : 'SIZE_MAX'; // TODO + } + } else { + mbCount = reader.uint64(); + } + this.epsilon = reader.float64(); + this.useCntkEngine = reader.boolean(); + } else { + const verWritten = reader.int32(); + const verReadable = reader.int32(); + if (verReadable > verWritten || verWritten < 0x00010001 || verReadable > 0x00010004) { + throw new cntk.Error('BatchNormalization version not supported.'); + } + this.eval = reader.boolean(); + this.spatial = reader.boolean(); + if (verWritten >= 0x00010004) { + this.normalizationTimeConstant = reader.float64(); + } else { + reader.float64(); // expAvgFactor + } + if (verWritten >= 0x00010002) { + this.imageLayoutKind = reader.int32(); + mbCount = reader.uint64(); + } + if (verWritten >= 0x00010003) { + this.epsilon = reader.float64(); + this.useCntkEngine = reader.boolean(); + } + } + if (version < 13) { + this.runCountUntied = 16 * mbCount; + this.convertRunningVariancePending = true; + } + }; + op.Tanh = function() {}; + op.Sigmoid = function() {}; + op.Logistic = function() {}; + op.SquareError = function() {}; + op.ErrorPrediction = function() {}; + op.RowStack = function(reader, version) { + this.spliceDim = (version >= 3) ? reader.int32() : 1; + }; + op.Slice = function(reader, version) { + let num = 1; + if (version >= 22) { + num = reader.int32(); + } + this.index = []; + this.axis = []; + this.strideMultiplier = []; + for (let i = 0; i < num; i++) { + this.index.push([ [ reader.uint64(), reader.uint64() ] ]); + if (version >= 3) { + this.axis.push(reader.int32()); + } + if (version >= 27) { + this.strideMultiplier.push(reader.int32()); + } + } + }; + op.PastValue = function(reader, version) { + this.timeStep = reader.int32(); + if (version > 3) { + this.sampleLayout = reader.shape(false); + } else { + const rows = reader.uint64(); + reader.uint64(); + this.sampleLayout = shape([ rows ], true); + } + if (version >= 2) { + this.initialStateValue = reader.int32(); + } + }; + op.FutureValue = function(reader, version) { + this.timeStep = reader.int32(); + if (version > 3) { + this.sampleLayout = reader.shape(false); + } else { + const rows = reader.uint64(); + reader.uint64(); + this.sampleLayout = shape([ rows ], true); + } + if (version >= 2) { + this.initialStateValue = reader.int32(); + } + }; + op.TransposeDimensions = function(reader, version) { + if (version >= 3) { + this.axis1 = reader.int32(); + this.axis2 = reader.int32(); + if (version >= 25 && this.axis1 == 0 && this.axis2 == 0) { + const size = reader.uint64(); + this.perm = []; + for (let i = 0; i < size; i++) { + this.perm.push(reader.uint64()); + } + } + } else { + this.axis1 = 1; + this.axis2 = 2; + } + }; + op.AveragePooling = function(reader, version) { + op.PoolingBase.apply(this, [ reader, version ]); + }; + op.InvStdDev = function(reader) { + this.hasComputed = reader.boolean(); + this.value = reader.matrix(); + }; + op.Mean = function(reader) { + this.hasComputed = reader.boolean(); + this.value = reader.matrix(); + }; + op.PerDimMeanVarNormalization = function() {}; + op.Softmax = function() {}; + op.DynamicAxis = function() {}; + + const nodes = []; + this.nodes = {}; + for (let i = 0; i < numNodes; i++) { + const precision = this.version >= 7 ? 
reader.string() : ''; + if (precision != 'float' && precision != 'double' && precision != 'half' && precision != '') { + throw new cntk.Error(`Invalid precision format '${precision}'.`); + } + const obj = { __type__: reader.string() }; + obj.name = reader.string(); + obj.precision = precision; + const constructor = op[obj.__type__]; + if (!constructor) { + throw new cntk.Error(`Unsupported node type '${obj.__type__}'.`); + } + constructor.apply(obj, [ reader, this.version ]); + nodes.push(obj); + this.nodes[obj.name] = obj; + } + reader.assert('ENodeList'); + reader.assert('BRelation'); + for (let j = 0; j < numNodes; j++) { + const nodeName = reader.string(); + const node = this.nodes[nodeName]; + const numChildren = reader.uint64(); + const children = []; + for (let k = 0; k < numChildren; k++) { + children.push(reader.string()); + } + if (this.version < 19 && node.__type__ == 'BatchNormalization') { + const runSampleCount = { + __type__: 'LearnableParameter', + name: `${nodeName}.run_sample_count`, + precision: node.precision, + sampleLayout: shape([ 1 ]), // TODO set value = 0 + learningRateMultiplier: 0 + }; + nodes.push(runSampleCount); + this.nodes[runSampleCount.name] = runSampleCount; + children.push(runSampleCount.name); + } + if (node.__type__ == 'Convolution' && children.length > 1) { + children.splice(0, 0, children.pop()); + } + node.inputs = children; + } + reader.assert('ERelation'); + reader.assert('BRootNodes'); + if (reader.match('BFeatureNodes')) { + this.feature = reader.strings(); + reader.assert('EFeatureNodes'); + } + if (reader.match('BLabelNodes')) { + this.label = reader.strings(); + reader.assert('ELabelNodes'); + } + if (reader.match('BCriterionNodes')) { + this.criterion = reader.strings(); + reader.assert('ECriterionNodes'); + } + // some files store criterion nodes under 'BCriteriaNodes' instead + if (!this.criterion || this.criterion.length == 0) { + if (reader.match('BCriteriaNodes')) { + this.criterion = reader.strings(); + reader.assert('ECriteriaNodes'); + } + } + if (reader.match('BNodesReqMultiSeqHandling')) { + reader.strings(); + reader.assert('ENodesReqMultiSeqHandling'); + } + if (reader.match('BEvalNodes')) { + this.eval = reader.strings(); + reader.assert('EEvalNodes'); + } + if (reader.match('BOutputNodes')) { + this.output = reader.strings(); + reader.assert('EOutputNodes'); + } + if (reader.match('BPairNodes')) { + this.pair = reader.strings(); + reader.assert('EPairNodes'); + } + reader.assert('ERootNodes'); + reader.assert('ECN'); + } +}; + +cntk.ImageLayoutKind = { + 0: 'CHW', + 1: 'HWC' +}; + +cntk.PoolKind = { + 0: 'None', + 1: 'Max', + 2: 'Average' +}; + +cntk.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading CNTK model.'; + } +}; + +export const ModelFactory = cntk.ModelFactory; diff --git a/coreml-metadata.json b/coreml-metadata.json new file mode 100644 index 00000000000..1e794c59984 --- /dev/null +++ b/coreml-metadata.json @@ -0,0 +1,497 @@ +[ + { + "name": "activation", + "category": "Activation", + "description": "Applies specified type of activation function to input." + }, + { + "name": "add", + "description": "A layer that performs elementwise addition.", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "average", + "description": "A layer that computes the elementwise average of the inputs."
+ }, + { + "name": "batchnorm", + "category": "Normalization", + "description": "A layer that performs batch normalization, which is performed along the channel axis, and repeated along the other axes, if present.", + "attributes": [ + { "name": "epsilon", "default": 0.000009999999747378752 }, + { "name": "computeMeanVar", "visible": false }, + { "name": "instanceNormalization", "visible": false } + ] + }, + { + "name": "bias", + "category": "Layer", + "description": "A layer that performs elementwise addition of a bias, which is broadcasted to match the input shape." + }, + { + "name": "biDirectionalLSTM", + "category": "Layer", + "description": "Bidirectional long short-term memory (LSTM) layer. The first LSTM operates on the input sequence in the forward direction. The second LSTM operates on the input sequence in the reverse direction.", + "inputs": [ + { "name": "input" }, + { "name": "h" }, + { "name": "c" }, + { "name": "h_rev" }, + { "name": "c_rev" }, + { "name": "inputGateWeightMatrix", "visible": false }, + { "name": "forgetGateWeightMatrix", "visible": false }, + { "name": "blockInputWeightMatrix", "visible": false }, + { "name": "outputGateWeightMatrix", "visible": false }, + { "name": "inputGateRecursionMatrix", "visible": false }, + { "name": "forgetGateRecursionMatrix", "visible": false }, + { "name": "blockInputRecursionMatrix", "visible": false }, + { "name": "outputGateRecursionMatrix", "visible": false }, + { "name": "inputGateBiasVector", "visible": false }, + { "name": "forgetGateBiasVector", "visible": false }, + { "name": "blockInputBiasVector", "visible": false }, + { "name": "outputGateBiasVector", "visible": false }, + { "name": "inputGateWeightMatrix_rev", "visible": false }, + { "name": "forgetGateWeightMatrix_rev", "visible": false }, + { "name": "blockInputWeightMatrix_rev", "visible": false }, + { "name": "outputGateWeightMatrix_rev", "visible": false }, + { "name": "inputGateRecursionMatrix_rev", "visible": false }, + { "name": "forgetGateRecursionMatrix_rev", "visible": false }, + { "name": "blockInputRecursionMatrix_rev", "visible": false }, + { "name": "outputGateRecursionMatrix_rev", "visible": false }, + { "name": "inputGateBiasVector_rev", "visible": false }, + { "name": "forgetGateBiasVector_rev", "visible": false }, + { "name": "blockInputBiasVector_rev", "visible": false }, + { "name": "outputGateBiasVector_rev", "visible": false } + ], + "outputs": [ + { "name": "output" }, + { "name": "h" }, + { "name": "c" }, + { "name": "h_rev" }, + { "name": "c_rev" } + ] + }, + { + "name": "concat", + "category": "Tensor", + "description": "A layer that concatenates along the channel axis (default) or sequence axis.", + "inputs": [ + { "name": "inputs", "type": "Tensor[]" } + ] + }, + { + "name": "convolution", + "category": "Layer", + "description": "A layer that performs spatial convolution or deconvolution.", + "attributes": [ + { "name": "outputShape", "type": "uint64[]", "description": "Either None or a 2-tuple, specifying the output shape (output_height, output_width). Used only when is_deconv == True. When is_deconv == False, this parameter is ignored. If it is None, the output shape is calculated automatically using the border_mode. Kindly refer to NeuralNetwork.proto for details.", "visible": false }, + { "name": "outputChannels", "type": "uint64", "description": "The number of kernels. 
Same as ``C_out`` used in the layer description.", "visible": false }, + { "name": "kernelChannels", "type": "uint64", "description": "Channel dimension of the kernels. Must be equal to ``inputChannels / nGroups``, if isDeconvolution == False. Must be equal to ``inputChannels``, if isDeconvolution == True.", "visible": false }, + { "name": "nGroups", "type": "uint64", "description": "Group convolution, i.e. weight reuse along channel axis. Input and kernels are divided into g groups and convolution / deconvolution is applied within the groups independently. If not set or 0, it is set to the default value 1.", "default": 1 }, + { "name": "isDeconvolution", "type": "boolean", "description": "Flag to specify whether it is a deconvolution layer." }, + { "name": "valid", "type": "ValidPadding", "visible": false }, + { "name": "same", "type": "SamePadding", "visible": false }, + { "name": "dilationFactor", "type": "uint64[]", "default": [ 1, 1 ] }, + { "name": "stride", "type": "uint64[]", "default": [ 1, 1 ] }, + { "name": "kernelSize", "type": "uint64[]", "default": [ 3, 3 ] }, + { "name": "hasBias", "type": "boolean", "description": "Flag to specify whether a bias is to be added or not.", "visible": false } + ] + }, + { + "name": "crop", + "category": "Data", + "description": "A layer that crops the spatial dimensions of an input. If two inputs are provided, the shape of the second input is used as the reference shape.", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "dot", + "description": "A layer that computes the dot product of two vectors. If cosineSimilarity is true, the inputs are normalized first, thereby computing the cosine similarity." + }, + { + "name": "embedding", + "category": "Transform", + "description": "A layer that performs a matrix lookup and optionally adds a bias." + }, + { + "name": "featureVectorizer", + "inputs": [ + { "name": "inputs", "type": "Tensor[]" } + ] + }, + { + "name": "flatten", + "category": "Shape", + "description": "A layer that flattens the input.", + "attributes": [ + { "name": "mode", "type": "FlattenLayerParams.FlattenOrder" } + ] + }, + { + "name": "gather", + "category": "Transform", + "description": "Gather layer that gathers elements from the first input, along a specified axis, at indices specified in the second input.", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "gelu", + "category": "Activation", + "description": "Gaussian error linear unit activation.", + "attributes": [ + { "name": "mode", "type": "GeluLayerParams.GeluMode" } + ] + }, + { + "name": "gru", + "category": "Layer", + "description": "Gated Recurrent Unit (GRU) layer.", + "inputs": [ + { "name": "input" }, + { "name": "h" }, + { "name": "updateGateWeightMatrix", "visible": false }, + { "name": "resetGateWeightMatrix", "visible": false }, + { "name": "outputGateWeightMatrix", "visible": false }, + { "name": "updateGateRecursionMatrix", "visible": false }, + { "name": "resetGateRecursionMatrix", "visible": false }, + { "name": "outputGateRecursionMatrix", "visible": false }, + { "name": "updateGateBiasVector", "visible": false }, + { "name": "resetGateBiasVector", "visible": false }, + { "name": "outputGateBiasVector", "visible": false } + ], + "outputs": [ + { "name": "output" }, + { "name": "h" } + ] + }, + { + "name": "innerProduct", + "category": "Layer", + "description": "A layer that performs a matrix vector product.
This is equivalent to a fully-connected, or dense layer.", + "attributes": [ + { "name": "inputChannels", "type": "uint64", "visible": false }, + { "name": "outputChannels", "type": "uint64", "visible": false }, + { "name": "hasBias", "type": "boolean", "visible": false } + ] + }, + { + "name": "int64ClassLabels", + "category": "Data", + "outputs": [ + { "name": "probabilities" }, + { "name": "feature" } + ] + }, + { + "name": "itemSimilarityRecommender", + "inputs": [ + { "name": "item" }, + { "name": "numRecommendations" }, + { "name": "itemRestriction" }, + { "name": "itemExclusion" } + ], + "outputs": [ + { "name": "recommendedItemList" }, + { "name": "recommendedItemScore" } + ] + }, + { + "name": "l2normalize", + "category": "Normalization", + "description": "A layer that performs L2 normalization, i.e. divides by the square root of the sum of squares of all elements of input." + }, + { + "name": "loadConstant", + "category": "Data" + }, + { + "name": "lrn", + "category": "Normalization", + "description": "A layer that performs local response normalization (LRN).", + "attributes": [ + { "name": "k", "default": 1 } + ] + }, + { + "name": "max", + "description": "A layer that computes the elementwise maximum over the inputs." + }, + { + "name": "min", + "description": "A layer that computes the elementwise minimum over the inputs." + }, + { + "name": "multiply", + "description": "A layer that performs elementwise multiplication.", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "mvn", + "category": "Normalization", + "description": "A layer that performs mean variance normalization, along axis = -3." + }, + { + "name": "nonMaximumSuppression", + "attributes": [ + { "name": "iouThreshold" }, + { "name": "confidenceThreshold" } + ], + "inputs": [ + { "name": "confidence" }, + { "name": "coordinates" }, + { "name": "iouThreshold" }, + { "name": "confidenceThreshold" } + ], + "outputs": [ + { "name": "confidence" }, + { "name": "coordinates" } + ] + }, + { + "name": "padding", + "category": "Shape", + "description": "Fill a constant value in the padded region.", + "attributes": [ + { "name": "paddingAmounts", "visible": false } + ] + }, + { + "name": "permute", + "category": "Shape", + "description": "A layer that rearranges the dimensions and data of an input." + }, + { + "name": "pooling", + "category": "Pool", + "description": "Spatial Pooling layer to reduce dimensions of input using the specified kernel size and type.", + "attributes": [ + { "name": "includeLastPixel", "type": "ValidCompletePadding", "visible": false }, + { "name": "same", "type": "SamePadding", "visible": false }, + { "name": "valid", "type": "ValidCompletePadding", "visible": false }, + { "name": "type", "type": "PoolingLayerParams.PoolingType" }, + { "name": "globalPooling", "type": "boolean", "default": false }, + { "name": "stride", "type": "uint64[]", "default": [ 1, 1 ] }, + { "name": "kernelSize", "type": "uint64[]", "default": [ 3, 3 ] }, + { "name": "avgPoolExcludePadding", "type": "boolean", "default": false } + ] + }, + { + "name": "reduce", + "description": "A layer that reduces the input using a specified operation." + }, + { + "name": "reorganizeData", + "category": "Shape", + "description": "A layer that reorganizes data in the input in: 1. SPACE_TO_DEPTH, 2. DEPTH_TO_SPACE."
+ }, + { + "name": "reshape", + "category": "Shape", + "description": "A layer that recasts the input into a new shape." + }, + { + "name": "scale", + "category": "Layer", + "description": "A layer that performs elementwise multiplication by a scale factor and optionally adds a bias.", + "attributes": [ + { "name": "hasBias", "type": "boolean", "visible": false } + ] + }, + { + "name": "scaler", + "category": "Data" + }, + { + "name": "sequenceRepeat", + "category": "Shape", + "description": "A layer that repeats a sequence." + }, + { + "name": "slice", + "description": "A layer that slices the input data along a given axis." + }, + { + "name": "split", + "description": "A layer that uniformly splits across the channel dimension to produce a specified number of outputs." + }, + { + "name": "softmax", + "category": "Activation", + "description": "A layer that performs softmax normalization. Normalization is done along the channel axis." + }, + { + "name": "softmaxND", + "category": "Activation", + "description": "A layer that performs softmax normalization along a specified axis." + }, + { + "name": "squeeze", + "category": "Transform" + }, + { + "name": "stringClassLabels", + "category": "Data", + "outputs": [ + { "name": "probabilities" }, + { "name": "feature" } + ] + }, + { + "name": "textClassifier", + "attributes": [ + { "name": "revision", "visible": false } + ] + }, + { + "name": "unary", + "description": "A layer that applies a unary function.", + "attributes": [ + { "name": "type", "type": "UnaryFunctionLayerParams.Operation" }, + { "name": "alpha", "default": 1 }, + { "name": "scale", "default": 1 }, + { "name": "epsilon", "default": 9.999999974752427e-7 } + ], + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "uniDirectionalLSTM", + "category": "Layer", + "description": "A unidirectional long short-term memory (LSTM) layer.", + "inputs": [ + { "name": "input" }, + { "name": "h" }, + { "name": "c" }, + { "name": "inputGateWeightMatrix", "visible": false }, + { "name": "forgetGateWeightMatrix", "visible": false }, + { "name": "blockInputWeightMatrix", "visible": false }, + { "name": "outputGateWeightMatrix", "visible": false }, + { "name": "inputGateRecursionMatrix", "visible": false }, + { "name": "forgetGateRecursionMatrix", "visible": false }, + { "name": "blockInputRecursionMatrix", "visible": false }, + { "name": "outputGateRecursionMatrix", "visible": false }, + { "name": "inputGateBiasVector", "visible": false }, + { "name": "forgetGateBiasVector", "visible": false }, + { "name": "blockInputBiasVector", "visible": false }, + { "name": "outputGateBiasVector", "visible": false } + ], + "outputs": [ + { "name": "output" }, + { "name": "h" }, + { "name": "c" } + ] + }, + { + "name": "upsample", + "category": "Data", + "description": "A layer that scales up spatial dimensions. It supports two modes: nearest neighbour (default) and bilinear."
+ }, + { + "name": "transpose", + "category": "Transform" + }, + { + "name": "wordTagger", + "attributes": [ + { "name": "revision", "visible": false } + ], + "outputs": [ + { "name": "tokens" }, + { "name": "tags" }, + { "name": "locations" }, + { "name": "lengths" } + ] + }, + { + "name": "program:conv", + "category": "Layer", + "inputs": [ + { "name": "x" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "program:batch_norm", + "category": "Normalization", + "inputs": [ + { "name": "x" }, + { "name": "mean" }, + { "name": "variance" }, + { "name": "gamma" }, + { "name": "beta" } + ] + }, + { + "name": "program:linear", + "category": "Layer", + "inputs": [ + { "name": "x" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "program:pad", + "category": "Tensor" + }, + { + "name": "program:transpose", + "category": "Transform" + }, + { + "name": "program:sigmoid", + "category": "Activation" + }, + { + "name": "program:softmax", + "category": "Activation" + }, + { + "name": "program:relu", + "category": "Activation" + }, + { + "name": "program:relu6", + "category": "Activation" + }, + { + "name": "program:reshape", + "category": "Shape" + }, + { + "name": "program:concat", + "category": "Tensor" + }, + { + "name": "program:layer_norm", + "category": "Normalization" + } +] \ No newline at end of file diff --git a/coreml-proto.js b/coreml-proto.js new file mode 100644 index 00000000000..e7cb51d4044 --- /dev/null +++ b/coreml-proto.js @@ -0,0 +1,18917 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('coreml'); + +$root.CoreML = {}; + +$root.CoreML.Specification = {}; + +$root.CoreML.Specification.Pipeline = class Pipeline { + + constructor() { + this.models = []; + this.names = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Pipeline(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.models.push($root.CoreML.Specification.Model.decode(reader, reader.uint32())); + break; + case 2: + message.names.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Pipeline(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "models": + message.models.push($root.CoreML.Specification.Model.decodeText(reader)); + break; + case "names": + reader.array(message.names, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.PipelineClassifier = class PipelineClassifier { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PipelineClassifier(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pipeline = $root.CoreML.Specification.Pipeline.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PipelineClassifier(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "pipeline": + message.pipeline = $root.CoreML.Specification.Pipeline.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.PipelineClassifier.prototype.pipeline = null; + +$root.CoreML.Specification.PipelineRegressor = class PipelineRegressor { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PipelineRegressor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pipeline = $root.CoreML.Specification.Pipeline.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PipelineRegressor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "pipeline": + message.pipeline = $root.CoreML.Specification.Pipeline.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.PipelineRegressor.prototype.pipeline = null; + +$root.CoreML.Specification.FeatureDescription = class FeatureDescription { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FeatureDescription(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.shortDescription = reader.string(); + break; + case 3: + message.type = $root.CoreML.Specification.FeatureType.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FeatureDescription(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "shortDescription": + message.shortDescription = reader.string(); + break; + case "type": + message.type = $root.CoreML.Specification.FeatureType.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FeatureDescription.prototype.name = ""; +$root.CoreML.Specification.FeatureDescription.prototype.shortDescription = ""; +$root.CoreML.Specification.FeatureDescription.prototype.type = null; + +$root.CoreML.Specification.Metadata = class Metadata { + + constructor() { + this.userDefined = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Metadata(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shortDescription = reader.string(); + break; + case 2: + message.versionString = reader.string(); + break; + case 3: + message.author = reader.string(); + break; + case 4: + message.license = reader.string(); + break; + case 100: + reader.entry(message.userDefined, () => reader.string(), () => reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Metadata(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shortDescription": + message.shortDescription = reader.string(); + break; + case "versionString": + message.versionString = reader.string(); + break; + case "author": + message.author = reader.string(); + break; + case "license": + message.license = reader.string(); + break; + case "userDefined": + reader.entry(message.userDefined, () => reader.string(), () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Metadata.prototype.shortDescription = ""; +$root.CoreML.Specification.Metadata.prototype.versionString = ""; +$root.CoreML.Specification.Metadata.prototype.author = ""; +$root.CoreML.Specification.Metadata.prototype.license = ""; + +$root.CoreML.Specification.ModelDescription = class ModelDescription { + + constructor() { + this.input = []; + this.output = []; + this.trainingInput = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ModelDescription(); + const end = length !== undefined ? 
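+ // Reviewer note: `userDefined` above is a proto3 map<string, string>. On the
+ // wire a map field is just a repeated embedded message whose key is field 1
+ // and whose value is field 2; reader.entry() from the companion protobuf.js
+ // decodes one such entry into the target object. Roughly (a sketch, not the
+ // actual library code) entry(obj, keyFn, valueFn) does this per entry:
+ //
+ //     const end = reader.position + reader.uint32();
+ //     let key, value;
+ //     while (reader.position < end) {
+ //         const tag = reader.uint32();
+ //         switch (tag >>> 3) {
+ //             case 1: key = keyFn(); break;
+ //             case 2: value = valueFn(); break;
+ //             default: reader.skipType(tag & 7); break;
+ //         }
+ //     }
+ //     obj[key] = value;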
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.input.push($root.CoreML.Specification.FeatureDescription.decode(reader, reader.uint32())); + break; + case 10: + message.output.push($root.CoreML.Specification.FeatureDescription.decode(reader, reader.uint32())); + break; + case 11: + message.predictedFeatureName = reader.string(); + break; + case 12: + message.predictedProbabilitiesName = reader.string(); + break; + case 50: + message.trainingInput.push($root.CoreML.Specification.FeatureDescription.decode(reader, reader.uint32())); + break; + case 100: + message.metadata = $root.CoreML.Specification.Metadata.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ModelDescription(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "input": + message.input.push($root.CoreML.Specification.FeatureDescription.decodeText(reader)); + break; + case "output": + message.output.push($root.CoreML.Specification.FeatureDescription.decodeText(reader)); + break; + case "predictedFeatureName": + message.predictedFeatureName = reader.string(); + break; + case "predictedProbabilitiesName": + message.predictedProbabilitiesName = reader.string(); + break; + case "trainingInput": + message.trainingInput.push($root.CoreML.Specification.FeatureDescription.decodeText(reader)); + break; + case "metadata": + message.metadata = $root.CoreML.Specification.Metadata.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ModelDescription.prototype.predictedFeatureName = ""; +$root.CoreML.Specification.ModelDescription.prototype.predictedProbabilitiesName = ""; +$root.CoreML.Specification.ModelDescription.prototype.metadata = null; + +$root.CoreML.Specification.SerializedModel = class SerializedModel { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SerializedModel(); + const end = length !== undefined ? 
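+ // Reviewer note: the gaps in ModelDescription's field numbers (1, 10..12, 50,
+ // 100) are deliberate headroom in the .proto schema; anything this decoder
+ // does not recognize lands in the default branch and is skipped by wire type,
+ // which keeps old readers forward-compatible with newer models. A sketch of
+ // what a skipType(wireType) helper has to handle (readVarint/skipBytes are
+ // assumed names, not the companion library's actual API):
+ //
+ //     switch (wireType) {
+ //         case 0: readVarint(); break;             // varint
+ //         case 1: skipBytes(8); break;             // fixed 64-bit
+ //         case 2: skipBytes(readVarint()); break;  // length-delimited
+ //         case 5: skipBytes(4); break;             // fixed 32-bit
+ //         default: throw new Error('unknown wire type ' + wireType);
+ //     }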
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.identifier = reader.string(); + break; + case 2: + message.model = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SerializedModel(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "identifier": + message.identifier = reader.string(); + break; + case "model": + message.model = reader.bytes(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SerializedModel.prototype.identifier = ""; +$root.CoreML.Specification.SerializedModel.prototype.model = new Uint8Array([]); + +$root.CoreML.Specification.Model = class Model { + + constructor() { + } + + get Type() { + $root.CoreML.Specification.Model.TypeSet = $root.CoreML.Specification.Model.TypeSet || new Set([ "pipelineClassifier", "pipelineRegressor", "pipeline", "glmRegressor", "supportVectorRegressor", "treeEnsembleRegressor", "neuralNetworkRegressor", "bayesianProbitRegressor", "glmClassifier", "supportVectorClassifier", "treeEnsembleClassifier", "neuralNetworkClassifier", "kNearestNeighborsClassifier", "neuralNetwork", "itemSimilarityRecommender", "mlProgram", "customModel", "linkedModel", "classConfidenceThresholding", "oneHotEncoder", "imputer", "featureVectorizer", "dictVectorizer", "scaler", "categoricalMapping", "normalizer", "arrayFeatureExtractor", "nonMaximumSuppression", "identity", "textClassifier", "wordTagger", "visionFeaturePrint", "soundAnalysisPreprocessing", "gazetteer", "wordEmbedding", "audioFeaturePrint", "serializedModel"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.Model.TypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Model(); + const end = length !== undefined ? 
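+ // Reviewer note: Model's fields 200..3000 form a proto oneof -- exactly one
+ // model type is set per Model. The `Type` getter above emulates protobuf's
+ // WhichOneof: it lazily builds a Set of member names once, then returns the
+ // first own, non-null key of the instance that belongs to the Set (own keys
+ // are only ever fields decode() assigned, thanks to the prototype defaults).
+ // Illustrative dispatch, where `render` is a hypothetical consumer:
+ //
+ //     const model = $root.CoreML.Specification.Model.decode(reader);
+ //     switch (model.Type) {
+ //         case 'neuralNetwork': render(model.neuralNetwork); break;
+ //         case 'mlProgram':     render(model.mlProgram); break;
+ //         case 'pipeline':      model.pipeline.models.forEach(render); break;
+ //         default:              throw new Error('unsupported model type');
+ //     }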
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.specificationVersion = reader.int32(); + break; + case 2: + message.description = $root.CoreML.Specification.ModelDescription.decode(reader, reader.uint32()); + break; + case 10: + message.isUpdatable = reader.bool(); + break; + case 200: + message.pipelineClassifier = $root.CoreML.Specification.PipelineClassifier.decode(reader, reader.uint32()); + break; + case 201: + message.pipelineRegressor = $root.CoreML.Specification.PipelineRegressor.decode(reader, reader.uint32()); + break; + case 202: + message.pipeline = $root.CoreML.Specification.Pipeline.decode(reader, reader.uint32()); + break; + case 300: + message.glmRegressor = $root.CoreML.Specification.GLMRegressor.decode(reader, reader.uint32()); + break; + case 301: + message.supportVectorRegressor = $root.CoreML.Specification.SupportVectorRegressor.decode(reader, reader.uint32()); + break; + case 302: + message.treeEnsembleRegressor = $root.CoreML.Specification.TreeEnsembleRegressor.decode(reader, reader.uint32()); + break; + case 303: + message.neuralNetworkRegressor = $root.CoreML.Specification.NeuralNetworkRegressor.decode(reader, reader.uint32()); + break; + case 304: + message.bayesianProbitRegressor = $root.CoreML.Specification.BayesianProbitRegressor.decode(reader, reader.uint32()); + break; + case 400: + message.glmClassifier = $root.CoreML.Specification.GLMClassifier.decode(reader, reader.uint32()); + break; + case 401: + message.supportVectorClassifier = $root.CoreML.Specification.SupportVectorClassifier.decode(reader, reader.uint32()); + break; + case 402: + message.treeEnsembleClassifier = $root.CoreML.Specification.TreeEnsembleClassifier.decode(reader, reader.uint32()); + break; + case 403: + message.neuralNetworkClassifier = $root.CoreML.Specification.NeuralNetworkClassifier.decode(reader, reader.uint32()); + break; + case 404: + message.kNearestNeighborsClassifier = $root.CoreML.Specification.KNearestNeighborsClassifier.decode(reader, reader.uint32()); + break; + case 500: + message.neuralNetwork = $root.CoreML.Specification.NeuralNetwork.decode(reader, reader.uint32()); + break; + case 501: + message.itemSimilarityRecommender = $root.CoreML.Specification.ItemSimilarityRecommender.decode(reader, reader.uint32()); + break; + case 502: + message.mlProgram = $root.CoreML.Specification.MILSpec.Program.decode(reader, reader.uint32()); + break; + case 555: + message.customModel = $root.CoreML.Specification.CustomModel.decode(reader, reader.uint32()); + break; + case 556: + message.linkedModel = $root.CoreML.Specification.LinkedModel.decode(reader, reader.uint32()); + break; + case 560: + message.classConfidenceThresholding = $root.CoreML.Specification.ClassConfidenceThresholding.decode(reader, reader.uint32()); + break; + case 600: + message.oneHotEncoder = $root.CoreML.Specification.OneHotEncoder.decode(reader, reader.uint32()); + break; + case 601: + message.imputer = $root.CoreML.Specification.Imputer.decode(reader, reader.uint32()); + break; + case 602: + message.featureVectorizer = $root.CoreML.Specification.FeatureVectorizer.decode(reader, reader.uint32()); + break; + case 603: + message.dictVectorizer = $root.CoreML.Specification.DictVectorizer.decode(reader, reader.uint32()); + break; + case 604: + message.scaler = $root.CoreML.Specification.Scaler.decode(reader, reader.uint32()); + break; + case 606: + message.categoricalMapping = 
$root.CoreML.Specification.CategoricalMapping.decode(reader, reader.uint32()); + break; + case 607: + message.normalizer = $root.CoreML.Specification.Normalizer.decode(reader, reader.uint32()); + break; + case 609: + message.arrayFeatureExtractor = $root.CoreML.Specification.ArrayFeatureExtractor.decode(reader, reader.uint32()); + break; + case 610: + message.nonMaximumSuppression = $root.CoreML.Specification.NonMaximumSuppression.decode(reader, reader.uint32()); + break; + case 900: + message.identity = $root.CoreML.Specification.Identity.decode(reader, reader.uint32()); + break; + case 2000: + message.textClassifier = $root.CoreML.Specification.CoreMLModels.TextClassifier.decode(reader, reader.uint32()); + break; + case 2001: + message.wordTagger = $root.CoreML.Specification.CoreMLModels.WordTagger.decode(reader, reader.uint32()); + break; + case 2002: + message.visionFeaturePrint = $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.decode(reader, reader.uint32()); + break; + case 2003: + message.soundAnalysisPreprocessing = $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.decode(reader, reader.uint32()); + break; + case 2004: + message.gazetteer = $root.CoreML.Specification.CoreMLModels.Gazetteer.decode(reader, reader.uint32()); + break; + case 2005: + message.wordEmbedding = $root.CoreML.Specification.CoreMLModels.WordEmbedding.decode(reader, reader.uint32()); + break; + case 2006: + message.audioFeaturePrint = $root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.decode(reader, reader.uint32()); + break; + case 3000: + message.serializedModel = $root.CoreML.Specification.SerializedModel.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Model(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "specificationVersion": + message.specificationVersion = reader.int32(); + break; + case "description": + message.description = $root.CoreML.Specification.ModelDescription.decodeText(reader); + break; + case "isUpdatable": + message.isUpdatable = reader.bool(); + break; + case "pipelineClassifier": + message.pipelineClassifier = $root.CoreML.Specification.PipelineClassifier.decodeText(reader); + break; + case "pipelineRegressor": + message.pipelineRegressor = $root.CoreML.Specification.PipelineRegressor.decodeText(reader); + break; + case "pipeline": + message.pipeline = $root.CoreML.Specification.Pipeline.decodeText(reader); + break; + case "glmRegressor": + message.glmRegressor = $root.CoreML.Specification.GLMRegressor.decodeText(reader); + break; + case "supportVectorRegressor": + message.supportVectorRegressor = $root.CoreML.Specification.SupportVectorRegressor.decodeText(reader); + break; + case "treeEnsembleRegressor": + message.treeEnsembleRegressor = $root.CoreML.Specification.TreeEnsembleRegressor.decodeText(reader); + break; + case "neuralNetworkRegressor": + message.neuralNetworkRegressor = $root.CoreML.Specification.NeuralNetworkRegressor.decodeText(reader); + break; + case "bayesianProbitRegressor": + message.bayesianProbitRegressor = $root.CoreML.Specification.BayesianProbitRegressor.decodeText(reader); + break; + case "glmClassifier": + message.glmClassifier = $root.CoreML.Specification.GLMClassifier.decodeText(reader); + break; + case "supportVectorClassifier": + message.supportVectorClassifier = 
$root.CoreML.Specification.SupportVectorClassifier.decodeText(reader); + break; + case "treeEnsembleClassifier": + message.treeEnsembleClassifier = $root.CoreML.Specification.TreeEnsembleClassifier.decodeText(reader); + break; + case "neuralNetworkClassifier": + message.neuralNetworkClassifier = $root.CoreML.Specification.NeuralNetworkClassifier.decodeText(reader); + break; + case "kNearestNeighborsClassifier": + message.kNearestNeighborsClassifier = $root.CoreML.Specification.KNearestNeighborsClassifier.decodeText(reader); + break; + case "neuralNetwork": + message.neuralNetwork = $root.CoreML.Specification.NeuralNetwork.decodeText(reader); + break; + case "itemSimilarityRecommender": + message.itemSimilarityRecommender = $root.CoreML.Specification.ItemSimilarityRecommender.decodeText(reader); + break; + case "mlProgram": + message.mlProgram = $root.CoreML.Specification.MILSpec.Program.decodeText(reader); + break; + case "customModel": + message.customModel = $root.CoreML.Specification.CustomModel.decodeText(reader); + break; + case "linkedModel": + message.linkedModel = $root.CoreML.Specification.LinkedModel.decodeText(reader); + break; + case "classConfidenceThresholding": + message.classConfidenceThresholding = $root.CoreML.Specification.ClassConfidenceThresholding.decodeText(reader); + break; + case "oneHotEncoder": + message.oneHotEncoder = $root.CoreML.Specification.OneHotEncoder.decodeText(reader); + break; + case "imputer": + message.imputer = $root.CoreML.Specification.Imputer.decodeText(reader); + break; + case "featureVectorizer": + message.featureVectorizer = $root.CoreML.Specification.FeatureVectorizer.decodeText(reader); + break; + case "dictVectorizer": + message.dictVectorizer = $root.CoreML.Specification.DictVectorizer.decodeText(reader); + break; + case "scaler": + message.scaler = $root.CoreML.Specification.Scaler.decodeText(reader); + break; + case "categoricalMapping": + message.categoricalMapping = $root.CoreML.Specification.CategoricalMapping.decodeText(reader); + break; + case "normalizer": + message.normalizer = $root.CoreML.Specification.Normalizer.decodeText(reader); + break; + case "arrayFeatureExtractor": + message.arrayFeatureExtractor = $root.CoreML.Specification.ArrayFeatureExtractor.decodeText(reader); + break; + case "nonMaximumSuppression": + message.nonMaximumSuppression = $root.CoreML.Specification.NonMaximumSuppression.decodeText(reader); + break; + case "identity": + message.identity = $root.CoreML.Specification.Identity.decodeText(reader); + break; + case "textClassifier": + message.textClassifier = $root.CoreML.Specification.CoreMLModels.TextClassifier.decodeText(reader); + break; + case "wordTagger": + message.wordTagger = $root.CoreML.Specification.CoreMLModels.WordTagger.decodeText(reader); + break; + case "visionFeaturePrint": + message.visionFeaturePrint = $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.decodeText(reader); + break; + case "soundAnalysisPreprocessing": + message.soundAnalysisPreprocessing = $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.decodeText(reader); + break; + case "gazetteer": + message.gazetteer = $root.CoreML.Specification.CoreMLModels.Gazetteer.decodeText(reader); + break; + case "wordEmbedding": + message.wordEmbedding = $root.CoreML.Specification.CoreMLModels.WordEmbedding.decodeText(reader); + break; + case "audioFeaturePrint": + message.audioFeaturePrint = $root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.decodeText(reader); + break; + case "serializedModel": + 
message.serializedModel = $root.CoreML.Specification.SerializedModel.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Model.prototype.specificationVersion = 0; +$root.CoreML.Specification.Model.prototype.description = null; +$root.CoreML.Specification.Model.prototype.isUpdatable = false; + +$root.CoreML.Specification.CoreMLModels = {}; + +$root.CoreML.Specification.CoreMLModels.VisionFeaturePrint = class VisionFeaturePrint { + + constructor() { + } + + get VisionFeaturePrintType() { + $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.VisionFeaturePrintTypeSet = $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.VisionFeaturePrintTypeSet || new Set([ "scene", "objects"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.VisionFeaturePrintTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 20: + message.scene = $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.decode(reader, reader.uint32()); + break; + case 21: + message.objects = $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "scene": + message.scene = $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.decodeText(reader); + break; + case "objects": + message.objects = $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene = class Scene { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene(); + const end = length !== undefined ? 
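+ // Reviewer note: each message carries two decoders. decode() walks the binary
+ // wire format and dispatches on numeric field tags; decodeText() walks the
+ // protobuf text format and dispatches on field *names* via reader.tag(). For
+ // example, a text-format Model like the following would be consumed by the
+ // decodeText() branches above ("specificationVersion", "description", ...):
+ //
+ //     specificationVersion: 4
+ //     description {
+ //         input { name: "image" }
+ //         output { name: "classLabel" }
+ //     }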
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "version": + message.version = reader.enum($root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.SceneVersion); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.prototype.version = 0; + +$root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.SceneVersion = { + "SCENE_VERSION_INVALID": 0, + "SCENE_VERSION_1": 1, + "SCENE_VERSION_2": 2 +}; + +$root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects = class Objects { + + constructor() { + this.output = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.int32(); + break; + case 100: + message.output.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "version": + message.version = reader.enum($root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.ObjectsVersion); + break; + case "output": + reader.array(message.output, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.prototype.version = 0; + +$root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.ObjectsVersion = { + "OBJECTS_VERSION_INVALID": 0, + "OBJECTS_VERSION_1": 1 +}; + +$root.CoreML.Specification.CoreMLModels.AudioFeaturePrint = class AudioFeaturePrint { + + constructor() { + } + + get AudioFeaturePrintType() { + $root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.AudioFeaturePrintTypeSet = $root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.AudioFeaturePrintTypeSet || new Set([ "sound"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.AudioFeaturePrintTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoreMLModels.AudioFeaturePrint(); + const end = length !== undefined ? 
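+ // Reviewer note: enum fields show the same binary/text split. In the binary
+ // branch the Scene/Objects `version` is just a varint (reader.int32()); in
+ // the text branch the symbolic name appears in the source, so decodeText()
+ // resolves it through reader.enum() against the name->number table defined
+ // above. Going the other way (number back to name, e.g. for display) is a
+ // reverse lookup over the same table:
+ //
+ //     const SceneVersion =
+ //         $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.SceneVersion;
+ //     SceneVersion.SCENE_VERSION_2; // 2 -- what binary decode() stores
+ //     Object.keys(SceneVersion).find((k) => SceneVersion[k] === 2);
+ //     // -> 'SCENE_VERSION_2'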
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 20: + message.sound = $root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoreMLModels.AudioFeaturePrint(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "sound": + message.sound = $root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound = class Sound { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "version": + message.version = reader.enum($root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound.SoundVersion); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound.prototype.version = 0; + +$root.CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound.SoundVersion = { + "SOUND_VERSION_INVALID": 0, + "SOUND_VERSION_1": 1 +}; + +$root.CoreML.Specification.CoreMLModels.TextClassifier = class TextClassifier { + + constructor() { + } + + get ClassLabels() { + $root.CoreML.Specification.CoreMLModels.TextClassifier.ClassLabelsSet = $root.CoreML.Specification.CoreMLModels.TextClassifier.ClassLabelsSet || new Set([ "stringClassLabels"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.CoreMLModels.TextClassifier.ClassLabelsSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoreMLModels.TextClassifier(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.revision = reader.uint32(); + break; + case 10: + message.language = reader.string(); + break; + case 100: + message.modelParameterData = reader.bytes(); + break; + case 200: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoreMLModels.TextClassifier(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "revision": + message.revision = reader.uint32(); + break; + case "language": + message.language = reader.string(); + break; + case "modelParameterData": + message.modelParameterData = reader.bytes(); + break; + case "stringClassLabels": + message.stringClassLabels = $root.CoreML.Specification.StringVector.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CoreMLModels.TextClassifier.prototype.revision = 0; +$root.CoreML.Specification.CoreMLModels.TextClassifier.prototype.language = ""; +$root.CoreML.Specification.CoreMLModels.TextClassifier.prototype.modelParameterData = new Uint8Array([]); + +$root.CoreML.Specification.CoreMLModels.WordTagger = class WordTagger { + + constructor() { + } + + get Tags() { + $root.CoreML.Specification.CoreMLModels.WordTagger.TagsSet = $root.CoreML.Specification.CoreMLModels.WordTagger.TagsSet || new Set([ "stringTags"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.CoreMLModels.WordTagger.TagsSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoreMLModels.WordTagger(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.revision = reader.uint32(); + break; + case 10: + message.language = reader.string(); + break; + case 20: + message.tokensOutputFeatureName = reader.string(); + break; + case 21: + message.tokenTagsOutputFeatureName = reader.string(); + break; + case 22: + message.tokenLocationsOutputFeatureName = reader.string(); + break; + case 23: + message.tokenLengthsOutputFeatureName = reader.string(); + break; + case 100: + message.modelParameterData = reader.bytes(); + break; + case 200: + message.stringTags = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoreMLModels.WordTagger(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "revision": + message.revision = reader.uint32(); + break; + case "language": + message.language = reader.string(); + break; + case "tokensOutputFeatureName": + message.tokensOutputFeatureName = reader.string(); + break; + case "tokenTagsOutputFeatureName": + message.tokenTagsOutputFeatureName = reader.string(); + break; + case "tokenLocationsOutputFeatureName": + message.tokenLocationsOutputFeatureName = reader.string(); + break; + case "tokenLengthsOutputFeatureName": + message.tokenLengthsOutputFeatureName = reader.string(); + break; + case "modelParameterData": + message.modelParameterData = reader.bytes(); + break; + case "stringTags": + message.stringTags = $root.CoreML.Specification.StringVector.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CoreMLModels.WordTagger.prototype.revision = 0; +$root.CoreML.Specification.CoreMLModels.WordTagger.prototype.language = ""; +$root.CoreML.Specification.CoreMLModels.WordTagger.prototype.tokensOutputFeatureName = ""; +$root.CoreML.Specification.CoreMLModels.WordTagger.prototype.tokenTagsOutputFeatureName = ""; +$root.CoreML.Specification.CoreMLModels.WordTagger.prototype.tokenLocationsOutputFeatureName = ""; +$root.CoreML.Specification.CoreMLModels.WordTagger.prototype.tokenLengthsOutputFeatureName = ""; +$root.CoreML.Specification.CoreMLModels.WordTagger.prototype.modelParameterData = new Uint8Array([]); + +$root.CoreML.Specification.CoreMLModels.Gazetteer = class Gazetteer { + + constructor() { + } + + get ClassLabels() { + $root.CoreML.Specification.CoreMLModels.Gazetteer.ClassLabelsSet = $root.CoreML.Specification.CoreMLModels.Gazetteer.ClassLabelsSet || new Set([ "stringClassLabels"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.CoreMLModels.Gazetteer.ClassLabelsSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoreMLModels.Gazetteer(); + const end = length !== undefined ? 
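+ // Reviewer note: `modelParameterData` is a bytes field: reader.bytes() yields
+ // a Uint8Array holding the opaque model payload. The default is a single
+ // shared empty Uint8Array on the prototype, which is safe because decoders
+ // replace it rather than mutate it. Illustrative:
+ //
+ //     const tagger = new $root.CoreML.Specification.CoreMLModels.WordTagger();
+ //     tagger.modelParameterData instanceof Uint8Array; // true (shared default)
+ //     tagger.modelParameterData.length;                // 0 until decode() sets it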
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.revision = reader.uint32(); + break; + case 10: + message.language = reader.string(); + break; + case 100: + message.modelParameterData = reader.bytes(); + break; + case 200: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoreMLModels.Gazetteer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "revision": + message.revision = reader.uint32(); + break; + case "language": + message.language = reader.string(); + break; + case "modelParameterData": + message.modelParameterData = reader.bytes(); + break; + case "stringClassLabels": + message.stringClassLabels = $root.CoreML.Specification.StringVector.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CoreMLModels.Gazetteer.prototype.revision = 0; +$root.CoreML.Specification.CoreMLModels.Gazetteer.prototype.language = ""; +$root.CoreML.Specification.CoreMLModels.Gazetteer.prototype.modelParameterData = new Uint8Array([]); + +$root.CoreML.Specification.CoreMLModels.WordEmbedding = class WordEmbedding { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoreMLModels.WordEmbedding(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.revision = reader.uint32(); + break; + case 10: + message.language = reader.string(); + break; + case 100: + message.modelParameterData = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoreMLModels.WordEmbedding(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "revision": + message.revision = reader.uint32(); + break; + case "language": + message.language = reader.string(); + break; + case "modelParameterData": + message.modelParameterData = reader.bytes(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CoreMLModels.WordEmbedding.prototype.revision = 0; +$root.CoreML.Specification.CoreMLModels.WordEmbedding.prototype.language = ""; +$root.CoreML.Specification.CoreMLModels.WordEmbedding.prototype.modelParameterData = new Uint8Array([]); + +$root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing = class SoundAnalysisPreprocessing { + + constructor() { + } + + get SoundAnalysisPreprocessingType() { + $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.SoundAnalysisPreprocessingTypeSet = $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.SoundAnalysisPreprocessingTypeSet || new Set([ "vggish"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.SoundAnalysisPreprocessingTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing(); + const end = length !== 
undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 20: + message.vggish = $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.Vggish.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "vggish": + message.vggish = $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.Vggish.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.Vggish = class Vggish { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.Vggish(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.Vggish(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.StringToInt64Map = class StringToInt64Map { + + constructor() { + this.map = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.StringToInt64Map(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.map, () => reader.string(), () => reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.StringToInt64Map(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "map": + reader.entry(message.map, () => reader.string(), () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Int64ToStringMap = class Int64ToStringMap { + + constructor() { + this.map = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Int64ToStringMap(); + const end = length !== undefined ? 
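+ // Reviewer note: Vggish above is an empty message -- its decode() loop can
+ // only skip fields. That is intentional: the information is the *presence*
+ // of field 20 in SoundAnalysisPreprocessing, which selects the oneof:
+ //
+ //     const pre = $root.CoreML.Specification.CoreMLModels
+ //         .SoundAnalysisPreprocessing.decode(reader, reader.uint32());
+ //     pre.SoundAnalysisPreprocessingType; // 'vggish' when field 20 was present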
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.map, () => reader.int64(), () => reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Int64ToStringMap(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "map": + reader.entry(message.map, () => reader.int64(), () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.StringToDoubleMap = class StringToDoubleMap { + + constructor() { + this.map = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.StringToDoubleMap(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.map, () => reader.string(), () => reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.StringToDoubleMap(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "map": + reader.entry(message.map, () => reader.string(), () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Int64ToDoubleMap = class Int64ToDoubleMap { + + constructor() { + this.map = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Int64ToDoubleMap(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.map, () => reader.int64(), () => reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Int64ToDoubleMap(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "map": + reader.entry(message.map, () => reader.int64(), () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.StringVector = class StringVector { + + constructor() { + this.vector = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.StringVector(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.vector.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.StringVector(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "vector": + reader.array(message.vector, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Int64Vector = class Int64Vector { + + constructor() { + this.vector = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Int64Vector(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.vector = reader.array(message.vector, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Int64Vector(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "vector": + reader.array(message.vector, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FloatVector = class FloatVector { + + constructor() { + this.vector = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FloatVector(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.vector = reader.floats(message.vector, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FloatVector(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "vector": + reader.array(message.vector, () => reader.float()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DoubleVector = class DoubleVector { + + constructor() { + this.vector = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.DoubleVector(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.vector = reader.doubles(message.vector, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.DoubleVector(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "vector": + reader.array(message.vector, () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Int64Range = class Int64Range { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Int64Range(); + const end = length !== undefined ? 
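+ // Reviewer note: the numeric vectors above pass the full `tag` into the
+ // reader helpers because repeated scalars have two wire encodings: packed
+ // (one length-delimited blob, wire type 2, the proto3 default) and unpacked
+ // (one tagged value per element). Roughly what reader.floats(values, tag)
+ // has to handle -- a sketch, not the library's actual implementation:
+ //
+ //     if ((tag & 7) === 2) {                    // packed blob
+ //         const end = reader.position + reader.uint32();
+ //         while (reader.position < end) values.push(reader.float());
+ //     } else {                                  // single unpacked element
+ //         values.push(reader.float());
+ //     }
+ //     return values;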
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.minValue = reader.int64(); + break; + case 2: + message.maxValue = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Int64Range(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "minValue": + message.minValue = reader.int64(); + break; + case "maxValue": + message.maxValue = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Int64Range.prototype.minValue = protobuf.Int64.create(0); +$root.CoreML.Specification.Int64Range.prototype.maxValue = protobuf.Int64.create(0); + +$root.CoreML.Specification.Int64Set = class Int64Set { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Int64Set(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values = reader.array(message.values, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Int64Set(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + reader.array(message.values, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DoubleRange = class DoubleRange { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.DoubleRange(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.minValue = reader.double(); + break; + case 2: + message.maxValue = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.DoubleRange(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "minValue": + message.minValue = reader.double(); + break; + case "maxValue": + message.maxValue = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DoubleRange.prototype.minValue = 0; +$root.CoreML.Specification.DoubleRange.prototype.maxValue = 0; + +$root.CoreML.Specification.PrecisionRecallCurve = class PrecisionRecallCurve { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PrecisionRecallCurve(); + const end = length !== undefined ? 
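+ // Reviewer note: minValue/maxValue are int64, and JavaScript numbers only
+ // represent integers exactly up to 2^53 - 1, so the companion protobuf.js
+ // wraps 64-bit values (protobuf.Int64 / protobuf.Uint64) instead of using
+ // plain numbers. The failure mode being avoided:
+ //
+ //     Number.MAX_SAFE_INTEGER;                 // 9007199254740991 (2^53 - 1)
+ //     9007199254740993 === 9007199254740992;   // true -- silent precision loss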
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.precisionValues = $root.CoreML.Specification.FloatVector.decode(reader, reader.uint32()); + break; + case 2: + message.precisionConfidenceThresholds = $root.CoreML.Specification.FloatVector.decode(reader, reader.uint32()); + break; + case 3: + message.recallValues = $root.CoreML.Specification.FloatVector.decode(reader, reader.uint32()); + break; + case 4: + message.recallConfidenceThresholds = $root.CoreML.Specification.FloatVector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PrecisionRecallCurve(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "precisionValues": + message.precisionValues = $root.CoreML.Specification.FloatVector.decodeText(reader); + break; + case "precisionConfidenceThresholds": + message.precisionConfidenceThresholds = $root.CoreML.Specification.FloatVector.decodeText(reader); + break; + case "recallValues": + message.recallValues = $root.CoreML.Specification.FloatVector.decodeText(reader); + break; + case "recallConfidenceThresholds": + message.recallConfidenceThresholds = $root.CoreML.Specification.FloatVector.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.PrecisionRecallCurve.prototype.precisionValues = null; +$root.CoreML.Specification.PrecisionRecallCurve.prototype.precisionConfidenceThresholds = null; +$root.CoreML.Specification.PrecisionRecallCurve.prototype.recallValues = null; +$root.CoreML.Specification.PrecisionRecallCurve.prototype.recallConfidenceThresholds = null; + +$root.CoreML.Specification.Int64FeatureType = class Int64FeatureType { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Int64FeatureType(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Int64FeatureType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DoubleFeatureType = class DoubleFeatureType { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.DoubleFeatureType(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.DoubleFeatureType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.StringFeatureType = class StringFeatureType { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.StringFeatureType(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.StringFeatureType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SizeRange = class SizeRange { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SizeRange(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lowerBound = reader.uint64(); + break; + case 2: + message.upperBound = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SizeRange(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lowerBound": + message.lowerBound = reader.uint64(); + break; + case "upperBound": + message.upperBound = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SizeRange.prototype.lowerBound = protobuf.Uint64.create(0); +$root.CoreML.Specification.SizeRange.prototype.upperBound = protobuf.Int64.create(0); + +$root.CoreML.Specification.ImageFeatureType = class ImageFeatureType { + + constructor() { + } + + get SizeFlexibility() { + $root.CoreML.Specification.ImageFeatureType.SizeFlexibilitySet = $root.CoreML.Specification.ImageFeatureType.SizeFlexibilitySet || new Set([ "enumeratedSizes", "imageSizeRange"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.ImageFeatureType.SizeFlexibilitySet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ImageFeatureType(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.width = reader.int64(); + break; + case 2: + message.height = reader.int64(); + break; + case 21: + message.enumeratedSizes = $root.CoreML.Specification.ImageFeatureType.EnumeratedImageSizes.decode(reader, reader.uint32()); + break; + case 31: + message.imageSizeRange = $root.CoreML.Specification.ImageFeatureType.ImageSizeRange.decode(reader, reader.uint32()); + break; + case 3: + message.colorSpace = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ImageFeatureType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "width": + message.width = reader.int64(); + break; + case "height": + message.height = reader.int64(); + break; + case "enumeratedSizes": + message.enumeratedSizes = $root.CoreML.Specification.ImageFeatureType.EnumeratedImageSizes.decodeText(reader); + break; + case "imageSizeRange": + message.imageSizeRange = $root.CoreML.Specification.ImageFeatureType.ImageSizeRange.decodeText(reader); + break; + case "colorSpace": + message.colorSpace = reader.enum($root.CoreML.Specification.ImageFeatureType.ColorSpace); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ImageFeatureType.prototype.width = protobuf.Int64.create(0); +$root.CoreML.Specification.ImageFeatureType.prototype.height = protobuf.Int64.create(0); +$root.CoreML.Specification.ImageFeatureType.prototype.colorSpace = 0; + +$root.CoreML.Specification.ImageFeatureType.ColorSpace = { + "INVALID_COLOR_SPACE": 0, + "GRAYSCALE": 10, + "RGB": 20, + "BGR": 30, + "GRAYSCALE_FLOAT16": 40 +}; + +$root.CoreML.Specification.ImageFeatureType.ImageSize = class ImageSize { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ImageFeatureType.ImageSize(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.width = reader.uint64(); + break; + case 2: + message.height = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ImageFeatureType.ImageSize(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "width": + message.width = reader.uint64(); + break; + case "height": + message.height = reader.uint64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ImageFeatureType.ImageSize.prototype.width = protobuf.Uint64.create(0); +$root.CoreML.Specification.ImageFeatureType.ImageSize.prototype.height = protobuf.Uint64.create(0); + +$root.CoreML.Specification.ImageFeatureType.EnumeratedImageSizes = class EnumeratedImageSizes { + + constructor() { + this.sizes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ImageFeatureType.EnumeratedImageSizes(); + const end = length !== undefined ? 
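+ // Reviewer note: ColorSpace values are spaced in steps of 10, leaving room
+ // for related variants (GRAYSCALE_FLOAT16 = 40 appears to be such a later
+ // addition). As with the other enums here, mapping a decoded number back to
+ // a display name is a reverse lookup over the table:
+ //
+ //     const ColorSpace = $root.CoreML.Specification.ImageFeatureType.ColorSpace;
+ //     const colorSpaceName = (v) =>
+ //         Object.keys(ColorSpace).find((k) => ColorSpace[k] === v) || String(v);
+ //     colorSpaceName(20); // 'RGB'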
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sizes.push($root.CoreML.Specification.ImageFeatureType.ImageSize.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ImageFeatureType.EnumeratedImageSizes(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "sizes": + message.sizes.push($root.CoreML.Specification.ImageFeatureType.ImageSize.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ImageFeatureType.ImageSizeRange = class ImageSizeRange { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ImageFeatureType.ImageSizeRange(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.widthRange = $root.CoreML.Specification.SizeRange.decode(reader, reader.uint32()); + break; + case 2: + message.heightRange = $root.CoreML.Specification.SizeRange.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ImageFeatureType.ImageSizeRange(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "widthRange": + message.widthRange = $root.CoreML.Specification.SizeRange.decodeText(reader); + break; + case "heightRange": + message.heightRange = $root.CoreML.Specification.SizeRange.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ImageFeatureType.ImageSizeRange.prototype.widthRange = null; +$root.CoreML.Specification.ImageFeatureType.ImageSizeRange.prototype.heightRange = null; + +$root.CoreML.Specification.ArrayFeatureType = class ArrayFeatureType { + + constructor() { + this.shape = []; + } + + get ShapeFlexibility() { + $root.CoreML.Specification.ArrayFeatureType.ShapeFlexibilitySet = $root.CoreML.Specification.ArrayFeatureType.ShapeFlexibilitySet || new Set([ "enumeratedShapes", "shapeRange"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.ArrayFeatureType.ShapeFlexibilitySet.has(key) && this[key] != null); + } + + get defaultOptionalValue() { + $root.CoreML.Specification.ArrayFeatureType.defaultOptionalValueSet = $root.CoreML.Specification.ArrayFeatureType.defaultOptionalValueSet || new Set([ "intDefaultValue", "floatDefaultValue", "doubleDefaultValue"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.ArrayFeatureType.defaultOptionalValueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ArrayFeatureType(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = reader.array(message.shape, () => reader.int64(), tag); + break; + case 2: + message.dataType = reader.int32(); + break; + case 21: + message.enumeratedShapes = $root.CoreML.Specification.ArrayFeatureType.EnumeratedShapes.decode(reader, reader.uint32()); + break; + case 31: + message.shapeRange = $root.CoreML.Specification.ArrayFeatureType.ShapeRange.decode(reader, reader.uint32()); + break; + case 41: + message.intDefaultValue = reader.int32(); + break; + case 51: + message.floatDefaultValue = reader.float(); + break; + case 61: + message.doubleDefaultValue = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ArrayFeatureType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + reader.array(message.shape, () => reader.int64()); + break; + case "dataType": + message.dataType = reader.enum($root.CoreML.Specification.ArrayFeatureType.ArrayDataType); + break; + case "enumeratedShapes": + message.enumeratedShapes = $root.CoreML.Specification.ArrayFeatureType.EnumeratedShapes.decodeText(reader); + break; + case "shapeRange": + message.shapeRange = $root.CoreML.Specification.ArrayFeatureType.ShapeRange.decodeText(reader); + break; + case "intDefaultValue": + message.intDefaultValue = reader.int32(); + break; + case "floatDefaultValue": + message.floatDefaultValue = reader.float(); + break; + case "doubleDefaultValue": + message.doubleDefaultValue = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ArrayFeatureType.prototype.dataType = 0; + +$root.CoreML.Specification.ArrayFeatureType.ArrayDataType = { + "INVALID_ARRAY_DATA_TYPE": 0, + "FLOAT32": 65568, + "DOUBLE": 65600, + "INT32": 131104, + "FLOAT16": 65552 +}; + +$root.CoreML.Specification.ArrayFeatureType.Shape = class Shape { + + constructor() { + this.shape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ArrayFeatureType.Shape(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = reader.array(message.shape, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ArrayFeatureType.Shape(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + reader.array(message.shape, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ArrayFeatureType.EnumeratedShapes = class EnumeratedShapes { + + constructor() { + this.shapes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ArrayFeatureType.EnumeratedShapes(); + const end = length !== undefined ? 
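// Annotation: repeated scalars such as ArrayFeatureType.shape go through reader.array(values, () => reader.int64(), tag), + // which receives the tag so it can treat a length-delimited run as packed and anything else as a single element, while + // repeated messages (EnumeratedShapes.shapes below) decode one length-prefixed submessage per occurrence.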
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shapes.push($root.CoreML.Specification.ArrayFeatureType.Shape.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ArrayFeatureType.EnumeratedShapes(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shapes": + message.shapes.push($root.CoreML.Specification.ArrayFeatureType.Shape.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ArrayFeatureType.ShapeRange = class ShapeRange { + + constructor() { + this.sizeRanges = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ArrayFeatureType.ShapeRange(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sizeRanges.push($root.CoreML.Specification.SizeRange.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ArrayFeatureType.ShapeRange(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "sizeRanges": + message.sizeRanges.push($root.CoreML.Specification.SizeRange.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DictionaryFeatureType = class DictionaryFeatureType { + + constructor() { + } + + get KeyType() { + $root.CoreML.Specification.DictionaryFeatureType.KeyTypeSet = $root.CoreML.Specification.DictionaryFeatureType.KeyTypeSet || new Set([ "int64KeyType", "stringKeyType"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.DictionaryFeatureType.KeyTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.DictionaryFeatureType(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.int64KeyType = $root.CoreML.Specification.Int64FeatureType.decode(reader, reader.uint32()); + break; + case 2: + message.stringKeyType = $root.CoreML.Specification.StringFeatureType.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.DictionaryFeatureType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "int64KeyType": + message.int64KeyType = $root.CoreML.Specification.Int64FeatureType.decodeText(reader); + break; + case "stringKeyType": + message.stringKeyType = $root.CoreML.Specification.StringFeatureType.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SequenceFeatureType = class SequenceFeatureType { + + constructor() { + } + + get Type() { + $root.CoreML.Specification.SequenceFeatureType.TypeSet = $root.CoreML.Specification.SequenceFeatureType.TypeSet || new Set([ "int64Type", "stringType"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.SequenceFeatureType.TypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SequenceFeatureType(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.int64Type = $root.CoreML.Specification.Int64FeatureType.decode(reader, reader.uint32()); + break; + case 3: + message.stringType = $root.CoreML.Specification.StringFeatureType.decode(reader, reader.uint32()); + break; + case 101: + message.sizeRange = $root.CoreML.Specification.SizeRange.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SequenceFeatureType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "int64Type": + message.int64Type = $root.CoreML.Specification.Int64FeatureType.decodeText(reader); + break; + case "stringType": + message.stringType = $root.CoreML.Specification.StringFeatureType.decodeText(reader); + break; + case "sizeRange": + message.sizeRange = $root.CoreML.Specification.SizeRange.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SequenceFeatureType.prototype.sizeRange = null; + +$root.CoreML.Specification.FeatureType = class FeatureType { + + constructor() { + } + + get Type() { + $root.CoreML.Specification.FeatureType.TypeSet = $root.CoreML.Specification.FeatureType.TypeSet || new Set([ "int64Type", "doubleType", "stringType", "imageType", "multiArrayType", "dictionaryType", "sequenceType"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.FeatureType.TypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FeatureType(); + const end = length !== undefined ? 
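// Annotation: oneof groups (DictionaryFeatureType.KeyType above, FeatureType.Type below) are exposed as lazy getters: a + // memoized Set of member names is matched against the instance's own keys and the first non-null member is returned, so + // the active variant is recovered from the decoded fields without storing a separate discriminator.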
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.int64Type = $root.CoreML.Specification.Int64FeatureType.decode(reader, reader.uint32()); + break; + case 2: + message.doubleType = $root.CoreML.Specification.DoubleFeatureType.decode(reader, reader.uint32()); + break; + case 3: + message.stringType = $root.CoreML.Specification.StringFeatureType.decode(reader, reader.uint32()); + break; + case 4: + message.imageType = $root.CoreML.Specification.ImageFeatureType.decode(reader, reader.uint32()); + break; + case 5: + message.multiArrayType = $root.CoreML.Specification.ArrayFeatureType.decode(reader, reader.uint32()); + break; + case 6: + message.dictionaryType = $root.CoreML.Specification.DictionaryFeatureType.decode(reader, reader.uint32()); + break; + case 7: + message.sequenceType = $root.CoreML.Specification.SequenceFeatureType.decode(reader, reader.uint32()); + break; + case 1000: + message.isOptional = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FeatureType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "int64Type": + message.int64Type = $root.CoreML.Specification.Int64FeatureType.decodeText(reader); + break; + case "doubleType": + message.doubleType = $root.CoreML.Specification.DoubleFeatureType.decodeText(reader); + break; + case "stringType": + message.stringType = $root.CoreML.Specification.StringFeatureType.decodeText(reader); + break; + case "imageType": + message.imageType = $root.CoreML.Specification.ImageFeatureType.decodeText(reader); + break; + case "multiArrayType": + message.multiArrayType = $root.CoreML.Specification.ArrayFeatureType.decodeText(reader); + break; + case "dictionaryType": + message.dictionaryType = $root.CoreML.Specification.DictionaryFeatureType.decodeText(reader); + break; + case "sequenceType": + message.sequenceType = $root.CoreML.Specification.SequenceFeatureType.decodeText(reader); + break; + case "isOptional": + message.isOptional = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FeatureType.prototype.isOptional = false; + +$root.CoreML.Specification.ArrayFeatureExtractor = class ArrayFeatureExtractor { + + constructor() { + this.extractIndex = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ArrayFeatureExtractor(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.extractIndex = reader.array(message.extractIndex, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ArrayFeatureExtractor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "extractIndex": + reader.array(message.extractIndex, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BayesianProbitRegressor = class BayesianProbitRegressor { + + constructor() { + this.features = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BayesianProbitRegressor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.numberOfFeatures = reader.uint32(); + break; + case 2: + message.bias = $root.CoreML.Specification.BayesianProbitRegressor.Gaussian.decode(reader, reader.uint32()); + break; + case 3: + message.features.push($root.CoreML.Specification.BayesianProbitRegressor.FeatureWeight.decode(reader, reader.uint32())); + break; + case 10: + message.regressionInputFeatureName = reader.string(); + break; + case 11: + message.optimismInputFeatureName = reader.string(); + break; + case 12: + message.samplingScaleInputFeatureName = reader.string(); + break; + case 13: + message.samplingTruncationInputFeatureName = reader.string(); + break; + case 20: + message.meanOutputFeatureName = reader.string(); + break; + case 21: + message.varianceOutputFeatureName = reader.string(); + break; + case 22: + message.pessimisticProbabilityOutputFeatureName = reader.string(); + break; + case 23: + message.sampledProbabilityOutputFeatureName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BayesianProbitRegressor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "numberOfFeatures": + message.numberOfFeatures = reader.uint32(); + break; + case "bias": + message.bias = $root.CoreML.Specification.BayesianProbitRegressor.Gaussian.decodeText(reader); + break; + case "features": + message.features.push($root.CoreML.Specification.BayesianProbitRegressor.FeatureWeight.decodeText(reader)); + break; + case "regressionInputFeatureName": + message.regressionInputFeatureName = reader.string(); + break; + case "optimismInputFeatureName": + message.optimismInputFeatureName = reader.string(); + break; + case "samplingScaleInputFeatureName": + message.samplingScaleInputFeatureName = reader.string(); + break; + case "samplingTruncationInputFeatureName": + message.samplingTruncationInputFeatureName = reader.string(); + break; + case "meanOutputFeatureName": + message.meanOutputFeatureName = reader.string(); + break; + case "varianceOutputFeatureName": + message.varianceOutputFeatureName = reader.string(); + break; + case "pessimisticProbabilityOutputFeatureName": + message.pessimisticProbabilityOutputFeatureName = reader.string(); + break; + case "sampledProbabilityOutputFeatureName": + message.sampledProbabilityOutputFeatureName = reader.string(); + break; + 
default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BayesianProbitRegressor.prototype.numberOfFeatures = 0; +$root.CoreML.Specification.BayesianProbitRegressor.prototype.bias = null; +$root.CoreML.Specification.BayesianProbitRegressor.prototype.regressionInputFeatureName = ""; +$root.CoreML.Specification.BayesianProbitRegressor.prototype.optimismInputFeatureName = ""; +$root.CoreML.Specification.BayesianProbitRegressor.prototype.samplingScaleInputFeatureName = ""; +$root.CoreML.Specification.BayesianProbitRegressor.prototype.samplingTruncationInputFeatureName = ""; +$root.CoreML.Specification.BayesianProbitRegressor.prototype.meanOutputFeatureName = ""; +$root.CoreML.Specification.BayesianProbitRegressor.prototype.varianceOutputFeatureName = ""; +$root.CoreML.Specification.BayesianProbitRegressor.prototype.pessimisticProbabilityOutputFeatureName = ""; +$root.CoreML.Specification.BayesianProbitRegressor.prototype.sampledProbabilityOutputFeatureName = ""; + +$root.CoreML.Specification.BayesianProbitRegressor.Gaussian = class Gaussian { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BayesianProbitRegressor.Gaussian(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mean = reader.double(); + break; + case 2: + message.precision = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BayesianProbitRegressor.Gaussian(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "mean": + message.mean = reader.double(); + break; + case "precision": + message.precision = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BayesianProbitRegressor.Gaussian.prototype.mean = 0; +$root.CoreML.Specification.BayesianProbitRegressor.Gaussian.prototype.precision = 0; + +$root.CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight = class FeatureValueWeight { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight(); + const end = length !== undefined ? 
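// Annotation: as the assignments above show, proto3 defaults live on the prototype (empty strings, zeros, null + // submessages, and protobuf.Int64.create(0) for 64-bit fields), so a decoded message holds own-properties only for + // fields that actually appeared on the wire.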
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.featureValue = reader.uint32(); + break; + case 2: + message.featureWeight = $root.CoreML.Specification.BayesianProbitRegressor.Gaussian.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "featureValue": + message.featureValue = reader.uint32(); + break; + case "featureWeight": + message.featureWeight = $root.CoreML.Specification.BayesianProbitRegressor.Gaussian.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight.prototype.featureValue = 0; +$root.CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight.prototype.featureWeight = null; + +$root.CoreML.Specification.BayesianProbitRegressor.FeatureWeight = class FeatureWeight { + + constructor() { + this.weights = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BayesianProbitRegressor.FeatureWeight(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.featureId = reader.uint32(); + break; + case 2: + message.weights.push($root.CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BayesianProbitRegressor.FeatureWeight(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "featureId": + message.featureId = reader.uint32(); + break; + case "weights": + message.weights.push($root.CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BayesianProbitRegressor.FeatureWeight.prototype.featureId = 0; + +$root.CoreML.Specification.CategoricalMapping = class CategoricalMapping { + + constructor() { + } + + get MappingType() { + $root.CoreML.Specification.CategoricalMapping.MappingTypeSet = $root.CoreML.Specification.CategoricalMapping.MappingTypeSet || new Set([ "stringToInt64Map", "int64ToStringMap"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.CategoricalMapping.MappingTypeSet.has(key) && this[key] != null); + } + + get ValueOnUnknown() { + $root.CoreML.Specification.CategoricalMapping.ValueOnUnknownSet = $root.CoreML.Specification.CategoricalMapping.ValueOnUnknownSet || new Set([ "strValue", "int64Value"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.CategoricalMapping.ValueOnUnknownSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CategoricalMapping(); + const end = length !== undefined ? 
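// Annotation: CategoricalMapping keeps its primary maps at field numbers 1-2 and the ValueOnUnknown fallback at 101-102; + // the same spacing of oneof alternatives into the 100+ range recurs below in GLMClassifier and the k-nearest-neighbors + // messages, mirroring the CoreML .proto layout.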
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.stringToInt64Map = $root.CoreML.Specification.StringToInt64Map.decode(reader, reader.uint32()); + break; + case 2: + message.int64ToStringMap = $root.CoreML.Specification.Int64ToStringMap.decode(reader, reader.uint32()); + break; + case 101: + message.strValue = reader.string(); + break; + case 102: + message.int64Value = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CategoricalMapping(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "stringToInt64Map": + message.stringToInt64Map = $root.CoreML.Specification.StringToInt64Map.decodeText(reader); + break; + case "int64ToStringMap": + message.int64ToStringMap = $root.CoreML.Specification.Int64ToStringMap.decodeText(reader); + break; + case "strValue": + message.strValue = reader.string(); + break; + case "int64Value": + message.int64Value = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CustomModel = class CustomModel { + + constructor() { + this.parameters = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CustomModel(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.className = reader.string(); + break; + case 30: + reader.entry(message.parameters, () => reader.string(), () => $root.CoreML.Specification.CustomModel.CustomModelParamValue.decode(reader, reader.uint32())); + break; + case 40: + message.description = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CustomModel(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "className": + message.className = reader.string(); + break; + case "parameters": + reader.entry(message.parameters, () => reader.string(), () => $root.CoreML.Specification.CustomModel.CustomModelParamValue.decodeText(reader)); + break; + case "description": + message.description = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CustomModel.prototype.className = ""; +$root.CoreML.Specification.CustomModel.prototype.description = ""; + +$root.CoreML.Specification.CustomModel.CustomModelParamValue = class CustomModelParamValue { + + constructor() { + } + + get value() { + $root.CoreML.Specification.CustomModel.CustomModelParamValue.valueSet = $root.CoreML.Specification.CustomModel.CustomModelParamValue.valueSet || new Set([ "doubleValue", "stringValue", "intValue", "longValue", "boolValue", "bytesValue"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.CustomModel.CustomModelParamValue.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CustomModel.CustomModelParamValue(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.doubleValue = reader.double(); + break; + case 20: + message.stringValue = reader.string(); + break; + case 30: + message.intValue = reader.int32(); + break; + case 40: + message.longValue = reader.int64(); + break; + case 50: + message.boolValue = reader.bool(); + break; + case 60: + message.bytesValue = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CustomModel.CustomModelParamValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "doubleValue": + message.doubleValue = reader.double(); + break; + case "stringValue": + message.stringValue = reader.string(); + break; + case "intValue": + message.intValue = reader.int32(); + break; + case "longValue": + message.longValue = reader.int64(); + break; + case "boolValue": + message.boolValue = reader.bool(); + break; + case "bytesValue": + message.bytesValue = reader.bytes(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DictVectorizer = class DictVectorizer { + + constructor() { + } + + get Map() { + $root.CoreML.Specification.DictVectorizer.MapSet = $root.CoreML.Specification.DictVectorizer.MapSet || new Set([ "stringToIndex", "int64ToIndex"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.DictVectorizer.MapSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.DictVectorizer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.stringToIndex = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 2: + message.int64ToIndex = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.DictVectorizer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "stringToIndex": + message.stringToIndex = $root.CoreML.Specification.StringVector.decodeText(reader); + break; + case "int64ToIndex": + message.int64ToIndex = $root.CoreML.Specification.Int64Vector.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FeatureVectorizer = class FeatureVectorizer { + + constructor() { + this.inputList = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FeatureVectorizer(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputList.push($root.CoreML.Specification.FeatureVectorizer.InputColumn.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FeatureVectorizer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputList": + message.inputList.push($root.CoreML.Specification.FeatureVectorizer.InputColumn.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FeatureVectorizer.InputColumn = class InputColumn { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FeatureVectorizer.InputColumn(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputColumn = reader.string(); + break; + case 2: + message.inputDimensions = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FeatureVectorizer.InputColumn(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputColumn": + message.inputColumn = reader.string(); + break; + case "inputDimensions": + message.inputDimensions = reader.uint64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FeatureVectorizer.InputColumn.prototype.inputColumn = ""; +$root.CoreML.Specification.FeatureVectorizer.InputColumn.prototype.inputDimensions = protobuf.Uint64.create(0); + +$root.CoreML.Specification.GLMRegressor = class GLMRegressor { + + constructor() { + this.weights = []; + this.offset = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GLMRegressor(); + const end = length !== undefined ? 
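// Annotation: packed double vectors (GLMRegressor.offset below, and DoubleArray.value) are consumed in a single + // reader.doubles(values, tag) call that returns the populated array, whereas decodeText() rebuilds the same data + // element by element via reader.array(values, () => reader.double()).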
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.weights.push($root.CoreML.Specification.GLMRegressor.DoubleArray.decode(reader, reader.uint32())); + break; + case 2: + message.offset = reader.doubles(message.offset, tag); + break; + case 3: + message.postEvaluationTransform = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GLMRegressor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "weights": + message.weights.push($root.CoreML.Specification.GLMRegressor.DoubleArray.decodeText(reader)); + break; + case "offset": + reader.array(message.offset, () => reader.double()); + break; + case "postEvaluationTransform": + message.postEvaluationTransform = reader.enum($root.CoreML.Specification.GLMRegressor.PostEvaluationTransform); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GLMRegressor.prototype.postEvaluationTransform = 0; + +$root.CoreML.Specification.GLMRegressor.DoubleArray = class DoubleArray { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GLMRegressor.DoubleArray(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.doubles(message.value, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GLMRegressor.DoubleArray(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GLMRegressor.PostEvaluationTransform = { + "NoTransform": 0, + "Logit": 1, + "Probit": 2 +}; + +$root.CoreML.Specification.GLMClassifier = class GLMClassifier { + + constructor() { + this.weights = []; + this.offset = []; + } + + get ClassLabels() { + $root.CoreML.Specification.GLMClassifier.ClassLabelsSet = $root.CoreML.Specification.GLMClassifier.ClassLabelsSet || new Set([ "stringClassLabels", "int64ClassLabels"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.GLMClassifier.ClassLabelsSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GLMClassifier(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.weights.push($root.CoreML.Specification.GLMClassifier.DoubleArray.decode(reader, reader.uint32())); + break; + case 2: + message.offset = reader.doubles(message.offset, tag); + break; + case 3: + message.postEvaluationTransform = reader.int32(); + break; + case 4: + message.classEncoding = reader.int32(); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GLMClassifier(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "weights": + message.weights.push($root.CoreML.Specification.GLMClassifier.DoubleArray.decodeText(reader)); + break; + case "offset": + reader.array(message.offset, () => reader.double()); + break; + case "postEvaluationTransform": + message.postEvaluationTransform = reader.enum($root.CoreML.Specification.GLMClassifier.PostEvaluationTransform); + break; + case "classEncoding": + message.classEncoding = reader.enum($root.CoreML.Specification.GLMClassifier.ClassEncoding); + break; + case "stringClassLabels": + message.stringClassLabels = $root.CoreML.Specification.StringVector.decodeText(reader); + break; + case "int64ClassLabels": + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GLMClassifier.prototype.postEvaluationTransform = 0; +$root.CoreML.Specification.GLMClassifier.prototype.classEncoding = 0; + +$root.CoreML.Specification.GLMClassifier.DoubleArray = class DoubleArray { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GLMClassifier.DoubleArray(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.doubles(message.value, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GLMClassifier.DoubleArray(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GLMClassifier.PostEvaluationTransform = { + "Logit": 0, + "Probit": 1 +}; + +$root.CoreML.Specification.GLMClassifier.ClassEncoding = { + "ReferenceClass": 0, + "OneVsRest": 1 +}; + +$root.CoreML.Specification.KNearestNeighborsClassifier = class KNearestNeighborsClassifier { + + constructor() { + } + + get ClassLabels() { + $root.CoreML.Specification.KNearestNeighborsClassifier.ClassLabelsSet = $root.CoreML.Specification.KNearestNeighborsClassifier.ClassLabelsSet || new Set([ "stringClassLabels", "int64ClassLabels"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.KNearestNeighborsClassifier.ClassLabelsSet.has(key) && this[key] != null); + } + + get DefaultClassLabel() { + $root.CoreML.Specification.KNearestNeighborsClassifier.DefaultClassLabelSet = $root.CoreML.Specification.KNearestNeighborsClassifier.DefaultClassLabelSet || new Set([ "defaultStringLabel", "defaultInt64Label"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.KNearestNeighborsClassifier.DefaultClassLabelSet.has(key) && this[key] != null); + } + + get WeightingScheme() { + $root.CoreML.Specification.KNearestNeighborsClassifier.WeightingSchemeSet = $root.CoreML.Specification.KNearestNeighborsClassifier.WeightingSchemeSet || new Set([ "uniformWeighting", "inverseDistanceWeighting"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.KNearestNeighborsClassifier.WeightingSchemeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.KNearestNeighborsClassifier(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.nearestNeighborsIndex = $root.CoreML.Specification.NearestNeighborsIndex.decode(reader, reader.uint32()); + break; + case 3: + message.numberOfNeighbors = $root.CoreML.Specification.Int64Parameter.decode(reader, reader.uint32()); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 110: + message.defaultStringLabel = reader.string(); + break; + case 111: + message.defaultInt64Label = reader.int64(); + break; + case 200: + message.uniformWeighting = $root.CoreML.Specification.UniformWeighting.decode(reader, reader.uint32()); + break; + case 210: + message.inverseDistanceWeighting = $root.CoreML.Specification.InverseDistanceWeighting.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.KNearestNeighborsClassifier(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "nearestNeighborsIndex": + message.nearestNeighborsIndex = $root.CoreML.Specification.NearestNeighborsIndex.decodeText(reader); + break; + case "numberOfNeighbors": + message.numberOfNeighbors = $root.CoreML.Specification.Int64Parameter.decodeText(reader); + break; + case "stringClassLabels": + message.stringClassLabels = $root.CoreML.Specification.StringVector.decodeText(reader); + break; + case "int64ClassLabels": + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decodeText(reader); + break; + case "defaultStringLabel": + message.defaultStringLabel = reader.string(); + break; + case "defaultInt64Label": + message.defaultInt64Label = reader.int64(); + break; + case "uniformWeighting": + message.uniformWeighting = $root.CoreML.Specification.UniformWeighting.decodeText(reader); + break; + case "inverseDistanceWeighting": + message.inverseDistanceWeighting = $root.CoreML.Specification.InverseDistanceWeighting.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.KNearestNeighborsClassifier.prototype.nearestNeighborsIndex = null; +$root.CoreML.Specification.KNearestNeighborsClassifier.prototype.numberOfNeighbors = null; + +$root.CoreML.Specification.NearestNeighborsIndex = class NearestNeighborsIndex { + + constructor() { + this.floatSamples = []; + } + + get IndexType() { + $root.CoreML.Specification.NearestNeighborsIndex.IndexTypeSet = $root.CoreML.Specification.NearestNeighborsIndex.IndexTypeSet || new Set([ "linearIndex", "singleKdTreeIndex"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.NearestNeighborsIndex.IndexTypeSet.has(key) && this[key] != null); + } + + get DistanceFunction() { + $root.CoreML.Specification.NearestNeighborsIndex.DistanceFunctionSet = $root.CoreML.Specification.NearestNeighborsIndex.DistanceFunctionSet || new Set([ "squaredEuclideanDistance"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.NearestNeighborsIndex.DistanceFunctionSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NearestNeighborsIndex(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.numberOfDimensions = reader.int32(); + break; + case 2: + message.floatSamples.push($root.CoreML.Specification.FloatVector.decode(reader, reader.uint32())); + break; + case 100: + message.linearIndex = $root.CoreML.Specification.LinearIndex.decode(reader, reader.uint32()); + break; + case 110: + message.singleKdTreeIndex = $root.CoreML.Specification.SingleKdTreeIndex.decode(reader, reader.uint32()); + break; + case 200: + message.squaredEuclideanDistance = $root.CoreML.Specification.SquaredEuclideanDistance.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NearestNeighborsIndex(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "numberOfDimensions": + message.numberOfDimensions = reader.int32(); + break; + case "floatSamples": + message.floatSamples.push($root.CoreML.Specification.FloatVector.decodeText(reader)); + break; + case "linearIndex": + message.linearIndex = $root.CoreML.Specification.LinearIndex.decodeText(reader); + break; + case "singleKdTreeIndex": + message.singleKdTreeIndex = $root.CoreML.Specification.SingleKdTreeIndex.decodeText(reader); + break; + case "squaredEuclideanDistance": + message.squaredEuclideanDistance = $root.CoreML.Specification.SquaredEuclideanDistance.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NearestNeighborsIndex.prototype.numberOfDimensions = 0; + +$root.CoreML.Specification.UniformWeighting = class UniformWeighting { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.UniformWeighting(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.UniformWeighting(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.InverseDistanceWeighting = class InverseDistanceWeighting { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.InverseDistanceWeighting(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.InverseDistanceWeighting(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LinearIndex = class LinearIndex { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LinearIndex(); + const end = length !== undefined ? 
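// Annotation: UniformWeighting, InverseDistanceWeighting, LinearIndex and SquaredEuclideanDistance are (near-)empty + // marker messages; their mere presence selects a k-NN option through the oneof getters, so decode() only drains + // unrecognized fields until the length boundary.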
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LinearIndex(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SingleKdTreeIndex = class SingleKdTreeIndex { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SingleKdTreeIndex(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.leafSize = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SingleKdTreeIndex(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "leafSize": + message.leafSize = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SingleKdTreeIndex.prototype.leafSize = 0; + +$root.CoreML.Specification.SquaredEuclideanDistance = class SquaredEuclideanDistance { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SquaredEuclideanDistance(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SquaredEuclideanDistance(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Int64Parameter = class Int64Parameter { + + constructor() { + } + + get AllowedValues() { + $root.CoreML.Specification.Int64Parameter.AllowedValuesSet = $root.CoreML.Specification.Int64Parameter.AllowedValuesSet || new Set([ "range", "set"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.Int64Parameter.AllowedValuesSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Int64Parameter(); + const end = length !== undefined ? 
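// Annotation: tunable parameters pair a plain default with an optional constraint oneof: Int64Parameter below carries + // defaultValue at field 1 plus either an Int64Range (10) or an Int64Set (11), and DoubleParameter follows the same + // shape with a range only.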
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValue = reader.int64(); + break; + case 10: + message.range = $root.CoreML.Specification.Int64Range.decode(reader, reader.uint32()); + break; + case 11: + message.set = $root.CoreML.Specification.Int64Set.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Int64Parameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "defaultValue": + message.defaultValue = reader.int64(); + break; + case "range": + message.range = $root.CoreML.Specification.Int64Range.decodeText(reader); + break; + case "set": + message.set = $root.CoreML.Specification.Int64Set.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Int64Parameter.prototype.defaultValue = protobuf.Int64.create(0); + +$root.CoreML.Specification.DoubleParameter = class DoubleParameter { + + constructor() { + } + + get AllowedValues() { + $root.CoreML.Specification.DoubleParameter.AllowedValuesSet = $root.CoreML.Specification.DoubleParameter.AllowedValuesSet || new Set([ "range"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.DoubleParameter.AllowedValuesSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.DoubleParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValue = reader.double(); + break; + case 10: + message.range = $root.CoreML.Specification.DoubleRange.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.DoubleParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "defaultValue": + message.defaultValue = reader.double(); + break; + case "range": + message.range = $root.CoreML.Specification.DoubleRange.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DoubleParameter.prototype.defaultValue = 0; + +$root.CoreML.Specification.StringParameter = class StringParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.StringParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValue = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.StringParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "defaultValue": + message.defaultValue = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.StringParameter.prototype.defaultValue = ""; + +$root.CoreML.Specification.BoolParameter = class BoolParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BoolParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValue = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BoolParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "defaultValue": + message.defaultValue = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BoolParameter.prototype.defaultValue = false; + +$root.CoreML.Specification.Identity = class Identity { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Identity(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Identity(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Imputer = class Imputer { + + constructor() { + } + + get ImputedValue() { + $root.CoreML.Specification.Imputer.ImputedValueSet = $root.CoreML.Specification.Imputer.ImputedValueSet || new Set([ "imputedDoubleValue", "imputedInt64Value", "imputedStringValue", "imputedDoubleArray", "imputedInt64Array", "imputedStringDictionary", "imputedInt64Dictionary"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.Imputer.ImputedValueSet.has(key) && this[key] != null); + } + + get ReplaceValue() { + $root.CoreML.Specification.Imputer.ReplaceValueSet = $root.CoreML.Specification.Imputer.ReplaceValueSet || new Set([ "replaceDoubleValue", "replaceInt64Value", "replaceStringValue"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.Imputer.ReplaceValueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Imputer(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.imputedDoubleValue = reader.double(); + break; + case 2: + message.imputedInt64Value = reader.int64(); + break; + case 3: + message.imputedStringValue = reader.string(); + break; + case 4: + message.imputedDoubleArray = $root.CoreML.Specification.DoubleVector.decode(reader, reader.uint32()); + break; + case 5: + message.imputedInt64Array = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 6: + message.imputedStringDictionary = $root.CoreML.Specification.StringToDoubleMap.decode(reader, reader.uint32()); + break; + case 7: + message.imputedInt64Dictionary = $root.CoreML.Specification.Int64ToDoubleMap.decode(reader, reader.uint32()); + break; + case 11: + message.replaceDoubleValue = reader.double(); + break; + case 12: + message.replaceInt64Value = reader.int64(); + break; + case 13: + message.replaceStringValue = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Imputer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "imputedDoubleValue": + message.imputedDoubleValue = reader.double(); + break; + case "imputedInt64Value": + message.imputedInt64Value = reader.int64(); + break; + case "imputedStringValue": + message.imputedStringValue = reader.string(); + break; + case "imputedDoubleArray": + message.imputedDoubleArray = $root.CoreML.Specification.DoubleVector.decodeText(reader); + break; + case "imputedInt64Array": + message.imputedInt64Array = $root.CoreML.Specification.Int64Vector.decodeText(reader); + break; + case "imputedStringDictionary": + message.imputedStringDictionary = $root.CoreML.Specification.StringToDoubleMap.decodeText(reader); + break; + case "imputedInt64Dictionary": + message.imputedInt64Dictionary = $root.CoreML.Specification.Int64ToDoubleMap.decodeText(reader); + break; + case "replaceDoubleValue": + message.replaceDoubleValue = reader.double(); + break; + case "replaceInt64Value": + message.replaceInt64Value = reader.int64(); + break; + case "replaceStringValue": + message.replaceStringValue = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec = {}; + +$root.CoreML.Specification.MILSpec.Program = class Program { + + constructor() { + this.functions = {}; + this.attributes = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Program(); + const end = length !== undefined ? 
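// Annotation: proto map fields (MILSpec.Program.functions and .attributes below) decode through + // reader.entry(target, keyFn, valueFn), which reads one key/value entry per occurrence and inserts it into the + // plain-object map initialized in the constructor.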
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.int64(); + break; + case 2: + reader.entry(message.functions, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Function.decode(reader, reader.uint32())); + break; + case 3: + message.docString = reader.string(); + break; + case 4: + reader.entry(message.attributes, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Value.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Program(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "version": + message.version = reader.int64(); + break; + case "functions": + reader.entry(message.functions, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Function.decodeText(reader)); + break; + case "docString": + message.docString = reader.string(); + break; + case "attributes": + reader.entry(message.attributes, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Value.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Program.prototype.version = protobuf.Int64.create(0); +$root.CoreML.Specification.MILSpec.Program.prototype.docString = ""; + +$root.CoreML.Specification.MILSpec.Function = class Function { + + constructor() { + this.inputs = []; + this.block_specializations = {}; + this.attributes = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Function(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputs.push($root.CoreML.Specification.MILSpec.NamedValueType.decode(reader, reader.uint32())); + break; + case 2: + message.opset = reader.string(); + break; + case 3: + reader.entry(message.block_specializations, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Block.decode(reader, reader.uint32())); + break; + case 4: + reader.entry(message.attributes, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Value.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Function(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputs": + message.inputs.push($root.CoreML.Specification.MILSpec.NamedValueType.decodeText(reader)); + break; + case "opset": + message.opset = reader.string(); + break; + case "block_specializations": + reader.entry(message.block_specializations, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Block.decodeText(reader)); + break; + case "attributes": + reader.entry(message.attributes, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Value.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Function.prototype.opset = ""; + +$root.CoreML.Specification.MILSpec.Block = class Block { + + constructor() { + this.inputs = []; + this.outputs = []; + this.operations = []; + this.attributes = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Block(); + const end = length !== undefined ? 
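+
+// A Function carries one Block per opset in 'block_specializations'; the body
+// to use is presumably selected with the function's own 'opset' string. A
+// hypothetical lookup over the message decoded above:
+//
+//   const block = func.block_specializations[func.opset];
+//   // block.operations lists the MILSpec.Operation messages in program order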
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputs.push($root.CoreML.Specification.MILSpec.NamedValueType.decode(reader, reader.uint32())); + break; + case 2: + message.outputs.push(reader.string()); + break; + case 3: + message.operations.push($root.CoreML.Specification.MILSpec.Operation.decode(reader, reader.uint32())); + break; + case 4: + reader.entry(message.attributes, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Value.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Block(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputs": + message.inputs.push($root.CoreML.Specification.MILSpec.NamedValueType.decodeText(reader)); + break; + case "outputs": + reader.array(message.outputs, () => reader.string()); + break; + case "operations": + message.operations.push($root.CoreML.Specification.MILSpec.Operation.decodeText(reader)); + break; + case "attributes": + reader.entry(message.attributes, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Value.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Argument = class Argument { + + constructor() { + this["arguments"] = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Argument(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message["arguments"].push($root.CoreML.Specification.MILSpec.Argument.Binding.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Argument(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "arguments": + message["arguments"].push($root.CoreML.Specification.MILSpec.Argument.Binding.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Argument.Binding = class Binding { + + constructor() { + } + + get binding() { + $root.CoreML.Specification.MILSpec.Argument.Binding.bindingSet = $root.CoreML.Specification.MILSpec.Argument.Binding.bindingSet || new Set([ "name", "value"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.MILSpec.Argument.Binding.bindingSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Argument.Binding(); + const end = length !== undefined ? 
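+
+// Oneof fields are modelled with a lazy getter: the member names are cached in
+// a Set on the constructor, and the getter returns whichever member is actually
+// set on the instance. Sketch of dispatching on an Argument.Binding:
+//
+//   switch (binding.binding) {
+//     case "name": /* refers to a value produced elsewhere, by name */ break;
+//     case "value": /* inline MILSpec.Value */ break;
+//   }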
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.value = $root.CoreML.Specification.MILSpec.Value.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Argument.Binding(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "value": + message.value = $root.CoreML.Specification.MILSpec.Value.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Operation = class Operation { + + constructor() { + this.inputs = {}; + this.outputs = []; + this.blocks = []; + this.attributes = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Operation(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.string(); + break; + case 2: + reader.entry(message.inputs, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Argument.decode(reader, reader.uint32())); + break; + case 3: + message.outputs.push($root.CoreML.Specification.MILSpec.NamedValueType.decode(reader, reader.uint32())); + break; + case 4: + message.blocks.push($root.CoreML.Specification.MILSpec.Block.decode(reader, reader.uint32())); + break; + case 5: + reader.entry(message.attributes, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Value.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Operation(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.string(); + break; + case "inputs": + reader.entry(message.inputs, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Argument.decodeText(reader)); + break; + case "outputs": + message.outputs.push($root.CoreML.Specification.MILSpec.NamedValueType.decodeText(reader)); + break; + case "blocks": + message.blocks.push($root.CoreML.Specification.MILSpec.Block.decodeText(reader)); + break; + case "attributes": + reader.entry(message.attributes, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Value.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Operation.prototype.type = ""; + +$root.CoreML.Specification.MILSpec.NamedValueType = class NamedValueType { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.NamedValueType(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = $root.CoreML.Specification.MILSpec.ValueType.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.NamedValueType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = $root.CoreML.Specification.MILSpec.ValueType.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.NamedValueType.prototype.name = ""; +$root.CoreML.Specification.MILSpec.NamedValueType.prototype.type = null; + +$root.CoreML.Specification.MILSpec.ValueType = class ValueType { + + constructor() { + } + + get type() { + $root.CoreML.Specification.MILSpec.ValueType.typeSet = $root.CoreML.Specification.MILSpec.ValueType.typeSet || new Set([ "tensorType", "listType", "tupleType", "dictionaryType"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.MILSpec.ValueType.typeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.ValueType(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensorType = $root.CoreML.Specification.MILSpec.TensorType.decode(reader, reader.uint32()); + break; + case 2: + message.listType = $root.CoreML.Specification.MILSpec.ListType.decode(reader, reader.uint32()); + break; + case 3: + message.tupleType = $root.CoreML.Specification.MILSpec.TupleType.decode(reader, reader.uint32()); + break; + case 4: + message.dictionaryType = $root.CoreML.Specification.MILSpec.DictionaryType.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.ValueType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "tensorType": + message.tensorType = $root.CoreML.Specification.MILSpec.TensorType.decodeText(reader); + break; + case "listType": + message.listType = $root.CoreML.Specification.MILSpec.ListType.decodeText(reader); + break; + case "tupleType": + message.tupleType = $root.CoreML.Specification.MILSpec.TupleType.decodeText(reader); + break; + case "dictionaryType": + message.dictionaryType = $root.CoreML.Specification.MILSpec.DictionaryType.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.DataType = { + "UNUSED_TYPE": 0, + "BOOL": 1, + "STRING": 2, + "FLOAT16": 10, + "FLOAT32": 11, + "FLOAT64": 12, + "BFLOAT16": 13, + "INT8": 21, + "INT16": 22, + "INT32": 23, + "INT64": 24, + "UINT8": 31, + "UINT16": 32, + "UINT32": 33, + "UINT64": 34 +}; + +$root.CoreML.Specification.MILSpec.TensorType = class TensorType { + + constructor() { + this.dimensions = []; + this.attributes = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.TensorType(); + const end = length !== 
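+
+// The DataType numbering is grouped by kind rather than contiguous: floats at
+// 10..13 (FLOAT16..BFLOAT16), signed ints at 21..24, unsigned ints at 31..34,
+// apparently leaving room for future widths. In the text path the symbolic name
+// is mapped back to its integer with reader.enum($root.CoreML.Specification.MILSpec.DataType).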
undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dataType = reader.int32(); + break; + case 2: + message.rank = reader.int64(); + break; + case 3: + message.dimensions.push($root.CoreML.Specification.MILSpec.Dimension.decode(reader, reader.uint32())); + break; + case 4: + reader.entry(message.attributes, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Value.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.TensorType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dataType": + message.dataType = reader.enum($root.CoreML.Specification.MILSpec.DataType); + break; + case "rank": + message.rank = reader.int64(); + break; + case "dimensions": + message.dimensions.push($root.CoreML.Specification.MILSpec.Dimension.decodeText(reader)); + break; + case "attributes": + reader.entry(message.attributes, () => reader.string(), () => $root.CoreML.Specification.MILSpec.Value.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.TensorType.prototype.dataType = 0; +$root.CoreML.Specification.MILSpec.TensorType.prototype.rank = protobuf.Int64.create(0); + +$root.CoreML.Specification.MILSpec.TupleType = class TupleType { + + constructor() { + this.types = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.TupleType(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.types.push($root.CoreML.Specification.MILSpec.ValueType.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.TupleType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "types": + message.types.push($root.CoreML.Specification.MILSpec.ValueType.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.ListType = class ListType { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.ListType(); + const end = length !== undefined ? 
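+
+// Scalar defaults sit on the prototype (dataType = 0, rank = Int64 zero), so a
+// decoded instance only owns the fields that were present on the wire and
+// everything else falls through to the shared default. Sketch:
+//
+//   const t = new $root.CoreML.Specification.MILSpec.TensorType();
+//   t.dataType;                                           // 0 (UNUSED_TYPE)
+//   Object.prototype.hasOwnProperty.call(t, "dataType");  // false until decoded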
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = $root.CoreML.Specification.MILSpec.ValueType.decode(reader, reader.uint32()); + break; + case 2: + message.length = $root.CoreML.Specification.MILSpec.Dimension.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.ListType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = $root.CoreML.Specification.MILSpec.ValueType.decodeText(reader); + break; + case "length": + message.length = $root.CoreML.Specification.MILSpec.Dimension.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.ListType.prototype.type = null; +$root.CoreML.Specification.MILSpec.ListType.prototype.length = null; + +$root.CoreML.Specification.MILSpec.DictionaryType = class DictionaryType { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.DictionaryType(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.keyType = $root.CoreML.Specification.MILSpec.ValueType.decode(reader, reader.uint32()); + break; + case 2: + message.valueType = $root.CoreML.Specification.MILSpec.ValueType.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.DictionaryType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "keyType": + message.keyType = $root.CoreML.Specification.MILSpec.ValueType.decodeText(reader); + break; + case "valueType": + message.valueType = $root.CoreML.Specification.MILSpec.ValueType.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.DictionaryType.prototype.keyType = null; +$root.CoreML.Specification.MILSpec.DictionaryType.prototype.valueType = null; + +$root.CoreML.Specification.MILSpec.Dimension = class Dimension { + + constructor() { + } + + get dimension() { + $root.CoreML.Specification.MILSpec.Dimension.dimensionSet = $root.CoreML.Specification.MILSpec.Dimension.dimensionSet || new Set([ "constant", "unknown"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.MILSpec.Dimension.dimensionSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Dimension(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.constant = $root.CoreML.Specification.MILSpec.Dimension.ConstantDimension.decode(reader, reader.uint32()); + break; + case 2: + message.unknown = $root.CoreML.Specification.MILSpec.Dimension.UnknownDimension.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Dimension(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "constant": + message.constant = $root.CoreML.Specification.MILSpec.Dimension.ConstantDimension.decodeText(reader); + break; + case "unknown": + message.unknown = $root.CoreML.Specification.MILSpec.Dimension.UnknownDimension.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Dimension.ConstantDimension = class ConstantDimension { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Dimension.ConstantDimension(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Dimension.ConstantDimension(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "size": + message.size = reader.uint64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Dimension.ConstantDimension.prototype.size = protobuf.Uint64.create(0); + +$root.CoreML.Specification.MILSpec.Dimension.UnknownDimension = class UnknownDimension { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Dimension.UnknownDimension(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.variadic = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Dimension.UnknownDimension(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "variadic": + message.variadic = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Dimension.UnknownDimension.prototype.variadic = false; + +$root.CoreML.Specification.MILSpec.Value = class Value { + + constructor() { + } + + get value() { + $root.CoreML.Specification.MILSpec.Value.valueSet = $root.CoreML.Specification.MILSpec.Value.valueSet || new Set([ "immediateValue", "blobFileValue"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.MILSpec.Value.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Value(); + const end = length !== undefined ? 
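+
+// A Dimension is either a ConstantDimension (fixed uint64 'size') or an
+// UnknownDimension (with a 'variadic' flag), i.e. a concrete versus symbolic
+// shape entry. A rough display sketch, assuming 'tensorType' came from
+// TensorType.decode() above:
+//
+//   const dims = tensorType.dimensions.map((d) =>
+//     d.dimension === "constant" ? d.constant.size : "?");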
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.docString = reader.string(); + break; + case 2: + message.type = $root.CoreML.Specification.MILSpec.ValueType.decode(reader, reader.uint32()); + break; + case 3: + message.immediateValue = $root.CoreML.Specification.MILSpec.Value.ImmediateValue.decode(reader, reader.uint32()); + break; + case 5: + message.blobFileValue = $root.CoreML.Specification.MILSpec.Value.BlobFileValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Value(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "docString": + message.docString = reader.string(); + break; + case "type": + message.type = $root.CoreML.Specification.MILSpec.ValueType.decodeText(reader); + break; + case "immediateValue": + message.immediateValue = $root.CoreML.Specification.MILSpec.Value.ImmediateValue.decodeText(reader); + break; + case "blobFileValue": + message.blobFileValue = $root.CoreML.Specification.MILSpec.Value.BlobFileValue.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Value.prototype.docString = ""; +$root.CoreML.Specification.MILSpec.Value.prototype.type = null; + +$root.CoreML.Specification.MILSpec.Value.ImmediateValue = class ImmediateValue { + + constructor() { + } + + get value() { + $root.CoreML.Specification.MILSpec.Value.ImmediateValue.valueSet = $root.CoreML.Specification.MILSpec.Value.ImmediateValue.valueSet || new Set([ "tensor", "tuple", "list", "dictionary"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.MILSpec.Value.ImmediateValue.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Value.ImmediateValue(); + const end = length !== undefined ? 
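+
+// A MILSpec.Value pairs a ValueType with either an ImmediateValue embedded in
+// the program or a BlobFileValue referencing an external weights file; note the
+// jump from case 3 to case 5, field number 4 is simply unused here. Dispatch
+// sketch over the oneof getter:
+//
+//   if (value.value === "immediateValue") {
+//     // value.immediateValue.tensor / .tuple / .list / .dictionary
+//   } else if (value.value === "blobFileValue") {
+//     // read value.blobFileValue.fileName at byte offset value.blobFileValue.offset
+//   }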
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor = $root.CoreML.Specification.MILSpec.TensorValue.decode(reader, reader.uint32()); + break; + case 2: + message.tuple = $root.CoreML.Specification.MILSpec.TupleValue.decode(reader, reader.uint32()); + break; + case 3: + message.list = $root.CoreML.Specification.MILSpec.ListValue.decode(reader, reader.uint32()); + break; + case 4: + message.dictionary = $root.CoreML.Specification.MILSpec.DictionaryValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Value.ImmediateValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "tensor": + message.tensor = $root.CoreML.Specification.MILSpec.TensorValue.decodeText(reader); + break; + case "tuple": + message.tuple = $root.CoreML.Specification.MILSpec.TupleValue.decodeText(reader); + break; + case "list": + message.list = $root.CoreML.Specification.MILSpec.ListValue.decodeText(reader); + break; + case "dictionary": + message.dictionary = $root.CoreML.Specification.MILSpec.DictionaryValue.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Value.BlobFileValue = class BlobFileValue { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.Value.BlobFileValue(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.fileName = reader.string(); + break; + case 2: + message.offset = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.Value.BlobFileValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "fileName": + message.fileName = reader.string(); + break; + case "offset": + message.offset = reader.uint64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.Value.BlobFileValue.prototype.fileName = ""; +$root.CoreML.Specification.MILSpec.Value.BlobFileValue.prototype.offset = protobuf.Uint64.create(0); + +$root.CoreML.Specification.MILSpec.TensorValue = class TensorValue { + + constructor() { + } + + get value() { + $root.CoreML.Specification.MILSpec.TensorValue.valueSet = $root.CoreML.Specification.MILSpec.TensorValue.valueSet || new Set([ "floats", "ints", "bools", "strings", "longInts", "doubles", "bytes"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.MILSpec.TensorValue.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.floats = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedFloats.decode(reader, reader.uint32()); + break; + case 2: + message.ints = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedInts.decode(reader, reader.uint32()); + break; + case 3: + message.bools = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedBools.decode(reader, reader.uint32()); + break; + case 4: + message.strings = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedStrings.decode(reader, reader.uint32()); + break; + case 5: + message.longInts = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedLongInts.decode(reader, reader.uint32()); + break; + case 6: + message.doubles = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedDoubles.decode(reader, reader.uint32()); + break; + case 7: + message.bytes = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedBytes.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "floats": + message.floats = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedFloats.decodeText(reader); + break; + case "ints": + message.ints = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedInts.decodeText(reader); + break; + case "bools": + message.bools = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedBools.decodeText(reader); + break; + case "strings": + message.strings = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedStrings.decodeText(reader); + break; + case "longInts": + message.longInts = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedLongInts.decodeText(reader); + break; + case "doubles": + message.doubles = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedDoubles.decodeText(reader); + break; + case "bytes": + message.bytes = $root.CoreML.Specification.MILSpec.TensorValue.RepeatedBytes.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.TensorValue.RepeatedFloats = class RepeatedFloats { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedFloats(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values = reader.floats(message.values, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedFloats(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + reader.array(message.values, () => reader.float()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.TensorValue.RepeatedDoubles = class RepeatedDoubles { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedDoubles(); + const end = length !== undefined ? 
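+
+// Repeated numeric tensor payloads pass the tag into reader.floats() /
+// reader.array(values, fn, tag) so that both wire encodings proto3 permits,
+// packed (one length-delimited run) and unpacked (one element per tag), are
+// accepted.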
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values = reader.doubles(message.values, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedDoubles(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + reader.array(message.values, () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.TensorValue.RepeatedInts = class RepeatedInts { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedInts(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values = reader.array(message.values, () => reader.int32(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedInts(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + reader.array(message.values, () => reader.int32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.TensorValue.RepeatedLongInts = class RepeatedLongInts { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedLongInts(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values = reader.array(message.values, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedLongInts(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + reader.array(message.values, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.TensorValue.RepeatedBools = class RepeatedBools { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedBools(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values = reader.array(message.values, () => reader.bool(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedBools(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + reader.array(message.values, () => reader.bool()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.TensorValue.RepeatedStrings = class RepeatedStrings { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedStrings(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedStrings(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + reader.array(message.values, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.TensorValue.RepeatedBytes = class RepeatedBytes { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedBytes(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.TensorValue.RepeatedBytes(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + message.values = reader.bytes(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.TensorValue.RepeatedBytes.prototype.values = new Uint8Array([]); + +$root.CoreML.Specification.MILSpec.TupleValue = class TupleValue { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.TupleValue(); + const end = length !== undefined ? 
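+
+// Strings are length-delimited and can never be packed, so RepeatedStrings
+// pushes one value per tag occurrence, while RepeatedBytes keeps a single
+// Uint8Array payload whose empty default lives on the prototype.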
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values.push($root.CoreML.Specification.MILSpec.Value.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.TupleValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + message.values.push($root.CoreML.Specification.MILSpec.Value.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.ListValue = class ListValue { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.ListValue(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values.push($root.CoreML.Specification.MILSpec.Value.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.ListValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + message.values.push($root.CoreML.Specification.MILSpec.Value.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.DictionaryValue = class DictionaryValue { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.DictionaryValue(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values.push($root.CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.DictionaryValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + message.values.push($root.CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair = class KeyValuePair { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = $root.CoreML.Specification.MILSpec.Value.decode(reader, reader.uint32()); + break; + case 2: + message.value = $root.CoreML.Specification.MILSpec.Value.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "key": + message.key = $root.CoreML.Specification.MILSpec.Value.decodeText(reader); + break; + case "value": + message.value = $root.CoreML.Specification.MILSpec.Value.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair.prototype.key = null; +$root.CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair.prototype.value = null; + +$root.CoreML.Specification.NeuralNetworkMultiArrayShapeMapping = { + "RANK5_ARRAY_MAPPING": 0, + "EXACT_ARRAY_MAPPING": 1 +}; + +$root.CoreML.Specification.NeuralNetworkImageShapeMapping = { + "RANK5_IMAGE_MAPPING": 0, + "RANK4_IMAGE_MAPPING": 1 +}; + +$root.CoreML.Specification.NeuralNetwork = class NeuralNetwork { + + constructor() { + this.layers = []; + this.preprocessing = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NeuralNetwork(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.layers.push($root.CoreML.Specification.NeuralNetworkLayer.decode(reader, reader.uint32())); + break; + case 2: + message.preprocessing.push($root.CoreML.Specification.NeuralNetworkPreprocessing.decode(reader, reader.uint32())); + break; + case 5: + message.arrayInputShapeMapping = reader.int32(); + break; + case 6: + message.imageInputShapeMapping = reader.int32(); + break; + case 10: + message.updateParams = $root.CoreML.Specification.NetworkUpdateParameters.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NeuralNetwork(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "layers": + message.layers.push($root.CoreML.Specification.NeuralNetworkLayer.decodeText(reader)); + break; + case "preprocessing": + message.preprocessing.push($root.CoreML.Specification.NeuralNetworkPreprocessing.decodeText(reader)); + break; + case "arrayInputShapeMapping": + message.arrayInputShapeMapping = reader.enum($root.CoreML.Specification.NeuralNetworkMultiArrayShapeMapping); + break; + case "imageInputShapeMapping": + message.imageInputShapeMapping = reader.enum($root.CoreML.Specification.NeuralNetworkImageShapeMapping); + break; + case "updateParams": + message.updateParams = $root.CoreML.Specification.NetworkUpdateParameters.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NeuralNetwork.prototype.arrayInputShapeMapping = 0; +$root.CoreML.Specification.NeuralNetwork.prototype.imageInputShapeMapping = 0; +$root.CoreML.Specification.NeuralNetwork.prototype.updateParams = 
null; + +$root.CoreML.Specification.NeuralNetworkImageScaler = class NeuralNetworkImageScaler { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NeuralNetworkImageScaler(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.channelScale = reader.float(); + break; + case 20: + message.blueBias = reader.float(); + break; + case 21: + message.greenBias = reader.float(); + break; + case 22: + message.redBias = reader.float(); + break; + case 30: + message.grayBias = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NeuralNetworkImageScaler(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "channelScale": + message.channelScale = reader.float(); + break; + case "blueBias": + message.blueBias = reader.float(); + break; + case "greenBias": + message.greenBias = reader.float(); + break; + case "redBias": + message.redBias = reader.float(); + break; + case "grayBias": + message.grayBias = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NeuralNetworkImageScaler.prototype.channelScale = 0; +$root.CoreML.Specification.NeuralNetworkImageScaler.prototype.blueBias = 0; +$root.CoreML.Specification.NeuralNetworkImageScaler.prototype.greenBias = 0; +$root.CoreML.Specification.NeuralNetworkImageScaler.prototype.redBias = 0; +$root.CoreML.Specification.NeuralNetworkImageScaler.prototype.grayBias = 0; + +$root.CoreML.Specification.NeuralNetworkMeanImage = class NeuralNetworkMeanImage { + + constructor() { + this.meanImage = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NeuralNetworkMeanImage(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.meanImage = reader.floats(message.meanImage, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NeuralNetworkMeanImage(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "meanImage": + reader.array(message.meanImage, () => reader.float()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NeuralNetworkPreprocessing = class NeuralNetworkPreprocessing { + + constructor() { + } + + get preprocessor() { + $root.CoreML.Specification.NeuralNetworkPreprocessing.preprocessorSet = $root.CoreML.Specification.NeuralNetworkPreprocessing.preprocessorSet || new Set([ "scaler", "meanImage"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.NeuralNetworkPreprocessing.preprocessorSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NeuralNetworkPreprocessing(); + const end = length !== undefined ? 
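+
+// NeuralNetworkImageScaler describes linear image preprocessing: each channel
+// is scaled by channelScale and then shifted by its bias (red/green/blue for
+// color inputs, grayBias for grayscale), roughly out = channelScale * in + bias
+// per channel.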
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.featureName = reader.string(); + break; + case 10: + message.scaler = $root.CoreML.Specification.NeuralNetworkImageScaler.decode(reader, reader.uint32()); + break; + case 11: + message.meanImage = $root.CoreML.Specification.NeuralNetworkMeanImage.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NeuralNetworkPreprocessing(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "featureName": + message.featureName = reader.string(); + break; + case "scaler": + message.scaler = $root.CoreML.Specification.NeuralNetworkImageScaler.decodeText(reader); + break; + case "meanImage": + message.meanImage = $root.CoreML.Specification.NeuralNetworkMeanImage.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NeuralNetworkPreprocessing.prototype.featureName = ""; + +$root.CoreML.Specification.ActivationReLU = class ActivationReLU { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationReLU(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationReLU(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationLeakyReLU = class ActivationLeakyReLU { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationLeakyReLU(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationLeakyReLU(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationLeakyReLU.prototype.alpha = 0; + +$root.CoreML.Specification.ActivationTanh = class ActivationTanh { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationTanh(); + const end = length !== undefined ? 
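+
+// Parameterless activations such as ReLU, Tanh and Sigmoid are empty messages:
+// their decode loops contain only the default arm that skips unknown fields,
+// and the message's presence alone selects the nonlinearity.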
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationTanh(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationScaledTanh = class ActivationScaledTanh { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationScaledTanh(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationScaledTanh(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta": + message.beta = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationScaledTanh.prototype.alpha = 0; +$root.CoreML.Specification.ActivationScaledTanh.prototype.beta = 0; + +$root.CoreML.Specification.ActivationSigmoid = class ActivationSigmoid { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationSigmoid(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationSigmoid(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationLinear = class ActivationLinear { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationLinear(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationLinear(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta": + message.beta = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationLinear.prototype.alpha = 0; +$root.CoreML.Specification.ActivationLinear.prototype.beta = 0; + +$root.CoreML.Specification.ActivationSigmoidHard = class ActivationSigmoidHard { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationSigmoidHard(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationSigmoidHard(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta": + message.beta = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationSigmoidHard.prototype.alpha = 0; +$root.CoreML.Specification.ActivationSigmoidHard.prototype.beta = 0; + +$root.CoreML.Specification.ActivationPReLU = class ActivationPReLU { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationPReLU(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationPReLU(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationPReLU.prototype.alpha = null; + +$root.CoreML.Specification.ActivationELU = class ActivationELU { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationELU(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationELU(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationELU.prototype.alpha = 0; + +$root.CoreML.Specification.ActivationThresholdedReLU = class ActivationThresholdedReLU { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationThresholdedReLU(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationThresholdedReLU(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationThresholdedReLU.prototype.alpha = 0; + +$root.CoreML.Specification.ActivationSoftsign = class ActivationSoftsign { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationSoftsign(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationSoftsign(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationSoftplus = class ActivationSoftplus { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationSoftplus(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationSoftplus(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationParametricSoftplus = class ActivationParametricSoftplus { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationParametricSoftplus(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 2: + message.beta = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationParametricSoftplus(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "beta": + message.beta = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ActivationParametricSoftplus.prototype.alpha = null; +$root.CoreML.Specification.ActivationParametricSoftplus.prototype.beta = null; + +$root.CoreML.Specification.ActivationParams = class ActivationParams { + + constructor() { + } + + get NonlinearityType() { + $root.CoreML.Specification.ActivationParams.NonlinearityTypeSet = $root.CoreML.Specification.ActivationParams.NonlinearityTypeSet || new Set([ "linear", "ReLU", "leakyReLU", "thresholdedReLU", "PReLU", "tanh", "scaledTanh", "sigmoid", "sigmoidHard", "ELU", "softsign", "softplus", "parametricSoftplus"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.ActivationParams.NonlinearityTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ActivationParams(); + const end = length !== undefined ? 
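+ // ActivationParams models the proto "oneof NonlinearityType": the getter
+ // above lazily builds a Set of candidate field names and returns whichever
+ // one is populated on this instance. The sparse case numbers below (5, 10,
+ // 15, ... 71) are the field numbers reserved in the CoreML .proto, not
+ // sequential indices.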
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 5: + message.linear = $root.CoreML.Specification.ActivationLinear.decode(reader, reader.uint32()); + break; + case 10: + message.ReLU = $root.CoreML.Specification.ActivationReLU.decode(reader, reader.uint32()); + break; + case 15: + message.leakyReLU = $root.CoreML.Specification.ActivationLeakyReLU.decode(reader, reader.uint32()); + break; + case 20: + message.thresholdedReLU = $root.CoreML.Specification.ActivationThresholdedReLU.decode(reader, reader.uint32()); + break; + case 25: + message.PReLU = $root.CoreML.Specification.ActivationPReLU.decode(reader, reader.uint32()); + break; + case 30: + message.tanh = $root.CoreML.Specification.ActivationTanh.decode(reader, reader.uint32()); + break; + case 31: + message.scaledTanh = $root.CoreML.Specification.ActivationScaledTanh.decode(reader, reader.uint32()); + break; + case 40: + message.sigmoid = $root.CoreML.Specification.ActivationSigmoid.decode(reader, reader.uint32()); + break; + case 41: + message.sigmoidHard = $root.CoreML.Specification.ActivationSigmoidHard.decode(reader, reader.uint32()); + break; + case 50: + message.ELU = $root.CoreML.Specification.ActivationELU.decode(reader, reader.uint32()); + break; + case 60: + message.softsign = $root.CoreML.Specification.ActivationSoftsign.decode(reader, reader.uint32()); + break; + case 70: + message.softplus = $root.CoreML.Specification.ActivationSoftplus.decode(reader, reader.uint32()); + break; + case 71: + message.parametricSoftplus = $root.CoreML.Specification.ActivationParametricSoftplus.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ActivationParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "linear": + message.linear = $root.CoreML.Specification.ActivationLinear.decodeText(reader); + break; + case "ReLU": + message.ReLU = $root.CoreML.Specification.ActivationReLU.decodeText(reader); + break; + case "leakyReLU": + message.leakyReLU = $root.CoreML.Specification.ActivationLeakyReLU.decodeText(reader); + break; + case "thresholdedReLU": + message.thresholdedReLU = $root.CoreML.Specification.ActivationThresholdedReLU.decodeText(reader); + break; + case "PReLU": + message.PReLU = $root.CoreML.Specification.ActivationPReLU.decodeText(reader); + break; + case "tanh": + message.tanh = $root.CoreML.Specification.ActivationTanh.decodeText(reader); + break; + case "scaledTanh": + message.scaledTanh = $root.CoreML.Specification.ActivationScaledTanh.decodeText(reader); + break; + case "sigmoid": + message.sigmoid = $root.CoreML.Specification.ActivationSigmoid.decodeText(reader); + break; + case "sigmoidHard": + message.sigmoidHard = $root.CoreML.Specification.ActivationSigmoidHard.decodeText(reader); + break; + case "ELU": + message.ELU = $root.CoreML.Specification.ActivationELU.decodeText(reader); + break; + case "softsign": + message.softsign = $root.CoreML.Specification.ActivationSoftsign.decodeText(reader); + break; + case "softplus": + message.softplus = $root.CoreML.Specification.ActivationSoftplus.decodeText(reader); + break; + case "parametricSoftplus": + message.parametricSoftplus = $root.CoreML.Specification.ActivationParametricSoftplus.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } 
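+ // decodeText() is the text-format twin of decode(): it dispatches on field
+ // names instead of field numbers, so a layer written as
+ //   activation { ELU { alpha: 0.5 } }
+ // routes through the "ELU" case above. A minimal, illustrative sketch
+ // (the reader helper comes from this repo's protobuf.js; treat the exact
+ // construction as an assumption):
+ //   const reader = protobuf.TextReader.open('ELU { alpha: 0.5 }');
+ //   const params = $root.CoreML.Specification.ActivationParams.decodeText(reader);
+ //   params.NonlinearityType; // -> "ELU"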
+}; + +$root.CoreML.Specification.Tensor = class Tensor { + + constructor() { + this.dimValue = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Tensor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.rank = reader.uint32(); + break; + case 2: + message.dimValue = reader.array(message.dimValue, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Tensor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "rank": + message.rank = reader.uint32(); + break; + case "dimValue": + reader.array(message.dimValue, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Tensor.prototype.rank = 0; + +$root.CoreML.Specification.NeuralNetworkLayer = class NeuralNetworkLayer { + + constructor() { + this.input = []; + this.output = []; + this.inputTensor = []; + this.outputTensor = []; + } + + get layer() { + $root.CoreML.Specification.NeuralNetworkLayer.layerSet = $root.CoreML.Specification.NeuralNetworkLayer.layerSet || new Set([ "convolution", "pooling", "activation", "innerProduct", "embedding", "batchnorm", "mvn", "l2normalize", "softmax", "lrn", "crop", "padding", "upsample", "resizeBilinear", "cropResize", "unary", "add", "multiply", "average", "scale", "bias", "max", "min", "dot", "reduce", "loadConstant", "reshape", "flatten", "permute", "concat", "split", "sequenceRepeat", "reorganizeData", "slice", "simpleRecurrent", "gru", "uniDirectionalLSTM", "biDirectionalLSTM", "custom", "copy", "branch", "loop", "loopBreak", "loopContinue", "rangeStatic", "rangeDynamic", "clip", "ceil", "floor", "sign", "round", "exp2", "sin", "cos", "tan", "asin", "acos", "atan", "sinh", "cosh", "tanh", "asinh", "acosh", "atanh", "erf", "gelu", "equal", "notEqual", "lessThan", "lessEqual", "greaterThan", "greaterEqual", "logicalOr", "logicalXor", "logicalNot", "logicalAnd", "modBroadcastable", "minBroadcastable", "maxBroadcastable", "addBroadcastable", "powBroadcastable", "divideBroadcastable", "floorDivBroadcastable", "multiplyBroadcastable", "subtractBroadcastable", "tile", "stack", "gather", "scatter", "gatherND", "scatterND", "softmaxND", "gatherAlongAxis", "scatterAlongAxis", "reverse", "reverseSeq", "splitND", "concatND", "transpose", "sliceStatic", "sliceDynamic", "slidingWindows", "topK", "argMin", "argMax", "embeddingND", "batchedMatmul", "getShape", "loadConstantND", "fillLike", "fillStatic", "fillDynamic", "broadcastToLike", "broadcastToStatic", "broadcastToDynamic", "squeeze", "expandDims", "flattenTo2D", "reshapeLike", "reshapeStatic", "reshapeDynamic", "rankPreservingReshape", "constantPad", "randomNormalLike", "randomNormalStatic", "randomNormalDynamic", "randomUniformLike", "randomUniformStatic", "randomUniformDynamic", "randomBernoulliLike", "randomBernoulliStatic", "randomBernoulliDynamic", "categoricalDistribution", "reduceL1", "reduceL2", "reduceMax", "reduceMin", "reduceSum", "reduceProd", "reduceMean", "reduceLogSum", "reduceSumSquare", "reduceLogSumExp", "whereNonZero", "matrixBandPart", "lowerTriangular", "upperTriangular", "whereBroadcastable", "layerNormalization", "NonMaximumSuppression", "oneHot", "cumSum", "clampedReLU", "argSort", 
"pooling3d", "globalPooling3d", "sliceBySize", "convolution3d"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.NeuralNetworkLayer.layerSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NeuralNetworkLayer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.input.push(reader.string()); + break; + case 3: + message.output.push(reader.string()); + break; + case 4: + message.inputTensor.push($root.CoreML.Specification.Tensor.decode(reader, reader.uint32())); + break; + case 5: + message.outputTensor.push($root.CoreML.Specification.Tensor.decode(reader, reader.uint32())); + break; + case 10: + message.isUpdatable = reader.bool(); + break; + case 100: + message.convolution = $root.CoreML.Specification.ConvolutionLayerParams.decode(reader, reader.uint32()); + break; + case 120: + message.pooling = $root.CoreML.Specification.PoolingLayerParams.decode(reader, reader.uint32()); + break; + case 130: + message.activation = $root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32()); + break; + case 140: + message.innerProduct = $root.CoreML.Specification.InnerProductLayerParams.decode(reader, reader.uint32()); + break; + case 150: + message.embedding = $root.CoreML.Specification.EmbeddingLayerParams.decode(reader, reader.uint32()); + break; + case 160: + message.batchnorm = $root.CoreML.Specification.BatchnormLayerParams.decode(reader, reader.uint32()); + break; + case 165: + message.mvn = $root.CoreML.Specification.MeanVarianceNormalizeLayerParams.decode(reader, reader.uint32()); + break; + case 170: + message.l2normalize = $root.CoreML.Specification.L2NormalizeLayerParams.decode(reader, reader.uint32()); + break; + case 175: + message.softmax = $root.CoreML.Specification.SoftmaxLayerParams.decode(reader, reader.uint32()); + break; + case 180: + message.lrn = $root.CoreML.Specification.LRNLayerParams.decode(reader, reader.uint32()); + break; + case 190: + message.crop = $root.CoreML.Specification.CropLayerParams.decode(reader, reader.uint32()); + break; + case 200: + message.padding = $root.CoreML.Specification.PaddingLayerParams.decode(reader, reader.uint32()); + break; + case 210: + message.upsample = $root.CoreML.Specification.UpsampleLayerParams.decode(reader, reader.uint32()); + break; + case 211: + message.resizeBilinear = $root.CoreML.Specification.ResizeBilinearLayerParams.decode(reader, reader.uint32()); + break; + case 212: + message.cropResize = $root.CoreML.Specification.CropResizeLayerParams.decode(reader, reader.uint32()); + break; + case 220: + message.unary = $root.CoreML.Specification.UnaryFunctionLayerParams.decode(reader, reader.uint32()); + break; + case 230: + message.add = $root.CoreML.Specification.AddLayerParams.decode(reader, reader.uint32()); + break; + case 231: + message.multiply = $root.CoreML.Specification.MultiplyLayerParams.decode(reader, reader.uint32()); + break; + case 240: + message.average = $root.CoreML.Specification.AverageLayerParams.decode(reader, reader.uint32()); + break; + case 245: + message.scale = $root.CoreML.Specification.ScaleLayerParams.decode(reader, reader.uint32()); + break; + case 250: + message.bias = $root.CoreML.Specification.BiasLayerParams.decode(reader, reader.uint32()); + break; + case 260: + message.max = 
$root.CoreML.Specification.MaxLayerParams.decode(reader, reader.uint32()); + break; + case 261: + message.min = $root.CoreML.Specification.MinLayerParams.decode(reader, reader.uint32()); + break; + case 270: + message.dot = $root.CoreML.Specification.DotProductLayerParams.decode(reader, reader.uint32()); + break; + case 280: + message.reduce = $root.CoreML.Specification.ReduceLayerParams.decode(reader, reader.uint32()); + break; + case 290: + message.loadConstant = $root.CoreML.Specification.LoadConstantLayerParams.decode(reader, reader.uint32()); + break; + case 300: + message.reshape = $root.CoreML.Specification.ReshapeLayerParams.decode(reader, reader.uint32()); + break; + case 301: + message.flatten = $root.CoreML.Specification.FlattenLayerParams.decode(reader, reader.uint32()); + break; + case 310: + message.permute = $root.CoreML.Specification.PermuteLayerParams.decode(reader, reader.uint32()); + break; + case 320: + message.concat = $root.CoreML.Specification.ConcatLayerParams.decode(reader, reader.uint32()); + break; + case 330: + message.split = $root.CoreML.Specification.SplitLayerParams.decode(reader, reader.uint32()); + break; + case 340: + message.sequenceRepeat = $root.CoreML.Specification.SequenceRepeatLayerParams.decode(reader, reader.uint32()); + break; + case 345: + message.reorganizeData = $root.CoreML.Specification.ReorganizeDataLayerParams.decode(reader, reader.uint32()); + break; + case 350: + message.slice = $root.CoreML.Specification.SliceLayerParams.decode(reader, reader.uint32()); + break; + case 400: + message.simpleRecurrent = $root.CoreML.Specification.SimpleRecurrentLayerParams.decode(reader, reader.uint32()); + break; + case 410: + message.gru = $root.CoreML.Specification.GRULayerParams.decode(reader, reader.uint32()); + break; + case 420: + message.uniDirectionalLSTM = $root.CoreML.Specification.UniDirectionalLSTMLayerParams.decode(reader, reader.uint32()); + break; + case 430: + message.biDirectionalLSTM = $root.CoreML.Specification.BiDirectionalLSTMLayerParams.decode(reader, reader.uint32()); + break; + case 500: + message.custom = $root.CoreML.Specification.CustomLayerParams.decode(reader, reader.uint32()); + break; + case 600: + message.copy = $root.CoreML.Specification.CopyLayerParams.decode(reader, reader.uint32()); + break; + case 605: + message.branch = $root.CoreML.Specification.BranchLayerParams.decode(reader, reader.uint32()); + break; + case 615: + message.loop = $root.CoreML.Specification.LoopLayerParams.decode(reader, reader.uint32()); + break; + case 620: + message.loopBreak = $root.CoreML.Specification.LoopBreakLayerParams.decode(reader, reader.uint32()); + break; + case 625: + message.loopContinue = $root.CoreML.Specification.LoopContinueLayerParams.decode(reader, reader.uint32()); + break; + case 635: + message.rangeStatic = $root.CoreML.Specification.RangeStaticLayerParams.decode(reader, reader.uint32()); + break; + case 640: + message.rangeDynamic = $root.CoreML.Specification.RangeDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 660: + message.clip = $root.CoreML.Specification.ClipLayerParams.decode(reader, reader.uint32()); + break; + case 665: + message.ceil = $root.CoreML.Specification.CeilLayerParams.decode(reader, reader.uint32()); + break; + case 670: + message.floor = $root.CoreML.Specification.FloorLayerParams.decode(reader, reader.uint32()); + break; + case 680: + message.sign = $root.CoreML.Specification.SignLayerParams.decode(reader, reader.uint32()); + break; + case 685: + message.round = 
$root.CoreML.Specification.RoundLayerParams.decode(reader, reader.uint32()); + break; + case 700: + message.exp2 = $root.CoreML.Specification.Exp2LayerParams.decode(reader, reader.uint32()); + break; + case 710: + message.sin = $root.CoreML.Specification.SinLayerParams.decode(reader, reader.uint32()); + break; + case 715: + message.cos = $root.CoreML.Specification.CosLayerParams.decode(reader, reader.uint32()); + break; + case 720: + message.tan = $root.CoreML.Specification.TanLayerParams.decode(reader, reader.uint32()); + break; + case 730: + message.asin = $root.CoreML.Specification.AsinLayerParams.decode(reader, reader.uint32()); + break; + case 735: + message.acos = $root.CoreML.Specification.AcosLayerParams.decode(reader, reader.uint32()); + break; + case 740: + message.atan = $root.CoreML.Specification.AtanLayerParams.decode(reader, reader.uint32()); + break; + case 750: + message.sinh = $root.CoreML.Specification.SinhLayerParams.decode(reader, reader.uint32()); + break; + case 755: + message.cosh = $root.CoreML.Specification.CoshLayerParams.decode(reader, reader.uint32()); + break; + case 760: + message.tanh = $root.CoreML.Specification.TanhLayerParams.decode(reader, reader.uint32()); + break; + case 770: + message.asinh = $root.CoreML.Specification.AsinhLayerParams.decode(reader, reader.uint32()); + break; + case 775: + message.acosh = $root.CoreML.Specification.AcoshLayerParams.decode(reader, reader.uint32()); + break; + case 780: + message.atanh = $root.CoreML.Specification.AtanhLayerParams.decode(reader, reader.uint32()); + break; + case 790: + message.erf = $root.CoreML.Specification.ErfLayerParams.decode(reader, reader.uint32()); + break; + case 795: + message.gelu = $root.CoreML.Specification.GeluLayerParams.decode(reader, reader.uint32()); + break; + case 815: + message.equal = $root.CoreML.Specification.EqualLayerParams.decode(reader, reader.uint32()); + break; + case 820: + message.notEqual = $root.CoreML.Specification.NotEqualLayerParams.decode(reader, reader.uint32()); + break; + case 825: + message.lessThan = $root.CoreML.Specification.LessThanLayerParams.decode(reader, reader.uint32()); + break; + case 827: + message.lessEqual = $root.CoreML.Specification.LessEqualLayerParams.decode(reader, reader.uint32()); + break; + case 830: + message.greaterThan = $root.CoreML.Specification.GreaterThanLayerParams.decode(reader, reader.uint32()); + break; + case 832: + message.greaterEqual = $root.CoreML.Specification.GreaterEqualLayerParams.decode(reader, reader.uint32()); + break; + case 840: + message.logicalOr = $root.CoreML.Specification.LogicalOrLayerParams.decode(reader, reader.uint32()); + break; + case 845: + message.logicalXor = $root.CoreML.Specification.LogicalXorLayerParams.decode(reader, reader.uint32()); + break; + case 850: + message.logicalNot = $root.CoreML.Specification.LogicalNotLayerParams.decode(reader, reader.uint32()); + break; + case 855: + message.logicalAnd = $root.CoreML.Specification.LogicalAndLayerParams.decode(reader, reader.uint32()); + break; + case 865: + message.modBroadcastable = $root.CoreML.Specification.ModBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 870: + message.minBroadcastable = $root.CoreML.Specification.MinBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 875: + message.maxBroadcastable = $root.CoreML.Specification.MaxBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 880: + message.addBroadcastable = 
$root.CoreML.Specification.AddBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 885: + message.powBroadcastable = $root.CoreML.Specification.PowBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 890: + message.divideBroadcastable = $root.CoreML.Specification.DivideBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 895: + message.floorDivBroadcastable = $root.CoreML.Specification.FloorDivBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 900: + message.multiplyBroadcastable = $root.CoreML.Specification.MultiplyBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 905: + message.subtractBroadcastable = $root.CoreML.Specification.SubtractBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 920: + message.tile = $root.CoreML.Specification.TileLayerParams.decode(reader, reader.uint32()); + break; + case 925: + message.stack = $root.CoreML.Specification.StackLayerParams.decode(reader, reader.uint32()); + break; + case 930: + message.gather = $root.CoreML.Specification.GatherLayerParams.decode(reader, reader.uint32()); + break; + case 935: + message.scatter = $root.CoreML.Specification.ScatterLayerParams.decode(reader, reader.uint32()); + break; + case 940: + message.gatherND = $root.CoreML.Specification.GatherNDLayerParams.decode(reader, reader.uint32()); + break; + case 945: + message.scatterND = $root.CoreML.Specification.ScatterNDLayerParams.decode(reader, reader.uint32()); + break; + case 950: + message.softmaxND = $root.CoreML.Specification.SoftmaxNDLayerParams.decode(reader, reader.uint32()); + break; + case 952: + message.gatherAlongAxis = $root.CoreML.Specification.GatherAlongAxisLayerParams.decode(reader, reader.uint32()); + break; + case 954: + message.scatterAlongAxis = $root.CoreML.Specification.ScatterAlongAxisLayerParams.decode(reader, reader.uint32()); + break; + case 960: + message.reverse = $root.CoreML.Specification.ReverseLayerParams.decode(reader, reader.uint32()); + break; + case 965: + message.reverseSeq = $root.CoreML.Specification.ReverseSeqLayerParams.decode(reader, reader.uint32()); + break; + case 975: + message.splitND = $root.CoreML.Specification.SplitNDLayerParams.decode(reader, reader.uint32()); + break; + case 980: + message.concatND = $root.CoreML.Specification.ConcatNDLayerParams.decode(reader, reader.uint32()); + break; + case 985: + message.transpose = $root.CoreML.Specification.TransposeLayerParams.decode(reader, reader.uint32()); + break; + case 995: + message.sliceStatic = $root.CoreML.Specification.SliceStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1000: + message.sliceDynamic = $root.CoreML.Specification.SliceDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1005: + message.slidingWindows = $root.CoreML.Specification.SlidingWindowsLayerParams.decode(reader, reader.uint32()); + break; + case 1015: + message.topK = $root.CoreML.Specification.TopKLayerParams.decode(reader, reader.uint32()); + break; + case 1020: + message.argMin = $root.CoreML.Specification.ArgMinLayerParams.decode(reader, reader.uint32()); + break; + case 1025: + message.argMax = $root.CoreML.Specification.ArgMaxLayerParams.decode(reader, reader.uint32()); + break; + case 1040: + message.embeddingND = $root.CoreML.Specification.EmbeddingNDLayerParams.decode(reader, reader.uint32()); + break; + case 1045: + message.batchedMatmul = $root.CoreML.Specification.BatchedMatMulLayerParams.decode(reader, reader.uint32()); + 
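+ // Recurring shape worth noting: reader.uint32() first consumes the varint
+ // length prefix of the submessage, then the nested type's decode() reads
+ // exactly that many bytes. This is how protobuf wire type 2
+ // (length-delimited) composes arbitrarily deep messages.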
break; + case 1065: + message.getShape = $root.CoreML.Specification.GetShapeLayerParams.decode(reader, reader.uint32()); + break; + case 1070: + message.loadConstantND = $root.CoreML.Specification.LoadConstantNDLayerParams.decode(reader, reader.uint32()); + break; + case 1080: + message.fillLike = $root.CoreML.Specification.FillLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1085: + message.fillStatic = $root.CoreML.Specification.FillStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1090: + message.fillDynamic = $root.CoreML.Specification.FillDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1100: + message.broadcastToLike = $root.CoreML.Specification.BroadcastToLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1105: + message.broadcastToStatic = $root.CoreML.Specification.BroadcastToStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1110: + message.broadcastToDynamic = $root.CoreML.Specification.BroadcastToDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1120: + message.squeeze = $root.CoreML.Specification.SqueezeLayerParams.decode(reader, reader.uint32()); + break; + case 1125: + message.expandDims = $root.CoreML.Specification.ExpandDimsLayerParams.decode(reader, reader.uint32()); + break; + case 1130: + message.flattenTo2D = $root.CoreML.Specification.FlattenTo2DLayerParams.decode(reader, reader.uint32()); + break; + case 1135: + message.reshapeLike = $root.CoreML.Specification.ReshapeLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1140: + message.reshapeStatic = $root.CoreML.Specification.ReshapeStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1145: + message.reshapeDynamic = $root.CoreML.Specification.ReshapeDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1150: + message.rankPreservingReshape = $root.CoreML.Specification.RankPreservingReshapeLayerParams.decode(reader, reader.uint32()); + break; + case 1155: + message.constantPad = $root.CoreML.Specification.ConstantPaddingLayerParams.decode(reader, reader.uint32()); + break; + case 1170: + message.randomNormalLike = $root.CoreML.Specification.RandomNormalLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1175: + message.randomNormalStatic = $root.CoreML.Specification.RandomNormalStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1180: + message.randomNormalDynamic = $root.CoreML.Specification.RandomNormalDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1190: + message.randomUniformLike = $root.CoreML.Specification.RandomUniformLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1195: + message.randomUniformStatic = $root.CoreML.Specification.RandomUniformStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1200: + message.randomUniformDynamic = $root.CoreML.Specification.RandomUniformDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1210: + message.randomBernoulliLike = $root.CoreML.Specification.RandomBernoulliLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1215: + message.randomBernoulliStatic = $root.CoreML.Specification.RandomBernoulliStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1220: + message.randomBernoulliDynamic = $root.CoreML.Specification.RandomBernoulliDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1230: + message.categoricalDistribution = $root.CoreML.Specification.CategoricalDistributionLayerParams.decode(reader, 
reader.uint32()); + break; + case 1250: + message.reduceL1 = $root.CoreML.Specification.ReduceL1LayerParams.decode(reader, reader.uint32()); + break; + case 1255: + message.reduceL2 = $root.CoreML.Specification.ReduceL2LayerParams.decode(reader, reader.uint32()); + break; + case 1260: + message.reduceMax = $root.CoreML.Specification.ReduceMaxLayerParams.decode(reader, reader.uint32()); + break; + case 1265: + message.reduceMin = $root.CoreML.Specification.ReduceMinLayerParams.decode(reader, reader.uint32()); + break; + case 1270: + message.reduceSum = $root.CoreML.Specification.ReduceSumLayerParams.decode(reader, reader.uint32()); + break; + case 1275: + message.reduceProd = $root.CoreML.Specification.ReduceProdLayerParams.decode(reader, reader.uint32()); + break; + case 1280: + message.reduceMean = $root.CoreML.Specification.ReduceMeanLayerParams.decode(reader, reader.uint32()); + break; + case 1285: + message.reduceLogSum = $root.CoreML.Specification.ReduceLogSumLayerParams.decode(reader, reader.uint32()); + break; + case 1290: + message.reduceSumSquare = $root.CoreML.Specification.ReduceSumSquareLayerParams.decode(reader, reader.uint32()); + break; + case 1295: + message.reduceLogSumExp = $root.CoreML.Specification.ReduceLogSumExpLayerParams.decode(reader, reader.uint32()); + break; + case 1313: + message.whereNonZero = $root.CoreML.Specification.WhereNonZeroLayerParams.decode(reader, reader.uint32()); + break; + case 1315: + message.matrixBandPart = $root.CoreML.Specification.MatrixBandPartLayerParams.decode(reader, reader.uint32()); + break; + case 1320: + message.lowerTriangular = $root.CoreML.Specification.LowerTriangularLayerParams.decode(reader, reader.uint32()); + break; + case 1325: + message.upperTriangular = $root.CoreML.Specification.UpperTriangularLayerParams.decode(reader, reader.uint32()); + break; + case 1330: + message.whereBroadcastable = $root.CoreML.Specification.WhereBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 1350: + message.layerNormalization = $root.CoreML.Specification.LayerNormalizationLayerParams.decode(reader, reader.uint32()); + break; + case 1400: + message.NonMaximumSuppression = $root.CoreML.Specification.NonMaximumSuppressionLayerParams.decode(reader, reader.uint32()); + break; + case 1450: + message.oneHot = $root.CoreML.Specification.OneHotLayerParams.decode(reader, reader.uint32()); + break; + case 1455: + message.cumSum = $root.CoreML.Specification.CumSumLayerParams.decode(reader, reader.uint32()); + break; + case 1460: + message.clampedReLU = $root.CoreML.Specification.ClampedReLULayerParams.decode(reader, reader.uint32()); + break; + case 1461: + message.argSort = $root.CoreML.Specification.ArgSortLayerParams.decode(reader, reader.uint32()); + break; + case 1465: + message.pooling3d = $root.CoreML.Specification.Pooling3DLayerParams.decode(reader, reader.uint32()); + break; + case 1466: + message.globalPooling3d = $root.CoreML.Specification.GlobalPooling3DLayerParams.decode(reader, reader.uint32()); + break; + case 1470: + message.sliceBySize = $root.CoreML.Specification.SliceBySizeLayerParams.decode(reader, reader.uint32()); + break; + case 1471: + message.convolution3d = $root.CoreML.Specification.Convolution3DLayerParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NeuralNetworkLayer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + 
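+ // Text-format parsing: reader.tag() yields the next field name as a
+ // string, repeated string fields such as "input" and "output" are
+ // collected with reader.array, and unrecognized names are delegated to
+ // reader.field.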
switch (tag) { + case "name": + message.name = reader.string(); + break; + case "input": + reader.array(message.input, () => reader.string()); + break; + case "output": + reader.array(message.output, () => reader.string()); + break; + case "inputTensor": + message.inputTensor.push($root.CoreML.Specification.Tensor.decodeText(reader)); + break; + case "outputTensor": + message.outputTensor.push($root.CoreML.Specification.Tensor.decodeText(reader)); + break; + case "isUpdatable": + message.isUpdatable = reader.bool(); + break; + case "convolution": + message.convolution = $root.CoreML.Specification.ConvolutionLayerParams.decodeText(reader); + break; + case "pooling": + message.pooling = $root.CoreML.Specification.PoolingLayerParams.decodeText(reader); + break; + case "activation": + message.activation = $root.CoreML.Specification.ActivationParams.decodeText(reader); + break; + case "innerProduct": + message.innerProduct = $root.CoreML.Specification.InnerProductLayerParams.decodeText(reader); + break; + case "embedding": + message.embedding = $root.CoreML.Specification.EmbeddingLayerParams.decodeText(reader); + break; + case "batchnorm": + message.batchnorm = $root.CoreML.Specification.BatchnormLayerParams.decodeText(reader); + break; + case "mvn": + message.mvn = $root.CoreML.Specification.MeanVarianceNormalizeLayerParams.decodeText(reader); + break; + case "l2normalize": + message.l2normalize = $root.CoreML.Specification.L2NormalizeLayerParams.decodeText(reader); + break; + case "softmax": + message.softmax = $root.CoreML.Specification.SoftmaxLayerParams.decodeText(reader); + break; + case "lrn": + message.lrn = $root.CoreML.Specification.LRNLayerParams.decodeText(reader); + break; + case "crop": + message.crop = $root.CoreML.Specification.CropLayerParams.decodeText(reader); + break; + case "padding": + message.padding = $root.CoreML.Specification.PaddingLayerParams.decodeText(reader); + break; + case "upsample": + message.upsample = $root.CoreML.Specification.UpsampleLayerParams.decodeText(reader); + break; + case "resizeBilinear": + message.resizeBilinear = $root.CoreML.Specification.ResizeBilinearLayerParams.decodeText(reader); + break; + case "cropResize": + message.cropResize = $root.CoreML.Specification.CropResizeLayerParams.decodeText(reader); + break; + case "unary": + message.unary = $root.CoreML.Specification.UnaryFunctionLayerParams.decodeText(reader); + break; + case "add": + message.add = $root.CoreML.Specification.AddLayerParams.decodeText(reader); + break; + case "multiply": + message.multiply = $root.CoreML.Specification.MultiplyLayerParams.decodeText(reader); + break; + case "average": + message.average = $root.CoreML.Specification.AverageLayerParams.decodeText(reader); + break; + case "scale": + message.scale = $root.CoreML.Specification.ScaleLayerParams.decodeText(reader); + break; + case "bias": + message.bias = $root.CoreML.Specification.BiasLayerParams.decodeText(reader); + break; + case "max": + message.max = $root.CoreML.Specification.MaxLayerParams.decodeText(reader); + break; + case "min": + message.min = $root.CoreML.Specification.MinLayerParams.decodeText(reader); + break; + case "dot": + message.dot = $root.CoreML.Specification.DotProductLayerParams.decodeText(reader); + break; + case "reduce": + message.reduce = $root.CoreML.Specification.ReduceLayerParams.decodeText(reader); + break; + case "loadConstant": + message.loadConstant = $root.CoreML.Specification.LoadConstantLayerParams.decodeText(reader); + break; + case "reshape": + message.reshape = 
$root.CoreML.Specification.ReshapeLayerParams.decodeText(reader); + break; + case "flatten": + message.flatten = $root.CoreML.Specification.FlattenLayerParams.decodeText(reader); + break; + case "permute": + message.permute = $root.CoreML.Specification.PermuteLayerParams.decodeText(reader); + break; + case "concat": + message.concat = $root.CoreML.Specification.ConcatLayerParams.decodeText(reader); + break; + case "split": + message.split = $root.CoreML.Specification.SplitLayerParams.decodeText(reader); + break; + case "sequenceRepeat": + message.sequenceRepeat = $root.CoreML.Specification.SequenceRepeatLayerParams.decodeText(reader); + break; + case "reorganizeData": + message.reorganizeData = $root.CoreML.Specification.ReorganizeDataLayerParams.decodeText(reader); + break; + case "slice": + message.slice = $root.CoreML.Specification.SliceLayerParams.decodeText(reader); + break; + case "simpleRecurrent": + message.simpleRecurrent = $root.CoreML.Specification.SimpleRecurrentLayerParams.decodeText(reader); + break; + case "gru": + message.gru = $root.CoreML.Specification.GRULayerParams.decodeText(reader); + break; + case "uniDirectionalLSTM": + message.uniDirectionalLSTM = $root.CoreML.Specification.UniDirectionalLSTMLayerParams.decodeText(reader); + break; + case "biDirectionalLSTM": + message.biDirectionalLSTM = $root.CoreML.Specification.BiDirectionalLSTMLayerParams.decodeText(reader); + break; + case "custom": + message.custom = $root.CoreML.Specification.CustomLayerParams.decodeText(reader); + break; + case "copy": + message.copy = $root.CoreML.Specification.CopyLayerParams.decodeText(reader); + break; + case "branch": + message.branch = $root.CoreML.Specification.BranchLayerParams.decodeText(reader); + break; + case "loop": + message.loop = $root.CoreML.Specification.LoopLayerParams.decodeText(reader); + break; + case "loopBreak": + message.loopBreak = $root.CoreML.Specification.LoopBreakLayerParams.decodeText(reader); + break; + case "loopContinue": + message.loopContinue = $root.CoreML.Specification.LoopContinueLayerParams.decodeText(reader); + break; + case "rangeStatic": + message.rangeStatic = $root.CoreML.Specification.RangeStaticLayerParams.decodeText(reader); + break; + case "rangeDynamic": + message.rangeDynamic = $root.CoreML.Specification.RangeDynamicLayerParams.decodeText(reader); + break; + case "clip": + message.clip = $root.CoreML.Specification.ClipLayerParams.decodeText(reader); + break; + case "ceil": + message.ceil = $root.CoreML.Specification.CeilLayerParams.decodeText(reader); + break; + case "floor": + message.floor = $root.CoreML.Specification.FloorLayerParams.decodeText(reader); + break; + case "sign": + message.sign = $root.CoreML.Specification.SignLayerParams.decodeText(reader); + break; + case "round": + message.round = $root.CoreML.Specification.RoundLayerParams.decodeText(reader); + break; + case "exp2": + message.exp2 = $root.CoreML.Specification.Exp2LayerParams.decodeText(reader); + break; + case "sin": + message.sin = $root.CoreML.Specification.SinLayerParams.decodeText(reader); + break; + case "cos": + message.cos = $root.CoreML.Specification.CosLayerParams.decodeText(reader); + break; + case "tan": + message.tan = $root.CoreML.Specification.TanLayerParams.decodeText(reader); + break; + case "asin": + message.asin = $root.CoreML.Specification.AsinLayerParams.decodeText(reader); + break; + case "acos": + message.acos = $root.CoreML.Specification.AcosLayerParams.decodeText(reader); + break; + case "atan": + message.atan = 
$root.CoreML.Specification.AtanLayerParams.decodeText(reader); + break; + case "sinh": + message.sinh = $root.CoreML.Specification.SinhLayerParams.decodeText(reader); + break; + case "cosh": + message.cosh = $root.CoreML.Specification.CoshLayerParams.decodeText(reader); + break; + case "tanh": + message.tanh = $root.CoreML.Specification.TanhLayerParams.decodeText(reader); + break; + case "asinh": + message.asinh = $root.CoreML.Specification.AsinhLayerParams.decodeText(reader); + break; + case "acosh": + message.acosh = $root.CoreML.Specification.AcoshLayerParams.decodeText(reader); + break; + case "atanh": + message.atanh = $root.CoreML.Specification.AtanhLayerParams.decodeText(reader); + break; + case "erf": + message.erf = $root.CoreML.Specification.ErfLayerParams.decodeText(reader); + break; + case "gelu": + message.gelu = $root.CoreML.Specification.GeluLayerParams.decodeText(reader); + break; + case "equal": + message.equal = $root.CoreML.Specification.EqualLayerParams.decodeText(reader); + break; + case "notEqual": + message.notEqual = $root.CoreML.Specification.NotEqualLayerParams.decodeText(reader); + break; + case "lessThan": + message.lessThan = $root.CoreML.Specification.LessThanLayerParams.decodeText(reader); + break; + case "lessEqual": + message.lessEqual = $root.CoreML.Specification.LessEqualLayerParams.decodeText(reader); + break; + case "greaterThan": + message.greaterThan = $root.CoreML.Specification.GreaterThanLayerParams.decodeText(reader); + break; + case "greaterEqual": + message.greaterEqual = $root.CoreML.Specification.GreaterEqualLayerParams.decodeText(reader); + break; + case "logicalOr": + message.logicalOr = $root.CoreML.Specification.LogicalOrLayerParams.decodeText(reader); + break; + case "logicalXor": + message.logicalXor = $root.CoreML.Specification.LogicalXorLayerParams.decodeText(reader); + break; + case "logicalNot": + message.logicalNot = $root.CoreML.Specification.LogicalNotLayerParams.decodeText(reader); + break; + case "logicalAnd": + message.logicalAnd = $root.CoreML.Specification.LogicalAndLayerParams.decodeText(reader); + break; + case "modBroadcastable": + message.modBroadcastable = $root.CoreML.Specification.ModBroadcastableLayerParams.decodeText(reader); + break; + case "minBroadcastable": + message.minBroadcastable = $root.CoreML.Specification.MinBroadcastableLayerParams.decodeText(reader); + break; + case "maxBroadcastable": + message.maxBroadcastable = $root.CoreML.Specification.MaxBroadcastableLayerParams.decodeText(reader); + break; + case "addBroadcastable": + message.addBroadcastable = $root.CoreML.Specification.AddBroadcastableLayerParams.decodeText(reader); + break; + case "powBroadcastable": + message.powBroadcastable = $root.CoreML.Specification.PowBroadcastableLayerParams.decodeText(reader); + break; + case "divideBroadcastable": + message.divideBroadcastable = $root.CoreML.Specification.DivideBroadcastableLayerParams.decodeText(reader); + break; + case "floorDivBroadcastable": + message.floorDivBroadcastable = $root.CoreML.Specification.FloorDivBroadcastableLayerParams.decodeText(reader); + break; + case "multiplyBroadcastable": + message.multiplyBroadcastable = $root.CoreML.Specification.MultiplyBroadcastableLayerParams.decodeText(reader); + break; + case "subtractBroadcastable": + message.subtractBroadcastable = $root.CoreML.Specification.SubtractBroadcastableLayerParams.decodeText(reader); + break; + case "tile": + message.tile = $root.CoreML.Specification.TileLayerParams.decodeText(reader); + break; + case "stack": + 
message.stack = $root.CoreML.Specification.StackLayerParams.decodeText(reader); + break; + case "gather": + message.gather = $root.CoreML.Specification.GatherLayerParams.decodeText(reader); + break; + case "scatter": + message.scatter = $root.CoreML.Specification.ScatterLayerParams.decodeText(reader); + break; + case "gatherND": + message.gatherND = $root.CoreML.Specification.GatherNDLayerParams.decodeText(reader); + break; + case "scatterND": + message.scatterND = $root.CoreML.Specification.ScatterNDLayerParams.decodeText(reader); + break; + case "softmaxND": + message.softmaxND = $root.CoreML.Specification.SoftmaxNDLayerParams.decodeText(reader); + break; + case "gatherAlongAxis": + message.gatherAlongAxis = $root.CoreML.Specification.GatherAlongAxisLayerParams.decodeText(reader); + break; + case "scatterAlongAxis": + message.scatterAlongAxis = $root.CoreML.Specification.ScatterAlongAxisLayerParams.decodeText(reader); + break; + case "reverse": + message.reverse = $root.CoreML.Specification.ReverseLayerParams.decodeText(reader); + break; + case "reverseSeq": + message.reverseSeq = $root.CoreML.Specification.ReverseSeqLayerParams.decodeText(reader); + break; + case "splitND": + message.splitND = $root.CoreML.Specification.SplitNDLayerParams.decodeText(reader); + break; + case "concatND": + message.concatND = $root.CoreML.Specification.ConcatNDLayerParams.decodeText(reader); + break; + case "transpose": + message.transpose = $root.CoreML.Specification.TransposeLayerParams.decodeText(reader); + break; + case "sliceStatic": + message.sliceStatic = $root.CoreML.Specification.SliceStaticLayerParams.decodeText(reader); + break; + case "sliceDynamic": + message.sliceDynamic = $root.CoreML.Specification.SliceDynamicLayerParams.decodeText(reader); + break; + case "slidingWindows": + message.slidingWindows = $root.CoreML.Specification.SlidingWindowsLayerParams.decodeText(reader); + break; + case "topK": + message.topK = $root.CoreML.Specification.TopKLayerParams.decodeText(reader); + break; + case "argMin": + message.argMin = $root.CoreML.Specification.ArgMinLayerParams.decodeText(reader); + break; + case "argMax": + message.argMax = $root.CoreML.Specification.ArgMaxLayerParams.decodeText(reader); + break; + case "embeddingND": + message.embeddingND = $root.CoreML.Specification.EmbeddingNDLayerParams.decodeText(reader); + break; + case "batchedMatmul": + message.batchedMatmul = $root.CoreML.Specification.BatchedMatMulLayerParams.decodeText(reader); + break; + case "getShape": + message.getShape = $root.CoreML.Specification.GetShapeLayerParams.decodeText(reader); + break; + case "loadConstantND": + message.loadConstantND = $root.CoreML.Specification.LoadConstantNDLayerParams.decodeText(reader); + break; + case "fillLike": + message.fillLike = $root.CoreML.Specification.FillLikeLayerParams.decodeText(reader); + break; + case "fillStatic": + message.fillStatic = $root.CoreML.Specification.FillStaticLayerParams.decodeText(reader); + break; + case "fillDynamic": + message.fillDynamic = $root.CoreML.Specification.FillDynamicLayerParams.decodeText(reader); + break; + case "broadcastToLike": + message.broadcastToLike = $root.CoreML.Specification.BroadcastToLikeLayerParams.decodeText(reader); + break; + case "broadcastToStatic": + message.broadcastToStatic = $root.CoreML.Specification.BroadcastToStaticLayerParams.decodeText(reader); + break; + case "broadcastToDynamic": + message.broadcastToDynamic = $root.CoreML.Specification.BroadcastToDynamicLayerParams.decodeText(reader); + break; + case "squeeze": + 
message.squeeze = $root.CoreML.Specification.SqueezeLayerParams.decodeText(reader); + break; + case "expandDims": + message.expandDims = $root.CoreML.Specification.ExpandDimsLayerParams.decodeText(reader); + break; + case "flattenTo2D": + message.flattenTo2D = $root.CoreML.Specification.FlattenTo2DLayerParams.decodeText(reader); + break; + case "reshapeLike": + message.reshapeLike = $root.CoreML.Specification.ReshapeLikeLayerParams.decodeText(reader); + break; + case "reshapeStatic": + message.reshapeStatic = $root.CoreML.Specification.ReshapeStaticLayerParams.decodeText(reader); + break; + case "reshapeDynamic": + message.reshapeDynamic = $root.CoreML.Specification.ReshapeDynamicLayerParams.decodeText(reader); + break; + case "rankPreservingReshape": + message.rankPreservingReshape = $root.CoreML.Specification.RankPreservingReshapeLayerParams.decodeText(reader); + break; + case "constantPad": + message.constantPad = $root.CoreML.Specification.ConstantPaddingLayerParams.decodeText(reader); + break; + case "randomNormalLike": + message.randomNormalLike = $root.CoreML.Specification.RandomNormalLikeLayerParams.decodeText(reader); + break; + case "randomNormalStatic": + message.randomNormalStatic = $root.CoreML.Specification.RandomNormalStaticLayerParams.decodeText(reader); + break; + case "randomNormalDynamic": + message.randomNormalDynamic = $root.CoreML.Specification.RandomNormalDynamicLayerParams.decodeText(reader); + break; + case "randomUniformLike": + message.randomUniformLike = $root.CoreML.Specification.RandomUniformLikeLayerParams.decodeText(reader); + break; + case "randomUniformStatic": + message.randomUniformStatic = $root.CoreML.Specification.RandomUniformStaticLayerParams.decodeText(reader); + break; + case "randomUniformDynamic": + message.randomUniformDynamic = $root.CoreML.Specification.RandomUniformDynamicLayerParams.decodeText(reader); + break; + case "randomBernoulliLike": + message.randomBernoulliLike = $root.CoreML.Specification.RandomBernoulliLikeLayerParams.decodeText(reader); + break; + case "randomBernoulliStatic": + message.randomBernoulliStatic = $root.CoreML.Specification.RandomBernoulliStaticLayerParams.decodeText(reader); + break; + case "randomBernoulliDynamic": + message.randomBernoulliDynamic = $root.CoreML.Specification.RandomBernoulliDynamicLayerParams.decodeText(reader); + break; + case "categoricalDistribution": + message.categoricalDistribution = $root.CoreML.Specification.CategoricalDistributionLayerParams.decodeText(reader); + break; + case "reduceL1": + message.reduceL1 = $root.CoreML.Specification.ReduceL1LayerParams.decodeText(reader); + break; + case "reduceL2": + message.reduceL2 = $root.CoreML.Specification.ReduceL2LayerParams.decodeText(reader); + break; + case "reduceMax": + message.reduceMax = $root.CoreML.Specification.ReduceMaxLayerParams.decodeText(reader); + break; + case "reduceMin": + message.reduceMin = $root.CoreML.Specification.ReduceMinLayerParams.decodeText(reader); + break; + case "reduceSum": + message.reduceSum = $root.CoreML.Specification.ReduceSumLayerParams.decodeText(reader); + break; + case "reduceProd": + message.reduceProd = $root.CoreML.Specification.ReduceProdLayerParams.decodeText(reader); + break; + case "reduceMean": + message.reduceMean = $root.CoreML.Specification.ReduceMeanLayerParams.decodeText(reader); + break; + case "reduceLogSum": + message.reduceLogSum = $root.CoreML.Specification.ReduceLogSumLayerParams.decodeText(reader); + break; + case "reduceSumSquare": + message.reduceSumSquare = 
$root.CoreML.Specification.ReduceSumSquareLayerParams.decodeText(reader); + break; + case "reduceLogSumExp": + message.reduceLogSumExp = $root.CoreML.Specification.ReduceLogSumExpLayerParams.decodeText(reader); + break; + case "whereNonZero": + message.whereNonZero = $root.CoreML.Specification.WhereNonZeroLayerParams.decodeText(reader); + break; + case "matrixBandPart": + message.matrixBandPart = $root.CoreML.Specification.MatrixBandPartLayerParams.decodeText(reader); + break; + case "lowerTriangular": + message.lowerTriangular = $root.CoreML.Specification.LowerTriangularLayerParams.decodeText(reader); + break; + case "upperTriangular": + message.upperTriangular = $root.CoreML.Specification.UpperTriangularLayerParams.decodeText(reader); + break; + case "whereBroadcastable": + message.whereBroadcastable = $root.CoreML.Specification.WhereBroadcastableLayerParams.decodeText(reader); + break; + case "layerNormalization": + message.layerNormalization = $root.CoreML.Specification.LayerNormalizationLayerParams.decodeText(reader); + break; + case "NonMaximumSuppression": + message.NonMaximumSuppression = $root.CoreML.Specification.NonMaximumSuppressionLayerParams.decodeText(reader); + break; + case "oneHot": + message.oneHot = $root.CoreML.Specification.OneHotLayerParams.decodeText(reader); + break; + case "cumSum": + message.cumSum = $root.CoreML.Specification.CumSumLayerParams.decodeText(reader); + break; + case "clampedReLU": + message.clampedReLU = $root.CoreML.Specification.ClampedReLULayerParams.decodeText(reader); + break; + case "argSort": + message.argSort = $root.CoreML.Specification.ArgSortLayerParams.decodeText(reader); + break; + case "pooling3d": + message.pooling3d = $root.CoreML.Specification.Pooling3DLayerParams.decodeText(reader); + break; + case "globalPooling3d": + message.globalPooling3d = $root.CoreML.Specification.GlobalPooling3DLayerParams.decodeText(reader); + break; + case "sliceBySize": + message.sliceBySize = $root.CoreML.Specification.SliceBySizeLayerParams.decodeText(reader); + break; + case "convolution3d": + message.convolution3d = $root.CoreML.Specification.Convolution3DLayerParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NeuralNetworkLayer.prototype.name = ""; +$root.CoreML.Specification.NeuralNetworkLayer.prototype.isUpdatable = false; + +$root.CoreML.Specification.BranchLayerParams = class BranchLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BranchLayerParams(); + const end = length !== undefined ? 
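+ // Control-flow layers embed whole subgraphs: ifBranch and elseBranch are
+ // complete NeuralNetwork messages, so this decoder is mutually recursive
+ // with NeuralNetwork.decode.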
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.ifBranch = $root.CoreML.Specification.NeuralNetwork.decode(reader, reader.uint32()); + break; + case 2: + message.elseBranch = $root.CoreML.Specification.NeuralNetwork.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BranchLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "ifBranch": + message.ifBranch = $root.CoreML.Specification.NeuralNetwork.decodeText(reader); + break; + case "elseBranch": + message.elseBranch = $root.CoreML.Specification.NeuralNetwork.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BranchLayerParams.prototype.ifBranch = null; +$root.CoreML.Specification.BranchLayerParams.prototype.elseBranch = null; + +$root.CoreML.Specification.LoopLayerParams = class LoopLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LoopLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxLoopIterations = reader.uint64(); + break; + case 2: + message.conditionVar = reader.string(); + break; + case 3: + message.conditionNetwork = $root.CoreML.Specification.NeuralNetwork.decode(reader, reader.uint32()); + break; + case 4: + message.bodyNetwork = $root.CoreML.Specification.NeuralNetwork.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LoopLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "maxLoopIterations": + message.maxLoopIterations = reader.uint64(); + break; + case "conditionVar": + message.conditionVar = reader.string(); + break; + case "conditionNetwork": + message.conditionNetwork = $root.CoreML.Specification.NeuralNetwork.decodeText(reader); + break; + case "bodyNetwork": + message.bodyNetwork = $root.CoreML.Specification.NeuralNetwork.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LoopLayerParams.prototype.maxLoopIterations = protobuf.Uint64.create(0); +$root.CoreML.Specification.LoopLayerParams.prototype.conditionVar = ""; +$root.CoreML.Specification.LoopLayerParams.prototype.conditionNetwork = null; +$root.CoreML.Specification.LoopLayerParams.prototype.bodyNetwork = null; + +$root.CoreML.Specification.LoopBreakLayerParams = class LoopBreakLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LoopBreakLayerParams(); + const end = length !== undefined ? 
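+ // loopBreak, loopContinue and copy are empty marker messages: they declare
+ // no fields, so their decode loops exist only to skip unknown data, and
+ // the message's presence in the layer oneof is the entire signal. Note
+ // also that maxLoopIterations above defaults to protobuf.Uint64.create(0)
+ // because a plain JS number cannot represent the full uint64 range.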
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LoopBreakLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LoopContinueLayerParams = class LoopContinueLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LoopContinueLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LoopContinueLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CopyLayerParams = class CopyLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CopyLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CopyLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GreaterThanLayerParams = class GreaterThanLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GreaterThanLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GreaterThanLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GreaterThanLayerParams.prototype.alpha = 0; + +$root.CoreML.Specification.GreaterEqualLayerParams = class GreaterEqualLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GreaterEqualLayerParams(); + const end = length !== undefined ? 
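+ // The comparison layers each expose a single scalar alpha, used by the
+ // single-input form that compares a tensor against a constant.
+ // greaterThan/greaterEqual/lessThan/lessEqual put it at field 2, while
+ // equal/notEqual below use field 1.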
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GreaterEqualLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GreaterEqualLayerParams.prototype.alpha = 0; + +$root.CoreML.Specification.LessThanLayerParams = class LessThanLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LessThanLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LessThanLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LessThanLayerParams.prototype.alpha = 0; + +$root.CoreML.Specification.LessEqualLayerParams = class LessEqualLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LessEqualLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LessEqualLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LessEqualLayerParams.prototype.alpha = 0; + +$root.CoreML.Specification.EqualLayerParams = class EqualLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.EqualLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.EqualLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.EqualLayerParams.prototype.alpha = 0; + +$root.CoreML.Specification.NotEqualLayerParams = class NotEqualLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NotEqualLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NotEqualLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NotEqualLayerParams.prototype.alpha = 0; + +$root.CoreML.Specification.LogicalAndLayerParams = class LogicalAndLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LogicalAndLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LogicalAndLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LogicalOrLayerParams = class LogicalOrLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LogicalOrLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LogicalOrLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LogicalXorLayerParams = class LogicalXorLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LogicalXorLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LogicalXorLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LogicalNotLayerParams = class LogicalNotLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LogicalNotLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LogicalNotLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BorderAmounts = class BorderAmounts { + + constructor() { + this.borderAmounts = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BorderAmounts(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.borderAmounts.push($root.CoreML.Specification.BorderAmounts.EdgeSizes.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BorderAmounts(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "borderAmounts": + message.borderAmounts.push($root.CoreML.Specification.BorderAmounts.EdgeSizes.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BorderAmounts.EdgeSizes = class EdgeSizes { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BorderAmounts.EdgeSizes(); + const end = length !== undefined ? 
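+ // Note (illustrative sketch, not generated code): embedded messages such
+ // as the EdgeSizes entries collected above are length-delimited on the
+ // wire, which is why the recurring call shape is decode(reader, reader.uint32()):
+ // the inner reader.uint32() consumes the byte-length prefix and bounds the
+ // nested decode through the `end` computed from it, e.g.
+ //
+ //     // message.borderAmounts.push(
+ //     //     EdgeSizes.decode(reader, reader.uint32())); // bounded child
+ //
+ // The text-format twin, decodeText(), takes no length because
+ // reader.start() and reader.end() track nesting in the text stream instead.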
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.startEdgeSize = reader.uint64(); + break; + case 2: + message.endEdgeSize = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BorderAmounts.EdgeSizes(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "startEdgeSize": + message.startEdgeSize = reader.uint64(); + break; + case "endEdgeSize": + message.endEdgeSize = reader.uint64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BorderAmounts.EdgeSizes.prototype.startEdgeSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.BorderAmounts.EdgeSizes.prototype.endEdgeSize = protobuf.Uint64.create(0); + +$root.CoreML.Specification.ValidPadding = class ValidPadding { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ValidPadding(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.paddingAmounts = $root.CoreML.Specification.BorderAmounts.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ValidPadding(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "paddingAmounts": + message.paddingAmounts = $root.CoreML.Specification.BorderAmounts.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ValidPadding.prototype.paddingAmounts = null; + +$root.CoreML.Specification.SamePadding = class SamePadding { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SamePadding(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.asymmetryMode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SamePadding(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "asymmetryMode": + message.asymmetryMode = reader.enum($root.CoreML.Specification.SamePadding.SamePaddingMode); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SamePadding.prototype.asymmetryMode = 0; + +$root.CoreML.Specification.SamePadding.SamePaddingMode = { + "BOTTOM_RIGHT_HEAVY": 0, + "TOP_LEFT_HEAVY": 1 +}; + +$root.CoreML.Specification.SamplingMode = class SamplingMode { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SamplingMode(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.samplingMethod = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SamplingMode(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "samplingMethod": + message.samplingMethod = reader.enum($root.CoreML.Specification.SamplingMode.Method); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SamplingMode.prototype.samplingMethod = 0; + +$root.CoreML.Specification.SamplingMode.Method = { + "STRICT_ALIGN_ENDPOINTS_MODE": 0, + "ALIGN_ENDPOINTS_MODE": 1, + "UPSAMPLE_MODE": 2, + "ROI_ALIGN_MODE": 3 +}; + +$root.CoreML.Specification.BoxCoordinatesMode = class BoxCoordinatesMode { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BoxCoordinatesMode(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.boxMode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BoxCoordinatesMode(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "boxMode": + message.boxMode = reader.enum($root.CoreML.Specification.BoxCoordinatesMode.Coordinates); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BoxCoordinatesMode.prototype.boxMode = 0; + +$root.CoreML.Specification.BoxCoordinatesMode.Coordinates = { + "CORNERS_HEIGHT_FIRST": 0, + "CORNERS_WIDTH_FIRST": 1, + "CENTER_SIZE_HEIGHT_FIRST": 2, + "CENTER_SIZE_WIDTH_FIRST": 3 +}; + +$root.CoreML.Specification.WeightParams = class WeightParams { + + constructor() { + this.floatValue = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.WeightParams(); + const end = length !== undefined ? 
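+ // Note (illustrative sketch, not generated code): enums in this module are
+ // plain name -> value objects, and binary decode() stores only the raw
+ // int32. A display-side reverse lookup is a one-liner over the same map:
+ //
+ //     const modes = $root.CoreML.Specification.SamplingMode.Method;
+ //     const name = Object.keys(modes)
+ //         .find((key) => modes[key] === message.samplingMethod);
+ //     // -> "STRICT_ALIGN_ENDPOINTS_MODE" for the default value 0
+ //
+ // decodeText() resolves the symbolic spelling directly through
+ // reader.enum(...), as the cases above show.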
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.floatValue = reader.floats(message.floatValue, tag); + break; + case 2: + message.float16Value = reader.bytes(); + break; + case 30: + message.rawValue = reader.bytes(); + break; + case 31: + message.int8RawValue = reader.bytes(); + break; + case 40: + message.quantization = $root.CoreML.Specification.QuantizationParams.decode(reader, reader.uint32()); + break; + case 50: + message.isUpdatable = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.WeightParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "floatValue": + reader.array(message.floatValue, () => reader.float()); + break; + case "float16Value": + message.float16Value = reader.bytes(); + break; + case "rawValue": + message.rawValue = reader.bytes(); + break; + case "int8RawValue": + message.int8RawValue = reader.bytes(); + break; + case "quantization": + message.quantization = $root.CoreML.Specification.QuantizationParams.decodeText(reader); + break; + case "isUpdatable": + message.isUpdatable = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.WeightParams.prototype.float16Value = new Uint8Array([]); +$root.CoreML.Specification.WeightParams.prototype.rawValue = new Uint8Array([]); +$root.CoreML.Specification.WeightParams.prototype.int8RawValue = new Uint8Array([]); +$root.CoreML.Specification.WeightParams.prototype.quantization = null; +$root.CoreML.Specification.WeightParams.prototype.isUpdatable = false; + +$root.CoreML.Specification.QuantizationParams = class QuantizationParams { + + constructor() { + } + + get QuantizationType() { + $root.CoreML.Specification.QuantizationParams.QuantizationTypeSet = $root.CoreML.Specification.QuantizationParams.QuantizationTypeSet || new Set([ "linearQuantization", "lookupTableQuantization"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.QuantizationParams.QuantizationTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.QuantizationParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.numberOfBits = reader.uint64(); + break; + case 101: + message.linearQuantization = $root.CoreML.Specification.LinearQuantizationParams.decode(reader, reader.uint32()); + break; + case 102: + message.lookupTableQuantization = $root.CoreML.Specification.LookUpTableQuantizationParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.QuantizationParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "numberOfBits": + message.numberOfBits = reader.uint64(); + break; + case "linearQuantization": + message.linearQuantization = $root.CoreML.Specification.LinearQuantizationParams.decodeText(reader); + break; + case "lookupTableQuantization": + message.lookupTableQuantization = $root.CoreML.Specification.LookUpTableQuantizationParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.QuantizationParams.prototype.numberOfBits = protobuf.Uint64.create(0); + +$root.CoreML.Specification.LinearQuantizationParams = class LinearQuantizationParams { + + constructor() { + this.scale = []; + this.bias = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LinearQuantizationParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.scale = reader.floats(message.scale, tag); + break; + case 2: + message.bias = reader.floats(message.bias, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LinearQuantizationParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "scale": + reader.array(message.scale, () => reader.float()); + break; + case "bias": + reader.array(message.bias, () => reader.float()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LookUpTableQuantizationParams = class LookUpTableQuantizationParams { + + constructor() { + this.floatValue = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LookUpTableQuantizationParams(); + const end = length !== undefined ? 
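+ // Note (illustrative sketch, not generated code): proto oneof groups
+ // surface as computed getters such as QuantizationParams.QuantizationType
+ // above. The getter caches a Set of the member names on first use, then
+ // reports whichever member is populated on this particular instance:
+ //
+ //     const q = new $root.CoreML.Specification.QuantizationParams();
+ //     q.QuantizationType;                  // -> undefined
+ //     q.linearQuantization =
+ //         new $root.CoreML.Specification.LinearQuantizationParams();
+ //     q.QuantizationType;                  // -> "linearQuantization"
+ //
+ // Only own keys are scanned (Object.keys(this)), so prototype defaults
+ // never register as a set oneof member.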
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.floatValue = reader.floats(message.floatValue, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LookUpTableQuantizationParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "floatValue": + reader.array(message.floatValue, () => reader.float()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ConvolutionLayerParams = class ConvolutionLayerParams { + + constructor() { + this.kernelSize = []; + this.stride = []; + this.dilationFactor = []; + this.outputShape = []; + } + + get ConvolutionPaddingType() { + $root.CoreML.Specification.ConvolutionLayerParams.ConvolutionPaddingTypeSet = $root.CoreML.Specification.ConvolutionLayerParams.ConvolutionPaddingTypeSet || new Set([ "valid", "same"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.ConvolutionLayerParams.ConvolutionPaddingTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ConvolutionLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.outputChannels = reader.uint64(); + break; + case 2: + message.kernelChannels = reader.uint64(); + break; + case 10: + message.nGroups = reader.uint64(); + break; + case 20: + message.kernelSize = reader.array(message.kernelSize, () => reader.uint64(), tag); + break; + case 30: + message.stride = reader.array(message.stride, () => reader.uint64(), tag); + break; + case 40: + message.dilationFactor = reader.array(message.dilationFactor, () => reader.uint64(), tag); + break; + case 50: + message.valid = $root.CoreML.Specification.ValidPadding.decode(reader, reader.uint32()); + break; + case 51: + message.same = $root.CoreML.Specification.SamePadding.decode(reader, reader.uint32()); + break; + case 60: + message.isDeconvolution = reader.bool(); + break; + case 70: + message.hasBias = reader.bool(); + break; + case 90: + message.weights = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 91: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 100: + message.outputShape = reader.array(message.outputShape, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ConvolutionLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "outputChannels": + message.outputChannels = reader.uint64(); + break; + case "kernelChannels": + message.kernelChannels = reader.uint64(); + break; + case "nGroups": + message.nGroups = reader.uint64(); + break; + case "kernelSize": + reader.array(message.kernelSize, () => reader.uint64()); + break; + case "stride": + reader.array(message.stride, () => reader.uint64()); + break; + case "dilationFactor": + reader.array(message.dilationFactor, () => reader.uint64()); + break; + case "valid": + message.valid = $root.CoreML.Specification.ValidPadding.decodeText(reader); + 
break; + case "same": + message.same = $root.CoreML.Specification.SamePadding.decodeText(reader); + break; + case "isDeconvolution": + message.isDeconvolution = reader.bool(); + break; + case "hasBias": + message.hasBias = reader.bool(); + break; + case "weights": + message.weights = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "bias": + message.bias = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "outputShape": + reader.array(message.outputShape, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ConvolutionLayerParams.prototype.outputChannels = protobuf.Uint64.create(0); +$root.CoreML.Specification.ConvolutionLayerParams.prototype.kernelChannels = protobuf.Uint64.create(0); +$root.CoreML.Specification.ConvolutionLayerParams.prototype.nGroups = protobuf.Uint64.create(0); +$root.CoreML.Specification.ConvolutionLayerParams.prototype.isDeconvolution = false; +$root.CoreML.Specification.ConvolutionLayerParams.prototype.hasBias = false; +$root.CoreML.Specification.ConvolutionLayerParams.prototype.weights = null; +$root.CoreML.Specification.ConvolutionLayerParams.prototype.bias = null; + +$root.CoreML.Specification.Convolution3DLayerParams = class Convolution3DLayerParams { + + constructor() { + this.outputShape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Convolution3DLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.outputChannels = reader.int32(); + break; + case 2: + message.inputChannels = reader.int32(); + break; + case 10: + message.nGroups = reader.int32(); + break; + case 20: + message.kernelDepth = reader.int32(); + break; + case 21: + message.kernelHeight = reader.int32(); + break; + case 22: + message.kernelWidth = reader.int32(); + break; + case 31: + message.strideDepth = reader.int32(); + break; + case 32: + message.strideHeight = reader.int32(); + break; + case 33: + message.strideWidth = reader.int32(); + break; + case 40: + message.dilationDepth = reader.int32(); + break; + case 41: + message.dilationHeight = reader.int32(); + break; + case 42: + message.dilationWidth = reader.int32(); + break; + case 50: + message.hasBias = reader.bool(); + break; + case 60: + message.weights = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 61: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 70: + message.paddingType = reader.int32(); + break; + case 80: + message.customPaddingFront = reader.int32(); + break; + case 81: + message.customPaddingBack = reader.int32(); + break; + case 82: + message.customPaddingTop = reader.int32(); + break; + case 83: + message.customPaddingBottom = reader.int32(); + break; + case 84: + message.customPaddingLeft = reader.int32(); + break; + case 85: + message.customPaddingRight = reader.int32(); + break; + case 86: + message.isDeconvolution = reader.bool(); + break; + case 87: + message.outputShape = reader.array(message.outputShape, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Convolution3DLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = 
reader.tag(); + switch (tag) { + case "outputChannels": + message.outputChannels = reader.int32(); + break; + case "inputChannels": + message.inputChannels = reader.int32(); + break; + case "nGroups": + message.nGroups = reader.int32(); + break; + case "kernelDepth": + message.kernelDepth = reader.int32(); + break; + case "kernelHeight": + message.kernelHeight = reader.int32(); + break; + case "kernelWidth": + message.kernelWidth = reader.int32(); + break; + case "strideDepth": + message.strideDepth = reader.int32(); + break; + case "strideHeight": + message.strideHeight = reader.int32(); + break; + case "strideWidth": + message.strideWidth = reader.int32(); + break; + case "dilationDepth": + message.dilationDepth = reader.int32(); + break; + case "dilationHeight": + message.dilationHeight = reader.int32(); + break; + case "dilationWidth": + message.dilationWidth = reader.int32(); + break; + case "hasBias": + message.hasBias = reader.bool(); + break; + case "weights": + message.weights = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "bias": + message.bias = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "paddingType": + message.paddingType = reader.enum($root.CoreML.Specification.Convolution3DLayerParams.PaddingType); + break; + case "customPaddingFront": + message.customPaddingFront = reader.int32(); + break; + case "customPaddingBack": + message.customPaddingBack = reader.int32(); + break; + case "customPaddingTop": + message.customPaddingTop = reader.int32(); + break; + case "customPaddingBottom": + message.customPaddingBottom = reader.int32(); + break; + case "customPaddingLeft": + message.customPaddingLeft = reader.int32(); + break; + case "customPaddingRight": + message.customPaddingRight = reader.int32(); + break; + case "isDeconvolution": + message.isDeconvolution = reader.bool(); + break; + case "outputShape": + reader.array(message.outputShape, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Convolution3DLayerParams.prototype.outputChannels = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.inputChannels = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.nGroups = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.kernelDepth = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.kernelHeight = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.kernelWidth = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.strideDepth = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.strideHeight = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.strideWidth = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.dilationDepth = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.dilationHeight = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.dilationWidth = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.hasBias = false; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.weights = null; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.bias = null; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.paddingType = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.customPaddingFront = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.customPaddingBack = 0; 
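+ // Note (illustrative sketch, not generated code): scalar defaults such as
+ // the zeroes above and below are installed on the prototype rather than in
+ // the constructor, so a decoded instance carries own properties only for
+ // fields actually present on the wire while reads still observe the proto3
+ // default:
+ //
+ //     const conv = new $root.CoreML.Specification.Convolution3DLayerParams();
+ //     conv.kernelDepth;                                          // -> 0
+ //     Object.prototype.hasOwnProperty.call(conv, 'kernelDepth'); // -> false
+ //
+ // 64-bit fields elsewhere in the file place a protobuf.Uint64.create(0)
+ // instance on the prototype for the same reason.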
+$root.CoreML.Specification.Convolution3DLayerParams.prototype.customPaddingTop = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.customPaddingBottom = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.customPaddingLeft = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.customPaddingRight = 0; +$root.CoreML.Specification.Convolution3DLayerParams.prototype.isDeconvolution = false; + +$root.CoreML.Specification.Convolution3DLayerParams.PaddingType = { + "CUSTOM": 0, + "VALID": 1, + "SAME": 2 +}; + +$root.CoreML.Specification.InnerProductLayerParams = class InnerProductLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.InnerProductLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputChannels = reader.uint64(); + break; + case 2: + message.outputChannels = reader.uint64(); + break; + case 10: + message.hasBias = reader.bool(); + break; + case 20: + message.weights = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 21: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 22: + message.int8DynamicQuantize = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.InnerProductLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputChannels": + message.inputChannels = reader.uint64(); + break; + case "outputChannels": + message.outputChannels = reader.uint64(); + break; + case "hasBias": + message.hasBias = reader.bool(); + break; + case "weights": + message.weights = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "bias": + message.bias = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "int8DynamicQuantize": + message.int8DynamicQuantize = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.InnerProductLayerParams.prototype.inputChannels = protobuf.Uint64.create(0); +$root.CoreML.Specification.InnerProductLayerParams.prototype.outputChannels = protobuf.Uint64.create(0); +$root.CoreML.Specification.InnerProductLayerParams.prototype.hasBias = false; +$root.CoreML.Specification.InnerProductLayerParams.prototype.weights = null; +$root.CoreML.Specification.InnerProductLayerParams.prototype.bias = null; +$root.CoreML.Specification.InnerProductLayerParams.prototype.int8DynamicQuantize = false; + +$root.CoreML.Specification.EmbeddingLayerParams = class EmbeddingLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.EmbeddingLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputDim = reader.uint64(); + break; + case 2: + message.outputChannels = reader.uint64(); + break; + case 10: + message.hasBias = reader.bool(); + break; + case 20: + message.weights = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 21: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.EmbeddingLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputDim": + message.inputDim = reader.uint64(); + break; + case "outputChannels": + message.outputChannels = reader.uint64(); + break; + case "hasBias": + message.hasBias = reader.bool(); + break; + case "weights": + message.weights = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "bias": + message.bias = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.EmbeddingLayerParams.prototype.inputDim = protobuf.Uint64.create(0); +$root.CoreML.Specification.EmbeddingLayerParams.prototype.outputChannels = protobuf.Uint64.create(0); +$root.CoreML.Specification.EmbeddingLayerParams.prototype.hasBias = false; +$root.CoreML.Specification.EmbeddingLayerParams.prototype.weights = null; +$root.CoreML.Specification.EmbeddingLayerParams.prototype.bias = null; + +$root.CoreML.Specification.EmbeddingNDLayerParams = class EmbeddingNDLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.EmbeddingNDLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.vocabSize = reader.uint64(); + break; + case 2: + message.embeddingSize = reader.uint64(); + break; + case 3: + message.hasBias = reader.bool(); + break; + case 20: + message.weights = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 21: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.EmbeddingNDLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "vocabSize": + message.vocabSize = reader.uint64(); + break; + case "embeddingSize": + message.embeddingSize = reader.uint64(); + break; + case "hasBias": + message.hasBias = reader.bool(); + break; + case "weights": + message.weights = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "bias": + message.bias = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.EmbeddingNDLayerParams.prototype.vocabSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.EmbeddingNDLayerParams.prototype.embeddingSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.EmbeddingNDLayerParams.prototype.hasBias = false; +$root.CoreML.Specification.EmbeddingNDLayerParams.prototype.weights = null; +$root.CoreML.Specification.EmbeddingNDLayerParams.prototype.bias = null; + +$root.CoreML.Specification.BatchnormLayerParams = class BatchnormLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BatchnormLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.channels = reader.uint64(); + break; + case 5: + message.computeMeanVar = reader.bool(); + break; + case 6: + message.instanceNormalization = reader.bool(); + break; + case 10: + message.epsilon = reader.float(); + break; + case 15: + message.gamma = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 16: + message.beta = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 17: + message.mean = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 18: + message.variance = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BatchnormLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "channels": + message.channels = reader.uint64(); + break; + case "computeMeanVar": + message.computeMeanVar = reader.bool(); + break; + case "instanceNormalization": + message.instanceNormalization = reader.bool(); + break; + case "epsilon": + message.epsilon = reader.float(); + break; + case "gamma": + message.gamma = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "beta": + message.beta = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "mean": + message.mean = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "variance": + message.variance = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BatchnormLayerParams.prototype.channels = protobuf.Uint64.create(0); +$root.CoreML.Specification.BatchnormLayerParams.prototype.computeMeanVar = false; +$root.CoreML.Specification.BatchnormLayerParams.prototype.instanceNormalization = false; +$root.CoreML.Specification.BatchnormLayerParams.prototype.epsilon = 0; +$root.CoreML.Specification.BatchnormLayerParams.prototype.gamma = null; +$root.CoreML.Specification.BatchnormLayerParams.prototype.beta = null; +$root.CoreML.Specification.BatchnormLayerParams.prototype.mean = null; +$root.CoreML.Specification.BatchnormLayerParams.prototype.variance = null; + +$root.CoreML.Specification.PoolingLayerParams = class PoolingLayerParams { + + constructor() { + this.kernelSize = []; + this.stride = []; + } + + get PoolingPaddingType() { + $root.CoreML.Specification.PoolingLayerParams.PoolingPaddingTypeSet = $root.CoreML.Specification.PoolingLayerParams.PoolingPaddingTypeSet || new Set([ "valid", "same", "includeLastPixel"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.PoolingLayerParams.PoolingPaddingTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PoolingLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32(); + break; + case 10: + message.kernelSize = reader.array(message.kernelSize, () => reader.uint64(), tag); + break; + case 20: + message.stride = reader.array(message.stride, () => reader.uint64(), tag); + break; + case 30: + message.valid = $root.CoreML.Specification.ValidPadding.decode(reader, reader.uint32()); + break; + case 31: + message.same = $root.CoreML.Specification.SamePadding.decode(reader, reader.uint32()); + break; + case 32: + message.includeLastPixel = $root.CoreML.Specification.PoolingLayerParams.ValidCompletePadding.decode(reader, reader.uint32()); + break; + case 50: + message.avgPoolExcludePadding = reader.bool(); + break; + case 60: + message.globalPooling = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PoolingLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.enum($root.CoreML.Specification.PoolingLayerParams.PoolingType); + break; + case "kernelSize": + reader.array(message.kernelSize, () => reader.uint64()); + break; + case "stride": + reader.array(message.stride, () => reader.uint64()); + break; + case "valid": + message.valid = $root.CoreML.Specification.ValidPadding.decodeText(reader); + break; + case "same": + message.same = $root.CoreML.Specification.SamePadding.decodeText(reader); + break; + case "includeLastPixel": + message.includeLastPixel = $root.CoreML.Specification.PoolingLayerParams.ValidCompletePadding.decodeText(reader); + break; + case "avgPoolExcludePadding": + message.avgPoolExcludePadding = reader.bool(); + break; + case "globalPooling": + message.globalPooling = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.PoolingLayerParams.prototype.type = 0; +$root.CoreML.Specification.PoolingLayerParams.prototype.avgPoolExcludePadding = false; +$root.CoreML.Specification.PoolingLayerParams.prototype.globalPooling = false; + +$root.CoreML.Specification.PoolingLayerParams.PoolingType = { + "MAX": 0, + "AVERAGE": 1, + "L2": 2 +}; + +$root.CoreML.Specification.PoolingLayerParams.ValidCompletePadding = class ValidCompletePadding { + + constructor() { + this.paddingAmounts = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PoolingLayerParams.ValidCompletePadding(); + const end = length !== undefined ? 
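+ // Note (assumption-flagged sketch, not generated code): repeated numeric
+ // fields such as kernelSize and stride above are read with
+ // reader.array(target, readFn, tag), and the tag is forwarded --
+ // presumably so the reader can accept both the packed encoding (one
+ // length-delimited run of values) and the older one-element-per-tag form.
+ // Float repeats like WeightParams.floatValue use the reader.floats()
+ // shorthand. The arrays themselves are pre-created in the constructors,
+ // so decode() only ever appends:
+ //
+ //     const pool = new $root.CoreML.Specification.PoolingLayerParams();
+ //     pool.kernelSize;   // -> [] before decode; uint64 entries after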
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.paddingAmounts = reader.array(message.paddingAmounts, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PoolingLayerParams.ValidCompletePadding(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "paddingAmounts": + reader.array(message.paddingAmounts, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Pooling3DLayerParams = class Pooling3DLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Pooling3DLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32(); + break; + case 2: + message.kernelDepth = reader.int32(); + break; + case 3: + message.kernelHeight = reader.int32(); + break; + case 4: + message.kernelWidth = reader.int32(); + break; + case 5: + message.strideDepth = reader.int32(); + break; + case 6: + message.strideHeight = reader.int32(); + break; + case 7: + message.strideWidth = reader.int32(); + break; + case 15: + message.paddingType = reader.int32(); + break; + case 8: + message.customPaddingFront = reader.int32(); + break; + case 9: + message.customPaddingBack = reader.int32(); + break; + case 10: + message.customPaddingTop = reader.int32(); + break; + case 11: + message.customPaddingBottom = reader.int32(); + break; + case 12: + message.customPaddingLeft = reader.int32(); + break; + case 13: + message.customPaddingRight = reader.int32(); + break; + case 14: + message.countExcludePadding = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Pooling3DLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.enum($root.CoreML.Specification.Pooling3DLayerParams.PoolingType3D); + break; + case "kernelDepth": + message.kernelDepth = reader.int32(); + break; + case "kernelHeight": + message.kernelHeight = reader.int32(); + break; + case "kernelWidth": + message.kernelWidth = reader.int32(); + break; + case "strideDepth": + message.strideDepth = reader.int32(); + break; + case "strideHeight": + message.strideHeight = reader.int32(); + break; + case "strideWidth": + message.strideWidth = reader.int32(); + break; + case "paddingType": + message.paddingType = reader.enum($root.CoreML.Specification.Pooling3DLayerParams.Pooling3DPaddingType); + break; + case "customPaddingFront": + message.customPaddingFront = reader.int32(); + break; + case "customPaddingBack": + message.customPaddingBack = reader.int32(); + break; + case "customPaddingTop": + message.customPaddingTop = reader.int32(); + break; + case "customPaddingBottom": + message.customPaddingBottom = reader.int32(); + break; + case "customPaddingLeft": + message.customPaddingLeft = reader.int32(); + break; + case "customPaddingRight": + message.customPaddingRight = reader.int32(); + break; + case "countExcludePadding": + message.countExcludePadding = 
reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Pooling3DLayerParams.prototype.type = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.kernelDepth = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.kernelHeight = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.kernelWidth = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.strideDepth = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.strideHeight = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.strideWidth = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.paddingType = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.customPaddingFront = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.customPaddingBack = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.customPaddingTop = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.customPaddingBottom = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.customPaddingLeft = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.customPaddingRight = 0; +$root.CoreML.Specification.Pooling3DLayerParams.prototype.countExcludePadding = false; + +$root.CoreML.Specification.Pooling3DLayerParams.PoolingType3D = { + "MAX": 0, + "AVERAGE": 1 +}; + +$root.CoreML.Specification.Pooling3DLayerParams.Pooling3DPaddingType = { + "CUSTOM": 0, + "VALID": 1, + "SAME": 2 +}; + +$root.CoreML.Specification.GlobalPooling3DLayerParams = class GlobalPooling3DLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GlobalPooling3DLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GlobalPooling3DLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.enum($root.CoreML.Specification.GlobalPooling3DLayerParams.GlobalPoolingType3D); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GlobalPooling3DLayerParams.prototype.type = 0; + +$root.CoreML.Specification.GlobalPooling3DLayerParams.GlobalPoolingType3D = { + "MAX": 0, + "AVERAGE": 1 +}; + +$root.CoreML.Specification.PaddingLayerParams = class PaddingLayerParams { + + constructor() { + } + + get PaddingType() { + $root.CoreML.Specification.PaddingLayerParams.PaddingTypeSet = $root.CoreML.Specification.PaddingLayerParams.PaddingTypeSet || new Set([ "constant", "reflection", "replication"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.PaddingLayerParams.PaddingTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PaddingLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.constant = $root.CoreML.Specification.PaddingLayerParams.PaddingConstant.decode(reader, reader.uint32()); + break; + case 2: + message.reflection = $root.CoreML.Specification.PaddingLayerParams.PaddingReflection.decode(reader, reader.uint32()); + break; + case 3: + message.replication = $root.CoreML.Specification.PaddingLayerParams.PaddingReplication.decode(reader, reader.uint32()); + break; + case 10: + message.paddingAmounts = $root.CoreML.Specification.BorderAmounts.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PaddingLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "constant": + message.constant = $root.CoreML.Specification.PaddingLayerParams.PaddingConstant.decodeText(reader); + break; + case "reflection": + message.reflection = $root.CoreML.Specification.PaddingLayerParams.PaddingReflection.decodeText(reader); + break; + case "replication": + message.replication = $root.CoreML.Specification.PaddingLayerParams.PaddingReplication.decodeText(reader); + break; + case "paddingAmounts": + message.paddingAmounts = $root.CoreML.Specification.BorderAmounts.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.PaddingLayerParams.prototype.paddingAmounts = null; + +$root.CoreML.Specification.PaddingLayerParams.PaddingConstant = class PaddingConstant { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PaddingLayerParams.PaddingConstant(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PaddingLayerParams.PaddingConstant(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + message.value = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.PaddingLayerParams.PaddingConstant.prototype.value = 0; + +$root.CoreML.Specification.PaddingLayerParams.PaddingReflection = class PaddingReflection { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PaddingLayerParams.PaddingReflection(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PaddingLayerParams.PaddingReflection(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.PaddingLayerParams.PaddingReplication = class PaddingReplication { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PaddingLayerParams.PaddingReplication(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PaddingLayerParams.PaddingReplication(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ConcatLayerParams = class ConcatLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ConcatLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 100: + message.sequenceConcat = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ConcatLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "sequenceConcat": + message.sequenceConcat = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ConcatLayerParams.prototype.sequenceConcat = false; + +$root.CoreML.Specification.LRNLayerParams = class LRNLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LRNLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta = reader.float(); + break; + case 3: + message.localSize = reader.uint64(); + break; + case 4: + message.k = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LRNLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta": + message.beta = reader.float(); + break; + case "localSize": + message.localSize = reader.uint64(); + break; + case "k": + message.k = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LRNLayerParams.prototype.alpha = 0; +$root.CoreML.Specification.LRNLayerParams.prototype.beta = 0; +$root.CoreML.Specification.LRNLayerParams.prototype.localSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.LRNLayerParams.prototype.k = 0; + +$root.CoreML.Specification.SoftmaxLayerParams = class SoftmaxLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SoftmaxLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SoftmaxLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SplitLayerParams = class SplitLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SplitLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.nOutputs = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SplitLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "nOutputs": + message.nOutputs = reader.uint64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SplitLayerParams.prototype.nOutputs = protobuf.Uint64.create(0); + +$root.CoreML.Specification.AddLayerParams = class AddLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.AddLayerParams(); + const end = length !== undefined ? 
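+ // Note (illustrative sketch; `reader` construction is assumed to be
+ // provided elsewhere in this module): each message ships the same parser
+ // pair -- decode() for the binary .mlmodel payload and decodeText() for
+ // the protobuf text format -- and even field-less messages such as
+ // SoftmaxLayerParams above keep both, since they still have to skip
+ // unknown content to leave the stream position-correct. Typical use:
+ //
+ //     const lrn = $root.CoreML.Specification.LRNLayerParams.decode(reader);
+ //     lrn.localSize;   // uint64, defaults to 0 when absent on the wire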
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.AddLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.AddLayerParams.prototype.alpha = 0; + +$root.CoreML.Specification.MultiplyLayerParams = class MultiplyLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MultiplyLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MultiplyLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MultiplyLayerParams.prototype.alpha = 0; + +$root.CoreML.Specification.UnaryFunctionLayerParams = class UnaryFunctionLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.UnaryFunctionLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32(); + break; + case 2: + message.alpha = reader.float(); + break; + case 3: + message.epsilon = reader.float(); + break; + case 4: + message.shift = reader.float(); + break; + case 5: + message.scale = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.UnaryFunctionLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.enum($root.CoreML.Specification.UnaryFunctionLayerParams.Operation); + break; + case "alpha": + message.alpha = reader.float(); + break; + case "epsilon": + message.epsilon = reader.float(); + break; + case "shift": + message.shift = reader.float(); + break; + case "scale": + message.scale = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.UnaryFunctionLayerParams.prototype.type = 0; +$root.CoreML.Specification.UnaryFunctionLayerParams.prototype.alpha = 0; +$root.CoreML.Specification.UnaryFunctionLayerParams.prototype.epsilon = 0; +$root.CoreML.Specification.UnaryFunctionLayerParams.prototype.shift = 0; +$root.CoreML.Specification.UnaryFunctionLayerParams.prototype.scale = 0; + +$root.CoreML.Specification.UnaryFunctionLayerParams.Operation = { + "SQRT": 0, + "RSQRT": 1, + "INVERSE": 2, + "POWER": 3, + "EXP": 4, + "LOG": 5, + "ABS": 6, + "THRESHOLD": 7 +}; + +$root.CoreML.Specification.UpsampleLayerParams = class UpsampleLayerParams { + + constructor() { + this.scalingFactor = []; + this.fractionalScalingFactor = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.UpsampleLayerParams(); + const end = length !== undefined ? 
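+ // Each iteration of the loop below reads one varint key: the field number is
+ // tag >>> 3 and the wire type is tag & 7; unknown fields are skipped by wire type
+ // via reader.skipType(tag & 7) so decoding stays forward-compatible.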
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.scalingFactor = reader.array(message.scalingFactor, () => reader.uint64(), tag); + break; + case 7: + message.fractionalScalingFactor = reader.floats(message.fractionalScalingFactor, tag); + break; + case 5: + message.mode = reader.int32(); + break; + case 6: + message.linearUpsampleMode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.UpsampleLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "scalingFactor": + reader.array(message.scalingFactor, () => reader.uint64()); + break; + case "fractionalScalingFactor": + reader.array(message.fractionalScalingFactor, () => reader.float()); + break; + case "mode": + message.mode = reader.enum($root.CoreML.Specification.UpsampleLayerParams.InterpolationMode); + break; + case "linearUpsampleMode": + message.linearUpsampleMode = reader.enum($root.CoreML.Specification.UpsampleLayerParams.LinearUpsampleMode); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.UpsampleLayerParams.prototype.mode = 0; +$root.CoreML.Specification.UpsampleLayerParams.prototype.linearUpsampleMode = 0; + +$root.CoreML.Specification.UpsampleLayerParams.InterpolationMode = { + "NN": 0, + "BILINEAR": 1 +}; + +$root.CoreML.Specification.UpsampleLayerParams.LinearUpsampleMode = { + "DEFAULT": 0, + "ALIGN_CORNERS_TRUE": 1, + "ALIGN_CORNERS_FALSE": 2 +}; + +$root.CoreML.Specification.ResizeBilinearLayerParams = class ResizeBilinearLayerParams { + + constructor() { + this.targetSize = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ResizeBilinearLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.targetSize = reader.array(message.targetSize, () => reader.uint64(), tag); + break; + case 2: + message.mode = $root.CoreML.Specification.SamplingMode.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ResizeBilinearLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "targetSize": + reader.array(message.targetSize, () => reader.uint64()); + break; + case "mode": + message.mode = $root.CoreML.Specification.SamplingMode.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ResizeBilinearLayerParams.prototype.mode = null; + +$root.CoreML.Specification.CropResizeLayerParams = class CropResizeLayerParams { + + constructor() { + this.targetSize = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CropResizeLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.targetSize = reader.array(message.targetSize, () => reader.uint64(), tag); + break; + case 2: + message.normalizedCoordinates = reader.bool(); + break; + case 3: + message.mode = $root.CoreML.Specification.SamplingMode.decode(reader, reader.uint32()); + break; + case 4: + message.boxIndicesMode = $root.CoreML.Specification.BoxCoordinatesMode.decode(reader, reader.uint32()); + break; + case 5: + message.spatialScale = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CropResizeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "targetSize": + reader.array(message.targetSize, () => reader.uint64()); + break; + case "normalizedCoordinates": + message.normalizedCoordinates = reader.bool(); + break; + case "mode": + message.mode = $root.CoreML.Specification.SamplingMode.decodeText(reader); + break; + case "boxIndicesMode": + message.boxIndicesMode = $root.CoreML.Specification.BoxCoordinatesMode.decodeText(reader); + break; + case "spatialScale": + message.spatialScale = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CropResizeLayerParams.prototype.normalizedCoordinates = false; +$root.CoreML.Specification.CropResizeLayerParams.prototype.mode = null; +$root.CoreML.Specification.CropResizeLayerParams.prototype.boxIndicesMode = null; +$root.CoreML.Specification.CropResizeLayerParams.prototype.spatialScale = 0; + +$root.CoreML.Specification.BiasLayerParams = class BiasLayerParams { + + constructor() { + this.shape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BiasLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = reader.array(message.shape, () => reader.uint64(), tag); + break; + case 2: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BiasLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + reader.array(message.shape, () => reader.uint64()); + break; + case "bias": + message.bias = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BiasLayerParams.prototype.bias = null; + +$root.CoreML.Specification.ScaleLayerParams = class ScaleLayerParams { + + constructor() { + this.shapeScale = []; + this.shapeBias = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ScaleLayerParams(); + const end = length !== undefined ? 
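+ // Embedded messages such as WeightParams arrive length-delimited: the nested
+ // reader.uint32() call consumes the varint byte-length prefix, which is then
+ // handed to the sub-decoder so it stops at the correct boundary.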
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shapeScale = reader.array(message.shapeScale, () => reader.uint64(), tag); + break; + case 2: + message.scale = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 3: + message.hasBias = reader.bool(); + break; + case 4: + message.shapeBias = reader.array(message.shapeBias, () => reader.uint64(), tag); + break; + case 5: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ScaleLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shapeScale": + reader.array(message.shapeScale, () => reader.uint64()); + break; + case "scale": + message.scale = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "hasBias": + message.hasBias = reader.bool(); + break; + case "shapeBias": + reader.array(message.shapeBias, () => reader.uint64()); + break; + case "bias": + message.bias = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ScaleLayerParams.prototype.scale = null; +$root.CoreML.Specification.ScaleLayerParams.prototype.hasBias = false; +$root.CoreML.Specification.ScaleLayerParams.prototype.bias = null; + +$root.CoreML.Specification.LoadConstantLayerParams = class LoadConstantLayerParams { + + constructor() { + this.shape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LoadConstantLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = reader.array(message.shape, () => reader.uint64(), tag); + break; + case 2: + message.data = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LoadConstantLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + reader.array(message.shape, () => reader.uint64()); + break; + case "data": + message.data = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LoadConstantLayerParams.prototype.data = null; + +$root.CoreML.Specification.L2NormalizeLayerParams = class L2NormalizeLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.L2NormalizeLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.epsilon = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.L2NormalizeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "epsilon": + message.epsilon = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.L2NormalizeLayerParams.prototype.epsilon = 0; + +$root.CoreML.Specification.FlattenLayerParams = class FlattenLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FlattenLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FlattenLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "mode": + message.mode = reader.enum($root.CoreML.Specification.FlattenLayerParams.FlattenOrder); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FlattenLayerParams.prototype.mode = 0; + +$root.CoreML.Specification.FlattenLayerParams.FlattenOrder = { + "CHANNEL_FIRST": 0, + "CHANNEL_LAST": 1 +}; + +$root.CoreML.Specification.ReshapeLayerParams = class ReshapeLayerParams { + + constructor() { + this.targetShape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReshapeLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.targetShape = reader.array(message.targetShape, () => reader.int64(), tag); + break; + case 2: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReshapeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "targetShape": + reader.array(message.targetShape, () => reader.int64()); + break; + case "mode": + message.mode = reader.enum($root.CoreML.Specification.ReshapeLayerParams.ReshapeOrder); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReshapeLayerParams.prototype.mode = 0; + +$root.CoreML.Specification.ReshapeLayerParams.ReshapeOrder = { + "CHANNEL_FIRST": 0, + "CHANNEL_LAST": 1 +}; + +$root.CoreML.Specification.PermuteLayerParams = class PermuteLayerParams { + + constructor() { + this.axis = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PermuteLayerParams(); + const end = length !== undefined ? 
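+ // decodeText() mirrors decode() for the protobuf text format: fields are matched
+ // by name rather than by number, and enum values are resolved from their symbolic
+ // names via reader.enum() instead of being read as raw int32 varints.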
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.array(message.axis, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PermuteLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + reader.array(message.axis, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReorganizeDataLayerParams = class ReorganizeDataLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReorganizeDataLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32(); + break; + case 2: + message.blockSize = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReorganizeDataLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "mode": + message.mode = reader.enum($root.CoreML.Specification.ReorganizeDataLayerParams.ReorganizationType); + break; + case "blockSize": + message.blockSize = reader.uint64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReorganizeDataLayerParams.prototype.mode = 0; +$root.CoreML.Specification.ReorganizeDataLayerParams.prototype.blockSize = protobuf.Uint64.create(0); + +$root.CoreML.Specification.ReorganizeDataLayerParams.ReorganizationType = { + "SPACE_TO_DEPTH": 0, + "DEPTH_TO_SPACE": 1, + "PIXEL_SHUFFLE": 2 +}; + +$root.CoreML.Specification.SliceLayerParams = class SliceLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SliceLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.startIndex = reader.int64(); + break; + case 2: + message.endIndex = reader.int64(); + break; + case 3: + message.stride = reader.uint64(); + break; + case 4: + message.axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SliceLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "startIndex": + message.startIndex = reader.int64(); + break; + case "endIndex": + message.endIndex = reader.int64(); + break; + case "stride": + message.stride = reader.uint64(); + break; + case "axis": + message.axis = reader.enum($root.CoreML.Specification.SliceLayerParams.SliceAxis); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SliceLayerParams.prototype.startIndex = protobuf.Int64.create(0); +$root.CoreML.Specification.SliceLayerParams.prototype.endIndex = protobuf.Int64.create(0); +$root.CoreML.Specification.SliceLayerParams.prototype.stride = protobuf.Uint64.create(0); +$root.CoreML.Specification.SliceLayerParams.prototype.axis = 0; + +$root.CoreML.Specification.SliceLayerParams.SliceAxis = { + "CHANNEL_AXIS": 0, + "HEIGHT_AXIS": 1, + "WIDTH_AXIS": 2 +}; + +$root.CoreML.Specification.ReduceLayerParams = class ReduceLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReduceLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32(); + break; + case 2: + message.epsilon = reader.float(); + break; + case 3: + message.axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReduceLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "mode": + message.mode = reader.enum($root.CoreML.Specification.ReduceLayerParams.ReduceOperation); + break; + case "epsilon": + message.epsilon = reader.float(); + break; + case "axis": + message.axis = reader.enum($root.CoreML.Specification.ReduceLayerParams.ReduceAxis); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReduceLayerParams.prototype.mode = 0; +$root.CoreML.Specification.ReduceLayerParams.prototype.epsilon = 0; +$root.CoreML.Specification.ReduceLayerParams.prototype.axis = 0; + +$root.CoreML.Specification.ReduceLayerParams.ReduceOperation = { + "SUM": 0, + "AVG": 1, + "PROD": 2, + "LOGSUM": 3, + "SUMSQUARE": 4, + "L1": 5, + "L2": 6, + "MAX": 7, + "MIN": 8, + "ARGMAX": 9 +}; + +$root.CoreML.Specification.ReduceLayerParams.ReduceAxis = { + "CHW": 0, + "HW": 1, + "C": 2, + "H": 3, + "W": 4 +}; + +$root.CoreML.Specification.CropLayerParams = class CropLayerParams { + + constructor() { + this.offset = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CropLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cropAmounts = $root.CoreML.Specification.BorderAmounts.decode(reader, reader.uint32()); + break; + case 5: + message.offset = reader.array(message.offset, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CropLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "cropAmounts": + message.cropAmounts = $root.CoreML.Specification.BorderAmounts.decodeText(reader); + break; + case "offset": + reader.array(message.offset, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CropLayerParams.prototype.cropAmounts = null; + +$root.CoreML.Specification.AverageLayerParams = class AverageLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.AverageLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.AverageLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MaxLayerParams = class MaxLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MaxLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MaxLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MinLayerParams = class MinLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MinLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MinLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DotProductLayerParams = class DotProductLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.DotProductLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cosineSimilarity = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.DotProductLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "cosineSimilarity": + message.cosineSimilarity = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DotProductLayerParams.prototype.cosineSimilarity = false; + +$root.CoreML.Specification.MeanVarianceNormalizeLayerParams = class MeanVarianceNormalizeLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MeanVarianceNormalizeLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.acrossChannels = reader.bool(); + break; + case 2: + message.normalizeVariance = reader.bool(); + break; + case 3: + message.epsilon = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MeanVarianceNormalizeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "acrossChannels": + message.acrossChannels = reader.bool(); + break; + case "normalizeVariance": + message.normalizeVariance = reader.bool(); + break; + case "epsilon": + message.epsilon = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MeanVarianceNormalizeLayerParams.prototype.acrossChannels = false; +$root.CoreML.Specification.MeanVarianceNormalizeLayerParams.prototype.normalizeVariance = false; +$root.CoreML.Specification.MeanVarianceNormalizeLayerParams.prototype.epsilon = 0; + +$root.CoreML.Specification.SequenceRepeatLayerParams = class SequenceRepeatLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SequenceRepeatLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.nRepetitions = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SequenceRepeatLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "nRepetitions": + message.nRepetitions = reader.uint64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SequenceRepeatLayerParams.prototype.nRepetitions = protobuf.Uint64.create(0); + +$root.CoreML.Specification.SimpleRecurrentLayerParams = class SimpleRecurrentLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SimpleRecurrentLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputVectorSize = reader.uint64(); + break; + case 2: + message.outputVectorSize = reader.uint64(); + break; + case 10: + message.activation = $root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32()); + break; + case 15: + message.sequenceOutput = reader.bool(); + break; + case 20: + message.hasBiasVector = reader.bool(); + break; + case 30: + message.weightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 31: + message.recursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 32: + message.biasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 100: + message.reverseInput = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SimpleRecurrentLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputVectorSize": + message.inputVectorSize = reader.uint64(); + break; + case "outputVectorSize": + message.outputVectorSize = reader.uint64(); + break; + case "activation": + message.activation = $root.CoreML.Specification.ActivationParams.decodeText(reader); + break; + case "sequenceOutput": + message.sequenceOutput = reader.bool(); + break; + case "hasBiasVector": + message.hasBiasVector = reader.bool(); + break; + case "weightMatrix": + message.weightMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "recursionMatrix": + message.recursionMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "biasVector": + message.biasVector = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "reverseInput": + message.reverseInput = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SimpleRecurrentLayerParams.prototype.inputVectorSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.SimpleRecurrentLayerParams.prototype.outputVectorSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.SimpleRecurrentLayerParams.prototype.activation = null; +$root.CoreML.Specification.SimpleRecurrentLayerParams.prototype.sequenceOutput = false; +$root.CoreML.Specification.SimpleRecurrentLayerParams.prototype.hasBiasVector = false; +$root.CoreML.Specification.SimpleRecurrentLayerParams.prototype.weightMatrix = null; +$root.CoreML.Specification.SimpleRecurrentLayerParams.prototype.recursionMatrix = null; +$root.CoreML.Specification.SimpleRecurrentLayerParams.prototype.biasVector = null; +$root.CoreML.Specification.SimpleRecurrentLayerParams.prototype.reverseInput = false; + +$root.CoreML.Specification.GRULayerParams = class GRULayerParams { + + constructor() { + this.activations = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GRULayerParams(); + const end = length !== undefined ? 
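+ // Repeated message fields such as activations (initialized to [] in the
+ // constructor) are accumulated with push(); each occurrence of the field on the
+ // wire appends one decoded element.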
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputVectorSize = reader.uint64(); + break; + case 2: + message.outputVectorSize = reader.uint64(); + break; + case 10: + message.activations.push($root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32())); + break; + case 15: + message.sequenceOutput = reader.bool(); + break; + case 20: + message.hasBiasVectors = reader.bool(); + break; + case 30: + message.updateGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 31: + message.resetGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 32: + message.outputGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 50: + message.updateGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 51: + message.resetGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 52: + message.outputGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 70: + message.updateGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 71: + message.resetGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 72: + message.outputGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 100: + message.reverseInput = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GRULayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputVectorSize": + message.inputVectorSize = reader.uint64(); + break; + case "outputVectorSize": + message.outputVectorSize = reader.uint64(); + break; + case "activations": + message.activations.push($root.CoreML.Specification.ActivationParams.decodeText(reader)); + break; + case "sequenceOutput": + message.sequenceOutput = reader.bool(); + break; + case "hasBiasVectors": + message.hasBiasVectors = reader.bool(); + break; + case "updateGateWeightMatrix": + message.updateGateWeightMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "resetGateWeightMatrix": + message.resetGateWeightMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "outputGateWeightMatrix": + message.outputGateWeightMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "updateGateRecursionMatrix": + message.updateGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "resetGateRecursionMatrix": + message.resetGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "outputGateRecursionMatrix": + message.outputGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "updateGateBiasVector": + message.updateGateBiasVector = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "resetGateBiasVector": + message.resetGateBiasVector = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "outputGateBiasVector": 
+ message.outputGateBiasVector = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "reverseInput": + message.reverseInput = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GRULayerParams.prototype.inputVectorSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.GRULayerParams.prototype.outputVectorSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.GRULayerParams.prototype.sequenceOutput = false; +$root.CoreML.Specification.GRULayerParams.prototype.hasBiasVectors = false; +$root.CoreML.Specification.GRULayerParams.prototype.updateGateWeightMatrix = null; +$root.CoreML.Specification.GRULayerParams.prototype.resetGateWeightMatrix = null; +$root.CoreML.Specification.GRULayerParams.prototype.outputGateWeightMatrix = null; +$root.CoreML.Specification.GRULayerParams.prototype.updateGateRecursionMatrix = null; +$root.CoreML.Specification.GRULayerParams.prototype.resetGateRecursionMatrix = null; +$root.CoreML.Specification.GRULayerParams.prototype.outputGateRecursionMatrix = null; +$root.CoreML.Specification.GRULayerParams.prototype.updateGateBiasVector = null; +$root.CoreML.Specification.GRULayerParams.prototype.resetGateBiasVector = null; +$root.CoreML.Specification.GRULayerParams.prototype.outputGateBiasVector = null; +$root.CoreML.Specification.GRULayerParams.prototype.reverseInput = false; + +$root.CoreML.Specification.LSTMParams = class LSTMParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LSTMParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.sequenceOutput = reader.bool(); + break; + case 20: + message.hasBiasVectors = reader.bool(); + break; + case 30: + message.forgetBias = reader.bool(); + break; + case 40: + message.hasPeepholeVectors = reader.bool(); + break; + case 50: + message.coupledInputAndForgetGate = reader.bool(); + break; + case 60: + message.cellClipThreshold = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LSTMParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "sequenceOutput": + message.sequenceOutput = reader.bool(); + break; + case "hasBiasVectors": + message.hasBiasVectors = reader.bool(); + break; + case "forgetBias": + message.forgetBias = reader.bool(); + break; + case "hasPeepholeVectors": + message.hasPeepholeVectors = reader.bool(); + break; + case "coupledInputAndForgetGate": + message.coupledInputAndForgetGate = reader.bool(); + break; + case "cellClipThreshold": + message.cellClipThreshold = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LSTMParams.prototype.sequenceOutput = false; +$root.CoreML.Specification.LSTMParams.prototype.hasBiasVectors = false; +$root.CoreML.Specification.LSTMParams.prototype.forgetBias = false; +$root.CoreML.Specification.LSTMParams.prototype.hasPeepholeVectors = false; +$root.CoreML.Specification.LSTMParams.prototype.coupledInputAndForgetGate = false; +$root.CoreML.Specification.LSTMParams.prototype.cellClipThreshold = 0; + +$root.CoreML.Specification.LSTMWeightParams = class LSTMWeightParams { + + constructor() 
{ + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LSTMWeightParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 2: + message.forgetGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 3: + message.blockInputWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 4: + message.outputGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 20: + message.inputGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 21: + message.forgetGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 22: + message.blockInputRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 23: + message.outputGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 40: + message.inputGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 41: + message.forgetGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 42: + message.blockInputBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 43: + message.outputGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 60: + message.inputGatePeepholeVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 61: + message.forgetGatePeepholeVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 62: + message.outputGatePeepholeVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LSTMWeightParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputGateWeightMatrix": + message.inputGateWeightMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "forgetGateWeightMatrix": + message.forgetGateWeightMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "blockInputWeightMatrix": + message.blockInputWeightMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "outputGateWeightMatrix": + message.outputGateWeightMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "inputGateRecursionMatrix": + message.inputGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "forgetGateRecursionMatrix": + message.forgetGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "blockInputRecursionMatrix": + message.blockInputRecursionMatrix = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "outputGateRecursionMatrix": + message.outputGateRecursionMatrix = 
$root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "inputGateBiasVector": + message.inputGateBiasVector = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "forgetGateBiasVector": + message.forgetGateBiasVector = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "blockInputBiasVector": + message.blockInputBiasVector = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "outputGateBiasVector": + message.outputGateBiasVector = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "inputGatePeepholeVector": + message.inputGatePeepholeVector = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "forgetGatePeepholeVector": + message.forgetGatePeepholeVector = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "outputGatePeepholeVector": + message.outputGatePeepholeVector = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LSTMWeightParams.prototype.inputGateWeightMatrix = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.forgetGateWeightMatrix = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.blockInputWeightMatrix = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.outputGateWeightMatrix = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.inputGateRecursionMatrix = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.forgetGateRecursionMatrix = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.blockInputRecursionMatrix = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.outputGateRecursionMatrix = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.inputGateBiasVector = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.forgetGateBiasVector = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.blockInputBiasVector = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.outputGateBiasVector = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.inputGatePeepholeVector = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.forgetGatePeepholeVector = null; +$root.CoreML.Specification.LSTMWeightParams.prototype.outputGatePeepholeVector = null; + +$root.CoreML.Specification.UniDirectionalLSTMLayerParams = class UniDirectionalLSTMLayerParams { + + constructor() { + this.activations = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.UniDirectionalLSTMLayerParams(); + const end = length !== undefined ? 
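+ // Scalar and sub-message defaults (null for messages, false for bools,
+ // protobuf.Uint64/Int64 zeros for 64-bit integers) are assigned on the prototype,
+ // as in the LSTMWeightParams block above, so a decoded message only stores the
+ // fields actually present on the wire.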
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputVectorSize = reader.uint64(); + break; + case 2: + message.outputVectorSize = reader.uint64(); + break; + case 10: + message.activations.push($root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32())); + break; + case 15: + message.params = $root.CoreML.Specification.LSTMParams.decode(reader, reader.uint32()); + break; + case 20: + message.weightParams = $root.CoreML.Specification.LSTMWeightParams.decode(reader, reader.uint32()); + break; + case 100: + message.reverseInput = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.UniDirectionalLSTMLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputVectorSize": + message.inputVectorSize = reader.uint64(); + break; + case "outputVectorSize": + message.outputVectorSize = reader.uint64(); + break; + case "activations": + message.activations.push($root.CoreML.Specification.ActivationParams.decodeText(reader)); + break; + case "params": + message.params = $root.CoreML.Specification.LSTMParams.decodeText(reader); + break; + case "weightParams": + message.weightParams = $root.CoreML.Specification.LSTMWeightParams.decodeText(reader); + break; + case "reverseInput": + message.reverseInput = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.UniDirectionalLSTMLayerParams.prototype.inputVectorSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.UniDirectionalLSTMLayerParams.prototype.outputVectorSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.UniDirectionalLSTMLayerParams.prototype.params = null; +$root.CoreML.Specification.UniDirectionalLSTMLayerParams.prototype.weightParams = null; +$root.CoreML.Specification.UniDirectionalLSTMLayerParams.prototype.reverseInput = false; + +$root.CoreML.Specification.BiDirectionalLSTMLayerParams = class BiDirectionalLSTMLayerParams { + + constructor() { + this.activationsForwardLSTM = []; + this.activationsBackwardLSTM = []; + this.weightParams = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BiDirectionalLSTMLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputVectorSize = reader.uint64(); + break; + case 2: + message.outputVectorSize = reader.uint64(); + break; + case 10: + message.activationsForwardLSTM.push($root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32())); + break; + case 11: + message.activationsBackwardLSTM.push($root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32())); + break; + case 15: + message.params = $root.CoreML.Specification.LSTMParams.decode(reader, reader.uint32()); + break; + case 20: + message.weightParams.push($root.CoreML.Specification.LSTMWeightParams.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BiDirectionalLSTMLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputVectorSize": + message.inputVectorSize = reader.uint64(); + break; + case "outputVectorSize": + message.outputVectorSize = reader.uint64(); + break; + case "activationsForwardLSTM": + message.activationsForwardLSTM.push($root.CoreML.Specification.ActivationParams.decodeText(reader)); + break; + case "activationsBackwardLSTM": + message.activationsBackwardLSTM.push($root.CoreML.Specification.ActivationParams.decodeText(reader)); + break; + case "params": + message.params = $root.CoreML.Specification.LSTMParams.decodeText(reader); + break; + case "weightParams": + message.weightParams.push($root.CoreML.Specification.LSTMWeightParams.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BiDirectionalLSTMLayerParams.prototype.inputVectorSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.BiDirectionalLSTMLayerParams.prototype.outputVectorSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.BiDirectionalLSTMLayerParams.prototype.params = null; + +$root.CoreML.Specification.CustomLayerParams = class CustomLayerParams { + + constructor() { + this.weights = []; + this.parameters = {}; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CustomLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.className = reader.string(); + break; + case 20: + message.weights.push($root.CoreML.Specification.WeightParams.decode(reader, reader.uint32())); + break; + case 30: + reader.entry(message.parameters, () => reader.string(), () => $root.CoreML.Specification.CustomLayerParams.CustomLayerParamValue.decode(reader, reader.uint32())); + break; + case 40: + message.description = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CustomLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "className": + message.className = reader.string(); + break; + case "weights": + message.weights.push($root.CoreML.Specification.WeightParams.decodeText(reader)); + break; + case "parameters": + reader.entry(message.parameters, () => reader.string(), () => $root.CoreML.Specification.CustomLayerParams.CustomLayerParamValue.decodeText(reader)); + break; + case "description": + message.description = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CustomLayerParams.prototype.className = ""; +$root.CoreML.Specification.CustomLayerParams.prototype.description = ""; + +$root.CoreML.Specification.CustomLayerParams.CustomLayerParamValue = class CustomLayerParamValue { + + constructor() { + } + + get value() { + $root.CoreML.Specification.CustomLayerParams.CustomLayerParamValue.valueSet = $root.CoreML.Specification.CustomLayerParams.CustomLayerParamValue.valueSet || new Set([ "doubleValue", "stringValue", "intValue", "longValue", "boolValue"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.CustomLayerParams.CustomLayerParamValue.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CustomLayerParams.CustomLayerParamValue(); + const end = length !== undefined ? 
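+ // The value getter above implements the proto oneof: at most one of doubleValue,
+ // stringValue, intValue, longValue or boolValue is set on a given message, and
+ // the getter returns the name of whichever member is populated.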
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.doubleValue = reader.double(); + break; + case 20: + message.stringValue = reader.string(); + break; + case 30: + message.intValue = reader.int32(); + break; + case 40: + message.longValue = reader.int64(); + break; + case 50: + message.boolValue = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CustomLayerParams.CustomLayerParamValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "doubleValue": + message.doubleValue = reader.double(); + break; + case "stringValue": + message.stringValue = reader.string(); + break; + case "intValue": + message.intValue = reader.int32(); + break; + case "longValue": + message.longValue = reader.int64(); + break; + case "boolValue": + message.boolValue = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.TransposeLayerParams = class TransposeLayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.TransposeLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.TransposeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BatchedMatMulLayerParams = class BatchedMatMulLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BatchedMatMulLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.transposeA = reader.bool(); + break; + case 2: + message.transposeB = reader.bool(); + break; + case 5: + message.weightMatrixFirstDimension = reader.uint64(); + break; + case 6: + message.weightMatrixSecondDimension = reader.uint64(); + break; + case 7: + message.hasBias = reader.bool(); + break; + case 8: + message.weights = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 9: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 10: + message.int8DynamicQuantize = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BatchedMatMulLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "transposeA": + message.transposeA = reader.bool(); + break; + case "transposeB": + message.transposeB = reader.bool(); + break; + case "weightMatrixFirstDimension": + message.weightMatrixFirstDimension = reader.uint64(); + break; + case "weightMatrixSecondDimension": + message.weightMatrixSecondDimension = reader.uint64(); + break; + case "hasBias": + message.hasBias = reader.bool(); + break; + case "weights": + message.weights = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "bias": + message.bias = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "int8DynamicQuantize": + message.int8DynamicQuantize = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BatchedMatMulLayerParams.prototype.transposeA = false; +$root.CoreML.Specification.BatchedMatMulLayerParams.prototype.transposeB = false; +$root.CoreML.Specification.BatchedMatMulLayerParams.prototype.weightMatrixFirstDimension = protobuf.Uint64.create(0); +$root.CoreML.Specification.BatchedMatMulLayerParams.prototype.weightMatrixSecondDimension = protobuf.Uint64.create(0); +$root.CoreML.Specification.BatchedMatMulLayerParams.prototype.hasBias = false; +$root.CoreML.Specification.BatchedMatMulLayerParams.prototype.weights = null; +$root.CoreML.Specification.BatchedMatMulLayerParams.prototype.bias = null; +$root.CoreML.Specification.BatchedMatMulLayerParams.prototype.int8DynamicQuantize = false; + +$root.CoreML.Specification.ConcatNDLayerParams = class ConcatNDLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ConcatNDLayerParams(); + const end = length !== undefined ? 
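+ // A minimal usage sketch for these decoders (assuming the protobuf module used by
+ // this file exposes a static BinaryReader.open(), which is an assumption here):
+ //   const reader = protobuf.BinaryReader.open(buffer);
+ //   const params = $root.CoreML.Specification.BatchedMatMulLayerParams.decode(reader, buffer.length);
+ //   if (params.hasBias) { /* params.bias is a decoded WeightParams message */ }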
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.interleave = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ConcatNDLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "interleave": + message.interleave = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ConcatNDLayerParams.prototype.axis = protobuf.Int64.create(0); +$root.CoreML.Specification.ConcatNDLayerParams.prototype.interleave = false; + +$root.CoreML.Specification.SoftmaxNDLayerParams = class SoftmaxNDLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SoftmaxNDLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SoftmaxNDLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SoftmaxNDLayerParams.prototype.axis = protobuf.Int64.create(0); + +$root.CoreML.Specification.ReverseLayerParams = class ReverseLayerParams { + + constructor() { + this.reverseDim = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReverseLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.reverseDim = reader.array(message.reverseDim, () => reader.bool(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReverseLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "reverseDim": + reader.array(message.reverseDim, () => reader.bool()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReverseSeqLayerParams = class ReverseSeqLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReverseSeqLayerParams(); + const end = length !== undefined ? 
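+ // reader.array() is given the field tag so it can accept both packed (a single
+ // length-delimited run) and unpacked (one element per key) encodings of repeated
+ // scalars such as reverseDim above.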
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.batchAxis = reader.int64(); + break; + case 2: + message.sequenceAxis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReverseSeqLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "batchAxis": + message.batchAxis = reader.int64(); + break; + case "sequenceAxis": + message.sequenceAxis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReverseSeqLayerParams.prototype.batchAxis = protobuf.Int64.create(0); +$root.CoreML.Specification.ReverseSeqLayerParams.prototype.sequenceAxis = protobuf.Int64.create(0); + +$root.CoreML.Specification.LoadConstantNDLayerParams = class LoadConstantNDLayerParams { + + constructor() { + this.shape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LoadConstantNDLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = reader.array(message.shape, () => reader.uint64(), tag); + break; + case 2: + message.data = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LoadConstantNDLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + reader.array(message.shape, () => reader.uint64()); + break; + case "data": + message.data = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LoadConstantNDLayerParams.prototype.data = null; + +$root.CoreML.Specification.FillLikeLayerParams = class FillLikeLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FillLikeLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FillLikeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + message.value = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FillLikeLayerParams.prototype.value = 0; + +$root.CoreML.Specification.FillStaticLayerParams = class FillStaticLayerParams { + + constructor() { + this.targetShape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FillStaticLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.float(); + break; + case 2: + message.targetShape = reader.array(message.targetShape, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FillStaticLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + message.value = reader.float(); + break; + case "targetShape": + reader.array(message.targetShape, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FillStaticLayerParams.prototype.value = 0; + +$root.CoreML.Specification.FillDynamicLayerParams = class FillDynamicLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FillDynamicLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FillDynamicLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + message.value = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FillDynamicLayerParams.prototype.value = 0; + +$root.CoreML.Specification.WhereBroadcastableLayerParams = class WhereBroadcastableLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.WhereBroadcastableLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.WhereBroadcastableLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SinLayerParams = class SinLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SinLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SinLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CosLayerParams = class CosLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CosLayerParams(); + const end = length !== undefined ? 
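+ // Sin/Cos/Tan and the other unary trigonometric layer params below define no fields, so their decode loops only consume whatever is on the wire: skipType(tag & 7) advances past the field's payload according to its wire type (varint, 64-bit, length-delimited, or 32-bit).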
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CosLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.TanLayerParams = class TanLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.TanLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.TanLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.AsinLayerParams = class AsinLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.AsinLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.AsinLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.AcosLayerParams = class AcosLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.AcosLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.AcosLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.AtanLayerParams = class AtanLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.AtanLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.AtanLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SinhLayerParams = class SinhLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SinhLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SinhLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CoshLayerParams = class CoshLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CoshLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CoshLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.TanhLayerParams = class TanhLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.TanhLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.TanhLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.AsinhLayerParams = class AsinhLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.AsinhLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.AsinhLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.AcoshLayerParams = class AcoshLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.AcoshLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.AcoshLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.AtanhLayerParams = class AtanhLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.AtanhLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.AtanhLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.PowBroadcastableLayerParams = class PowBroadcastableLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PowBroadcastableLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PowBroadcastableLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Exp2LayerParams = class Exp2LayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Exp2LayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Exp2LayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.WhereNonZeroLayerParams = class WhereNonZeroLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.WhereNonZeroLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.WhereNonZeroLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MatrixBandPartLayerParams = class MatrixBandPartLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MatrixBandPartLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.numLower = reader.int64(); + break; + case 2: + message.numUpper = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MatrixBandPartLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "numLower": + message.numLower = reader.int64(); + break; + case "numUpper": + message.numUpper = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MatrixBandPartLayerParams.prototype.numLower = protobuf.Int64.create(0); +$root.CoreML.Specification.MatrixBandPartLayerParams.prototype.numUpper = protobuf.Int64.create(0); + +$root.CoreML.Specification.UpperTriangularLayerParams = class UpperTriangularLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.UpperTriangularLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.k = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.UpperTriangularLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "k": + message.k = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.UpperTriangularLayerParams.prototype.k = protobuf.Int64.create(0); + +$root.CoreML.Specification.LowerTriangularLayerParams = class LowerTriangularLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LowerTriangularLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.k = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LowerTriangularLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "k": + message.k = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LowerTriangularLayerParams.prototype.k = protobuf.Int64.create(0); + +$root.CoreML.Specification.BroadcastToLikeLayerParams = class BroadcastToLikeLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BroadcastToLikeLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BroadcastToLikeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BroadcastToStaticLayerParams = class BroadcastToStaticLayerParams { + + constructor() { + this.targetShape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BroadcastToStaticLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.targetShape = reader.array(message.targetShape, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BroadcastToStaticLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "targetShape": + reader.array(message.targetShape, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.BroadcastToDynamicLayerParams = class BroadcastToDynamicLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.BroadcastToDynamicLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.BroadcastToDynamicLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.AddBroadcastableLayerParams = class AddBroadcastableLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.AddBroadcastableLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.AddBroadcastableLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MaxBroadcastableLayerParams = class MaxBroadcastableLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MaxBroadcastableLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MaxBroadcastableLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MinBroadcastableLayerParams = class MinBroadcastableLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MinBroadcastableLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MinBroadcastableLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ModBroadcastableLayerParams = class ModBroadcastableLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ModBroadcastableLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ModBroadcastableLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FloorDivBroadcastableLayerParams = class FloorDivBroadcastableLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FloorDivBroadcastableLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FloorDivBroadcastableLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SubtractBroadcastableLayerParams = class SubtractBroadcastableLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SubtractBroadcastableLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SubtractBroadcastableLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MultiplyBroadcastableLayerParams = class MultiplyBroadcastableLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MultiplyBroadcastableLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MultiplyBroadcastableLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DivideBroadcastableLayerParams = class DivideBroadcastableLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.DivideBroadcastableLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.DivideBroadcastableLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GatherLayerParams = class GatherLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GatherLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GatherLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GatherLayerParams.prototype.axis = protobuf.Int64.create(0); + +$root.CoreML.Specification.ScatterMode = { + "SCATTER_UPDATE": 0, + "SCATTER_ADD": 1, + "SCATTER_SUB": 2, + "SCATTER_MUL": 3, + "SCATTER_DIV": 4, + "SCATTER_MAX": 5, + "SCATTER_MIN": 6 +}; + +$root.CoreML.Specification.ScatterLayerParams = class ScatterLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ScatterLayerParams(); + const end = length !== undefined ? 
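+ // ScatterMode note: the binary decoder stores mode as the raw int32 varint, while decodeText resolves the symbolic name (e.g. "SCATTER_ADD") through reader.enum($root.CoreML.Specification.ScatterMode); both paths yield the numeric value defined in the enum above.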
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ScatterLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "mode": + message.mode = reader.enum($root.CoreML.Specification.ScatterMode); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ScatterLayerParams.prototype.axis = protobuf.Int64.create(0); +$root.CoreML.Specification.ScatterLayerParams.prototype.mode = 0; + +$root.CoreML.Specification.GatherNDLayerParams = class GatherNDLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GatherNDLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GatherNDLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ScatterNDLayerParams = class ScatterNDLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ScatterNDLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ScatterNDLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "mode": + message.mode = reader.enum($root.CoreML.Specification.ScatterMode); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ScatterNDLayerParams.prototype.mode = 0; + +$root.CoreML.Specification.GatherAlongAxisLayerParams = class GatherAlongAxisLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GatherAlongAxisLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GatherAlongAxisLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GatherAlongAxisLayerParams.prototype.axis = protobuf.Int64.create(0); + +$root.CoreML.Specification.ScatterAlongAxisLayerParams = class ScatterAlongAxisLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ScatterAlongAxisLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ScatterAlongAxisLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "mode": + message.mode = reader.enum($root.CoreML.Specification.ScatterMode); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ScatterAlongAxisLayerParams.prototype.axis = protobuf.Int64.create(0); +$root.CoreML.Specification.ScatterAlongAxisLayerParams.prototype.mode = 0; + +$root.CoreML.Specification.StackLayerParams = class StackLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.StackLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.StackLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.StackLayerParams.prototype.axis = protobuf.Int64.create(0); + +$root.CoreML.Specification.RankPreservingReshapeLayerParams = class RankPreservingReshapeLayerParams { + + constructor() { + this.targetShape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RankPreservingReshapeLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.targetShape = reader.array(message.targetShape, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RankPreservingReshapeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "targetShape": + reader.array(message.targetShape, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ConstantPaddingLayerParams = class ConstantPaddingLayerParams { + + constructor() { + this.padAmounts = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ConstantPaddingLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.float(); + break; + case 2: + message.padAmounts = reader.array(message.padAmounts, () => reader.uint64(), tag); + break; + case 3: + message.padToGivenOutputSizeMode = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ConstantPaddingLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + message.value = reader.float(); + break; + case "padAmounts": + reader.array(message.padAmounts, () => reader.uint64()); + break; + case "padToGivenOutputSizeMode": + message.padToGivenOutputSizeMode = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ConstantPaddingLayerParams.prototype.value = 0; +$root.CoreML.Specification.ConstantPaddingLayerParams.prototype.padToGivenOutputSizeMode = false; + +$root.CoreML.Specification.RandomNormalLikeLayerParams = class RandomNormalLikeLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RandomNormalLikeLayerParams(); + const end = length !== undefined ? 
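+ // Default-value note: proto3 zero-values (protobuf.Int64.create(0), 0, false, null) are assigned on each class prototype rather than in the constructor, so fields absent from the wire fall back to their defaults without being stored per instance.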
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.mean = reader.float(); + break; + case 3: + message.stdDev = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RandomNormalLikeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "seed": + message.seed = reader.int64(); + break; + case "mean": + message.mean = reader.float(); + break; + case "stdDev": + message.stdDev = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RandomNormalLikeLayerParams.prototype.seed = protobuf.Int64.create(0); +$root.CoreML.Specification.RandomNormalLikeLayerParams.prototype.mean = 0; +$root.CoreML.Specification.RandomNormalLikeLayerParams.prototype.stdDev = 0; + +$root.CoreML.Specification.RandomNormalStaticLayerParams = class RandomNormalStaticLayerParams { + + constructor() { + this.outputShape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RandomNormalStaticLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.mean = reader.float(); + break; + case 3: + message.stdDev = reader.float(); + break; + case 4: + message.outputShape = reader.array(message.outputShape, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RandomNormalStaticLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "seed": + message.seed = reader.int64(); + break; + case "mean": + message.mean = reader.float(); + break; + case "stdDev": + message.stdDev = reader.float(); + break; + case "outputShape": + reader.array(message.outputShape, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RandomNormalStaticLayerParams.prototype.seed = protobuf.Int64.create(0); +$root.CoreML.Specification.RandomNormalStaticLayerParams.prototype.mean = 0; +$root.CoreML.Specification.RandomNormalStaticLayerParams.prototype.stdDev = 0; + +$root.CoreML.Specification.RandomNormalDynamicLayerParams = class RandomNormalDynamicLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RandomNormalDynamicLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.mean = reader.float(); + break; + case 3: + message.stdDev = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RandomNormalDynamicLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "seed": + message.seed = reader.int64(); + break; + case "mean": + message.mean = reader.float(); + break; + case "stdDev": + message.stdDev = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RandomNormalDynamicLayerParams.prototype.seed = protobuf.Int64.create(0); +$root.CoreML.Specification.RandomNormalDynamicLayerParams.prototype.mean = 0; +$root.CoreML.Specification.RandomNormalDynamicLayerParams.prototype.stdDev = 0; + +$root.CoreML.Specification.RandomUniformLikeLayerParams = class RandomUniformLikeLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RandomUniformLikeLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.minVal = reader.float(); + break; + case 3: + message.maxVal = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RandomUniformLikeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "seed": + message.seed = reader.int64(); + break; + case "minVal": + message.minVal = reader.float(); + break; + case "maxVal": + message.maxVal = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RandomUniformLikeLayerParams.prototype.seed = protobuf.Int64.create(0); +$root.CoreML.Specification.RandomUniformLikeLayerParams.prototype.minVal = 0; +$root.CoreML.Specification.RandomUniformLikeLayerParams.prototype.maxVal = 0; + +$root.CoreML.Specification.RandomUniformStaticLayerParams = class RandomUniformStaticLayerParams { + + constructor() { + this.outputShape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RandomUniformStaticLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.minVal = reader.float(); + break; + case 3: + message.maxVal = reader.float(); + break; + case 4: + message.outputShape = reader.array(message.outputShape, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RandomUniformStaticLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "seed": + message.seed = reader.int64(); + break; + case "minVal": + message.minVal = reader.float(); + break; + case "maxVal": + message.maxVal = reader.float(); + break; + case "outputShape": + reader.array(message.outputShape, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RandomUniformStaticLayerParams.prototype.seed = protobuf.Int64.create(0); +$root.CoreML.Specification.RandomUniformStaticLayerParams.prototype.minVal = 0; +$root.CoreML.Specification.RandomUniformStaticLayerParams.prototype.maxVal = 0; + +$root.CoreML.Specification.RandomUniformDynamicLayerParams = class RandomUniformDynamicLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RandomUniformDynamicLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.minVal = reader.float(); + break; + case 3: + message.maxVal = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RandomUniformDynamicLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "seed": + message.seed = reader.int64(); + break; + case "minVal": + message.minVal = reader.float(); + break; + case "maxVal": + message.maxVal = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RandomUniformDynamicLayerParams.prototype.seed = protobuf.Int64.create(0); +$root.CoreML.Specification.RandomUniformDynamicLayerParams.prototype.minVal = 0; +$root.CoreML.Specification.RandomUniformDynamicLayerParams.prototype.maxVal = 0; + +$root.CoreML.Specification.RandomBernoulliLikeLayerParams = class RandomBernoulliLikeLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RandomBernoulliLikeLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.prob = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RandomBernoulliLikeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "seed": + message.seed = reader.int64(); + break; + case "prob": + message.prob = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RandomBernoulliLikeLayerParams.prototype.seed = protobuf.Int64.create(0); +$root.CoreML.Specification.RandomBernoulliLikeLayerParams.prototype.prob = 0; + +$root.CoreML.Specification.RandomBernoulliStaticLayerParams = class RandomBernoulliStaticLayerParams { + + constructor() { + this.outputShape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RandomBernoulliStaticLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.prob = reader.float(); + break; + case 3: + message.outputShape = reader.array(message.outputShape, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RandomBernoulliStaticLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "seed": + message.seed = reader.int64(); + break; + case "prob": + message.prob = reader.float(); + break; + case "outputShape": + reader.array(message.outputShape, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RandomBernoulliStaticLayerParams.prototype.seed = protobuf.Int64.create(0); +$root.CoreML.Specification.RandomBernoulliStaticLayerParams.prototype.prob = 0; + +$root.CoreML.Specification.RandomBernoulliDynamicLayerParams = class RandomBernoulliDynamicLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RandomBernoulliDynamicLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.prob = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RandomBernoulliDynamicLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "seed": + message.seed = reader.int64(); + break; + case "prob": + message.prob = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RandomBernoulliDynamicLayerParams.prototype.seed = protobuf.Int64.create(0); +$root.CoreML.Specification.RandomBernoulliDynamicLayerParams.prototype.prob = 0; + +$root.CoreML.Specification.CategoricalDistributionLayerParams = class CategoricalDistributionLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CategoricalDistributionLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.numSamples = reader.int64(); + break; + case 3: + message.isLogits = reader.bool(); + break; + case 4: + message.eps = reader.float(); + break; + case 5: + message.temperature = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CategoricalDistributionLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "seed": + message.seed = reader.int64(); + break; + case "numSamples": + message.numSamples = reader.int64(); + break; + case "isLogits": + message.isLogits = reader.bool(); + break; + case "eps": + message.eps = reader.float(); + break; + case "temperature": + message.temperature = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CategoricalDistributionLayerParams.prototype.seed = protobuf.Int64.create(0); +$root.CoreML.Specification.CategoricalDistributionLayerParams.prototype.numSamples = protobuf.Int64.create(0); +$root.CoreML.Specification.CategoricalDistributionLayerParams.prototype.isLogits = false; +$root.CoreML.Specification.CategoricalDistributionLayerParams.prototype.eps = 0; +$root.CoreML.Specification.CategoricalDistributionLayerParams.prototype.temperature = 0; + +$root.CoreML.Specification.ReduceL1LayerParams = class ReduceL1LayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReduceL1LayerParams(); + const end = length !== undefined ? 
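+ // Repeated-field note: the reduce layers below all share axes/keepDims/reduceAll; axes are read with reader.array(message.axes, () => reader.int64(), tag), where passing the tag presumably lets the helper accept both packed (wire type 2) and one-element-per-tag encodings of the repeated int64 field.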
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReduceL1LayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keepDims": + message.keepDims = reader.bool(); + break; + case "reduceAll": + message.reduceAll = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReduceL1LayerParams.prototype.keepDims = false; +$root.CoreML.Specification.ReduceL1LayerParams.prototype.reduceAll = false; + +$root.CoreML.Specification.ReduceL2LayerParams = class ReduceL2LayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReduceL2LayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReduceL2LayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keepDims": + message.keepDims = reader.bool(); + break; + case "reduceAll": + message.reduceAll = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReduceL2LayerParams.prototype.keepDims = false; +$root.CoreML.Specification.ReduceL2LayerParams.prototype.reduceAll = false; + +$root.CoreML.Specification.ReduceMaxLayerParams = class ReduceMaxLayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReduceMaxLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReduceMaxLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keepDims": + message.keepDims = reader.bool(); + break; + case "reduceAll": + message.reduceAll = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReduceMaxLayerParams.prototype.keepDims = false; +$root.CoreML.Specification.ReduceMaxLayerParams.prototype.reduceAll = false; + +$root.CoreML.Specification.ReduceMinLayerParams = class ReduceMinLayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReduceMinLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReduceMinLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keepDims": + message.keepDims = reader.bool(); + break; + case "reduceAll": + message.reduceAll = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReduceMinLayerParams.prototype.keepDims = false; +$root.CoreML.Specification.ReduceMinLayerParams.prototype.reduceAll = false; + +$root.CoreML.Specification.ReduceSumLayerParams = class ReduceSumLayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReduceSumLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReduceSumLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keepDims": + message.keepDims = reader.bool(); + break; + case "reduceAll": + message.reduceAll = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReduceSumLayerParams.prototype.keepDims = false; +$root.CoreML.Specification.ReduceSumLayerParams.prototype.reduceAll = false; + +$root.CoreML.Specification.ReduceProdLayerParams = class ReduceProdLayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReduceProdLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReduceProdLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keepDims": + message.keepDims = reader.bool(); + break; + case "reduceAll": + message.reduceAll = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReduceProdLayerParams.prototype.keepDims = false; +$root.CoreML.Specification.ReduceProdLayerParams.prototype.reduceAll = false; + +$root.CoreML.Specification.ReduceMeanLayerParams = class ReduceMeanLayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReduceMeanLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReduceMeanLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keepDims": + message.keepDims = reader.bool(); + break; + case "reduceAll": + message.reduceAll = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReduceMeanLayerParams.prototype.keepDims = false; +$root.CoreML.Specification.ReduceMeanLayerParams.prototype.reduceAll = false; + +$root.CoreML.Specification.ReduceLogSumLayerParams = class ReduceLogSumLayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReduceLogSumLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReduceLogSumLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keepDims": + message.keepDims = reader.bool(); + break; + case "reduceAll": + message.reduceAll = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReduceLogSumLayerParams.prototype.keepDims = false; +$root.CoreML.Specification.ReduceLogSumLayerParams.prototype.reduceAll = false; + +$root.CoreML.Specification.ReduceSumSquareLayerParams = class ReduceSumSquareLayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReduceSumSquareLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReduceSumSquareLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keepDims": + message.keepDims = reader.bool(); + break; + case "reduceAll": + message.reduceAll = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReduceSumSquareLayerParams.prototype.keepDims = false; +$root.CoreML.Specification.ReduceSumSquareLayerParams.prototype.reduceAll = false; + +$root.CoreML.Specification.ReduceLogSumExpLayerParams = class ReduceLogSumExpLayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReduceLogSumExpLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReduceLogSumExpLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keepDims": + message.keepDims = reader.bool(); + break; + case "reduceAll": + message.reduceAll = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReduceLogSumExpLayerParams.prototype.keepDims = false; +$root.CoreML.Specification.ReduceLogSumExpLayerParams.prototype.reduceAll = false; + +$root.CoreML.Specification.ExpandDimsLayerParams = class ExpandDimsLayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ExpandDimsLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ExpandDimsLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FlattenTo2DLayerParams = class FlattenTo2DLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FlattenTo2DLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FlattenTo2DLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FlattenTo2DLayerParams.prototype.axis = protobuf.Int64.create(0); + +$root.CoreML.Specification.ReshapeStaticLayerParams = class ReshapeStaticLayerParams { + + constructor() { + this.targetShape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReshapeStaticLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.targetShape = reader.array(message.targetShape, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReshapeStaticLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "targetShape": + reader.array(message.targetShape, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReshapeLikeLayerParams = class ReshapeLikeLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReshapeLikeLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReshapeLikeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ReshapeDynamicLayerParams = class ReshapeDynamicLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ReshapeDynamicLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ReshapeDynamicLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SqueezeLayerParams = class SqueezeLayerParams { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SqueezeLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.squeezeAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SqueezeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "squeezeAll": + message.squeezeAll = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SqueezeLayerParams.prototype.squeezeAll = false; + +$root.CoreML.Specification.TopKLayerParams = class TopKLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.TopKLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.K = reader.uint64(); + break; + case 3: + message.useBottomK = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.TopKLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "K": + message.K = reader.uint64(); + break; + case "useBottomK": + message.useBottomK = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.TopKLayerParams.prototype.axis = protobuf.Int64.create(0); +$root.CoreML.Specification.TopKLayerParams.prototype.K = protobuf.Uint64.create(0); +$root.CoreML.Specification.TopKLayerParams.prototype.useBottomK = false; + +$root.CoreML.Specification.ArgMaxLayerParams = class ArgMaxLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ArgMaxLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.removeDim = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ArgMaxLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "removeDim": + message.removeDim = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ArgMaxLayerParams.prototype.axis = protobuf.Int64.create(0); +$root.CoreML.Specification.ArgMaxLayerParams.prototype.removeDim = false; + +$root.CoreML.Specification.ArgMinLayerParams = class ArgMinLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ArgMinLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.removeDim = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ArgMinLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "removeDim": + message.removeDim = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ArgMinLayerParams.prototype.axis = protobuf.Int64.create(0); +$root.CoreML.Specification.ArgMinLayerParams.prototype.removeDim = false; + +$root.CoreML.Specification.SplitNDLayerParams = class SplitNDLayerParams { + + constructor() { + this.splitSizes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SplitNDLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.numSplits = reader.uint64(); + break; + case 3: + message.splitSizes = reader.array(message.splitSizes, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SplitNDLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "numSplits": + message.numSplits = reader.uint64(); + break; + case "splitSizes": + reader.array(message.splitSizes, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SplitNDLayerParams.prototype.axis = protobuf.Int64.create(0); +$root.CoreML.Specification.SplitNDLayerParams.prototype.numSplits = protobuf.Uint64.create(0); + +$root.CoreML.Specification.CeilLayerParams = class CeilLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CeilLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CeilLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RoundLayerParams = class RoundLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RoundLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RoundLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.FloorLayerParams = class FloorLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.FloorLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.FloorLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SignLayerParams = class SignLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SignLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SignLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ClipLayerParams = class ClipLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ClipLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.minVal = reader.float(); + break; + case 2: + message.maxVal = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ClipLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "minVal": + message.minVal = reader.float(); + break; + case "maxVal": + message.maxVal = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ClipLayerParams.prototype.minVal = 0; +$root.CoreML.Specification.ClipLayerParams.prototype.maxVal = 0; + +$root.CoreML.Specification.SliceStaticLayerParams = class SliceStaticLayerParams { + + constructor() { + this.beginIds = []; + this.beginMasks = []; + this.endIds = []; + this.endMasks = []; + this.strides = []; + this.squeezeMasks = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SliceStaticLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.beginIds = reader.array(message.beginIds, () => reader.int64(), tag); + break; + case 2: + message.beginMasks = reader.array(message.beginMasks, () => reader.bool(), tag); + break; + case 3: + message.endIds = reader.array(message.endIds, () => reader.int64(), tag); + break; + case 4: + message.endMasks = reader.array(message.endMasks, () => reader.bool(), tag); + break; + case 5: + message.strides = reader.array(message.strides, () => reader.int64(), tag); + break; + case 6: + message.squeezeMasks = reader.array(message.squeezeMasks, () => reader.bool(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SliceStaticLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "beginIds": + reader.array(message.beginIds, () => reader.int64()); + break; + case "beginMasks": + reader.array(message.beginMasks, () => reader.bool()); + break; + case "endIds": + reader.array(message.endIds, () => reader.int64()); + break; + case "endMasks": + reader.array(message.endMasks, () => reader.bool()); + break; + case "strides": + reader.array(message.strides, () => reader.int64()); + break; + case "squeezeMasks": + reader.array(message.squeezeMasks, () => reader.bool()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SliceDynamicLayerParams = class SliceDynamicLayerParams { + + constructor() { + this.beginMasks = []; + this.endIds = []; + this.endMasks = []; + this.strides = []; + this.squeezeMasks = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SliceDynamicLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.beginMasks = reader.array(message.beginMasks, () => reader.bool(), tag); + break; + case 3: + message.endIds = reader.array(message.endIds, () => reader.int64(), tag); + break; + case 4: + message.endMasks = reader.array(message.endMasks, () => reader.bool(), tag); + break; + case 5: + message.strides = reader.array(message.strides, () => reader.int64(), tag); + break; + case 6: + message.squeezeMasks = reader.array(message.squeezeMasks, () => reader.bool(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SliceDynamicLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "beginMasks": + reader.array(message.beginMasks, () => reader.bool()); + break; + case "endIds": + reader.array(message.endIds, () => reader.int64()); + break; + case "endMasks": + reader.array(message.endMasks, () => reader.bool()); + break; + case "strides": + reader.array(message.strides, () => reader.int64()); + break; + case "squeezeMasks": + reader.array(message.squeezeMasks, () => reader.bool()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.TileLayerParams = class TileLayerParams { + + constructor() { + this.reps = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.TileLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.reps = reader.array(message.reps, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.TileLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "reps": + reader.array(message.reps, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GetShapeLayerParams = class GetShapeLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GetShapeLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GetShapeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ErfLayerParams = class ErfLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ErfLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ErfLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GeluLayerParams = class GeluLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.GeluLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.GeluLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "mode": + message.mode = reader.enum($root.CoreML.Specification.GeluLayerParams.GeluMode); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.GeluLayerParams.prototype.mode = 0; + +$root.CoreML.Specification.GeluLayerParams.GeluMode = { + "EXACT": 0, + "TANH_APPROXIMATION": 1, + "SIGMOID_APPROXIMATION": 2 +}; + +$root.CoreML.Specification.RangeStaticLayerParams = class RangeStaticLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RangeStaticLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.endValue = reader.float(); + break; + case 2: + message.startValue = reader.float(); + break; + case 3: + message.stepSizeValue = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RangeStaticLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "endValue": + message.endValue = reader.float(); + break; + case "startValue": + message.startValue = reader.float(); + break; + case "stepSizeValue": + message.stepSizeValue = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RangeStaticLayerParams.prototype.endValue = 0; +$root.CoreML.Specification.RangeStaticLayerParams.prototype.startValue = 0; +$root.CoreML.Specification.RangeStaticLayerParams.prototype.stepSizeValue = 0; + +$root.CoreML.Specification.RangeDynamicLayerParams = class RangeDynamicLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RangeDynamicLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.startValue = reader.float(); + break; + case 3: + message.stepSizeValue = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RangeDynamicLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "startValue": + message.startValue = reader.float(); + break; + case "stepSizeValue": + message.stepSizeValue = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RangeDynamicLayerParams.prototype.startValue = 0; +$root.CoreML.Specification.RangeDynamicLayerParams.prototype.stepSizeValue = 0; + +$root.CoreML.Specification.SlidingWindowsLayerParams = class SlidingWindowsLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SlidingWindowsLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.windowSize = reader.uint64(); + break; + case 3: + message.step = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SlidingWindowsLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "windowSize": + message.windowSize = reader.uint64(); + break; + case "step": + message.step = reader.uint64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SlidingWindowsLayerParams.prototype.axis = protobuf.Int64.create(0); +$root.CoreML.Specification.SlidingWindowsLayerParams.prototype.windowSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.SlidingWindowsLayerParams.prototype.step = protobuf.Uint64.create(0); + +$root.CoreML.Specification.LayerNormalizationLayerParams = class LayerNormalizationLayerParams { + + constructor() { + this.normalizedShape = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LayerNormalizationLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.normalizedShape = reader.array(message.normalizedShape, () => reader.int64(), tag); + break; + case 2: + message.eps = reader.float(); + break; + case 3: + message.gamma = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 4: + message.beta = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LayerNormalizationLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "normalizedShape": + reader.array(message.normalizedShape, () => reader.int64()); + break; + case "eps": + message.eps = reader.float(); + break; + case "gamma": + message.gamma = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + case "beta": + message.beta = $root.CoreML.Specification.WeightParams.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LayerNormalizationLayerParams.prototype.eps = 0; +$root.CoreML.Specification.LayerNormalizationLayerParams.prototype.gamma = null; +$root.CoreML.Specification.LayerNormalizationLayerParams.prototype.beta = null; + +$root.CoreML.Specification.NonMaximumSuppressionLayerParams = class NonMaximumSuppressionLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NonMaximumSuppressionLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.iouThreshold = reader.float(); + break; + case 2: + message.scoreThreshold = reader.float(); + break; + case 3: + message.maxBoxes = reader.uint64(); + break; + case 4: + message.perClassSuppression = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NonMaximumSuppressionLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "iouThreshold": + message.iouThreshold = reader.float(); + break; + case "scoreThreshold": + message.scoreThreshold = reader.float(); + break; + case "maxBoxes": + message.maxBoxes = reader.uint64(); + break; + case "perClassSuppression": + message.perClassSuppression = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NonMaximumSuppressionLayerParams.prototype.iouThreshold = 0; +$root.CoreML.Specification.NonMaximumSuppressionLayerParams.prototype.scoreThreshold = 0; +$root.CoreML.Specification.NonMaximumSuppressionLayerParams.prototype.maxBoxes = protobuf.Uint64.create(0); +$root.CoreML.Specification.NonMaximumSuppressionLayerParams.prototype.perClassSuppression = false; + +$root.CoreML.Specification.ClampedReLULayerParams = class ClampedReLULayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ClampedReLULayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ClampedReLULayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta": + message.beta = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ClampedReLULayerParams.prototype.alpha = 0; +$root.CoreML.Specification.ClampedReLULayerParams.prototype.beta = 0; + +$root.CoreML.Specification.ArgSortLayerParams = class ArgSortLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ArgSortLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.descending = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ArgSortLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "descending": + message.descending = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ArgSortLayerParams.prototype.axis = protobuf.Int64.create(0); +$root.CoreML.Specification.ArgSortLayerParams.prototype.descending = false; + +$root.CoreML.Specification.SliceBySizeLayerParams = class SliceBySizeLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SliceBySizeLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.size = reader.int64(); + break; + case 3: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SliceBySizeLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "size": + message.size = reader.int64(); + break; + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SliceBySizeLayerParams.prototype.size = protobuf.Int64.create(0); +$root.CoreML.Specification.SliceBySizeLayerParams.prototype.axis = protobuf.Int64.create(0); + +$root.CoreML.Specification.NeuralNetworkClassifier = class NeuralNetworkClassifier { + + constructor() { + this.layers = []; + this.preprocessing = []; + } + + get ClassLabels() { + $root.CoreML.Specification.NeuralNetworkClassifier.ClassLabelsSet = $root.CoreML.Specification.NeuralNetworkClassifier.ClassLabelsSet || new Set([ "stringClassLabels", "int64ClassLabels"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.NeuralNetworkClassifier.ClassLabelsSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NeuralNetworkClassifier(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.layers.push($root.CoreML.Specification.NeuralNetworkLayer.decode(reader, reader.uint32())); + break; + case 2: + message.preprocessing.push($root.CoreML.Specification.NeuralNetworkPreprocessing.decode(reader, reader.uint32())); + break; + case 5: + message.arrayInputShapeMapping = reader.int32(); + break; + case 6: + message.imageInputShapeMapping = reader.int32(); + break; + case 10: + message.updateParams = $root.CoreML.Specification.NetworkUpdateParameters.decode(reader, reader.uint32()); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 200: + message.labelProbabilityLayerName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NeuralNetworkClassifier(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "layers": + message.layers.push($root.CoreML.Specification.NeuralNetworkLayer.decodeText(reader)); + break; + case "preprocessing": + message.preprocessing.push($root.CoreML.Specification.NeuralNetworkPreprocessing.decodeText(reader)); + break; + case "arrayInputShapeMapping": + message.arrayInputShapeMapping = reader.enum($root.CoreML.Specification.NeuralNetworkMultiArrayShapeMapping); + break; + case "imageInputShapeMapping": + message.imageInputShapeMapping = reader.enum($root.CoreML.Specification.NeuralNetworkImageShapeMapping); + break; + case "updateParams": + message.updateParams = $root.CoreML.Specification.NetworkUpdateParameters.decodeText(reader); + break; + case "stringClassLabels": + message.stringClassLabels 
= $root.CoreML.Specification.StringVector.decodeText(reader); + break; + case "int64ClassLabels": + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decodeText(reader); + break; + case "labelProbabilityLayerName": + message.labelProbabilityLayerName = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NeuralNetworkClassifier.prototype.arrayInputShapeMapping = 0; +$root.CoreML.Specification.NeuralNetworkClassifier.prototype.imageInputShapeMapping = 0; +$root.CoreML.Specification.NeuralNetworkClassifier.prototype.updateParams = null; +$root.CoreML.Specification.NeuralNetworkClassifier.prototype.labelProbabilityLayerName = ""; + +$root.CoreML.Specification.OneHotLayerParams = class OneHotLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.OneHotLayerParams(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.oneHotVectorSize = reader.uint64(); + break; + case 2: + message.axis = reader.int64(); + break; + case 3: + message.onValue = reader.float(); + break; + case 4: + message.offValue = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.OneHotLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "oneHotVectorSize": + message.oneHotVectorSize = reader.uint64(); + break; + case "axis": + message.axis = reader.int64(); + break; + case "onValue": + message.onValue = reader.float(); + break; + case "offValue": + message.offValue = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.OneHotLayerParams.prototype.oneHotVectorSize = protobuf.Uint64.create(0); +$root.CoreML.Specification.OneHotLayerParams.prototype.axis = protobuf.Int64.create(0); +$root.CoreML.Specification.OneHotLayerParams.prototype.onValue = 0; +$root.CoreML.Specification.OneHotLayerParams.prototype.offValue = 0; + +$root.CoreML.Specification.CumSumLayerParams = class CumSumLayerParams { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CumSumLayerParams(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.excludeFinalSum = reader.bool(); + break; + case 3: + message.reverse = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CumSumLayerParams(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "excludeFinalSum": + message.excludeFinalSum = reader.bool(); + break; + case "reverse": + message.reverse = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CumSumLayerParams.prototype.axis = protobuf.Int64.create(0); +$root.CoreML.Specification.CumSumLayerParams.prototype.excludeFinalSum = false; +$root.CoreML.Specification.CumSumLayerParams.prototype.reverse = false; + +$root.CoreML.Specification.NeuralNetworkRegressor = class NeuralNetworkRegressor { + + constructor() { + this.layers = []; + this.preprocessing = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NeuralNetworkRegressor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.layers.push($root.CoreML.Specification.NeuralNetworkLayer.decode(reader, reader.uint32())); + break; + case 2: + message.preprocessing.push($root.CoreML.Specification.NeuralNetworkPreprocessing.decode(reader, reader.uint32())); + break; + case 5: + message.arrayInputShapeMapping = reader.int32(); + break; + case 6: + message.imageInputShapeMapping = reader.int32(); + break; + case 10: + message.updateParams = $root.CoreML.Specification.NetworkUpdateParameters.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NeuralNetworkRegressor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "layers": + message.layers.push($root.CoreML.Specification.NeuralNetworkLayer.decodeText(reader)); + break; + case "preprocessing": + message.preprocessing.push($root.CoreML.Specification.NeuralNetworkPreprocessing.decodeText(reader)); + break; + case "arrayInputShapeMapping": + message.arrayInputShapeMapping = reader.enum($root.CoreML.Specification.NeuralNetworkMultiArrayShapeMapping); + break; + case "imageInputShapeMapping": + message.imageInputShapeMapping = reader.enum($root.CoreML.Specification.NeuralNetworkImageShapeMapping); + break; + case "updateParams": + message.updateParams = $root.CoreML.Specification.NetworkUpdateParameters.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NeuralNetworkRegressor.prototype.arrayInputShapeMapping = 0; +$root.CoreML.Specification.NeuralNetworkRegressor.prototype.imageInputShapeMapping = 0; +$root.CoreML.Specification.NeuralNetworkRegressor.prototype.updateParams = null; + +$root.CoreML.Specification.NetworkUpdateParameters = class NetworkUpdateParameters { + + constructor() { + this.lossLayers = []; + } + + static decode(reader, length) { + const message = new 
$root.CoreML.Specification.NetworkUpdateParameters(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lossLayers.push($root.CoreML.Specification.LossLayer.decode(reader, reader.uint32())); + break; + case 2: + message.optimizer = $root.CoreML.Specification.Optimizer.decode(reader, reader.uint32()); + break; + case 3: + message.epochs = $root.CoreML.Specification.Int64Parameter.decode(reader, reader.uint32()); + break; + case 10: + message.shuffle = $root.CoreML.Specification.BoolParameter.decode(reader, reader.uint32()); + break; + case 20: + message.seed = $root.CoreML.Specification.Int64Parameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NetworkUpdateParameters(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lossLayers": + message.lossLayers.push($root.CoreML.Specification.LossLayer.decodeText(reader)); + break; + case "optimizer": + message.optimizer = $root.CoreML.Specification.Optimizer.decodeText(reader); + break; + case "epochs": + message.epochs = $root.CoreML.Specification.Int64Parameter.decodeText(reader); + break; + case "shuffle": + message.shuffle = $root.CoreML.Specification.BoolParameter.decodeText(reader); + break; + case "seed": + message.seed = $root.CoreML.Specification.Int64Parameter.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NetworkUpdateParameters.prototype.optimizer = null; +$root.CoreML.Specification.NetworkUpdateParameters.prototype.epochs = null; +$root.CoreML.Specification.NetworkUpdateParameters.prototype.shuffle = null; +$root.CoreML.Specification.NetworkUpdateParameters.prototype.seed = null; + +$root.CoreML.Specification.LossLayer = class LossLayer { + + constructor() { + } + + get LossLayerType() { + $root.CoreML.Specification.LossLayer.LossLayerTypeSet = $root.CoreML.Specification.LossLayer.LossLayerTypeSet || new Set([ "categoricalCrossEntropyLossLayer", "meanSquaredErrorLossLayer"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.LossLayer.LossLayerTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LossLayer(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 10: + message.categoricalCrossEntropyLossLayer = $root.CoreML.Specification.CategoricalCrossEntropyLossLayer.decode(reader, reader.uint32()); + break; + case 11: + message.meanSquaredErrorLossLayer = $root.CoreML.Specification.MeanSquaredErrorLossLayer.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LossLayer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "categoricalCrossEntropyLossLayer": + message.categoricalCrossEntropyLossLayer = $root.CoreML.Specification.CategoricalCrossEntropyLossLayer.decodeText(reader); + break; + case "meanSquaredErrorLossLayer": + message.meanSquaredErrorLossLayer = $root.CoreML.Specification.MeanSquaredErrorLossLayer.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LossLayer.prototype.name = ""; + +$root.CoreML.Specification.CategoricalCrossEntropyLossLayer = class CategoricalCrossEntropyLossLayer { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.CategoricalCrossEntropyLossLayer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.input = reader.string(); + break; + case 2: + message.target = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.CategoricalCrossEntropyLossLayer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "input": + message.input = reader.string(); + break; + case "target": + message.target = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.CategoricalCrossEntropyLossLayer.prototype.input = ""; +$root.CoreML.Specification.CategoricalCrossEntropyLossLayer.prototype.target = ""; + +$root.CoreML.Specification.MeanSquaredErrorLossLayer = class MeanSquaredErrorLossLayer { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.MeanSquaredErrorLossLayer(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.input = reader.string(); + break; + case 2: + message.target = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.MeanSquaredErrorLossLayer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "input": + message.input = reader.string(); + break; + case "target": + message.target = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.MeanSquaredErrorLossLayer.prototype.input = ""; +$root.CoreML.Specification.MeanSquaredErrorLossLayer.prototype.target = ""; + +$root.CoreML.Specification.Optimizer = class Optimizer { + + constructor() { + } + + get OptimizerType() { + $root.CoreML.Specification.Optimizer.OptimizerTypeSet = $root.CoreML.Specification.Optimizer.OptimizerTypeSet || new Set([ "sgdOptimizer", "adamOptimizer"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.Optimizer.OptimizerTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Optimizer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.sgdOptimizer = $root.CoreML.Specification.SGDOptimizer.decode(reader, reader.uint32()); + break; + case 11: + message.adamOptimizer = $root.CoreML.Specification.AdamOptimizer.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Optimizer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "sgdOptimizer": + message.sgdOptimizer = $root.CoreML.Specification.SGDOptimizer.decodeText(reader); + break; + case "adamOptimizer": + message.adamOptimizer = $root.CoreML.Specification.AdamOptimizer.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SGDOptimizer = class SGDOptimizer { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SGDOptimizer(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.learningRate = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + case 2: + message.miniBatchSize = $root.CoreML.Specification.Int64Parameter.decode(reader, reader.uint32()); + break; + case 3: + message.momentum = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SGDOptimizer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "learningRate": + message.learningRate = $root.CoreML.Specification.DoubleParameter.decodeText(reader); + break; + case "miniBatchSize": + message.miniBatchSize = $root.CoreML.Specification.Int64Parameter.decodeText(reader); + break; + case "momentum": + message.momentum = $root.CoreML.Specification.DoubleParameter.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SGDOptimizer.prototype.learningRate = null; +$root.CoreML.Specification.SGDOptimizer.prototype.miniBatchSize = null; +$root.CoreML.Specification.SGDOptimizer.prototype.momentum = null; + +$root.CoreML.Specification.AdamOptimizer = class AdamOptimizer { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.AdamOptimizer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.learningRate = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + case 2: + message.miniBatchSize = $root.CoreML.Specification.Int64Parameter.decode(reader, reader.uint32()); + break; + case 3: + message.beta1 = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + case 4: + message.beta2 = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + case 5: + message.eps = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.AdamOptimizer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "learningRate": + message.learningRate = $root.CoreML.Specification.DoubleParameter.decodeText(reader); + break; + case "miniBatchSize": + message.miniBatchSize = $root.CoreML.Specification.Int64Parameter.decodeText(reader); + break; + case "beta1": + message.beta1 = $root.CoreML.Specification.DoubleParameter.decodeText(reader); + break; + case "beta2": + message.beta2 = $root.CoreML.Specification.DoubleParameter.decodeText(reader); + break; + case "eps": + message.eps = $root.CoreML.Specification.DoubleParameter.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.AdamOptimizer.prototype.learningRate = null; +$root.CoreML.Specification.AdamOptimizer.prototype.miniBatchSize = null; +$root.CoreML.Specification.AdamOptimizer.prototype.beta1 = null; +$root.CoreML.Specification.AdamOptimizer.prototype.beta2 = null; 
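+// Note on the generated pattern used by every message class above and below:
+// each binary decode() runs the same wire-format loop. reader.uint32() yields
+// a tag in which (tag >>> 3) is the protobuf field number and (tag & 7) is the
+// wire type, so the default branch can skip unknown fields with
+// reader.skipType(tag & 7). Field defaults are declared on the prototype (as
+// with AdamOptimizer's eps directly below) rather than assigned in the
+// constructor, which keeps decoded instances sparse: Object.keys(this) then
+// lists only fields that were actually read from the stream, and the oneof
+// getters such as Optimizer.OptimizerType and LossLayer.LossLayerType depend
+// on exactly that.
+//
+// A minimal usage sketch, assuming the surrounding module's protobuf runtime
+// exposes a BinaryReader with an open() factory over a byte buffer (the
+// reader and buffer names here are hypothetical, not part of this diff):
+//
+//   const reader = protobuf.BinaryReader.open(buffer);   // assumed factory
+//   const params = $root.CoreML.Specification.NetworkUpdateParameters.decode(reader);
+//   // Omitting length makes decode() read to reader.length, i.e. the whole buffer.
+//   const kind = params.optimizer ? params.optimizer.OptimizerType : undefined;
+//   // kind is "sgdOptimizer", "adamOptimizer", or undefined when unset.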
+$root.CoreML.Specification.AdamOptimizer.prototype.eps = null; + +$root.CoreML.Specification.Normalizer = class Normalizer { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Normalizer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.normType = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Normalizer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "normType": + message.normType = reader.enum($root.CoreML.Specification.Normalizer.NormType); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Normalizer.prototype.normType = 0; + +$root.CoreML.Specification.Normalizer.NormType = { + "LMax": 0, + "L1": 1, + "L2": 2 +}; + +$root.CoreML.Specification.OneHotEncoder = class OneHotEncoder { + + constructor() { + } + + get CategoryType() { + $root.CoreML.Specification.OneHotEncoder.CategoryTypeSet = $root.CoreML.Specification.OneHotEncoder.CategoryTypeSet || new Set([ "stringCategories", "int64Categories"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.OneHotEncoder.CategoryTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.OneHotEncoder(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.stringCategories = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 2: + message.int64Categories = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 10: + message.outputSparse = reader.bool(); + break; + case 11: + message.handleUnknown = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.OneHotEncoder(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "stringCategories": + message.stringCategories = $root.CoreML.Specification.StringVector.decodeText(reader); + break; + case "int64Categories": + message.int64Categories = $root.CoreML.Specification.Int64Vector.decodeText(reader); + break; + case "outputSparse": + message.outputSparse = reader.bool(); + break; + case "handleUnknown": + message.handleUnknown = reader.enum($root.CoreML.Specification.OneHotEncoder.HandleUnknown); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.OneHotEncoder.prototype.outputSparse = false; +$root.CoreML.Specification.OneHotEncoder.prototype.handleUnknown = 0; + +$root.CoreML.Specification.OneHotEncoder.HandleUnknown = { + "ErrorOnUnknown": 0, + "IgnoreUnknown": 1 +}; + +$root.CoreML.Specification.Scaler = class Scaler { + + constructor() { + this.shiftValue = []; + this.scaleValue = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Scaler(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shiftValue = reader.doubles(message.shiftValue, tag); + break; + case 2: + message.scaleValue = reader.doubles(message.scaleValue, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Scaler(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shiftValue": + reader.array(message.shiftValue, () => reader.double()); + break; + case "scaleValue": + reader.array(message.scaleValue, () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NonMaximumSuppression = class NonMaximumSuppression { + + constructor() { + } + + get SuppressionMethod() { + $root.CoreML.Specification.NonMaximumSuppression.SuppressionMethodSet = $root.CoreML.Specification.NonMaximumSuppression.SuppressionMethodSet || new Set([ "pickTop"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.NonMaximumSuppression.SuppressionMethodSet.has(key) && this[key] != null); + } + + get ClassLabels() { + $root.CoreML.Specification.NonMaximumSuppression.ClassLabelsSet = $root.CoreML.Specification.NonMaximumSuppression.ClassLabelsSet || new Set([ "stringClassLabels", "int64ClassLabels"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.NonMaximumSuppression.ClassLabelsSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NonMaximumSuppression(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pickTop = $root.CoreML.Specification.NonMaximumSuppression.PickTop.decode(reader, reader.uint32()); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 110: + message.iouThreshold = reader.double(); + break; + case 111: + message.confidenceThreshold = reader.double(); + break; + case 200: + message.confidenceInputFeatureName = reader.string(); + break; + case 201: + message.coordinatesInputFeatureName = reader.string(); + break; + case 202: + message.iouThresholdInputFeatureName = reader.string(); + break; + case 203: + message.confidenceThresholdInputFeatureName = reader.string(); + break; + case 210: + message.confidenceOutputFeatureName = reader.string(); + break; + case 211: + message.coordinatesOutputFeatureName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NonMaximumSuppression(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "pickTop": + message.pickTop = $root.CoreML.Specification.NonMaximumSuppression.PickTop.decodeText(reader); + break; + case "stringClassLabels": + message.stringClassLabels = $root.CoreML.Specification.StringVector.decodeText(reader); + break; + case "int64ClassLabels": + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decodeText(reader); + break; + case 
"iouThreshold": + message.iouThreshold = reader.double(); + break; + case "confidenceThreshold": + message.confidenceThreshold = reader.double(); + break; + case "confidenceInputFeatureName": + message.confidenceInputFeatureName = reader.string(); + break; + case "coordinatesInputFeatureName": + message.coordinatesInputFeatureName = reader.string(); + break; + case "iouThresholdInputFeatureName": + message.iouThresholdInputFeatureName = reader.string(); + break; + case "confidenceThresholdInputFeatureName": + message.confidenceThresholdInputFeatureName = reader.string(); + break; + case "confidenceOutputFeatureName": + message.confidenceOutputFeatureName = reader.string(); + break; + case "coordinatesOutputFeatureName": + message.coordinatesOutputFeatureName = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NonMaximumSuppression.prototype.iouThreshold = 0; +$root.CoreML.Specification.NonMaximumSuppression.prototype.confidenceThreshold = 0; +$root.CoreML.Specification.NonMaximumSuppression.prototype.confidenceInputFeatureName = ""; +$root.CoreML.Specification.NonMaximumSuppression.prototype.coordinatesInputFeatureName = ""; +$root.CoreML.Specification.NonMaximumSuppression.prototype.iouThresholdInputFeatureName = ""; +$root.CoreML.Specification.NonMaximumSuppression.prototype.confidenceThresholdInputFeatureName = ""; +$root.CoreML.Specification.NonMaximumSuppression.prototype.confidenceOutputFeatureName = ""; +$root.CoreML.Specification.NonMaximumSuppression.prototype.coordinatesOutputFeatureName = ""; + +$root.CoreML.Specification.NonMaximumSuppression.PickTop = class PickTop { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.NonMaximumSuppression.PickTop(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.perClass = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.NonMaximumSuppression.PickTop(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "perClass": + message.perClass = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.NonMaximumSuppression.PickTop.prototype.perClass = false; + +$root.CoreML.Specification.LinearKernel = class LinearKernel { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LinearKernel(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LinearKernel(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RBFKernel = class RBFKernel { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.RBFKernel(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gamma = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.RBFKernel(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "gamma": + message.gamma = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.RBFKernel.prototype.gamma = 0; + +$root.CoreML.Specification.PolyKernel = class PolyKernel { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.PolyKernel(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.degree = reader.int32(); + break; + case 2: + message.c = reader.double(); + break; + case 3: + message.gamma = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.PolyKernel(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "degree": + message.degree = reader.int32(); + break; + case "c": + message.c = reader.double(); + break; + case "gamma": + message.gamma = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.PolyKernel.prototype.degree = 0; +$root.CoreML.Specification.PolyKernel.prototype.c = 0; +$root.CoreML.Specification.PolyKernel.prototype.gamma = 0; + +$root.CoreML.Specification.SigmoidKernel = class SigmoidKernel { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SigmoidKernel(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gamma = reader.double(); + break; + case 2: + message.c = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SigmoidKernel(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "gamma": + message.gamma = reader.double(); + break; + case "c": + message.c = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SigmoidKernel.prototype.gamma = 0; +$root.CoreML.Specification.SigmoidKernel.prototype.c = 0; + +$root.CoreML.Specification.Kernel = class Kernel { + + constructor() { + } + + get kernel() { + $root.CoreML.Specification.Kernel.kernelSet = $root.CoreML.Specification.Kernel.kernelSet || new Set([ "linearKernel", "rbfKernel", "polyKernel", "sigmoidKernel"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.Kernel.kernelSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Kernel(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.linearKernel = $root.CoreML.Specification.LinearKernel.decode(reader, reader.uint32()); + break; + case 2: + message.rbfKernel = $root.CoreML.Specification.RBFKernel.decode(reader, reader.uint32()); + break; + case 3: + message.polyKernel = $root.CoreML.Specification.PolyKernel.decode(reader, reader.uint32()); + break; + case 4: + message.sigmoidKernel = $root.CoreML.Specification.SigmoidKernel.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Kernel(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "linearKernel": + message.linearKernel = $root.CoreML.Specification.LinearKernel.decodeText(reader); + break; + case "rbfKernel": + message.rbfKernel = $root.CoreML.Specification.RBFKernel.decodeText(reader); + break; + case "polyKernel": + message.polyKernel = $root.CoreML.Specification.PolyKernel.decodeText(reader); + break; + case "sigmoidKernel": + message.sigmoidKernel = $root.CoreML.Specification.SigmoidKernel.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SparseNode = class SparseNode { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SparseNode(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.index = reader.int32(); + break; + case 2: + message.value = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SparseNode(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "index": + message.index = reader.int32(); + break; + case "value": + message.value = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SparseNode.prototype.index = 0; +$root.CoreML.Specification.SparseNode.prototype.value = 0; + +$root.CoreML.Specification.SparseVector = class SparseVector { + + constructor() { + this.nodes = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SparseVector(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.nodes.push($root.CoreML.Specification.SparseNode.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SparseVector(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "nodes": + message.nodes.push($root.CoreML.Specification.SparseNode.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SparseSupportVectors = class SparseSupportVectors { + + constructor() { + this.vectors = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SparseSupportVectors(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.vectors.push($root.CoreML.Specification.SparseVector.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SparseSupportVectors(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "vectors": + message.vectors.push($root.CoreML.Specification.SparseVector.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DenseVector = class DenseVector { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.DenseVector(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values = reader.doubles(message.values, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.DenseVector(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + reader.array(message.values, () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.DenseSupportVectors = class DenseSupportVectors { + + constructor() { + this.vectors = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.DenseSupportVectors(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.vectors.push($root.CoreML.Specification.DenseVector.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.DenseSupportVectors(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "vectors": + message.vectors.push($root.CoreML.Specification.DenseVector.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.Coefficients = class Coefficients { + + constructor() { + this.alpha = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.Coefficients(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.doubles(message.alpha, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.Coefficients(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + reader.array(message.alpha, () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SupportVectorRegressor = class SupportVectorRegressor { + + constructor() { + } + + get supportVectors() { + $root.CoreML.Specification.SupportVectorRegressor.supportVectorsSet = $root.CoreML.Specification.SupportVectorRegressor.supportVectorsSet || new Set([ "sparseSupportVectors", "denseSupportVectors"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.SupportVectorRegressor.supportVectorsSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SupportVectorRegressor(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.kernel = $root.CoreML.Specification.Kernel.decode(reader, reader.uint32()); + break; + case 2: + message.sparseSupportVectors = $root.CoreML.Specification.SparseSupportVectors.decode(reader, reader.uint32()); + break; + case 3: + message.denseSupportVectors = $root.CoreML.Specification.DenseSupportVectors.decode(reader, reader.uint32()); + break; + case 4: + message.coefficients = $root.CoreML.Specification.Coefficients.decode(reader, reader.uint32()); + break; + case 5: + message.rho = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SupportVectorRegressor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "kernel": + message.kernel = $root.CoreML.Specification.Kernel.decodeText(reader); + break; + case "sparseSupportVectors": + message.sparseSupportVectors = $root.CoreML.Specification.SparseSupportVectors.decodeText(reader); + break; + case "denseSupportVectors": + message.denseSupportVectors = $root.CoreML.Specification.DenseSupportVectors.decodeText(reader); + break; + case "coefficients": + message.coefficients = $root.CoreML.Specification.Coefficients.decodeText(reader); + break; + case "rho": + message.rho = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SupportVectorRegressor.prototype.kernel = null; +$root.CoreML.Specification.SupportVectorRegressor.prototype.coefficients = null; +$root.CoreML.Specification.SupportVectorRegressor.prototype.rho = 0; + +$root.CoreML.Specification.SupportVectorClassifier = class SupportVectorClassifier { + + constructor() { + this.numberOfSupportVectorsPerClass = []; + this.coefficients = []; + this.rho = []; + this.probA = []; + this.probB = []; + } + + get supportVectors() { + $root.CoreML.Specification.SupportVectorClassifier.supportVectorsSet = $root.CoreML.Specification.SupportVectorClassifier.supportVectorsSet || new Set([ "sparseSupportVectors", "denseSupportVectors"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.SupportVectorClassifier.supportVectorsSet.has(key) && this[key] != null); + } + + get ClassLabels() { + $root.CoreML.Specification.SupportVectorClassifier.ClassLabelsSet = $root.CoreML.Specification.SupportVectorClassifier.ClassLabelsSet || new Set([ "stringClassLabels", "int64ClassLabels"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.SupportVectorClassifier.ClassLabelsSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.SupportVectorClassifier(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.kernel = $root.CoreML.Specification.Kernel.decode(reader, reader.uint32()); + break; + case 2: + message.numberOfSupportVectorsPerClass = reader.array(message.numberOfSupportVectorsPerClass, () => reader.int32(), tag); + break; + case 3: + message.sparseSupportVectors = $root.CoreML.Specification.SparseSupportVectors.decode(reader, reader.uint32()); + break; + case 4: + message.denseSupportVectors = $root.CoreML.Specification.DenseSupportVectors.decode(reader, reader.uint32()); + break; + case 5: + message.coefficients.push($root.CoreML.Specification.Coefficients.decode(reader, reader.uint32())); + break; + case 6: + message.rho = reader.doubles(message.rho, tag); + break; + case 7: + message.probA = reader.doubles(message.probA, tag); + break; + case 8: + message.probB = reader.doubles(message.probB, tag); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.SupportVectorClassifier(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "kernel": + message.kernel = $root.CoreML.Specification.Kernel.decodeText(reader); + break; + case "numberOfSupportVectorsPerClass": + reader.array(message.numberOfSupportVectorsPerClass, () => reader.int32()); + break; + case "sparseSupportVectors": + message.sparseSupportVectors = $root.CoreML.Specification.SparseSupportVectors.decodeText(reader); + break; + case "denseSupportVectors": + message.denseSupportVectors = $root.CoreML.Specification.DenseSupportVectors.decodeText(reader); + break; + case "coefficients": + message.coefficients.push($root.CoreML.Specification.Coefficients.decodeText(reader)); + break; + case "rho": + reader.array(message.rho, () => reader.double()); + break; + case "probA": + reader.array(message.probA, () => reader.double()); + break; + case "probB": + reader.array(message.probB, () => reader.double()); + break; + case "stringClassLabels": + message.stringClassLabels = $root.CoreML.Specification.StringVector.decodeText(reader); + break; + case "int64ClassLabels": + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.SupportVectorClassifier.prototype.kernel = null; + +$root.CoreML.Specification.TreeEnsemblePostEvaluationTransform = { + "NoTransform": 0, + "Classification_SoftMax": 1, + "Regression_Logistic": 2, + "Classification_SoftMaxWithZeroClassReference": 3 +}; + +$root.CoreML.Specification.TreeEnsembleParameters = class TreeEnsembleParameters { + + constructor() { + this.nodes = []; + this.basePredictionValue = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.TreeEnsembleParameters(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.nodes.push($root.CoreML.Specification.TreeEnsembleParameters.TreeNode.decode(reader, reader.uint32())); + break; + case 2: + message.numPredictionDimensions = reader.uint64(); + break; + case 3: + message.basePredictionValue = reader.doubles(message.basePredictionValue, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.TreeEnsembleParameters(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "nodes": + message.nodes.push($root.CoreML.Specification.TreeEnsembleParameters.TreeNode.decodeText(reader)); + break; + case "numPredictionDimensions": + message.numPredictionDimensions = reader.uint64(); + break; + case "basePredictionValue": + reader.array(message.basePredictionValue, () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.TreeEnsembleParameters.prototype.numPredictionDimensions = protobuf.Uint64.create(0); + +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode = class TreeNode { + + constructor() { + this.evaluationInfo = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.TreeEnsembleParameters.TreeNode(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.treeId = reader.uint64(); + break; + case 2: + message.nodeId = reader.uint64(); + break; + case 3: + message.nodeBehavior = reader.int32(); + break; + case 10: + message.branchFeatureIndex = reader.uint64(); + break; + case 11: + message.branchFeatureValue = reader.double(); + break; + case 12: + message.trueChildNodeId = reader.uint64(); + break; + case 13: + message.falseChildNodeId = reader.uint64(); + break; + case 14: + message.missingValueTracksTrueChild = reader.bool(); + break; + case 20: + message.evaluationInfo.push($root.CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo.decode(reader, reader.uint32())); + break; + case 30: + message.relativeHitRate = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.TreeEnsembleParameters.TreeNode(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "treeId": + message.treeId = reader.uint64(); + break; + case "nodeId": + message.nodeId = reader.uint64(); + break; + case "nodeBehavior": + message.nodeBehavior = reader.enum($root.CoreML.Specification.TreeEnsembleParameters.TreeNode.TreeNodeBehavior); + break; + case "branchFeatureIndex": + message.branchFeatureIndex = reader.uint64(); + break; + case "branchFeatureValue": + message.branchFeatureValue = reader.double(); + break; + case "trueChildNodeId": + message.trueChildNodeId = reader.uint64(); + break; + case "falseChildNodeId": + message.falseChildNodeId = reader.uint64(); + break; + case "missingValueTracksTrueChild": + message.missingValueTracksTrueChild = reader.bool(); + break; + case "evaluationInfo": + message.evaluationInfo.push($root.CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo.decodeText(reader)); + break; + case 
"relativeHitRate": + message.relativeHitRate = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.prototype.treeId = protobuf.Uint64.create(0); +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.prototype.nodeId = protobuf.Uint64.create(0); +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.prototype.nodeBehavior = 0; +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.prototype.branchFeatureIndex = protobuf.Uint64.create(0); +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.prototype.branchFeatureValue = 0; +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.prototype.trueChildNodeId = protobuf.Uint64.create(0); +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.prototype.falseChildNodeId = protobuf.Uint64.create(0); +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.prototype.missingValueTracksTrueChild = false; +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.prototype.relativeHitRate = 0; + +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.TreeNodeBehavior = { + "BranchOnValueLessThanEqual": 0, + "BranchOnValueLessThan": 1, + "BranchOnValueGreaterThanEqual": 2, + "BranchOnValueGreaterThan": 3, + "BranchOnValueEqual": 4, + "BranchOnValueNotEqual": 5, + "LeafNode": 6 +}; + +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo = class EvaluationInfo { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.evaluationIndex = reader.uint64(); + break; + case 2: + message.evaluationValue = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "evaluationIndex": + message.evaluationIndex = reader.uint64(); + break; + case "evaluationValue": + message.evaluationValue = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo.prototype.evaluationIndex = protobuf.Uint64.create(0); +$root.CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo.prototype.evaluationValue = 0; + +$root.CoreML.Specification.TreeEnsembleClassifier = class TreeEnsembleClassifier { + + constructor() { + } + + get ClassLabels() { + $root.CoreML.Specification.TreeEnsembleClassifier.ClassLabelsSet = $root.CoreML.Specification.TreeEnsembleClassifier.ClassLabelsSet || new Set([ "stringClassLabels", "int64ClassLabels"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.TreeEnsembleClassifier.ClassLabelsSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.TreeEnsembleClassifier(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.treeEnsemble = $root.CoreML.Specification.TreeEnsembleParameters.decode(reader, reader.uint32()); + break; + case 2: + message.postEvaluationTransform = reader.int32(); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.TreeEnsembleClassifier(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "treeEnsemble": + message.treeEnsemble = $root.CoreML.Specification.TreeEnsembleParameters.decodeText(reader); + break; + case "postEvaluationTransform": + message.postEvaluationTransform = reader.enum($root.CoreML.Specification.TreeEnsemblePostEvaluationTransform); + break; + case "stringClassLabels": + message.stringClassLabels = $root.CoreML.Specification.StringVector.decodeText(reader); + break; + case "int64ClassLabels": + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.TreeEnsembleClassifier.prototype.treeEnsemble = null; +$root.CoreML.Specification.TreeEnsembleClassifier.prototype.postEvaluationTransform = 0; + +$root.CoreML.Specification.TreeEnsembleRegressor = class TreeEnsembleRegressor { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.TreeEnsembleRegressor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.treeEnsemble = $root.CoreML.Specification.TreeEnsembleParameters.decode(reader, reader.uint32()); + break; + case 2: + message.postEvaluationTransform = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.TreeEnsembleRegressor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "treeEnsemble": + message.treeEnsemble = $root.CoreML.Specification.TreeEnsembleParameters.decodeText(reader); + break; + case "postEvaluationTransform": + message.postEvaluationTransform = reader.enum($root.CoreML.Specification.TreeEnsemblePostEvaluationTransform); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.TreeEnsembleRegressor.prototype.treeEnsemble = null; +$root.CoreML.Specification.TreeEnsembleRegressor.prototype.postEvaluationTransform = 0; + +$root.CoreML.Specification.ItemSimilarityRecommender = class ItemSimilarityRecommender { + + constructor() { + this.itemItemSimilarities = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ItemSimilarityRecommender(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.itemItemSimilarities.push($root.CoreML.Specification.ItemSimilarityRecommender.SimilarItems.decode(reader, reader.uint32())); + break; + case 2: + message.itemStringIds = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 3: + message.itemInt64Ids = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 10: + message.itemInputFeatureName = reader.string(); + break; + case 11: + message.numRecommendationsInputFeatureName = reader.string(); + break; + case 12: + message.itemRestrictionInputFeatureName = reader.string(); + break; + case 13: + message.itemExclusionInputFeatureName = reader.string(); + break; + case 20: + message.recommendedItemListOutputFeatureName = reader.string(); + break; + case 21: + message.recommendedItemScoreOutputFeatureName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ItemSimilarityRecommender(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "itemItemSimilarities": + message.itemItemSimilarities.push($root.CoreML.Specification.ItemSimilarityRecommender.SimilarItems.decodeText(reader)); + break; + case "itemStringIds": + message.itemStringIds = $root.CoreML.Specification.StringVector.decodeText(reader); + break; + case "itemInt64Ids": + message.itemInt64Ids = $root.CoreML.Specification.Int64Vector.decodeText(reader); + break; + case "itemInputFeatureName": + message.itemInputFeatureName = reader.string(); + break; + case "numRecommendationsInputFeatureName": + message.numRecommendationsInputFeatureName = reader.string(); + break; + case "itemRestrictionInputFeatureName": + message.itemRestrictionInputFeatureName = reader.string(); + break; + case "itemExclusionInputFeatureName": + message.itemExclusionInputFeatureName = reader.string(); + break; + case "recommendedItemListOutputFeatureName": + message.recommendedItemListOutputFeatureName = reader.string(); + break; + case "recommendedItemScoreOutputFeatureName": + message.recommendedItemScoreOutputFeatureName = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ItemSimilarityRecommender.prototype.itemStringIds = null; +$root.CoreML.Specification.ItemSimilarityRecommender.prototype.itemInt64Ids = null; +$root.CoreML.Specification.ItemSimilarityRecommender.prototype.itemInputFeatureName = ""; +$root.CoreML.Specification.ItemSimilarityRecommender.prototype.numRecommendationsInputFeatureName = ""; +$root.CoreML.Specification.ItemSimilarityRecommender.prototype.itemRestrictionInputFeatureName = ""; +$root.CoreML.Specification.ItemSimilarityRecommender.prototype.itemExclusionInputFeatureName = ""; +$root.CoreML.Specification.ItemSimilarityRecommender.prototype.recommendedItemListOutputFeatureName = ""; +$root.CoreML.Specification.ItemSimilarityRecommender.prototype.recommendedItemScoreOutputFeatureName = ""; + +$root.CoreML.Specification.ItemSimilarityRecommender.ConnectedItem = class ConnectedItem { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ItemSimilarityRecommender.ConnectedItem(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.itemId = reader.uint64(); + break; + case 2: + message.similarityScore = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ItemSimilarityRecommender.ConnectedItem(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "itemId": + message.itemId = reader.uint64(); + break; + case "similarityScore": + message.similarityScore = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ItemSimilarityRecommender.ConnectedItem.prototype.itemId = protobuf.Uint64.create(0); +$root.CoreML.Specification.ItemSimilarityRecommender.ConnectedItem.prototype.similarityScore = 0; + +$root.CoreML.Specification.ItemSimilarityRecommender.SimilarItems = class SimilarItems { + + constructor() { + this.similarItemList = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ItemSimilarityRecommender.SimilarItems(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.itemId = reader.uint64(); + break; + case 2: + message.similarItemList.push($root.CoreML.Specification.ItemSimilarityRecommender.ConnectedItem.decode(reader, reader.uint32())); + break; + case 3: + message.itemScoreAdjustment = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.ItemSimilarityRecommender.SimilarItems(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "itemId": + message.itemId = reader.uint64(); + break; + case "similarItemList": + message.similarItemList.push($root.CoreML.Specification.ItemSimilarityRecommender.ConnectedItem.decodeText(reader)); + break; + case "itemScoreAdjustment": + message.itemScoreAdjustment = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.ItemSimilarityRecommender.SimilarItems.prototype.itemId = protobuf.Uint64.create(0); +$root.CoreML.Specification.ItemSimilarityRecommender.SimilarItems.prototype.itemScoreAdjustment = 0; + +$root.CoreML.Specification.LinkedModel = class LinkedModel { + + constructor() { + } + + get LinkType() { + $root.CoreML.Specification.LinkedModel.LinkTypeSet = $root.CoreML.Specification.LinkedModel.LinkTypeSet || new Set([ "linkedModelFile"]); + return Object.keys(this).find((key) => $root.CoreML.Specification.LinkedModel.LinkTypeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LinkedModel(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.linkedModelFile = $root.CoreML.Specification.LinkedModelFile.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LinkedModel(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "linkedModelFile": + message.linkedModelFile = $root.CoreML.Specification.LinkedModelFile.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LinkedModelFile = class LinkedModelFile { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.LinkedModelFile(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.linkedModelFileName = $root.CoreML.Specification.StringParameter.decode(reader, reader.uint32()); + break; + case 2: + message.linkedModelSearchPath = $root.CoreML.Specification.StringParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.CoreML.Specification.LinkedModelFile(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "linkedModelFileName": + message.linkedModelFileName = $root.CoreML.Specification.StringParameter.decodeText(reader); + break; + case "linkedModelSearchPath": + message.linkedModelSearchPath = $root.CoreML.Specification.StringParameter.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.CoreML.Specification.LinkedModelFile.prototype.linkedModelFileName = null; +$root.CoreML.Specification.LinkedModelFile.prototype.linkedModelSearchPath = null; + +$root.CoreML.Specification.ClassConfidenceThresholding = class ClassConfidenceThresholding { + + constructor() { + this.precisionRecallCurves = []; + } + + static decode(reader, length) { + const message = new $root.CoreML.Specification.ClassConfidenceThresholding(); + const end = length !== undefined ? 
reader.position + length : reader.length;
+        while (reader.position < end) {
+            const tag = reader.uint32();
+            switch (tag >>> 3) {
+                case 100:
+                    message.precisionRecallCurves.push($root.CoreML.Specification.PrecisionRecallCurve.decode(reader, reader.uint32()));
+                    break;
+                default:
+                    reader.skipType(tag & 7);
+                    break;
+            }
+        }
+        return message;
+    }
+
+    static decodeText(reader) {
+        const message = new $root.CoreML.Specification.ClassConfidenceThresholding();
+        reader.start();
+        while (!reader.end()) {
+            const tag = reader.tag();
+            switch (tag) {
+                case "precisionRecallCurves":
+                    message.precisionRecallCurves.push($root.CoreML.Specification.PrecisionRecallCurve.decodeText(reader));
+                    break;
+                default:
+                    reader.field(tag, message);
+                    break;
+            }
+        }
+        return message;
+    }
+};
diff --git a/coreml.js b/coreml.js
new file mode 100644
index 00000000000..03e6bd35147
--- /dev/null
+++ b/coreml.js
@@ -0,0 +1,1482 @@
+
+import * as base from './base.js';
+import * as protobuf from './protobuf.js';
+
+const coreml = {};
+
+coreml.ModelFactory = class {
+
+    match(context) {
+        const stream = context.stream;
+        const identifier = context.identifier.toLowerCase();
+        const extension = identifier.split('.').pop().toLowerCase();
+        const tags = context.tags('pb');
+        if (tags.get(1) === 0 && tags.get(2) === 2) {
+            if (extension === 'pb') {
+                const tags = context.tags('pb+');
+                const keys = Object.keys(tags).map((key) => parseInt(key, 10));
+                const match = (key) =>
+                    (key >= 200 && key < 220) ||
+                    (key >= 300 && key < 320) ||
+                    (key >= 400 && key < 420) ||
+                    (key >= 500 && key < 520) ||
+                    (key >= 550 && key < 560) ||
+                    (key >= 600 && key < 620) ||
+                    (key === 900) ||
+                    (key >= 2000 && key < 2010) ||
+                    (key === 3000);
+                if (!keys.some((key) => match(key))) {
+                    return null;
+                }
+            }
+            return 'coreml.pb';
+        }
+        if (extension === 'pbtxt') {
+            const tags = context.tags('pbtxt');
+            if (tags.has('specificationVersion') && tags.has('description')) {
+                return 'coreml.pbtxt';
+            }
+        }
+        if (identifier === 'manifest.json') {
+            const obj = context.peek('json');
+            if (obj && obj.rootModelIdentifier && obj.itemInfoEntries) {
+                const entries = Object.keys(obj.itemInfoEntries).map((key) => obj.itemInfoEntries[key]);
+                if (entries.filter((entry) => entry.path.toLowerCase().endsWith('.mlmodel')).length === 1) {
+                    return 'coreml.manifest';
+                }
+            }
+        }
+        if (identifier === 'metadata.json') {
+            const obj = context.peek('json');
+            if (obj && obj.rootModelIdentifier && obj.itemInfoEntries) {
+                return 'coreml.metadata';
+            }
+        }
+        if (identifier === 'featuredescriptions.json') {
+            const obj = context.peek('json');
+            if (obj && (obj.Inputs || obj.Outputs)) {
+                return 'coreml.featuredescriptions';
+            }
+        }
+        if (extension === 'bin' && stream.length > 16) {
+            const buffer = stream.peek(Math.min(256, stream.length));
+            for (let i = 0; i < buffer.length - 4; i++) {
+                const signature = (buffer[i] | buffer[i + 1] << 8 | buffer[i + 2] << 16 | buffer[i + 3] << 24) >>> 0;
+                if (signature === 0xdeadbeef) {
+                    return 'coreml.weights';
+                }
+            }
+        }
+        return undefined;
+    }
+
+    async open(context, target) {
+        await context.require('./coreml-proto');
+        const metadata = await context.metadata('coreml-metadata.json');
+        const openBinary = async (stream, context, path, format) => {
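+            // Decode the stream as a top-level CoreML.Specification.Model. The generated
+            // decoders in coreml-proto.js walk the buffer tag by tag (field number is
+            // tag >>> 3, wire type is tag & 7) and skip unknown fields, so models that
+            // use newer, unrecognized fields still load. For ML Program models, blob
+            // weight files referenced as '@model_path/...' are fetched afterwards,
+            // relative to the model package folder.
+            let model = null;
+            try {
+                coreml.proto = protobuf.get('coreml').CoreML.Specification;
+                const reader = protobuf.BinaryReader.open(stream);
+                model = coreml.proto.Model.decode(reader);
+            } catch (error) {
+                const message = error && error.message ? 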
error.message : error.toString(); + throw new coreml.Error(`File format is not coreml.Model (${message.replace(/\.$/, '')}).`); + } + const weightPaths = new Set(); + const walkProgram = (program) => { + for (const func of Object.values(program.functions)) { + for (const block of Object.values(func.block_specializations)) { + for (const operation of block.operations) { + for (const value of Object.values(operation.attributes)) { + if (value.blobFileValue && value.blobFileValue.fileName) { + weightPaths.add(value.blobFileValue.fileName); + } + } + } + } + } + }; + const walkModel = (model) => { + if (model.mlProgram) { + walkProgram(model.mlProgram); + } + if (model.pipeline && model.pipeline.models) { + for (const node of model.pipeline.models) { + walkModel(node); + } + } + if (model.pipelineClassifier && model.pipelineClassifier.pipeline && model.pipelineClassifier.pipeline.models) { + for (const node of model.pipelineClassifier.pipeline.models) { + walkModel(node); + } + } + if (model.pipelineRegressor && model.pipelineRegressor.pipeline && model.pipelineRegressor.pipeline.models) { + for (const node of model.pipelineRegressor.pipeline.models) { + walkModel(node); + } + } + }; + walkModel(model); + const weights = new Map(); + if (weightPaths.size > 0) { + const folder = path.replace(/\/[^/]*$/, ''); + const keys = Array.from(weightPaths); + const paths = keys.map((path) => path.replace(/^@model_path\//, `${folder}/`)); + try { + const contexts = await Promise.all(paths.map((path) => context.fetch(path))); + for (let i = 0; i < keys.length; i++) { + weights.set(keys[i], contexts[i].stream); + } + } catch (error) { + // continue regardless of error + } + } + return new coreml.Model(metadata, format, model, weights); + }; + const openText = async (stream) => { + let model = null; + try { + coreml.proto = protobuf.get('coreml').CoreML.Specification; + const reader = protobuf.TextReader.open(stream); + model = coreml.proto.Model.decodeText(reader); + } catch (error) { + const message = error && error.message ? 
error.message : error.toString();
+                throw new coreml.Error(`File format is not coreml.Model (${message.replace(/\.$/, '')}).`);
+            }
+            const weights = new Map();
+            return new coreml.Model(metadata, null, model, weights);
+        };
+        const openManifest = async (obj, context, path) => {
+            const entries = Object.values(obj.itemInfoEntries).filter((entry) => entry.path.toLowerCase().endsWith('.mlmodel'));
+            if (entries.length !== 1) {
+                throw new coreml.Error('Manifest does not contain Core ML model.');
+            }
+            const name = `${path}Data/${entries[0].path}`;
+            const content = await context.fetch(name);
+            return openBinary(content.stream, context, name, 'Core ML Package');
+        };
+        const openManifestStream = async (context, path) => {
+            const name = `${path}Manifest.json`;
+            const content = await context.fetch(name);
+            const obj = content.read('json');
+            return openManifest(obj, context, path);
+        };
+        switch (target) {
+            case 'coreml.pb': {
+                return openBinary(context.stream, context, context.identifier);
+            }
+            case 'coreml.pbtxt': {
+                return openText(context.stream, context, context.identifier);
+            }
+            case 'coreml.manifest': {
+                const obj = context.peek('json');
+                return openManifest(obj, context, '');
+            }
+            case 'coreml.featuredescriptions':
+            case 'coreml.metadata': {
+                return openManifestStream(context, '../../');
+            }
+            case 'coreml.weights': {
+                return openManifestStream(context, '../../../');
+            }
+            default: {
+                throw new coreml.Error(`Unsupported Core ML format '${target}'.`);
+            }
+        }
+    }
+};
+
+coreml.Model = class {
+
+    constructor(metadata, format, model, weights) {
+        this.format = `${format || 'Core ML'} v${model.specificationVersion}`;
+        this.metadata = new Map();
+        const context = new coreml.Context(metadata, model, weights);
+        const graph = new coreml.Graph(context);
+        this.graphs = [ graph ];
+        if (model.description && model.description.metadata) {
+            const properties = model.description.metadata;
+            if (properties.versionString) {
+                this.version = properties.versionString;
+            }
+            if (properties.shortDescription) {
+                this.description = properties.shortDescription;
+            }
+            if (properties.author) {
+                this.metadata.set('author', properties.author);
+            }
+            if (properties.license) {
+                this.metadata.set('license', properties.license);
+            }
+            if (properties.userDefined && Object.keys(properties.userDefined).length > 0) {
+                /* empty */
+            }
+        }
+    }
+};
+
+coreml.Graph = class {
+
+    constructor(context) {
+        this.name = '';
+        this.type = context.type;
+        this.groups = context.groups;
+        for (const value of context.values.values()) {
+            const name = value.name;
+            const type = value.type;
+            const description = value.description;
+            const initializer = value.initializer;
+            if (!value.obj) {
+                value.obj = new coreml.Value(name, type, description, initializer);
+            }
+        }
+        this.inputs = context.inputs.map((argument) => {
+            const values = argument.value.map((value) => value.obj);
+            return new coreml.Argument(argument.name, argument.visible, values);
+        });
+        this.outputs = context.outputs.map((argument) => {
+            const values = argument.value.map((value) => value.obj);
+            return new coreml.Argument(argument.name, argument.visible, values);
+        });
+        for (const obj of context.nodes) {
+            const attributes = obj.attributes;
+            switch (obj.type) {
+                case 'loop':
+                    attributes.conditionNetwork = new coreml.Graph(attributes.conditionNetwork);
+                    attributes.bodyNetwork = new coreml.Graph(attributes.bodyNetwork);
+                    break;
+                case 'branch':
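+                    // Like 'loop' above, 'branch' layers carry nested networks; wrapping
+                    // them in coreml.Graph lets these attributes render as subgraphs
+                    // (coreml.Attribute marks any coreml.Graph value with type 'graph').
+                    attributes.ifBranch = new coreml.Graph(attributes.ifBranch);
+                    attributes.elseBranch = new 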
coreml.Graph(attributes.elseBranch); + break; + default: + break; + } + } + this.nodes = context.nodes.map((obj) => new coreml.Node(context, obj)); + } +}; + +coreml.Argument = class { + + constructor(name, visible, value) { + this.name = name; + this.visible = visible; + this.value = value; + } +}; + +coreml.Value = class { + + constructor(name, type, description, initializer) { + if (typeof name !== 'string') { + throw new coreml.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type ? type : initializer ? initializer.type : null; + this.description = description || null; + this.initializer = initializer || null; + this.quantization = initializer ? initializer.quantization : null; + } +}; + +coreml.Node = class { + + constructor(context, obj) { + if (!obj.type) { + throw new Error('Undefined node type.'); + } + if (obj.group) { + this.group = obj.group || null; + } + this.type = Object.assign({}, context.metadata.type(obj.type) || { name: obj.type }); + this.type.name = obj.type.split(':').pop(); + this.name = obj.name || ''; + this.description = obj.description || ''; + this.inputs = (obj.inputs || []).map((argument) => { + const values = argument.value.map((value) => value.obj); + return new coreml.Argument(argument.name, argument.visible, values); + }); + this.outputs = (obj.outputs || []).map((argument) => { + const values = argument.value.map((value) => value.obj); + return new coreml.Argument(argument.name, argument.visible, values); + }); + this.attributes = Object.entries(obj.attributes).map(([name, value]) => { + const metadata = context.metadata.attribute(obj.type, name); + return new coreml.Attribute(metadata, name, value); + }); + } +}; + +coreml.Attribute = class { + + constructor(metadata, name, value) { + this.name = name; + this.value = value; + if (this.value instanceof coreml.Tensor) { + this.type = 'tensor'; + } + if (metadata) { + if (metadata.type) { + this.type = metadata.type; + } + if (this.type && coreml.proto) { + this.value = coreml.Utility.enum(this.type, this.value); + } + if (metadata.visible === false) { + this.visible = false; + } else if (Object.prototype.hasOwnProperty.call(metadata, 'default')) { + if (Array.isArray(value)) { + value = value.map((item) => item.toNumber()); + } + if (JSON.stringify(metadata.default) == JSON.stringify(value)) { + this.visible = false; + } + } + } + if (this.value instanceof coreml.Graph) { + this.type = 'graph'; + } + } +}; + +coreml.Tensor = class { + + constructor(type, values, quantization, category) { + this.type = type; + this.values = values; + this.category = category; + this._quantization = quantization; + if (type.dataType === 'float32') { + this.encoding = '|'; + } else if ((type.dataType.startsWith('uint') && type.dataType.length === 5) || + (type.dataType.startsWith('int') && type.dataType.length === 4)) { + this.encoding = '>'; + } else { + this.encoding = '<'; + } + if (quantization && + quantization.linearQuantization && + Array.isArray(quantization.linearQuantization.scale) && + Array.isArray(quantization.linearQuantization.bias)) { + this.quantization = { + type: 'linear', + scale: quantization.linearQuantization.scale, + bias: quantization.linearQuantization.bias + }; + } + if (quantization && + quantization.lookupTableQuantization && + quantization.lookupTableQuantization.floatValue && + quantization.lookupTableQuantization.floatValue.length > 0) { + this.quantization = { + type: 'lookup', + value: quantization.lookupTableQuantization.floatValue + }; + } 
+ } +}; + +coreml.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType; + this.shape = shape || new coreml.TensorShape([]); + } + + equals(obj) { + return obj && this.dataType === obj.dataType && this.shape && this.shape.equals(obj.shape); + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +coreml.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions.map((dim) => typeof dim === 'string' || Number.isInteger(dim) ? dim : dim.toNumber()); + } + + equals(obj) { + return obj && Array.isArray(obj.dimensions) && Array.isArray(this.dimensions) && + this.dimensions.length === obj.dimensions.length && + obj.dimensions.every((value, index) => this.dimensions[index] === value); + } + + toString() { + return Array.isArray(this.dimensions) && this.dimensions.length > 0 ? + `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]` : ''; + } +}; + +coreml.ListType = class { + + constructor(elementType) { + this.elementType = elementType; + } + + equals(obj) { + return obj instanceof coreml.ListType && this.elementType.equals(obj.elementType); + } + + toString() { + return `list<${this.elementType}>`; + } +}; + +coreml.MapType = class { + + constructor(keyType, valueType) { + this.keyType = keyType; + this.valueType = valueType; + } + + equals(obj) { + return obj instanceof coreml.MapType && this.keyType.equals(obj.keyType) && this.valueType.equals(obj.valueType); + } + + toString() { + return `map<${this.keyType},${this.valueType}>`; + } +}; + +coreml.SequenceType = class { + + constructor(type) { + this.type = type; + } + + equals(obj) { + return obj instanceof coreml.SequenceType && this.type.equals(obj.type); + } + + toString() { + return `sequence<${this.type}>`; + } +}; + +coreml.ImageType = class { + + constructor(colorSpace, width, height) { + this.width = width; + this.height = height; + switch (colorSpace) { + case coreml.proto.ImageFeatureType.ColorSpace.GRAYSCALE: + this.colorSpace = 'grayscale'; + break; + case coreml.proto.ImageFeatureType.ColorSpace.RGB: + this.colorSpace = 'RGB'; + break; + case coreml.proto.ImageFeatureType.ColorSpace.BGR: + this.colorSpace = 'BGR'; + break; + case coreml.proto.ImageFeatureType.ColorSpace.GRAYSCALE_FLOAT16: + this.colorSpace = 'grayscale:float16'; + break; + default: + throw new coreml.Error(`Unsupported image color space '${colorSpace}'.`); + } + } + + equals(obj) { + return obj instanceof coreml.ImageType && this.width === obj.width && this.height === obj.height && this.colorSpace === obj.colorSpace; + } + + toString() { + return `image<${this.colorSpace},${this.width. toString()}x${this.height}>`; + } +}; + +coreml.OptionalType = class { + + constructor(type) { + this.type = type; + } + + equals(obj) { + return obj instanceof coreml.OptionalType && this.type.equals(obj.type); + } + + toString() { + return `optional<${this.type}>`; + } +}; + +coreml.Context = class { + + constructor(metadata, model, weights, values) { + this.metadata = metadata; + this.weights = weights; + this.values = values || new Map(); + this.nodes = []; + this.inputs = []; + this.outputs = []; + if (model) { + const description = model.description; + const inputs = description && Array.isArray(description.input) ? 
description.input : []; + for (const description of inputs) { + const value = this.output(description.name); + this.update(value, description); + this.inputs.push({ name: description.name, visible: true, value: [ value ] }); + } + this.type = this.model(model, '', description); + const outputs = description && Array.isArray(description.output) ? description.output : []; + for (const description of outputs) { + const value = this.input(description.name); + this.update(value, description); + this.outputs.push({ name: description.name, visible: true, value: [ value ] }); + } + } + } + + context() { + return new coreml.Context(this.metadata, null, this.weights, this.values); + } + + network(obj) { + const context = this.context(); + for (const layer of obj.layers) { + const type = layer.layer; + context.node(context.groups, type, layer.name, '', layer[type], layer.input, layer.output, layer.inputTensor, layer.outputTensor); + } + context.updatePreprocessing('', obj.preprocessing, null); + context.type = 'Neural Network'; + return context; + } + + input(name) { + if (!this.values.has(name)) { + this.values.set(name, { counter: 0, name: name, to: [], from: [] }); + } + return this.values.get(name); + } + + output(name) { + if (!this.values.has(name)) { + const value = { counter: 0, name: name, to: [], from: [] }; + this.values.set(name, value); + const key = `${name}|${value.counter}`; + this.values.set(key, value); + } else { + const value = Object.assign({}, this.values.get(name)); + value.counter++; + value.name = `${name}|${value.counter}`; // custom argument id + this.values.set(name, value); + this.values.set(value.name, value); + } + return this.values.get(name); + } + + update(value, description) { + if (!value.type) { + value.type = coreml.Utility.featureType(description.type); + } + if (!value.description && description.shortDescription) { + value.description = description.shortDescription; + } + } + + node(group, type, name, description, data, inputs, outputs, inputTensors, outputTensors) { + const obj = { + group: group, + type: type, + name: name, + description: description, + attributes: {}, + inputs: [], + outputs: [] + }; + inputs = inputs.map((input, index) => { + const value = this.input(input); + if (!value.type && inputTensors && index < inputTensors.length) { + const tensor = inputTensors[index]; + const shape = tensor && tensor.dimValue ? new coreml.TensorShape(tensor.dimValue) : null; + value.type = new coreml.TensorType('?', shape); + } + return value; + }); + outputs = outputs.map((output, index) => { + const value = this.output(output); + if (!value.type && outputTensors && index < outputTensors.length) { + const tensor = outputTensors[index]; + const shape = tensor && tensor.dimValue ? 
new coreml.TensorShape(tensor.dimValue) : null; + value.type = new coreml.TensorType('?', shape); + } + return value; + }); + const initializers = []; + const initializer = (type, name, shape, data) => { + let dataType = '?'; + let quantization = null; + let values = null; + if (data) { + if (data.floatValue && data.floatValue.length > 0) { + values = data.floatValue; + dataType = 'float32'; + } else if (data.float16Value && data.float16Value.length > 0) { + values = data.float16Value; // byte[] + dataType = 'float16'; + } else if (data.rawValue && data.rawValue.length > 0) { + if (data.quantization) { + values = data.rawValue; + dataType = `uint${data.quantization.numberOfBits}`; + } else { + shape = []; + } + } + quantization = data.quantization || null; + } + const tensorType = new coreml.TensorType(dataType, new coreml.TensorShape(shape)); + const tensor = new coreml.Tensor(tensorType, values, quantization, 'Weights'); + const input = this.metadata.input(type, name); + const visible = input && input.visible === false ? false : true; + const value = { obj: new coreml.Value('', null, null, tensor) }; + initializers.push({ name: name, visible: visible, value: [ value ] }); + }; + const vector = (value) => { + return (value && Object.keys(value).length == 1 && value.vector) ? value.vector : value; + }; + const weights = (type, data) => { + switch (type) { + case 'convolution': { + const weightsShape = [ data.outputChannels, data.kernelChannels, data.kernelSize[0], data.kernelSize[1] ]; + if (data.isDeconvolution) { + weightsShape[0] = data.kernelChannels; + weightsShape[1] = Math.floor(data.outputChannels / (data.nGroups != 0 ? data.nGroups : 1)); + } + initializer(type, 'weights', weightsShape, data.weights); + if (data.hasBias) { + initializer(type, 'bias', [ data.outputChannels ], data.bias); + } + return { 'weights': true, 'bias': data.hasBias }; + } + case 'innerProduct': + initializer(type, 'weights', [ data.outputChannels, data.inputChannels ], data.weights); + if (data.hasBias) { + initializer(type, 'bias', [ data.outputChannels ], data.bias); + } + return { 'weights': true, 'bias': data.hasBias }; + case 'batchnorm': + initializer(type, 'gamma', [ data.channels ], data.gamma); + initializer(type, 'beta', [ data.channels ], data.beta); + if (data.mean) { + initializer(type, 'mean', [ data.channels ], data.mean); + } + if (data.variance) { + initializer(type, 'variance', [ data.channels ], data.variance); + } + return { 'gamma': true, 'beta': true, 'mean': true, 'variance': true }; + case 'embedding': + initializer(type, 'weights', [ data.inputDim, data.outputChannels ], data.weights); + return { 'weights': true }; + case 'loadConstant': + case 'loadConstantND': + initializer(type, 'data', data.shape, data.data); + return { 'data': true }; + case 'scale': + initializer(type, 'scale', data.shapeScale, data.scale); + if (data.hasBias) { + initializer(type, 'bias', data.shapeBias, data.bias); + } + return { 'scale': true, 'bias': data.hasBias }; + case 'bias': + initializer(type, 'bias', data.shape, data.bias); + return { 'bias': true }; + case 'simpleRecurrent': + initializer(type, 'weights', [ data.outputVectorSize, data.inputVectorSize ], data.weightMatrix); + initializer(type, 'recurrent', [ data.outputVectorSize, data.inputVectorSize ], data.recursionMatrix); + if (data.hasBiasVectors) { + initializer(type, 'bias', [ data.outputVectorSize ], data.biasVector); + } + return { 'weightMatrix': true, 'recursionMatrix': true, 'biasVector': data.hasBiasVectors }; + case 'gru': { + const 
recursionMatrixShape = [ data.outputVectorSize, data.outputVectorSize ]; + const weightMatrixShape = [ data.outputVectorSize, data.inputVectorSize ]; + const biasVectorShape = [ data.outputVectorSize ]; + initializer(type, 'updateGateWeightMatrix', weightMatrixShape, data.updateGateWeightMatrix); + initializer(type, 'resetGateWeightMatrix', weightMatrixShape, data.resetGateWeightMatrix); + initializer(type, 'outputGateWeightMatrix', weightMatrixShape, data.outputGateWeightMatrix); + initializer(type, 'updateGateRecursionMatrix', recursionMatrixShape, data.updateGateRecursionMatrix); + initializer(type, 'resetGateRecursionMatrix', recursionMatrixShape, data.resetGateRecursionMatrix); + initializer(type, 'outputGateRecursionMatrix', recursionMatrixShape, data.outputGateRecursionMatrix); + if (data.hasBiasVectors) { + initializer(type, 'updateGateBiasVector', biasVectorShape, data.updateGateBiasVector); + initializer(type, 'resetGateBiasVector', biasVectorShape, data.resetGateBiasVector); + initializer(type, 'outputGateBiasVector', biasVectorShape, data.outputGateBiasVector); + } + return { + 'updateGateWeightMatrix': true, 'resetGateWeightMatrix': true, 'outputGateWeightMatrix': true, + 'updateGateRecursionMatrix': true, 'resetGateRecursionMatrix': true, 'outputGateRecursionMatrix': true, + 'updateGateBiasVector': data.hasBiasVectors, 'resetGateBiasVector': data.hasBiasVectors, 'outputGateBiasVector': data.hasBiasVectors + }; + } + case 'uniDirectionalLSTM': + case 'biDirectionalLSTM': { + const count = (type == 'uniDirectionalLSTM') ? 1 : 2; + const h = data.outputVectorSize; + const x = data.inputVectorSize; + for (let i = 0; i < count; i++) { + const weights = count == 1 ? data.weightParams : data.weightParams[i]; + const suffix = (i == 0) ? '' : '_rev'; + initializer(type, `inputGateWeightMatrix${suffix}`, [h,x], weights.inputGateWeightMatrix); + initializer(type, `forgetGateWeightMatrix${suffix}`, [h,x], weights.forgetGateWeightMatrix); + initializer(type, `blockInputWeightMatrix${suffix}`, [h,x], weights.blockInputWeightMatrix); + initializer(type, `outputGateWeightMatrix${suffix}`, [h,x], weights.outputGateWeightMatrix); + initializer(type, `inputGateRecursionMatrix${suffix}`, [h,h], weights.inputGateRecursionMatrix); + initializer(type, `forgetGateRecursionMatrix${suffix}`, [h,h],weights.forgetGateRecursionMatrix); + initializer(type, `blockInputRecursionMatrix${suffix}`, [h,h], weights.blockInputRecursionMatrix); + initializer(type, `outputGateRecursionMatrix${suffix}`, [h,h], weights.outputGateRecursionMatrix); + if (data.params.hasBiasVectors) { + initializer(type, `inputGateBiasVector${suffix}`, [h], weights.inputGateBiasVector); + initializer(type, `forgetGateBiasVector${suffix}`, [h], weights.forgetGateBiasVector); + initializer(type, `blockInputBiasVector${suffix}`, [h], weights.blockInputBiasVector); + initializer(type, `outputGateBiasVector${suffix}`, [h], weights.outputGateBiasVector); + } + if (data.params.hasPeepholeVectors) { + initializer(type, `inputGatePeepholeVector${suffix}`, [h], weights.inputGatePeepholeVector); + initializer(type, `forgetGatePeepholeVector${suffix}`, [h], weights.forgetGatePeepholeVector); + initializer(type, `outputGatePeepholeVector${suffix}`, [h], weights.outputGatePeepholeVector); + } + } + return { 'weightParams': true }; + } + case 'dictVectorizer': + data.stringToIndex = vector(data.stringToIndex); + return {}; + case 'wordTagger': + data.modelParameterData = Array.from(data.modelParameterData); + data.stringTags = vector(data.stringTags); 
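+ // The keys of the object returned below mark fields already consumed here, so the generic attribute loop in node() skips them instead of duplicating them as attributes.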
+ return { tokensOutputFeatureName: true, tokenTagsOutputFeatureName: true, tokenLengthsOutputFeatureName: true, tokenLocationsOutputFeatureName: true }; + case 'textClassifier': + data.modelParameterData = Array.from(data.modelParameterData); + data.stringClassLabels = vector(data.stringClassLabels); + return {}; + case 'nonMaximumSuppression': + data.stringClassLabels = vector(data.stringClassLabels); + return {}; + default: + return {}; + } + }; + if (data) { + const attributes = obj.attributes; + const map = weights(type, data, initializers); + for (const [name, value] of Object.entries(data)) { + if (!map[name]) { + attributes[name] = value; + } + } + switch (obj.type) { + case 'loop': + attributes.bodyNetwork = this.network(attributes.bodyNetwork); + attributes.conditionNetwork = this.network(attributes.conditionNetwork); + break; + case 'branch': + attributes.ifBranch = this.network(attributes.ifBranch); + attributes.elseBranch = this.network(attributes.elseBranch); + break; + default: + break; + } + } + const metadata = this.metadata.type(type); + for (let i = 0; i < inputs.length;) { + const input = metadata && metadata.inputs && i < metadata.inputs.length ? metadata.inputs[i] : { name: i === 0 ? 'input' : i.toString() }; + const count = input.type === 'Tensor[]' ? inputs.length - i : 1; + const values = inputs.slice(i, i + count); + obj.inputs.push({ name: input.name, visible: true, value: values }); + i += count; + } + obj.inputs.push(...initializers); + for (let i = 0; i < outputs.length;) { + const output = metadata && metadata.outputs && i < metadata.outputs.length ? metadata.outputs[i] : { name: i === 0 ? 'output' : i.toString() }; + const count = output.type === 'Tensor[]' ? outputs.length - i : 1; + const args = outputs.slice(i, i + count); + obj.outputs.push({ name: output.name, visible: true, value: args }); + i += count; + } + this.nodes.push(obj); + return obj; + } + + model(model, group, description) { + this.groups = this.groups | (group.length > 0 ? true : false); + const shortDescription = model && model.description && model.description.metadata && model.description.metadata.shortDescription ? model.description.metadata.shortDescription : ''; + switch (model.Type) { + case 'neuralNetworkClassifier': { + const neuralNetworkClassifier = model.neuralNetworkClassifier; + for (const layer of neuralNetworkClassifier.layers) { + const type = layer.layer; + this.node(group, type, layer.name, group === '' ? '' : shortDescription, layer[type], layer.input, layer.output, layer.inputTensor, layer.outputTensor); + } + this.updateClassifierOutput(group, neuralNetworkClassifier, description); + this.updatePreprocessing(group, neuralNetworkClassifier.preprocessing, description); + return 'Neural Network Classifier'; + } + case 'neuralNetwork': { + const neuralNetwork = model.neuralNetwork; + for (const layer of neuralNetwork.layers) { + this.node(group, layer.layer, layer.name, group === '' ? 
'' : shortDescription, layer[layer.layer], layer.input, layer.output, layer.inputTensor, layer.outputTensor); + } + this.updatePreprocessing(group, neuralNetwork.preprocessing, description); + return 'Neural Network'; + } + case 'neuralNetworkRegressor': { + const neuralNetworkRegressor = model.neuralNetworkRegressor; + for (const layer of neuralNetworkRegressor.layers) { + this.node(group, layer.layer, layer.name, shortDescription, layer[layer.layer], layer.input, layer.output); + } + this.updatePreprocessing(group, neuralNetworkRegressor.preprocessing, description); + return 'Neural Network Regressor'; + } + case 'pipeline': { + for (let i = 0; i < model.pipeline.models.length; i++) { + this.model(model.pipeline.models[i], `${group ? (`${group}/`) : ''}pipeline[${i}]`, description); + } + return 'Pipeline'; + } + case 'pipelineClassifier': { + for (let i = 0; i < model.pipelineClassifier.pipeline.models.length; i++) { + this.model(model.pipelineClassifier.pipeline.models[i], `${group ? (`${group}/`) : ''}pipelineClassifier[${i}]`, description); + } + return 'Pipeline Classifier'; + } + case 'pipelineRegressor': { + for (let i = 0; i < model.pipelineRegressor.pipeline.models.length; i++) { + this.model(model.pipelineRegressor.pipeline.models[i], `${group ? (`${group}/`) : ''}pipelineRegressor[${i}]`, description); + } + return 'Pipeline Regressor'; + } + case 'glmClassifier': { + this.node(group, 'glmClassifier', null, shortDescription, + { + classEncoding: model.glmClassifier.classEncoding, + offset: model.glmClassifier.offset, + weights: model.glmClassifier.weights + }, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + this.updateClassifierOutput(group, model.glmClassifier, description); + return 'Generalized Linear Classifier'; + } + case 'glmRegressor': { + this.node(group, 'glmRegressor', null, shortDescription, + model.glmRegressor, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Generalized Linear Regressor'; + } + case 'treeEnsembleClassifier': { + this.node(group, 'treeEnsembleClassifier', null, shortDescription, + model.treeEnsembleClassifier.treeEnsemble, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + this.updateClassifierOutput(group, model.treeEnsembleClassifier, description); + return 'Tree Ensemble Classifier'; + } + case 'treeEnsembleRegressor': { + this.node(group, 'treeEnsembleRegressor', null, shortDescription, + model.treeEnsembleRegressor.treeEnsemble, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Tree Ensemble Regressor'; + } + case 'supportVectorClassifier': { + this.node(group, 'supportVectorClassifier', null, shortDescription, + { + coefficients: model.supportVectorClassifier.coefficients, + denseSupportVectors: model.supportVectorClassifier.denseSupportVectors, + kernel: model.supportVectorClassifier.kernel, + numberOfSupportVectorsPerClass: model.supportVectorClassifier.numberOfSupportVectorsPerClass, + probA: model.supportVectorClassifier.probA, + probB: model.supportVectorClassifier.probB, + rho: model.supportVectorClassifier.rho, + supportVectors: model.supportVectorClassifier.supportVectors + }, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + this.updateClassifierOutput(group, model.supportVectorClassifier, description); + return 'Support Vector Classifier'; + } + case 'supportVectorRegressor': { + this.node(group, 'supportVectorRegressor', null, shortDescription, + { + coefficients:
model.supportVectorRegressor.coefficients, + kernel: model.supportVectorRegressor.kernel, + rho: model.supportVectorRegressor.rho, + supportVectors: model.supportVectorRegressor.supportVectors + }, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Support Vector Regressor'; + } + case 'oneHotEncoder': { + const categoryType = model.oneHotEncoder.CategoryType; + const oneHotEncoderParams = { outputSparse: model.oneHotEncoder.outputSparse }; + oneHotEncoderParams[categoryType] = model.oneHotEncoder[categoryType]; + this.node(group, 'oneHotEncoder', null, shortDescription, + oneHotEncoderParams, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'One Hot Encoder'; + } + case 'imputer': { + const imputedValue = model.imputer.ImputedValue; + const replaceValue = model.imputer.ReplaceValue; + const imputerParams = {}; + imputerParams[imputedValue] = model.imputer[imputedValue]; + imputerParams[replaceValue] = model.imputer[replaceValue]; + this.node(group, 'imputer', null, shortDescription, + imputerParams, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Imputer'; + } + case 'featureVectorizer': { + this.node(group, 'featureVectorizer', null, shortDescription, + model.featureVectorizer, + model.description.input.map((item) => item.name), + [ model.description.output[0].name ]); + return 'Feature Vectorizer'; + } + case 'dictVectorizer': { + this.node(group, 'dictVectorizer', null, shortDescription, + model.dictVectorizer, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Dictionary Vectorizer'; + } + case 'scaler': { + this.node(group, 'scaler', null, shortDescription, + model.scaler, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Scaler'; + } + case 'categoricalMapping': { + this.node(group, 'categoricalMapping', null, shortDescription, + model.categoricalMapping, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Categorical Mapping'; + } + case 'normalizer': { + this.node(group, 'normalizer', null, shortDescription, + model.normalizer, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Normalizer'; + } + case 'arrayFeatureExtractor': { + this.node(group, 'arrayFeatureExtractor', null, shortDescription, + { extractIndex: model.arrayFeatureExtractor.extractIndex }, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Array Feature Extractor'; + } + case 'nonMaximumSuppression': { + const nonMaximumSuppressionParams = { + pickTop: model.nonMaximumSuppression.pickTop, + stringClassLabels: model.nonMaximumSuppression.stringClassLabels, + iouThreshold: model.nonMaximumSuppression.iouThreshold, + confidenceThreshold: model.nonMaximumSuppression.confidenceThreshold + }; + this.node(group, 'nonMaximumSuppression', null, shortDescription, + nonMaximumSuppressionParams, + [ + model.nonMaximumSuppression.confidenceInputFeatureName, + model.nonMaximumSuppression.coordinatesInputFeatureName, + model.nonMaximumSuppression.iouThresholdInputFeatureName, + model.nonMaximumSuppression.confidenceThresholdInputFeatureName, + ], + [ + model.nonMaximumSuppression.confidenceOutputFeatureName, + model.nonMaximumSuppression.coordinatesOutputFeatureName + ]); + return 'Non Maximum Suppression'; + } + case 'wordTagger': { + this.node(group, 'wordTagger', null, shortDescription, + model.wordTagger, + [
model.description.input[0].name ], + [ + model.wordTagger.tokensOutputFeatureName, + model.wordTagger.tokenTagsOutputFeatureName, + model.wordTagger.tokenLocationsOutputFeatureName, + model.wordTagger.tokenLengthsOutputFeatureName + ]); + return 'Word Tagger'; + } + case 'textClassifier': { + this.node(group, 'textClassifier', null, shortDescription, + model.textClassifier, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Text Classifier'; + } + case 'visionFeaturePrint': { + const visionFeaturePrintParams = { + scene: model.visionFeaturePrint.scene + }; + this.node(group, 'visionFeaturePrint', null, shortDescription, + visionFeaturePrintParams, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Vision Feature Print'; + } + case 'soundAnalysisPreprocessing': { + this.node(group, 'soundAnalysisPreprocessing', null, shortDescription, + model.soundAnalysisPreprocessing, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Sound Analysis Preprocessing'; + } + case 'kNearestNeighborsClassifier': { + this.node(group, 'kNearestNeighborsClassifier', null, shortDescription, + model.kNearestNeighborsClassifier, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + this.updateClassifierOutput(group, model.kNearestNeighborsClassifier, description); + return 'Nearest Neighbors Classifier'; + } + case 'itemSimilarityRecommender': { + this.node(group, 'itemSimilarityRecommender', null, shortDescription, + { + itemStringIds: model.itemSimilarityRecommender.itemStringIds.vector, + itemItemSimilarities: model.itemSimilarityRecommender.itemItemSimilarities + }, + model.description.input.map((feature) => feature.name), + model.description.output.map((feature) => feature.name)); + return 'Item Similarity Recommender'; + } + case 'audioFeaturePrint': { + this.node(group, 'audioFeaturePrint', null, shortDescription, + model.audioFeaturePrint, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Audio Feature Print'; + } + case 'linkedModel': { + this.node(group, 'linkedModel', null, shortDescription, + model.linkedModel.linkedModelFile, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Linked Model'; + } + case 'customModel': { + this.node(group, 'customModel', null, shortDescription, + { className: model.customModel.className, parameters: model.customModel.parameters }, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'customModel'; + } + case 'mlProgram': { + return this.program(model.mlProgram, group); + } + default: { + throw new coreml.Error(`Unsupported model type '${JSON.stringify(Object.keys(model))}'.`); + } + } + } + + updateClassifierOutput(group, classifier, description) { + let labelProbabilityLayerName = classifier.labelProbabilityLayerName; + if (!labelProbabilityLayerName && this.nodes.length > 0) { + const node = this.nodes.slice(-1).pop(); + if (node && node.outputs.length == 1 && node.outputs[0].value.length == 1) { + labelProbabilityLayerName = node.outputs[0].value[0].name; + } + } + let predictedFeatureName = description.predictedFeatureName; + let predictedProbabilitiesName = description.predictedProbabilitiesName; + if ((predictedFeatureName || predictedProbabilitiesName) && labelProbabilityLayerName && classifier.ClassLabels) { + predictedFeatureName = predictedFeatureName ? 
predictedFeatureName : '?'; + predictedProbabilitiesName = predictedProbabilitiesName ? predictedProbabilitiesName : '?'; + const labelProbabilityInput = `${labelProbabilityLayerName}:labelProbabilityLayerName`; + const values = new Set(); + for (const node of this.nodes) { + for (const output of node.outputs) { + for (const value of output.value) { + if (value.name === labelProbabilityLayerName) { + value.name = labelProbabilityInput; + values.add(value); + } + } + } + } + this.values.set(labelProbabilityInput, this.values.get(labelProbabilityLayerName)); + this.values.delete(labelProbabilityLayerName); + const type = classifier.ClassLabels; + const node = { + // group: this._group, + type: type, + name: null, + description: '', + attributes: classifier[type] || {} + }; + node.inputs = [ + { name: 'input', visible: true, value: Array.from(values) } + ]; + node.outputs = [ + { name: 'probabilities', visible: true, value: [ this.output(predictedProbabilitiesName) ] }, + { name: 'feature', visible: true, value: [ this.output(predictedFeatureName) ] } + ]; + this.nodes.push(node); + } + } + + updatePreprocessing(group, preprocessings, description) { + if (preprocessings && preprocessings.length > 0) { + const preprocessingInput = description.input[0].name; + const inputNodes = []; + for (const node of this.nodes) { + if (node.inputs.some((input) => Array.isArray(input.value) && input.value.some((arg) => arg.name === preprocessingInput))) { + inputNodes.push(node); + } + } + let currentOutput = preprocessingInput; + let preprocessorOutput = null; + let preprocessorIndex = 0; + for (const preprocessing of preprocessings) { + const input = preprocessing.featureName ? preprocessing.featureName : currentOutput; + currentOutput = `${preprocessingInput}:${preprocessorIndex}`; + const preprocessor = preprocessing.preprocessor; + const node = this.node(group, preprocessor, null, '', preprocessing[preprocessor], [ input ], [ currentOutput ]); + /* eslint-disable prefer-destructuring */ + preprocessorOutput = node.outputs[0].value[0]; + /* eslint-enable prefer-destructuring */ + preprocessorIndex++; + } + for (const node of inputNodes) { + for (const input of node.inputs) { + if (Array.isArray(input.value)) { + for (let i = 0; i < input.value.length; i++) { + if (input.value[i].name === preprocessingInput) { + input.value[i] = preprocessorOutput; + } + } + } + } + } + } + } + + program(program, group) { + // TODO: need to handle functions other than main? + const main = program.functions.main; + // TODO: need to handle more than one block specialization? 
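+ // The specialization keys name the targeted opset (e.g. 'CoreML5'; an observed convention, not something this file defines); only the first 'CoreML*' entry is used below.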
+ const block_specializations = main.block_specializations; + const key = Object.keys(block_specializations).filter((key) => key.startsWith('CoreML')).shift(); + const block = block_specializations[key]; + const convertValue = (value) => { + switch (value.value) { + case 'immediateValue': { + const tensor = value.immediateValue.tensor; + const type = coreml.Utility.valueType(value.type); + let values = null; + switch (tensor.value) { + case 'ints': + values = tensor.ints.values; + break; + case 'strings': + values = tensor.strings.values; + break; + case 'bools': + values = tensor.bools.values; + break; + case 'floats': + values = tensor.floats.values; + break; + case 'bytes': + values = tensor.bytes.values; + break; + default: + throw new coreml.Error(`Unsupported tensor value '${tensor.value}'.`); + } + if (type.shape.dimensions.length === 0) { + [values] = values; + } + return values; + } + case 'blobFileValue': { + const type = coreml.Utility.valueType(value.type); + const blob = value.blobFileValue; + const offset = blob.offset.toNumber(); + const file = blob.fileName; + let data = null; + const stream = this.weights.get(file); + if (stream) { + stream.seek(offset); + const buffer = stream.read(32); + const reader = new base.BinaryReader(buffer); + const signature = reader.uint32(); + if (signature == 0xdeadbeef) { + reader.uint32(); // dataType + const size = reader.uint64(); + stream.seek(reader.uint64()); + const length = (type.shape.dimensions || []).reduce((a, b) => a * b, 1); + switch (type.dataType) { + case 'float32': { + const buffer = stream.read(size); + data = new Float32Array(buffer.buffer, buffer.byteOffset, length).slice(); + break; + } + case 'float16': + case 'int8': + case 'uint8': { + data = stream.read(size); + break; + } + default: + throw new coreml.Error(`Unsupported blob data type '${type.dataType}'.`); + } + } + } + return new coreml.Tensor(type, data, null, 'Blob'); + } + default: { + throw new coreml.Error(`Unsupported value '${value.value}'.`); + } + } + }; + const operations = block.operations.map((op) => { + const operation = { + type: op.type, + attributes: {} + }; + for (const [key, value] of Object.entries(op.attributes)) { + operation.attributes[key] = convertValue(value); + } + operation.inputs = Object.entries(op.inputs).map(([name, input]) => { + const args = input.arguments.map((argument) => { + if (argument.name) { + const value = this.input(argument.name); + value.to.push(operation); + return value; + } + return { value: argument.value }; + }); + return { name: name, value: args }; + }); + operation.outputs = op.outputs.map((output) => { + const value = this.input(output.name); + value.type = coreml.Utility.valueType(output.type); + value.from.push(operation); + return { name: 'output', value: [ value ] }; + }); + return operation; + }); + for (const op of operations) { + if (op.type === 'const' && op.inputs.length === 0 && + op.outputs.length === 1 && op.outputs[0].value.length === 1) { + /* eslint-disable prefer-destructuring */ + const value = op.outputs[0].value[0]; + /* eslint-enable prefer-destructuring */ + if (op.attributes && op.attributes.val) { + const type = value.type; + const data = op.attributes.val; + if (data instanceof Uint8Array && data.length === 2 && + type.dataType === 'float16' && type.shape.dimensions.length === 0) { + const view = new DataView(data.buffer, data.byteOffset, data.byteLength); + value.value = view.getFloat16(0, true); + } else { + value.value = data; + } + value.const = true; + op.delete = true; + } + } + } 
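+ // At this point 'const' ops have been folded into their output values (value.value, value.const) and flagged with op.delete. + // Minimal sketch of the float16 decoding above, assuming DataView.getFloat16 is available (recent runtimes) or polyfilled: + // const f16 = (bytes) => new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength).getFloat16(0, true);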
+ for (const op of operations) { + for (const input of op.inputs) { + if (input.value.length > 1 && input.value.some((argument) => argument.const)) { + if (!input.value.every((argument) => argument.value instanceof coreml.Tensor)) { + for (const value of input.value) { + for (const from of value.from) { + from.delete = false; + } + delete value.value; + } + } + } + } + } + for (const op of operations.filter((op) => !op.delete)) { + op.inputs = op.inputs.filter((input) => { + if (input.value.every((value) => value.value === undefined || value.value instanceof coreml.Tensor)) { + return true; + } + op.attributes[input.name] = input.value.length === 1 ? + input.value[0].value : + input.value.map((argument) => argument.value[0]); + return false; + }); + } + const mapValue = (name, value) => { + if (value.value instanceof coreml.Tensor) { + value.initializer = value.value; + delete value.value; + } + if (!this.values.has(name)) { + this.values.set(name, value); + } else if ((value.type && !value.type.equals(this.values.get(name).type)) || + (value.initializer && value.initializer !== this.values.get(name).initializer)) { + throw new coreml.Error(`Duplicate value '${name}'.`); + } + return this.values.get(name); + }; + for (const op of operations.filter((op) => !op.delete)) { + for (const argument of op.inputs) { + for (const value of argument.value) { + mapValue(value.name, value); + } + } + for (const argument of op.outputs) { + for (const value of argument.value) { + mapValue(value.name, value); + } + } + } + for (const op of operations.filter((op) => !op.delete)) { + op.group = group; + op.type = `program:${op.type}`; + const metadata = this.metadata.type(op.type); + if (metadata && Array.isArray(metadata.inputs)) { + const map = new Map(metadata.inputs.map((input, index) => [ input.name, index + 1 ])); + op.inputs.sort((a, b) => (map.get(a.name) || map.size) - (map.get(b.name) || map.size)); + } + this.nodes.push(op); + } + return 'ML Program'; + } +}; + +coreml.Utility = class { + + static enum(name, value) { + let type = coreml.proto; + const parts = name.split('.'); + while (type && parts.length > 0) { + type = type[parts.shift()]; + } + if (type) { + coreml.Utility._enumKeyMap = coreml.Utility._enumKeyMap || new Map(); + if (!coreml.Utility._enumKeyMap.has(name)) { + const map = new Map(Object.entries(type).map(([key, value]) => [ value, key ])); + coreml.Utility._enumKeyMap.set(name, map); + } + const map = coreml.Utility._enumKeyMap.get(name); + if (map.has(value)) { + return map.get(value); + } + } + return value; + } + + static featureType(type) { + let result = '?'; + if (type) { + switch (type.Type) { + case 'multiArrayType': { + let shape = new coreml.TensorShape([]); + if (type.multiArrayType.shape && type.multiArrayType.shape.length > 0) { + shape = new coreml.TensorShape(type.multiArrayType.shape.map((dim) => dim.toNumber())); + } + let dataType; + const ArrayDataType = coreml.proto.ArrayFeatureType.ArrayDataType; + switch (type.multiArrayType.dataType) { + case ArrayDataType.INVALID_ARRAY_DATA_TYPE: + dataType = '?'; + break; + case ArrayDataType.FLOAT16: + dataType = 'float16'; + break; + case ArrayDataType.FLOAT32: + dataType = 'float32'; + break; + case ArrayDataType.DOUBLE: + dataType = 'float64'; + break; + case ArrayDataType.INT32: + dataType = 'int32'; + break; + default: + throw new coreml.Error(`Unsupported array data type '${type.multiArrayType.dataType}'.`); + } + result = new coreml.TensorType(dataType, shape); + break; + } + case 'stringType': { + result = new 
coreml.TensorType('string'); + break; + } + case 'doubleType': { + result = new coreml.TensorType('float64'); + break; + } + case 'int64Type': { + result = new coreml.TensorType('int64'); + break; + } + case 'dictionaryType': { + result = new coreml.MapType(type.dictionaryType.KeyType.replace('KeyType', ''), 'float64'); + break; + } + case 'sequenceType': { + result = new coreml.SequenceType(coreml.Utility.featureType(type[type.Type])); + break; + } + case 'imageType': { + result = new coreml.ImageType(type.imageType.colorSpace, type.imageType.width, type.imageType.height); + break; + } + default: { + throw new coreml.Error(`Unsupported feature type '${type.Type}'.`); + } + } + if (type.isOptional) { + result = new coreml.OptionalType(result); + } + } + return result; + } + + static tensorType(type) { + if (!coreml.Utility._dataTypes) { + coreml.Utility._dataTypes = new Map(Object.entries(coreml.proto.MILSpec.DataType).map((([key, value]) => [value, key.toLowerCase()]))); + coreml.Utility._dataTypes.delete(0); + coreml.Utility._dataTypes.set(1, 'boolean'); + } + const shape = type.dimensions.map((dim) => dim.constant ? dim.constant.size : '?'); + const dataType = coreml.Utility._dataTypes.get(type.dataType); + if (!dataType) { + throw new coreml.Error(`Unsupported data type '${type.dataType}'.`); + } + return new coreml.TensorType(dataType, new coreml.TensorShape(shape)); + } + + static valueType(type) { + switch (type.type) { + case 'tensorType': + return coreml.Utility.tensorType(type.tensorType); + case 'listType': + return new coreml.ListType(coreml.Utility.valueType(type.listType.type)); + case 'dictionaryType': + return new coreml.MapType(coreml.Utility.valueType(type.dictionaryType.keyType), coreml.Utility.valueType(type.dictionaryType.valueType)); + default: + throw new coreml.Error(`Unsupported value type '${type.type}'.`); + } + } +}; + +coreml.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'Error loading Core ML model.'; + } +}; + +export const ModelFactory = coreml.ModelFactory; diff --git a/dagre.js b/dagre.js new file mode 100644 index 00000000000..bff9e438381 --- /dev/null +++ b/dagre.js @@ -0,0 +1,2221 @@ + +const dagre = {}; + +// Dagre graph layout +// https://github.com/dagrejs/dagre +// https://github.com/dagrejs/graphlib + +dagre.layout = (graph, layout) => { + const time = (name, callback) => { + // const start = Date.now(); + const result = callback(); + // const duration = Date.now() - start; + // console.log(name + ': ' + duration + 'ms'); + return result; + }; + + // Constructs a new graph from the input graph, which can be used for layout. + // This process copies only whitelisted attributes from the input graph to the + // layout graph. Thus this function serves as a good place to determine what + // attributes can influence layout. 
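+ // Illustrative call (option values match the defaults merged in below): + // dagre.layout(graph, { rankdir: 'tb', nodesep: 50, ranksep: 50, edgesep: 20 }); + // Only the whitelisted attributes survive the copy: node width/height and edge minlen, weight, width, height, labeloffset and labelpos.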
+ const buildLayoutGraph = (graph) => { + const g = new dagre.Graph({ compound: true }); + g.layout = Object.assign({}, { ranksep: 50, edgesep: 20, nodesep: 50, rankdir: 'tb' }, layout); + g.state = Object.assign({}, graph.state); + for (const node of graph.nodes.values()) { + const v = node.v; + const label = node.label; + g.setNode(v, { + width: label.width || 0, + height: label.height || 0 + }); + g.setParent(v, graph.parent(v)); + } + for (const e of graph.edges.values()) { + const edge = e.label; + g.setEdge(e.v, e.w, { + minlen: edge.minlen || 1, + weight: edge.weight || 1, + width: edge.width || 0, + height: edge.height || 0, + labeloffset: edge.labeloffset || 10, + labelpos: edge.labelpos || 'r' + }); + } + return g; + }; + + const runLayout = (g, time) => { + let uniqueIdCounter = 0; + const uniqueId = (prefix) => { + const id = ++uniqueIdCounter; + return prefix + id; + }; + const flat = (list) => { + if (Array.isArray(list) && list.every((item) => !Array.isArray(item))) { + return list; + } + const target = []; + for (const item of list) { + if (!Array.isArray(item)) { + target.push(item); + continue; + } + for (const entry of item) { + target.push(entry); + } + } + return target; + }; + + // Adds a dummy node to the graph and returns v. + const addDummyNode = (g, type, label, name) => { + let v; + do { + v = uniqueId(name); + } while (g.hasNode(v)); + label.dummy = type; + g.setNode(v, label); + return v; + }; + + const asNonCompoundGraph = (g) => { + const graph = new dagre.Graph({}); + graph.layout = g.layout; + graph.state = g.state; + for (const node of g.nodes.values()) { + const v = node.v; + if (g.children(v).length === 0) { + graph.setNode(v, node.label); + } + } + for (const e of g.edges.values()) { + graph.setEdge(e.v, e.w, e.label); + } + return graph; + }; + + const maxRank = (g) => { + let rank = Number.NEGATIVE_INFINITY; + for (const node of g.nodes.values()) { + const x = node.label.rank; + if (x !== undefined && x > rank) { + rank = x; + } + } + return rank === Number.NEGATIVE_INFINITY ? undefined : rank; + }; + + // Given a DAG with each node assigned 'rank' and 'order' properties, this function will produce a matrix with the ids of each node. + const buildLayerMatrix = (g) => { + const rank = maxRank(g); + const length = rank === undefined ? 0 : rank + 1; + const layering = Array.from(new Array(length), () => []); + for (const node of g.nodes.values()) { + const label = node.label; + const rank = label.rank; + if (rank !== undefined) { + layering[rank][label.order] = node.v; + } + } + return layering; + }; + + // This idea comes from the Gansner paper: to account for edge labels in our layout we split each rank in half by doubling minlen and halving ranksep. + // Then we can place labels at these mid-points between nodes. + // We also add some minimal padding to the width to push the label for the edge away from the edge itself a bit.
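+ // E.g. an edge with minlen 1 ends up with minlen 2 while ranksep is halved, so overall spacing is roughly unchanged but a middle rank opens up for the label proxy.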
+ const makeSpaceForEdgeLabels = (g) => { + g.layout.ranksep /= 2; + const rankdir = g.layout.rankdir; + for (const e of g.edges.values()) { + const edge = e.label; + edge.minlen *= 2; + if (edge.labelpos.toLowerCase() !== 'c') { + if (rankdir === 'TB' || rankdir === 'BT') { + edge.width += edge.labeloffset; + } else { + edge.height += edge.labeloffset; + } + } + } + }; + + const removeSelfEdges = (g) => { + for (const e of g.edges.values()) { + if (e.v === e.w) { + const label = e.vNode.label; + if (!label.selfEdges) { + label.selfEdges = []; + } + label.selfEdges.push({ e: e, label: e.label }); + g.removeEdge(e); + } + } + }; + + const acyclic_run = (g) => { + const edges = []; + const visited = new Set(); + const path = new Set(); + const stack = Array.from(g.nodes.keys()).reverse(); + while (stack.length > 0) { + const v = stack.pop(); + if (!Array.isArray(v)) { + if (!visited.has(v)) { + visited.add(v); + path.add(v); + stack.push([ v ]); + const out = g.node(v).out; + for (let i = out.length - 1; i >= 0; i--) { + const e = out[i]; + if (path.has(e.w)) { + edges.push(e); + } + stack.push(e.w); + } + } + } else { + path.delete(v[0]); + } + } + for (const e of edges) { + const label = e.label; + g.removeEdge(e); + label.forwardName = e.name; + label.reversed = true; + g.setEdge(e.w, e.v, label, uniqueId('rev')); + } + }; + const acyclic_undo = (g) => { + for (const e of g.edges.values()) { + const edge = e.label; + if (edge.reversed) { + edge.points.reverse(); + g.removeEdge(e); + const forwardName = edge.forwardName; + delete edge.reversed; + delete edge.forwardName; + g.setEdge(e.w, e.v, edge, forwardName); + } + } + }; + + // Returns the amount of slack for the given edge. + // The slack is defined as the difference between the length of the edge and its minimum length. + const slack = (g, e) => { + return e.wNode.label.rank - e.vNode.label.rank - e.label.minlen; + }; + + // Assigns a rank to each node in the input graph that respects the 'minlen' constraint specified on edges between nodes. + // This basic structure is derived from Gansner, et al., 'A Technique for Drawing Directed Graphs.' + // + // Pre-conditions: + // 1. Graph must be a connected DAG + // 2. Graph nodes must be objects + // 3. Graph edges must have 'weight' and 'minlen' attributes + // + // Post-conditions: + // 1. Graph nodes will have a 'rank' attribute based on the results of the + // algorithm. Ranks can start at any index (including negative); we'll + // fix them up later. + const rank = (g) => { + // Constructs a spanning tree with tight edges and adjusts the input node's ranks to achieve this. + // A tight edge is one that has a length that matches its 'minlen' attribute. + // The basic structure for this function is derived from Gansner, et al., 'A Technique for Drawing Directed Graphs.' + // + // Pre-conditions: + // 1. Graph must be a DAG. + // 2. Graph must be connected. + // 3. Graph must have at least one node. + // 4. Graph nodes must have been previously assigned a 'rank' property that respects the 'minlen' property of incident edges. + // 5. Graph edges must have a 'minlen' property. + // + // Post-conditions: + // - Graph nodes will have their rank adjusted to ensure that all edges are tight. + // + // Returns a tree (undirected graph) that is constructed using only 'tight' edges.
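+ // Worked example: with rank(v) = 0, rank(w) = 3 and minlen 1, slack(e) = 3 - 0 - 1 = 2, so the edge is not tight; a tight edge is exactly one whose slack is 0.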
+ const feasibleTree = (g) => { + const t = new dagre.Graph({ directed: false }); + // Choose arbitrary node from which to start our tree + const start = g.nodes.keys().next().value; + const size = g.nodes.size; + t.setNode(start, {}); + // Finds a maximal tree of tight edges and returns the number of nodes in the tree. + const tightTree = (t, g) => { + const stack = Array.from(t.nodes.keys()).reverse(); + while (stack.length > 0) { + const v = stack.pop(); + const node = g.node(v); + for (const e of node.in.concat(node.out)) { + const edgeV = e.v; + const w = (v === edgeV) ? e.w : edgeV; + if (!t.hasNode(w) && !slack(g, e)) { + t.setNode(w, {}); + t.setEdge(v, w, {}); + stack.push(w); + } + } + } + return t.nodes.size; + }; + while (tightTree(t, g) < size) { + // Finds the edge with the smallest slack that is incident on tree and returns it. + let minKey = Number.MAX_SAFE_INTEGER; + let edge = undefined; + for (const e of g.edges.values()) { + if (t.hasNode(e.v) !== t.hasNode(e.w)) { + const key = slack(g, e); + if (key < minKey) { + minKey = key; + edge = e; + } + } + } + const delta = t.hasNode(edge.v) ? slack(g, edge) : -slack(g, edge); + for (const v of t.nodes.keys()) { + g.node(v).label.rank += delta; + } + } + return t; + }; + // Initializes ranks for the input graph using the longest path algorithm. + // This algorithm scales well and is fast in practice, but it yields rather poor solutions. + // Nodes are pushed to the lowest layer possible, leaving the bottom ranks wide and leaving edges longer than necessary. + // However, due to its speed, this algorithm is good for getting an initial ranking that can be fed into other algorithms. + // + // This algorithm does not normalize layers because it will be used by other algorithms in most cases. + // If using this algorithm directly, be sure to run normalize at the end. + // + // Pre-conditions: + // 1. Input graph is a DAG. + // 2. Input graph node labels can be assigned properties. + // + // Post-conditions: + // 1. Each node will be assigned an (unnormalized) 'rank' property. + const longestPath = (g) => { + const visited = new Set(); + const stack = [ Array.from(g.nodes.values()).filter((node) => node.in.length === 0).reverse() ]; + while (stack.length > 0) { + const current = stack[stack.length - 1]; + if (Array.isArray(current)) { + const node = current.pop(); + if (current.length === 0) { + stack.pop(); + } + if (!visited.has(node)) { + visited.add(node); + const children = node.out.map((e) => e.wNode); + if (children.length > 0) { + stack.push(node); + stack.push(children.reverse()); + } else { + node.label.rank = 0; + } + } + } else { + stack.pop(); + let rank = Number.MAX_SAFE_INTEGER; + for (const e of current.out) { + rank = Math.min(rank, e.wNode.label.rank - e.label.minlen); + } + current.label.rank = rank; + } + } + }; + // The network simplex algorithm assigns ranks to each node in the input graph + // and iteratively improves the ranking to reduce the length of edges. + // + // Preconditions: + // 1. The input graph must be a DAG. + // 2. All nodes in the graph must have an object value. + // 3. All edges in the graph must have 'minlen' and 'weight' attributes. + // + // Postconditions: + // 1. All nodes in the graph will have an assigned 'rank' attribute that has + // been optimized by the network simplex algorithm. Ranks start at 0. + // + // A rough sketch of the algorithm is as follows: + // 1. Assign initial ranks to each node.
We use the longest path algorithm, + // which assigns ranks to the lowest position possible. In general this + // leads to very wide bottom ranks and unnecessarily long edges. + // 2. Construct a feasible tight tree. A tight tree is one such that all + // edges in the tree have no slack (difference between length of edge + // and minlen for the edge). This by itself greatly improves the assigned + // rankings by shortening edges. + // 3. Iteratively find edges that have negative cut values. Generally a + // negative cut value indicates that the edge could be removed and a new + // tree edge could be added to produce a more compact graph. + // + // Much of the algorithm here is derived from Gansner, et al., 'A Technique + // for Drawing Directed Graphs.' The structure of the file roughly follows the + // structure of the overall algorithm. + const networkSimplex = (g) => { + // Returns a new graph with only simple edges. Handles aggregation of data associated with multi-edges. + const simplify = (g) => { + const graph = new dagre.Graph(); + graph.layout = g.layout; + graph.state = g.state; + for (const node of g.nodes.values()) { + graph.setNode(node.v, node.label); + } + for (const e of g.edges.values()) { + const simpleEdge = graph.edge(e.v, e.w); + const simpleLabel = simpleEdge ? simpleEdge.label : { weight: 0, minlen: 1 }; + const label = e.label; + graph.setEdge(e.v, e.w, { + weight: simpleLabel.weight + label.weight, + minlen: Math.max(simpleLabel.minlen, label.minlen) + }); + } + return graph; + }; + const initLowLimValues = (tree, root) => { + const dfs = (tree, visited, nextLim, v, parent) => { + const low = nextLim; + const label = tree.node(v).label; + visited.add(v); + for (const w of tree.neighbors(v)) { + if (!visited.has(w)) { + nextLim = dfs(tree, visited, nextLim, w, v); + } + } + label.low = low; + label.lim = nextLim++; + if (parent) { + label.parent = parent; + } else { + // TODO should be able to remove this when we incrementally update low lim + delete label.parent; + } + return nextLim; + }; + root = tree.nodes.keys().next().value; + const visited = new Set(); + dfs(tree, visited, 1, root); + }; + // Initializes cut values for all edges in the tree. + const initCutValues = (t, g) => { + const vs = []; + const visited = new Set(); + const stack = [ Array.from(t.nodes.keys()).reverse() ]; + while (stack.length > 0) { + const current = stack[stack.length - 1]; + if (Array.isArray(current)) { + const v = current.pop(); + if (current.length === 0) { + stack.pop(); + } + if (!visited.has(v)) { + visited.add(v); + const children = t.neighbors(v); + if (children.length > 0) { + stack.push(v); + stack.push(children.reverse()); + } else { + vs.push(v); + } + } + } else { + vs.push(stack.pop()); + } + } + for (const v of vs.slice(0, vs.length - 1)) { + // Given the tight tree, its graph, and a child in the graph, calculate and + // return the cut value for the edge between the child and its parent. + const childLabel = t.node(v).label; + const parent = childLabel.parent; + // The graph's view of the tree edge we're inspecting + const edge = g.edge(v, parent); + // True if the child is on the tail end of the edge in the directed graph + const childIsTail = edge ? true : false; + // The accumulated cut value for the edge between this node and its parent + const graphEdge = edge ? edge.label : g.edge(parent, v).label; + let cutValue = graphEdge.weight; + const node = g.node(v); + for (const e of node.in.concat(node.out)) { + const isOutEdge = e.v === v; + const other = isOutEdge ?
e.w : e.v; + if (other !== parent) { + const pointsToHead = isOutEdge === childIsTail; + cutValue += pointsToHead ? e.label.weight : -e.label.weight; + const edge = t.edge(v, other); + if (edge) { + const otherCutValue = edge.label.cutvalue; + cutValue += pointsToHead ? -otherCutValue : otherCutValue; + } + } + } + t.edge(v, parent).label.cutvalue = cutValue; + } + }; + const leaveEdge = (tree) => { + return Array.from(tree.edges.values()).find((e) => e.label.cutvalue < 0); + }; + const enterEdge = (t, g, edge) => { + let v = edge.v; + let w = edge.w; + // For the rest of this function we assume that v is the tail and w is the + // head, so if we don't have this edge in the graph we should flip it to + // match the correct orientation. + if (!g.edge(v, w)) { + v = edge.w; + w = edge.v; + } + const vLabel = t.node(v).label; + const wLabel = t.node(w).label; + let tailLabel = vLabel; + let flip = false; + // If the root is in the tail of the edge then we need to flip the logic that + // checks for the head and tail nodes in the candidates function below. + if (vLabel.lim > wLabel.lim) { + tailLabel = wLabel; + flip = true; + } + // Returns true if the specified node is descendant of the root node per the assigned low and lim attributes in the tree. + const isDescendant = (vLabel, rootLabel) => { + return rootLabel.low <= vLabel.lim && vLabel.lim <= rootLabel.lim; + }; + let minKey = Number.POSITIVE_INFINITY; + let minValue = undefined; + for (const edge of g.edges.values()) { + if (flip === isDescendant(t.node(edge.v).label, tailLabel) && + flip !== isDescendant(t.node(edge.w).label, tailLabel)) { + const key = slack(g, edge); + if (key < minKey) { + minKey = key; + minValue = edge; + } + } + } + return minValue; + }; + const exchangeEdges = (t, g, e, f) => { + t.removeEdge(e); + t.setEdge(f.v, f.w, {}); + initLowLimValues(t); + initCutValues(t, g); + // update ranks + const root = Array.from(t.nodes.keys()).find((v) => !g.node(v).label.parent); + const stack = [ root ]; + const visited = new Set(); + while (stack.length > 0) { + const v = stack.pop(); + if (!visited.has(v)) { + visited.add(v); + const neighbors = t.neighbors(v); + for (let i = neighbors.length - 1; i >= 0; i--) { + stack.push(neighbors[i]); + } + } + } + const vs = Array.from(visited); + for (const v of vs.slice(1)) { + const parent = t.node(v).label.parent; + let edge = g.edge(v, parent); + let flipped = false; + if (!edge) { + edge = g.edge(parent, v); + flipped = true; + } + g.node(v).label.rank = g.node(parent).label.rank + (flipped ? edge.label.minlen : -edge.label.minlen); + } + }; + g = simplify(g); + longestPath(g); + const t = feasibleTree(g); + initLowLimValues(t); + initCutValues(t, g); + let e; + let f; + while ((e = leaveEdge(t))) { + f = enterEdge(t, g, e); + exchangeEdges(t, g, e, f); + } + }; + switch (g.layout.ranker) { + case 'tight-tree': + longestPath(g); + feasibleTree(g); + break; + case 'longest-path': + longestPath(g); + break; + default: + networkSimplex(g); + break; + } + }; + + // Creates temporary dummy nodes that capture the rank in which each edge's label is going to, if it has one of non-zero width and height. + // We do this so that we can safely remove empty ranks while preserving balance for the label's position. 
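+ // E.g. a labeled edge spanning ranks 0..4 gets an 'edge-proxy' dummy at rank 2; removeEdgeLabelProxies later records that rank as the edge's labelRank.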
+ const injectEdgeLabelProxies = (g) => { + for (const e of g.edges.values()) { + const edge = e.label; + if (edge.width && edge.height) { + const v = e.vNode.label; + const w = e.wNode.label; + addDummyNode(g, 'edge-proxy', { rank: (w.rank - v.rank) / 2 + v.rank, e: e }, '_ep'); + } + } + }; + + const removeEmptyRanks = (g) => { + // Ranks may not start at 0, so we need to offset them + if (g.nodes.size > 0) { + let minRank = Number.MAX_SAFE_INTEGER; + let maxRank = Number.MIN_SAFE_INTEGER; + const nodes = Array.from(g.nodes.values()); + for (const node of nodes) { + const label = node.label; + if (label.rank !== undefined) { + minRank = Math.min(minRank, label.rank); + maxRank = Math.max(maxRank, label.rank); + } + } + const size = maxRank - minRank; + if (size > 0) { + const layers = new Array(size); + for (const node of nodes) { + const label = node.label; + if (label.rank !== undefined) { + const rank = label.rank - minRank; + if (!layers[rank]) { + layers[rank] = []; + } + layers[rank].push(node.v); + } + } + let delta = 0; + const nodeRankFactor = g.state.nodeRankFactor; + for (let i = 0; i < layers.length; i++) { + const vs = layers[i]; + if (vs === undefined && i % nodeRankFactor !== 0) { + delta--; + } else if (delta && vs) { + for (const v of vs) { + g.node(v).label.rank += delta; + } + } + } + } + } + }; + + // A nesting graph creates dummy nodes for the tops and bottoms of subgraphs, + // adds appropriate edges to ensure that all cluster nodes are placed between + // these boundaries, and ensures that the graph is connected. + // In addition we ensure, through the use of the minlen property, that nodes + // and subgraph border nodes do not end up on the same rank. + // + // Preconditions: + // 1. Input graph is a DAG + // 2. Nodes in the input graph have a minlen attribute + // + // Postconditions: + // 1. Input graph is connected. + // 2. Dummy nodes are added for the tops and bottoms of subgraphs. + // 3. The minlen attribute for nodes is adjusted to ensure nodes do not + // get placed on the same rank as subgraph border nodes. + // + // The nesting graph idea comes from Sander, 'Layout of Compound Directed Graphs.' + const nestingGraph_run = (g) => { + const root = addDummyNode(g, 'root', {}, '_root'); + const treeDepths = (g) => { + const depths = {}; + const dfs = (v, depth) => { + const children = g.children(v); + if (children && children.length > 0) { + for (const child of children) { + dfs(child, depth + 1); + } + } + depths[v] = depth; + }; + for (const v of g.children()) { + dfs(v, 1); + } + return depths; + }; + const dfs = (g, root, nodeSep, weight, height, depths, v) => { + const children = g.children(v); + if (!children.length) { + if (v !== root) { + g.setEdge(root, v, { weight: 0, minlen: nodeSep }); + } + return; + } + const top = addDummyNode(g, 'border', { width: 0, height: 0 }, '_bt'); + const bottom = addDummyNode(g, 'border', { width: 0, height: 0 }, '_bb'); + const label = g.node(v).label; + g.setParent(top, v); + label.borderTop = top; + g.setParent(bottom, v); + label.borderBottom = bottom; + for (const child of children) { + dfs(g, root, nodeSep, weight, height, depths, child); + const childNode = g.node(child).label; + const childTop = childNode.borderTop ? childNode.borderTop : child; + const childBottom = childNode.borderBottom ? childNode.borderBottom : child; + const thisWeight = childNode.borderTop ? weight : 2 * weight; + const minlen = childTop !== childBottom ?
1 : height - depths[v] + 1; + g.setEdge(top, childTop, { weight: thisWeight, minlen: minlen, nestingEdge: true }); + g.setEdge(childBottom, bottom, { weight: thisWeight, minlen: minlen, nestingEdge: true }); + } + if (!g.parent(v)) { + g.setEdge(root, top, { weight: 0, minlen: height + depths[v] }); + } + }; + const depths = treeDepths(g); + const height = Math.max(...Object.values(depths)) - 1; // Note: depths is an Object not an array + const nodeSep = 2 * height + 1; + g.state.nestingRoot = root; + // Multiply minlen by nodeSep to align nodes on non-border ranks. + for (const e of g.edges.values()) { + e.label.minlen *= nodeSep; + } + // Calculate a weight that is sufficient to keep subgraphs vertically compact + const weight = Array.from(g.edges.values()).reduce((acc, e) => acc + e.label.weight, 0) + 1; + // Create border nodes and link them up + for (const child of g.children()) { + dfs(g, root, nodeSep, weight, height, depths, child); + } + // Save the multiplier for node layers for later removal of empty border layers. + g.state.nodeRankFactor = nodeSep; + }; + const nestingGraph_cleanup = (g) => { + g.removeNode(g.state.nestingRoot); + delete g.state.nestingRoot; + for (const e of g.edges.values()) { + if (e.label.nestingEdge) { + g.removeEdge(e); + } + } + }; + + const assignRankMinMax = (g) => { + // Adjusts the ranks for all nodes in the graph such that all nodes v have rank(v) >= 0 and at least one node w has rank(w) = 0. + let min = Number.POSITIVE_INFINITY; + for (const node of g.nodes.values()) { + const rank = node.label.rank; + if (rank !== undefined && rank < min) { + min = rank; + } + } + for (const node of g.nodes.values()) { + const label = node.label; + if (label.rank !== undefined) { + label.rank -= min; + } + } + let maxRank = 0; + for (const node of g.nodes.values()) { + const label = node.label; + if (label.borderTop) { + label.minRank = g.node(label.borderTop).label.rank; + label.maxRank = g.node(label.borderBottom).label.rank; + maxRank = Math.max(maxRank, label.maxRank); + } + } + g.state.maxRank = maxRank; + }; + + // Breaks any long edges in the graph into short segments that span 1 layer each. + // This operation is undoable with the denormalize function. + // + // Pre-conditions: + // 1. The input graph is a DAG. + // 2. Each node in the graph has a 'rank' property. + // + // Post-condition: + // 1. All edges in the graph have a length of 1. + // 2. Dummy nodes are added where edges have been split into segments. + // 3. The graph is augmented with a 'dummyChains' attribute which contains + // the first dummy in each chain of dummy nodes produced. 
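+ // For example (illustrative, not computed by the code below): an edge v(rank 0) -> w(rank 3) is replaced by v -> d1(rank 1) -> d2(rank 2) -> w, and d1 is recorded in g.state.dummyChains so that denormalize can later collapse the chain again.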
+ const normalize = (g) => { + g.state.dummyChains = []; + for (const e of g.edges.values()) { + let v = e.v; + const w = e.w; + const name = e.name; + const edgeLabel = e.label; + const labelRank = edgeLabel.labelRank; + let vRank = g.node(v).label.rank; + const wRank = g.node(w).label.rank; + if (wRank !== vRank + 1) { + g.removeEdge(e); + let first = true; + vRank++; + while (vRank < wRank) { + edgeLabel.points = []; + delete e.key; + const attrs = { + width: 0, height: 0, + edgeLabel: edgeLabel, + edgeObj: e, + rank: vRank + }; + const dummy = addDummyNode(g, 'edge', attrs, '_d'); + if (vRank === labelRank) { + attrs.width = edgeLabel.width; + attrs.height = edgeLabel.height; + attrs.dummy = 'edge-label'; + attrs.labelpos = edgeLabel.labelpos; + } + g.setEdge(v, dummy, { weight: edgeLabel.weight }, name); + if (first) { + g.state.dummyChains.push(dummy); + first = false; + } + v = dummy; + vRank++; + } + g.setEdge(v, w, { weight: edgeLabel.weight }, name); + } + } + }; + + const denormalize = (g) => { + for (let v of g.state.dummyChains) { + let label = g.node(v).label; + const edgeLabel = label.edgeLabel; + const e = label.edgeObj; + g.setEdge(e.v, e.w, edgeLabel, e.name); + while (label.dummy) { + const [w] = g.successors(v); + g.removeNode(v); + edgeLabel.points.push({ x: label.x, y: label.y }); + if (label.dummy === 'edge-label') { + edgeLabel.x = label.x; + edgeLabel.y = label.y; + edgeLabel.width = label.width; + edgeLabel.height = label.height; + } + v = w; + label = g.node(v).label; + } + } + }; + + const removeEdgeLabelProxies = (g) => { + for (const node of g.nodes.values()) { + const label = node.label; + if (label.dummy === 'edge-proxy') { + label.e.label.labelRank = label.rank; + g.removeNode(node.v); + } + } + }; + + const parentDummyChains = (g) => { + // Find a path from v to w through the lowest common ancestor (LCA). Return the full path and the LCA. 
+ const findPath = (g, postorderNums, v, w) => { + const low = Math.min(postorderNums[v].low, postorderNums[w].low); + const lim = Math.max(postorderNums[v].lim, postorderNums[w].lim); + // Traverse up from v to find the LCA + let parent = v; + const vPath = []; + do { + parent = g.parent(parent); + vPath.push(parent); + } + while (parent && (postorderNums[parent].low > low || lim > postorderNums[parent].lim)); + const lca = parent; + // Traverse from w to LCA + parent = w; + const wPath = []; + while ((parent = g.parent(parent)) !== lca) { + wPath.push(parent); + } + return { path: vPath.concat(wPath.reverse()), lca: lca }; + }; + const postorder = (g) => { + const result = {}; + let lim = 0; + const dfs = (v) => { + const low = lim; + for (const u of g.children(v)) { + dfs(u); + } + result[v] = { low: low, lim: lim++ }; + }; + for (const v of g.children()) { + dfs(v); + } + return result; + }; + const postorderNums = postorder(g); + for (let v of g.state.dummyChains || []) { + const node = g.node(v).label; + const edgeObj = node.edgeObj; + const pathData = findPath(g, postorderNums, edgeObj.v, edgeObj.w); + const path = pathData.path; + const lca = pathData.lca; + let pathIdx = 0; + let pathV = path[pathIdx]; + let ascending = true; + while (v !== edgeObj.w) { + const node = g.node(v).label; + if (ascending) { + while ((pathV = path[pathIdx]) !== lca && g.node(pathV).label.maxRank < node.rank) { + pathIdx++; + } + if (pathV === lca) { + ascending = false; + } + } + if (!ascending) { + while (pathIdx < path.length - 1 && g.node(path[pathIdx + 1]).label.minRank <= node.rank) { + pathIdx++; + } + pathV = path[pathIdx]; + } + g.setParent(v, pathV); + [v] = g.successors(v); + } + } + }; + + const addBorderSegments = (g) => { + const addBorderNode = (g, prop, prefix, sg, sgNode, rank) => { + const label = { width: 0, height: 0, rank: rank, borderType: prop }; + const prev = sgNode[prop][rank - 1]; + const curr = addDummyNode(g, 'border', label, prefix); + sgNode[prop][rank] = curr; + g.setParent(curr, sg); + if (prev) { + g.setEdge(prev, curr, { weight: 1 }); + } + }; + const queue = g.children(); + while (queue.length > 0) { + const v = queue.shift(); + const node = g.node(v).label; + if ('minRank' in node) { + node.borderLeft = []; + node.borderRight = []; + const maxRank = node.maxRank + 1; + for (let rank = node.minRank; rank < maxRank; rank++) { + addBorderNode(g, 'borderLeft', '_bl', v, node, rank); + addBorderNode(g, 'borderRight', '_br', v, node, rank); + } + } + const children = g.children(v); + if (children.length) { + for (const v of children) { + queue.push(v); + } + } + } + }; + + // Applies heuristics to minimize edge crossings in the graph and sets the best order solution as an order attribute on each node. + // + // Pre-conditions: + // 1. Graph must be a DAG + // 2. Graph nodes must have the 'rank' attribute + // 3. Graph edges must have the 'weight' attribute + // + // Post-conditions: + // 1. Graph nodes will have an 'order' attribute based on the results of the algorithm. + const order = (g) => { + const sortSubgraph = (g, v, cg, biasRight) => { + // Given a list of entries of the form {v, barycenter, weight} and a constraint graph, this function will resolve any conflicts between the constraint graph and the barycenters for the entries. + // If the barycenters for an entry would violate a constraint in the constraint graph, then we coalesce the nodes in the conflict into a new node that respects the constraint and aggregates barycenter and weight information. 
+ // This implementation is based on the description in Forster, 'A Fast and Simple Heuristic for Constrained Two-Level Crossing Reduction,' though it differs in some specific details. + // + // Pre-conditions: + // 1. Each entry has the form {v, barycenter, weight}, or if the node has no barycenter, then {v}. + // + // Returns: + // A new list of entries of the form {vs, i, barycenter, weight}. + // The list `vs` may either be a singleton or it may be an aggregation of nodes ordered such that they do not violate constraints from the constraint graph. + // The property `i` is the lowest original index of any of the elements in `vs`. + const resolveConflicts = (entries, cg) => { + const mappedEntries = new Map(); + for (let i = 0; i < entries.length; i++) { + const entry = entries[i]; + const tmp = { indegree: 0, 'in': [], out: [], vs: [ entry.v ], i: i }; + if (entry.barycenter !== undefined) { + tmp.barycenter = entry.barycenter; + tmp.weight = entry.weight; + } + mappedEntries.set(entry.v, tmp); + } + for (const e of cg.edges.values()) { + const entryV = mappedEntries.get(e.v); + const entryW = mappedEntries.get(e.w); + if (entryV && entryW) { + entryW.indegree++; + entryV.out.push(entryW); + } + } + const sourceSet = Array.from(mappedEntries.values()).filter((entry) => !entry.indegree); + const results = []; + function handleIn(vEntry) { + return function(uEntry) { + if (uEntry.merged) { + return; + } + if (uEntry.barycenter === undefined || vEntry.barycenter === undefined || uEntry.barycenter >= vEntry.barycenter) { + let sum = 0; + let weight = 0; + if (vEntry.weight) { + sum += vEntry.barycenter * vEntry.weight; + weight += vEntry.weight; + } + if (uEntry.weight) { + sum += uEntry.barycenter * uEntry.weight; + weight += uEntry.weight; + } + vEntry.vs = uEntry.vs.concat(vEntry.vs); + vEntry.barycenter = sum / weight; + vEntry.weight = weight; + vEntry.i = Math.min(uEntry.i, vEntry.i); + uEntry.merged = true; + } + }; + } + function handleOut(vEntry) { + return function(wEntry) { + wEntry.in.push(vEntry); + if (--wEntry.indegree === 0) { + sourceSet.push(wEntry); + } + }; + } + while (sourceSet.length) { + const entry = sourceSet.pop(); + results.push(entry); + entry.in.reverse().forEach(handleIn(entry)); + entry.out.forEach(handleOut(entry)); + } + return results.filter((entry) => !entry.merged).map((entry) => { + const value = { + vs: entry.vs, + i: entry.i + }; + if (entry.barycenter !== undefined) { + value.barycenter = entry.barycenter; + } + if (entry.weight !== undefined) { + value.weight = entry.weight; + } + return value; + }); + }; + const barycenter = (g, movable) => { + return (movable || []).map((v) => { + const inV = g.node(v).in; + if (!inV.length) { + return { v: v }; + } + const result = inV.reduce((acc, e) => { + const edge = e.label; + const nodeU = e.vNode.label; + return { + sum: acc.sum + (edge.weight * nodeU.order), + weight: acc.weight + edge.weight + }; + }, { sum: 0, weight: 0 }); + return { + v: v, + barycenter: result.sum / result.weight, + weight: result.weight + }; + }); + }; + const sort = (entries, biasRight) => { + const consumeUnsortable = (vs, unsortable, index) => { + let last; + while (unsortable.length && (last = unsortable[unsortable.length - 1]).i <= index) { + unsortable.pop(); + vs.push(last.vs); + index++; + } + return index; + }; + const compareWithBias = (bias) => { + return function(entryV, entryW) { + if (entryV.barycenter < entryW.barycenter) { + return -1; + } else if (entryV.barycenter > entryW.barycenter) { + return 1; + } + return !bias ? entryV.i - entryW.i : entryW.i - entryV.i; + }; + }; + // partition + const parts = { lhs: [], rhs: [] }; + for (const value of entries) { + if ('barycenter' in value) { + parts.lhs.push(value); + } else { + parts.rhs.push(value); + } + } + const sortable = parts.lhs; + const unsortable = parts.rhs.sort((a, b) => -a.i + b.i); + const vs = []; + let sum = 0; + let weight = 0; + let vsIndex = 0; + sortable.sort(compareWithBias(!!biasRight)); + vsIndex = consumeUnsortable(vs, unsortable, vsIndex); + for (const entry of sortable) { + vsIndex += entry.vs.length; + vs.push(entry.vs); + sum += entry.barycenter * entry.weight; + weight += entry.weight; + vsIndex = consumeUnsortable(vs, unsortable, vsIndex); + } + const result = { vs: flat(vs) }; + if (weight) { + result.barycenter = sum / weight; + result.weight = weight; + } + return result; + }; + const node = g.node(v); + const bl = node && node.label ? node.label.borderLeft : undefined; + const br = node && node.label ? node.label.borderRight : undefined; + const subgraphs = {}; + const movable = bl ? g.children(v).filter((w) => w !== bl && w !== br) : g.children(v); + const barycenters = barycenter(g, movable); + for (const entry of barycenters) { + if (g.children(entry.v).length) { + const result = sortSubgraph(g, entry.v, cg, biasRight); + subgraphs[entry.v] = result; + if ('barycenter' in result) { + if (entry.barycenter !== undefined) { + entry.barycenter = (entry.barycenter * entry.weight + result.barycenter * result.weight) / (entry.weight + result.weight); + entry.weight += result.weight; + } else { + entry.barycenter = result.barycenter; + entry.weight = result.weight; + } + } + } + } + const entries = resolveConflicts(barycenters, cg); + // expand subgraphs + for (const entry of entries) { + entry.vs = flat(entry.vs.map((v) => subgraphs[v] ? subgraphs[v].vs : v)); + } + const result = sort(entries, biasRight); + if (bl) { + result.vs = flat([bl, result.vs, br]); + if (g.predecessors(bl).length) { + const blPred = g.node(g.predecessors(bl)[0]).label; + const brPred = g.node(g.predecessors(br)[0]).label; + if (!('barycenter' in result)) { + result.barycenter = 0; + result.weight = 0; + } + result.barycenter = (result.barycenter * result.weight + blPred.order + brPred.order) / (result.weight + 2); + result.weight += 2; + } + } + return result; + }; + const sweepLayerGraphs = (layerGraphs, biasRight) => { + const cg = new dagre.Graph(); + for (const lg of layerGraphs) { + const root = lg.state.root; + const sorted = sortSubgraph(lg, root, cg, biasRight); + const vs = sorted.vs; + const length = vs.length; + for (let i = 0; i < length; i++) { + lg.node(vs[i]).label.order = i; + } + // add subgraph constraints + const prev = {}; + let rootPrev; + let exit = false; + for (const v of vs) { + let child = lg.parent(v); + let prevChild; + while (child) { + const parent = lg.parent(child); + if (parent) { + prevChild = prev[parent]; + prev[parent] = child; + } else { + prevChild = rootPrev; + rootPrev = child; + } + if (prevChild && prevChild !== child) { + cg.setEdge(prevChild, child, null); + exit = true; + break; + } + child = parent; + } + if (exit) { + break; + } + } + } + }; + // A function that takes a layering (an array of layers, each with an array of + // ordered nodes) and a graph and returns a weighted crossing count. + // + // Pre-conditions: + // 1. Input graph must be simple (not a multigraph), directed, and include + // only simple edges. + // 2. Edges in the input graph must have assigned weights. 
+ // + // Post-conditions: + // 1. The graph and layering matrix are left unchanged. + // + // This algorithm is derived from Barth, et al., 'Bilayer Cross Counting.' + const crossCount = (g, layering) => { + let count = 0; + for (let i = 1; i < layering.length; i++) { + const northLayer = layering[i - 1]; + const southLayer = layering[i]; + // Sort all of the edges between the north and south layers by their position in the north layer and then the south. + // Map these edges to the position of their head in the south layer. + const southPos = {}; + for (let j = 0; j < southLayer.length; j++) { + southPos[southLayer[j]] = j; + } + const southEntries = []; + for (const v of northLayer) { + const entries = []; + for (const e of g.node(v).out) { + entries.push({ + pos: southPos[e.w], + weight: e.label.weight + }); + } + entries.sort((a, b) => a.pos - b.pos); + for (const entry of entries) { + southEntries.push(entry); + } + } + // Build the accumulator tree + let firstIndex = 1; + while (firstIndex < southLayer.length) { + firstIndex <<= 1; + } + const treeSize = 2 * firstIndex - 1; + firstIndex -= 1; + const tree = Array.from(new Array(treeSize), () => 0); + // Calculate the weighted crossings + for (const entry of southEntries) { + let index = entry.pos + firstIndex; + tree[index] += entry.weight; + let weightSum = 0; + while (index > 0) { + if (index % 2) { + weightSum += tree[index + 1]; + } + index = (index - 1) >> 1; + tree[index] += entry.weight; + } + count += entry.weight * weightSum; + } + } + return count; + }; + // Assigns an initial order value for each node by performing a DFS search + // starting from nodes in the first rank. Nodes are assigned an order in their + // rank as they are first visited. + // + // This approach comes from Gansner, et al., 'A Technique for Drawing Directed + // Graphs.' + // + // Returns a layering matrix with an array per layer and each layer sorted by + // the order of its nodes. + const initOrder = (g) => { + const visited = new Set(); + const nodes = Array.from(g.nodes.values()).filter((node) => g.children(node.v).length === 0); + let maxRank = undefined; + for (const node of nodes) { + const rank = node.label.rank; + if (maxRank === undefined || (rank !== undefined && rank > maxRank)) { + maxRank = rank; + } + } + if (maxRank !== undefined) { + const layers = Array.from(new Array(maxRank + 1), () => []); + const queue = nodes.sort((a, b) => a.label.rank - b.label.rank).map((node) => node.v).reverse(); + while (queue.length > 0) { + const v = queue.shift(); + if (!visited.has(v)) { + visited.add(v); + const rank = g.node(v).label.rank; + layers[rank].push(v); + for (const w of g.successors(v)) { + queue.push(w); + } + } + } + return layers; + } + return []; + }; + // Constructs a graph that can be used to sort a layer of nodes. + // The graph will contain all base and subgraph nodes from the requested layer in their original + // hierarchy and any edges that are incident on these nodes and are of the type requested by the 'relationship' parameter. + // + // Nodes from the requested rank that do not have parents are assigned a root node in the output graph, + // which is set in the root graph attribute. + // This makes it easy to walk the hierarchy of movable nodes during ordering. + // + // Pre-conditions: + // 1. Input graph is a DAG + // 2. Base nodes in the input graph have a rank attribute + // 3. Subgraph nodes in the input graph have minRank and maxRank attributes + // 4. Edges have an assigned weight + // + // Post-conditions: + // 1. Output graph has all nodes in the movable rank with preserved hierarchy. + // 2. Root nodes in the movable layer are made children of the node + // indicated by the root attribute of the graph. + // 3. Non-movable nodes incident on movable nodes, selected by the + // relationship parameter, are included in the graph (without hierarchy). + // 4. Edges incident on movable nodes, selected by the relationship parameter, are added to the output graph. + // 5. The weights for copied edges are aggregated as needed, since the output graph is not a multi-graph. + const buildLayerGraph = (g, nodes, rank, relationship) => { + let root; + while (g.hasNode((root = uniqueId('_root')))) { + // continue + } + const graph = new dagre.Graph({ compound: true }); + graph.state = { root: root }; + graph.setDefaultNodeLabel((v) => { + const node = g.node(v); + return node ? node.label : undefined; + }); + const length = nodes.length; + let i = 0; + while (i < length) { + const node = nodes[i++]; + const label = node.label; + if (label.rank === rank || 'minRank' in label && 'maxRank' in label && label.minRank <= rank && rank <= label.maxRank) { + const v = node.v; + graph.setNode(v); + const parent = g.parent(v); + graph.setParent(v, parent || root); + // This assumes we have only short edges! + if (relationship) { + for (const e of node.in) { + graph.setEdge(e.v, v, { weight: e.label.weight }); + } + } else { + for (const e of node.out) { + graph.setEdge(e.w, v, { weight: e.label.weight }); + } + } + if ('minRank' in label) { + graph.setNode(v, { + borderLeft: label.borderLeft[rank], + borderRight: label.borderRight[rank] + }); + } + } + } + return graph; + }; + let layering = initOrder(g); + const assignOrder = (g, layering) => { + for (const layer of layering) { + for (let i = 0; i < layer.length; i++) { + g.node(layer[i]).label.order = i; + } + } + }; + assignOrder(g, layering); + const rank = maxRank(g) || 0; + const downLayerGraphs = new Array(rank); + const upLayerGraphs = new Array(rank); + const nodes = Array.from(g.nodes.values()); + for (let i = 0; i < rank; i++) { + downLayerGraphs[i] = buildLayerGraph(g, nodes, i + 1, true); + upLayerGraphs[i] = buildLayerGraph(g, nodes, rank - i - 1, false); + } + let bestCC = Number.POSITIVE_INFINITY; + let best; + for (let i = 0, lastBest = 0; lastBest < 4; ++i, ++lastBest) { + sweepLayerGraphs(i % 2 ? 
downLayerGraphs : upLayerGraphs, i % 4 >= 2); + layering = buildLayerMatrix(g); + const cc = crossCount(g, layering); + if (cc < bestCC) { + lastBest = 0; + const length = layering.length; + best = new Array(length); + for (let j = 0; j < length; j++) { + best[j] = layering[j].slice(); + } + bestCC = cc; + } + } + assignOrder(g, best); + }; + + const insertSelfEdges = (g) => { + const layers = buildLayerMatrix(g); + for (const layer of layers) { + let orderShift = 0; + layer.forEach(function(v, i) { + const label = g.node(v).label; + label.order = i + orderShift; + if (label.selfEdges) { + for (const selfEdge of label.selfEdges) { + addDummyNode(g, 'selfedge', { + width: selfEdge.label.width, + height: selfEdge.label.height, + rank: label.rank, + order: i + (++orderShift), + e: selfEdge.e, + label: selfEdge.label + }, '_se'); + } + delete label.selfEdges; + } + }); + } + }; + + const coordinateSystem_swapWidthHeight = (g) => { + for (const node of g.nodes.values()) { + const label = node.label; + const w = label.width; + label.width = label.height; + label.height = w; + } + for (const e of g.edges.values()) { + const label = e.label; + const w = label.width; + label.width = label.height; + label.height = w; + } + }; + const coordinateSystem_adjust = (g) => { + const rankDir = g.layout.rankdir.toLowerCase(); + if (rankDir === 'lr' || rankDir === 'rl') { + coordinateSystem_swapWidthHeight(g); + } + }; + const coordinateSystem_undo = (g) => { + const rankDir = g.layout.rankdir.toLowerCase(); + if (rankDir === 'bt' || rankDir === 'rl') { + for (const node of g.nodes.values()) { + node.label.y = -node.label.y; + } + for (const e of g.edges.values()) { + const edge = e.label; + for (const attr of edge.points) { + attr.y = -attr.y; + } + if ('y' in edge) { + edge.y = -edge.y; + } + } + } + if (rankDir === 'lr' || rankDir === 'rl') { + const swapXYOne = (attrs) => { + const x = attrs.x; + attrs.x = attrs.y; + attrs.y = x; + }; + for (const node of g.nodes.values()) { + swapXYOne(node.label); + } + for (const e of g.edges.values()) { + const edge = e.label; + for (const e of edge.points) { + swapXYOne(e); + } + if (edge.x !== undefined) { + swapXYOne(edge); + } + } + coordinateSystem_swapWidthHeight(g); + } + }; + + const position = (g) => { + const addConflict = (conflicts, v, w) => { + if (v > w) { + const tmp = v; + v = w; + w = tmp; + } + let conflictsV = conflicts[v]; + if (!conflictsV) { + conflicts[v] = conflictsV = {}; + } + conflictsV[w] = true; + }; + const hasConflict = (conflicts, v, w) => { + if (v > w) { + const tmp = v; + v = w; + w = tmp; + } + return conflicts[v] && w in conflicts[v]; + }; + const buildBlockGraph = (g, layering, root, reverseSep) => { + const nodeSep = g.layout.nodesep; + const edgeSep = g.layout.edgesep; + const blockGraph = new dagre.Graph(); + for (const layer of layering) { + let u; + for (const v of layer) { + const vRoot = root[v]; + blockGraph.setNode(vRoot, {}); + if (u) { + const uRoot = root[u]; + const vLabel = g.node(v).label; + const wLabel = g.node(u).label; + let sum = 0; + let delta; + sum += vLabel.width / 2; + if ('labelpos' in vLabel) { + switch (vLabel.labelpos) { + case 'l': delta = -vLabel.width / 2; break; + case 'r': delta = vLabel.width / 2; break; + default: throw new dagre.Error(`Unsupported label position '${vLabel.labelpos}'.`); + } + } + if (delta) { + sum += reverseSep ? delta : -delta; + } + delta = 0; + sum += (vLabel.dummy ? edgeSep : nodeSep) / 2; + sum += (wLabel.dummy ? 
edgeSep : nodeSep) / 2; + sum += wLabel.width / 2; + if ('labelpos' in wLabel) { + switch (wLabel.labelpos) { + case 'l': delta = wLabel.width / 2; break; + case 'r': delta = -wLabel.width / 2; break; + default: throw new dagre.Error(`Unsupported label position '${wLabel.labelpos}'.`); + } + } + if (delta) { + sum += reverseSep ? delta : -delta; + } + const edge = blockGraph.edge(uRoot, vRoot); + const max = Math.max(sum, edge ? edge.label : 0); + if (edge) { + edge.label = max; + } else { + blockGraph.setEdge(uRoot, vRoot, max); + } + } + u = v; + } + } + return blockGraph; + }; + // Try to align nodes into vertical 'blocks' where possible. + // This algorithm attempts to align a node with one of its median neighbors. + // If the edge connecting a neighbor is a type-1 conflict then we ignore that possibility. + // If a previous node has already formed a block with a node after the node we're trying to form a block with, + // we also ignore that possibility - our blocks would be split in that scenario. + const verticalAlignment = (layering, conflicts, neighborFn) => { + const root = {}; + const align = {}; + const pos = {}; + // We cache the position here based on the layering because the graph and layering may be out of sync. + // The layering matrix is manipulated to generate different extreme alignments. + for (const layer of layering) { + let order = 0; + for (const v of layer) { + root[v] = v; + align[v] = v; + pos[v] = order; + order++; + } + } + for (const layer of layering) { + let prevIdx = -1; + for (const v of layer) { + let ws = neighborFn(v); + if (ws.length > 0) { + ws = ws.sort((a, b) => pos[a] - pos[b]); + const mp = (ws.length - 1) / 2.0; + const il = Math.ceil(mp); + for (let i = Math.floor(mp); i <= il; i++) { + const w = ws[i]; + if (align[v] === v && prevIdx < pos[w] && !hasConflict(conflicts, v, w)) { + align[w] = v; + align[v] = root[v] = root[w]; + prevIdx = pos[w]; + } + } + } + } + } + return { root: root, align: align }; + }; + const horizontalCompaction = (g, layering, root, align, reverseSep) => { + // This portion of the algorithm differs from BK due to a number of problems. + // Instead of their algorithm we construct a new block graph and do two sweeps. + const blockG = buildBlockGraph(g, layering, root, reverseSep); + const borderType = reverseSep ? 'borderLeft' : 'borderRight'; + const xs = {}; + // First pass, places blocks with the smallest possible coordinates. + if (blockG.nodes.size > 0) { + const stack = Array.from(blockG.nodes.keys()); + const visited = new Set(); + while (stack.length > 0) { + const v = stack.pop(); + if (visited.has(v)) { + let max = 0; + for (const e of blockG.node(v).in) { + max = Math.max(max, xs[e.v] + e.label); + } + xs[v] = max; + } else { + visited.add(v); + stack.push(v); + for (const w of blockG.predecessors(v)) { + stack.push(w); + } + } + } + } + // Second pass, removes unused space by moving blocks to the greatest coordinates without violating separation. 
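+ // (Informal sketch of the two sweeps: the pass above is a longest-path layering of the block graph, xs[v] = max(xs[u] + sep(u, v)) over in-edges; the pass below pulls each block toward min(xs[w] - sep(v, w)) over its out-edges, so only genuinely free space is removed.)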
+ if (blockG.nodes.size > 0) { + const stack = Array.from(blockG.nodes.keys()); + const visited = new Set(); + while (stack.length > 0) { + const v = stack.pop(); + if (visited.has(v)) { + let min = Number.POSITIVE_INFINITY; + for (const e of blockG.node(v).out) { + min = Math.min(min, xs[e.w] - e.label); + } + const label = g.node(v).label; + if (min !== Number.POSITIVE_INFINITY && label.borderType !== borderType) { + xs[v] = Math.max(xs[v], min); + } + } else { + visited.add(v); + stack.push(v); + for (const w of blockG.successors(v)) { + stack.push(w); + } + } + } + } + // Assign x coordinates to all nodes + for (const v of Object.values(align)) { + xs[v] = xs[root[v]]; + } + return xs; + }; + // Marks all edges in the graph with a type-1 conflict with the 'type1Conflict' property. + // A type-1 conflict is one where a non-inner segment crosses an inner segment. + // An inner segment is an edge with both incident nodes marked with the 'dummy' property. + // + // This algorithm scans layer by layer, starting with the second, for type-1 + // conflicts between the current layer and the previous layer. For each layer + // it scans the nodes from left to right until it reaches one that is incident + // on an inner segment. It then scans predecessors to determine if they have + // edges that cross that inner segment. At the end a final scan is done for all + // nodes on the current rank to see if they cross the last visited inner segment. + // + // This algorithm (safely) assumes that a dummy node will only be incident on a + // single node in the layers being scanned. + const findType1Conflicts = (g, layering) => { + const conflicts = {}; + if (layering.length > 0) { + let [prev] = layering; + for (let k = 1; k < layering.length; k++) { + const layer = layering[k]; + // last visited node in the previous layer that is incident on an inner segment. + let k0 = 0; + // Tracks the last node in this layer scanned for crossings with a type-1 segment. + let scanPos = 0; + const prevLayerLength = prev.length; + const lastNode = layer[layer.length - 1]; + for (let i = 0; i < layer.length; i++) { + const v = layer[i]; + const w = g.node(v).label.dummy ? g.predecessors(v).find((u) => g.node(u).label.dummy) : null; + if (w || v === lastNode) { + const k1 = w ? 
g.node(w).label.order : prevLayerLength; + for (const scanNode of layer.slice(scanPos, i + 1)) { + for (const u of g.predecessors(scanNode)) { + const uLabel = g.node(u).label; + const uPos = uLabel.order; + if ((uPos < k0 || k1 < uPos) && !(uLabel.dummy && g.node(scanNode).label.dummy)) { + addConflict(conflicts, u, scanNode); + } + } + } + scanPos = i + 1; + k0 = k1; + } + } + prev = layer; + } + } + return conflicts; + }; + const findType2Conflicts = (g, layering) => { + const conflicts = {}; + const scan = (south, southPos, southEnd, prevNorthBorder, nextNorthBorder) => { + let v; + for (let i = southPos; i < southEnd; i++) { + v = south[i]; + if (g.node(v).label.dummy) { + for (const u of g.predecessors(v)) { + const uNode = g.node(u).label; + if (uNode.dummy && (uNode.order < prevNorthBorder || uNode.order > nextNorthBorder)) { + addConflict(conflicts, u, v); + } + } + } + } + }; + if (layering.length > 0) { + let [north] = layering; + for (let i = 1; i < layering.length; i++) { + const south = layering[i]; + let prevNorthPos = -1; + let nextNorthPos; + let southPos = 0; + south.forEach(function(v, southLookahead) { + if (g.node(v).label.dummy === 'border') { + const predecessors = g.predecessors(v); + if (predecessors.length) { + nextNorthPos = g.node(predecessors[0]).label.order; + scan(south, southPos, southLookahead, prevNorthPos, nextNorthPos); + southPos = southLookahead; + prevNorthPos = nextNorthPos; + } + } + scan(south, southPos, south.length, nextNorthPos, north.length); + }); + north = south; + } + } + return conflicts; + }; + + g = asNonCompoundGraph(g); + const layering = buildLayerMatrix(g); + const ranksep = g.layout.ranksep; + // Assign y-coordinate based on rank + let y = 0; + for (const layer of layering) { + const maxHeight = layer.reduce((a, v) => Math.max(a, g.node(v).label.height), 0); + for (const v of layer) { + g.node(v).label.y = y + maxHeight / 2; + } + y += maxHeight + ranksep; + } + // Coordinate assignment based on Brandes and Köpf, 'Fast and Simple Horizontal Coordinate Assignment.' + const conflicts = Object.assign(findType1Conflicts(g, layering), findType2Conflicts(g, layering)); + const xss = {}; + for (const vertical of ['u', 'd']) { + let adjustedLayering = vertical === 'u' ? layering : Object.values(layering).reverse(); + for (const horizontal of ['l', 'r']) { + if (horizontal === 'r') { + adjustedLayering = adjustedLayering.map((layer) => Object.values(layer).reverse()); + } + const neighborFn = (vertical === 'u' ? g.predecessors : g.successors).bind(g); + const align = verticalAlignment(adjustedLayering, conflicts, neighborFn); + const xs = horizontalCompaction(g, adjustedLayering, align.root, align.align, horizontal === 'r'); + if (horizontal === 'r') { + for (const entry of Object.entries(xs)) { + xs[entry[0]] = -entry[1]; + } + } + xss[vertical + horizontal] = xs; + } + } + // Find the smallest-width alignment: returns the alignment with the smallest width among the given alignments. 
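+ // (Illustrative numbers, not computed here: if the four candidate widths were { ul: 120, ur: 110, dl: 130, dr: 115 }, the 'ur' alignment would be chosen and the other three shifted to share its extreme coordinate before the per-node median balancing at the end of this function.)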
+ let minWidth = Number.POSITIVE_INFINITY; + let minValue = undefined; + for (const xs of Object.values(xss)) { + let max = Number.NEGATIVE_INFINITY; + let min = Number.POSITIVE_INFINITY; + for (const [v, x] of Object.entries(xs)) { + const halfWidth = g.node(v).label.width / 2; + max = Math.max(x + halfWidth, max); + min = Math.min(x - halfWidth, min); + } + const width = max - min; + if (width < minWidth) { + minWidth = width; + minValue = xs; + } + } + // Align the coordinates of each of the layout alignments such that + // left-biased alignments have their minimum coordinate at the same point as + // the minimum coordinate of the smallest width alignment and right-biased + // alignments have their maximum coordinate at the same point as the maximum + // coordinate of the smallest width alignment. + const alignTo = minValue; + const range = (values) => { + let min = Number.POSITIVE_INFINITY; + let max = Number.NEGATIVE_INFINITY; + for (const value of values) { + if (value < min) { + min = value; + } + if (value > max) { + max = value; + } + } + return [ min, max ]; + }; + const alignToRange = range(Object.values(alignTo)); + for (const vertical of ['u', 'd']) { + for (const horizontal of ['l', 'r']) { + const alignment = vertical + horizontal; + const xs = xss[alignment]; + let delta; + if (xs !== alignTo) { + const vsValsRange = range(Object.values(xs)); + delta = horizontal === 'l' ? alignToRange[0] - vsValsRange[0] : alignToRange[1] - vsValsRange[1]; + if (delta) { + const list = {}; + for (const key of Object.keys(xs)) { + list[key] = xs[key] + delta; + } + xss[alignment] = list; + } + } + } + } + // balance + const align = g.layout.align; + if (align) { + const xs = xss[align.toLowerCase()]; + for (const v of Object.keys(xss.ul)) { + g.node(v).label.x = xs[v]; + } + } else { + for (const v of Object.keys(xss.ul)) { + const xs = [ xss.ul[v], xss.ur[v], xss.dl[v], xss.dr[v] ].sort((a, b) => a - b); + g.node(v).label.x = (xs[1] + xs[2]) / 2; + } + } + }; + + const positionSelfEdges = (g) => { + for (const node of g.nodes.values()) { + const label = node.label; + if (label.dummy === 'selfedge') { + const v = node.v; + const selfNode = g.node(label.e.v).label; + const x = selfNode.x + selfNode.width / 2; + const y = selfNode.y; + const dx = label.x - x; + const dy = selfNode.height / 2; + g.setEdge(label.e.v, label.e.w, label.label); + g.removeNode(v); + label.label.points = [ + { x: x + 2 * dx / 3, y: y - dy }, + { x: x + 5 * dx / 6, y: y - dy }, + { x: x + dx , y: y }, + { x: x + 5 * dx / 6, y: y + dy }, + { x: x + 2 * dx / 3, y: y + dy } + ]; + label.label.x = label.x; + label.label.y = label.y; + } + } + }; + + const removeBorderNodes = (g) => { + for (const node of g.nodes.values()) { + const v = node.v; + if (g.children(v).length) { + const label = node.label; + const t = g.node(label.borderTop).label; + const b = g.node(label.borderBottom).label; + const l = g.node(label.borderLeft[label.borderLeft.length - 1]).label; + const r = g.node(label.borderRight[label.borderRight.length - 1]).label; + label.width = Math.abs(r.x - l.x); + label.height = Math.abs(b.y - t.y); + label.x = l.x + label.width / 2; + label.y = t.y + label.height / 2; + } + } + for (const node of g.nodes.values()) { + if (node.label.dummy === 'border') { + g.removeNode(node.v); + } + } + }; + + const fixupEdgeLabelCoords = (g) => { + for (const e of g.edges.values()) { + const edge = e.label; + if ('x' in edge) { + if (edge.labelpos === 'l' || edge.labelpos === 'r') { + edge.width -= edge.labeloffset; + } + 
switch (edge.labelpos) { + case 'l': edge.x -= edge.width / 2 + edge.labeloffset; break; + case 'r': edge.x += edge.width / 2 + edge.labeloffset; break; + default: throw new dagre.Error(`Unsupported label position '${edge.labelpos}'.`); + } + } + } + }; + + const translateGraph = (g) => { + let minX = Number.POSITIVE_INFINITY; + let maxX = 0; + let minY = Number.POSITIVE_INFINITY; + let maxY = 0; + const getExtremes = (attrs) => { + const x = attrs.x; + const y = attrs.y; + const w = attrs.width; + const h = attrs.height; + minX = Math.min(minX, x - w / 2); + maxX = Math.max(maxX, x + w / 2); + minY = Math.min(minY, y - h / 2); + maxY = Math.max(maxY, y + h / 2); + }; + for (const node of g.nodes.values()) { + getExtremes(node.label); + } + for (const e of g.edges.values()) { + const edge = e.label; + if ('x' in edge) { + getExtremes(edge); + } + } + for (const node of g.nodes.values()) { + node.label.x -= minX; + node.label.y -= minY; + } + for (const e of g.edges.values()) { + const edge = e.label; + for (const p of edge.points) { + p.x -= minX; + p.y -= minY; + } + if ('x' in edge) { + edge.x -= minX; + } + if ('y' in edge) { + edge.y -= minY; + } + } + g.state.width = maxX - minX; + g.state.height = maxY - minY; + }; + + const assignNodeIntersects = (g) => { + // Finds where a line starting at point ({x, y}) would intersect a rectangle + // ({x, y, width, height}) if it were pointing at the rectangle's center. + const intersectRect = (rect, point) => { + const x = rect.x; + const y = rect.y; + // Rectangle intersection algorithm from: http://math.stackexchange.com/questions/108113/find-edge-between-two-boxes + const dx = point.x - x; + const dy = point.y - y; + if (dx === 0 && dy === 0) { + throw new Error('Not possible to find intersection inside of the rectangle'); + } + let w = rect.width / 2; + let h = rect.height / 2; + if (Math.abs(dy) * w > Math.abs(dx) * h) { + // Intersection is top or bottom of rect. + h = dy < 0 ? -h : h; + return { x: x + (h * dx / dy), y: y + h }; + } + // Intersection is left or right of rect. + w = dx < 0 ? 
-w : w; + return { x: x + w, y: y + (w * dy / dx) }; + }; + for (const e of g.edges.values()) { + const edge = e.label; + const vNode = e.vNode.label; + const wNode = e.wNode.label; + let p1; + let p2; + if (!edge.points) { + edge.points = []; + p1 = wNode; + p2 = vNode; + } else { + [p1] = edge.points; + p2 = edge.points[edge.points.length - 1]; + } + edge.points.unshift(intersectRect(vNode, p1)); + edge.points.push(intersectRect(wNode, p2)); + } + }; + + time(' makeSpaceForEdgeLabels', () => makeSpaceForEdgeLabels(g)); + time(' removeSelfEdges', () => removeSelfEdges(g)); + time(' acyclic_run', () => acyclic_run(g)); + time(' nestingGraph_run', () => nestingGraph_run(g)); + time(' rank', () => rank(asNonCompoundGraph(g))); + time(' injectEdgeLabelProxies', () => injectEdgeLabelProxies(g)); + time(' removeEmptyRanks', () => removeEmptyRanks(g)); + time(' nestingGraph_cleanup', () => nestingGraph_cleanup(g)); + time(' assignRankMinMax', () => assignRankMinMax(g)); + time(' removeEdgeLabelProxies', () => removeEdgeLabelProxies(g)); + time(' normalize', () => normalize(g)); + time(' parentDummyChains', () => parentDummyChains(g)); + time(' addBorderSegments', () => addBorderSegments(g)); + time(' order', () => order(g)); + time(' insertSelfEdges', () => insertSelfEdges(g)); + time(' coordinateSystem_adjust', () => coordinateSystem_adjust(g)); + time(' position', () => position(g)); + time(' positionSelfEdges', () => positionSelfEdges(g)); + time(' removeBorderNodes', () => removeBorderNodes(g)); + time(' denormalize', () => denormalize(g)); + time(' fixupEdgeLabelCoords', () => fixupEdgeLabelCoords(g)); + time(' coordinateSystem_undo', () => coordinateSystem_undo(g)); + time(' translateGraph', () => translateGraph(g)); + time(' assignNodeIntersects', () => assignNodeIntersects(g)); + time(' acyclic_undo', () => acyclic_undo(g)); + }; + + // Copies final layout information from the layout graph back to the input graph. + // This process only copies whitelisted attributes from the layout graph to the input graph, + // so it serves as a good place to determine what attributes can influence layout. + const updateSourceGraph = (graph, g) => { + for (const node of graph.nodes.values()) { + const label = node.label; + if (label) { + const v = node.v; + const layoutLabel = g.node(v).label; + label.x = layoutLabel.x; + label.y = layoutLabel.y; + if (g.children(v).length) { + label.width = layoutLabel.width; + label.height = layoutLabel.height; + } + } + } + for (const e of graph.edges.values()) { + const label = g.edge(e.v, e.w).label; + e.label.points = label.points; + if ('x' in label) { + e.label.x = label.x; + e.label.y = label.y; + } + } + graph.state = graph.state || {}; + graph.state.width = g.state.width; + graph.state.height = g.state.height; + }; + + time('layout', () => { + const layoutGraph = + time(' buildLayoutGraph', () => buildLayoutGraph(graph)); + time(' runLayout', () => runLayout(layoutGraph, time)); + time(' updateSourceGraph', () => updateSourceGraph(graph, layoutGraph)); + }); +}; + +dagre.Graph = class { + + constructor(layout) { + layout = layout || {}; + this._directed = 'directed' in layout ? layout.directed : true; + this._compound = 'compound' in layout ? 
layout.compound : false; + this.state = {}; + this.layout = {}; + this._defaultNodeLabelFn = () => { + return undefined; + }; + this.nodes = new Map(); + this.edges = new Map(); + if (this._compound) { + this._parent = {}; + this._children = {}; + this._children['\x00'] = {}; + } + } + + isDirected() { + return this._directed; + } + + isCompound() { + return this._compound; + } + + setDefaultNodeLabel(newDefault) { + this._defaultNodeLabelFn = newDefault; + } + + setNode(v, label) { + const node = this.nodes.get(v); + if (node) { + if (label) { + node.label = label; + } + } else { + const node = { label: label ? label : this._defaultNodeLabelFn(v), in: [], out: [], predecessors: {}, successors: {}, v: v }; + this.nodes.set(v, node); + if (this._compound) { + this._parent[v] = '\x00'; + this._children[v] = {}; + this._children['\x00'][v] = true; + } + } + } + + node(v) { + return this.nodes.get(v); + } + + hasNode(v) { + return this.nodes.has(v); + } + + removeNode(v) { + const node = this.nodes.get(v); + if (node) { + if (this._compound) { + delete this._children[this._parent[v]][v]; + delete this._parent[v]; + for (const child of this.children(v)) { + this.setParent(child); + } + delete this._children[v]; + } + for (const edge of node.in) { + this.removeEdge(edge); + } + for (const edge of node.out) { + this.removeEdge(edge); + } + this.nodes.delete(v); + } + } + + setParent(v, parent) { + if (!this._compound) { + throw new Error('Cannot set parent in a non-compound graph'); + } + if (parent) { + for (let ancestor = parent; ancestor !== undefined; ancestor = this.parent(ancestor)) { + if (ancestor === v) { + throw new Error(`Setting ${parent} as parent of ${v} would create a cycle.`); + } + } + this.setNode(parent); + } else { + parent = '\x00'; + } + delete this._children[this._parent[v]][v]; + this._parent[v] = parent; + this._children[parent][v] = true; + } + + parent(v) { + if (this._compound) { + const parent = this._parent[v]; + if (parent !== '\x00') { + return parent; + } + } + return null; + } + + children(v) { + if (this._compound) { + return Object.keys(this._children[v === undefined ? 
'\x00' : v]); + } else if (v === undefined) { + return this.nodes.keys(); + } else if (this.hasNode(v)) { + return []; + } + return null; + } + + predecessors(v) { + return Object.keys(this.nodes.get(v).predecessors); + } + + successors(v) { + return Object.keys(this.nodes.get(v).successors); + } + + neighbors(v) { + return Array.from(new Set(this.predecessors(v).concat(this.successors(v)))); + } + + edge(v, w) { + return this.edges.get(this._edgeKey(this._directed, v, w)); + } + + setEdge(v, w, label, name) { + const key = this._edgeKey(this._directed, v, w, name); + const edge = this.edges.get(key); + if (edge) { + edge.label = label; + } else { + if (!this._directed && v > w) { + const tmp = v; + v = w; + w = tmp; + } + const edge = { label: label, v: v, w: w, name: name, key: key, vNode: null, wNode: null }; + this.edges.set(key, edge); + this.setNode(v); + this.setNode(w); + const wNode = this.nodes.get(w); + const vNode = this.nodes.get(v); + edge.wNode = wNode; + edge.vNode = vNode; + const incrementOrInitEntry = (map, k) => { + if (map[k]) { + map[k]++; + } else { + map[k] = 1; + } + }; + incrementOrInitEntry(wNode.predecessors, v); + incrementOrInitEntry(vNode.successors, w); + wNode.in.push(edge); + vNode.out.push(edge); + } + } + + removeEdge(edge) { + const key = edge.key; + const v = edge.v; + const w = edge.w; + const decrementOrRemoveEntry = (map, k) => { + if (!--map[k]) { + delete map[k]; + } + }; + const wNode = edge.wNode; + const vNode = edge.vNode; + decrementOrRemoveEntry(wNode.predecessors, v); + decrementOrRemoveEntry(vNode.successors, w); + wNode.in = wNode.in.filter((edge) => edge.key !== key); + vNode.out = vNode.out.filter((edge) => edge.key !== key); + this.edges.delete(key); + } + + _edgeKey(isDirected, v, w, name) { + if (!isDirected && v > w) { + return name ? `${w}:${v}:${name}` : `${w}:${v}:`; + } + return name ? 
`${v}:${w}:${name}` : `${v}:${w}:`; + } + + toString() { + return [ + '[nodes]', Array.from(this.nodes.values()).map((n) => JSON.stringify(n.label)).join('\n'), + '[edges]', Array.from(this.edges.values()).map((e) => JSON.stringify(e.label)).join('\n'), + '[parents]', JSON.stringify(this._parent, null, 2), + '[children]', JSON.stringify(this._children, null, 2) + ].join('\n'); + } +}; + +export const { layout, Graph } = dagre; diff --git a/darknet-metadata.json b/darknet-metadata.json new file mode 100644 index 00000000000..9e19d0b48cd --- /dev/null +++ b/darknet-metadata.json @@ -0,0 +1,481 @@ +[ + { + "name": "avgpool", + "category": "Pool" + }, + { + "name": "batchnorm", + "category": "Normalization" + }, + { + "name": "connected", + "category": "Layer", + "attributes": [ + { "name": "output", "type": "int32", "visible": false, "default": 1 }, + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" }, + { "name": "batch_normalize", "type": "int32", "default": 0 } + ] + }, + { + "name": "conv_lstm", + "category": "Layer", + "attributes": [ + { "name": "batch_normalize", "type": "int32", "default": 0 }, + { "name": "activation", "type": "string", "default": "linear", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" }, + { "name": "size", "type": "int32", "default": 3 }, + { "name": "stride", "type": "int32", "default": 1 }, + { "name": "dilation", "default": 1 }, + { "name": "groups", "type": "int32", "default": 1 }, + { "name": "padding", "type": "int32", "default": 0 }, + { "name": "pad", "type": "int32", "default": 0 }, + { "name": "xnor", "type": "int32", "default": 0 }, + { "name": "shortcut", "default": 0 }, + { "name": "output", "type": "int32", "default": 1 }, + { "name": "state_constrain", "type": "int32", "default": 16 }, + { "name": "peephole", "type": "int32", "default": 0 } + ] + }, + { + "name": "convolutional", + "category": "Layer", + "attributes": [ + { "name": "filters", "type": "int32", "default": 1 }, + { "name": "size", "type": "int32", "default": 1 }, + { "name": "stride", "type": "int32", "default": 1 }, + { "name": "stride_x", "type": "int32", "default": -1 }, + { "name": "stride_y", "type": "int32", "default": -1 }, + { "name": "groups", "type": "int32", "default": 1 }, + { "name": "padding", "type": "int32", "default": 0 }, + { "name": "pad", "type": "int32", "default": 0 }, + { "name": "dilation", "default": 1 }, + { "name": "share_index", "default": -1000000000 }, + { "name": "binary", "type": "int32", "default": 0 }, + { "name": "xnor", "type": "int32", "default": 0 }, + { "name": "bin_output", "type": "int32", "default": 0 }, + { "name": "flipped", "type": "int32", "default": 0 }, + { "name": "dot", "type": "float32", "default": 0 }, + { "name": "batch_normalize", "type": "int32", "default": 0 }, + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" } + ] + }, + { + "name": "cost", + "category": "Activation", + "attributes": [ + { "name": "type", "type": "string", "default": "sse" }, + { "name": "scale", "type": "float32", "default": 1 }, + { "name": "ratio", "type": "float32", "default": 0 } + ] + }, + { + "name": "crnn", + "category": "Layer", + "attributes": [ + { "name": 
"batch_normalize", "type": "int32", "default": 0 }, + { "name": "activation", "type": "string", "default": "logistic" }, + { "name": "dilation", "default": 1 }, + { "name": "padding", "default": 0 }, + { "name": "pad", "type": "int32", "default": 0 }, + { "name": "groups", "type": "int32", "default": 1 }, + { "name": "xnor", "type": "int32", "default": 0 }, + { "name": "shortcut", "type": "int32", "default": 0 }, + { "name": "output_filters", "default": 1 }, + { "name": "hidden_filters", "default": 1 } + ] + }, + { + "name": "crop", + "category": "Shape", + "attributes": [ + { "name": "crop_height", "type": "int32", "default": 1 }, + { "name": "crop_width", "type": "int32", "default": 1 }, + { "name": "flip", "type": "int32", "default": 0 }, + { "name": "exposure", "type": "float32", "default": 1 }, + { "name": "saturation", "type": "float32", "default": 1 }, + { "name": "angle", "type": "float32", "default": 0 }, + { "name": "noadjust", "default": 0 }, + { "name": "shift", "type": "float32", "default": 0 } + ] + }, + { + "name": "deconvolutional", + "category": "Layer", + "attributes": [ + { "name": "filters", "type": "int32", "visible": false, "default": 1 }, + { "name": "size", "type": "int32", "default": 1 }, + { "name": "stride", "type": "int32", "default": 1 }, + { "name": "padding", "type": "int32", "default": 0 }, + { "name": "pad", "type": "int32", "default": 0 }, + { "name": "batch_normalize", "type": "int32", "default": 0 }, + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" } + ] + }, + { + "name": "detection", + "category": "Layer", + "attributes": [ + { "name": "classes", "type": "int32", "default": 1 }, + { "name": "coord", "type": "int32", "default": 1 }, + { "name": "num", "type": "int32", "default": 1 }, + { "name": "jitter", "type": "float32", "default": 0.2 }, + { "name": "coord_scale", "type": "float32", "default": 1 }, + { "name": "object_scale", "type": "float32", "default": 1 }, + { "name": "noobject_scale", "type": "float32", "default": 1 }, + { "name": "class_scale", "type": "float32", "default": 1 }, + { "name": "forced", "type": "int32", "default": 0 }, + { "name": "side", "type": "int32", "default": 7 }, + { "name": "softmax", "type": "int32", "default": 0 }, + { "name": "sqrt", "type": "int32", "default": 0 }, + { "name": "max", "type": "int32", "default": 30 }, + { "name": "rescore", "type": "int32", "default": 0 }, + { "name": "random", "type": "int32", "default": 0 }, + { "name": "reorg", "type": "int32", "default": 0 } + ] + }, + { + "name": "dropout", + "category": "Dropout", + "attributes": [ + { "name": "probability", "type": "float32", "default": 0.5 }, + { "name": "dropblock", "type": "int32", "default": 0 }, + { "name": "dropblock_size_rel", "type": "float32", "default": 0 }, + { "name": "dropblock_size_abs ", "type": "int32", "default": 7 } + ] + }, + { + "name": "elu", + "category": "Activation" + }, + { + "name": "gaussian_yolo", + "category": "Layer", + "attributes": [ + { "name": "classes", "type": "int32", "default": 20 }, + { "name": "num", "type": "int32", "default": 1 }, + { "name": "mask", "type": "string", "default": 0 }, + { "name": "jitter", "type": "float32", "default": 0.2 }, + { "name": "label_smooth_eps", "type": "float32", "default": 0 }, + { "name": "scale_x_y", "type": "float32", "default": 1 }, + { "name": "uc_normalizer", "type": "float32", "default": 1 }, + { "name": 
"iou_normalizer", "type": "float32", "default": 0.75 }, + { "name": "cls_normalizer", "type": "float32", "default": 1 }, + { "name": "iou_loss", "type": "string", "default": "mse", "description": "options are: mse, giou, diou, and ciou" }, + { "name": "max", "default": 90 }, + { "name": "ignore_thresh", "type": "float32", "default": 0.5 }, + { "name": "truth_thresh", "default": 1 }, + { "name": "iou_thresh", "type": "float32", "default": 1, "description": "recommended to use iou_thresh=0.213" }, + { "name": "random", "type": "int32", "default": 0 }, + { "name": "map", "type": "string", "default": 0 }, + { "name": "beta_nms", "type": "float32", "default": 0.6 }, + { "name": "nms_kind", "type": "string", "default": "default", "description": "options are: greedynms, diounms, cornersnms, or defaultnms" }, + { "name": "anchors", "type": "string", "default": 0 }, + { "name": "yolo_point", "type": "string", "default": "center", "description": "options are: center, left_top, and right_bottom" } + ] + }, + { + "name": "gru", + "category": "Layer", + "attributes": [ + { "name": "output", "type": "int32", "visible": false, "default": 1 }, + { "name": "batch_normalize", "type": "int32", "default": 0 } + ] + }, + { + "name": "h_swish", + "category": "Activation" + }, + { + "name": "hardtan", + "category": "Activation" + }, + { + "name": "leaky", + "category": "Activation" + }, + { + "name": "lhtan", + "category": "Activation" + }, + { + "name": "linear", + "category": "Activation" + }, + { + "name": "local", + "category": "Layer", + "attributes": [ + { "name": "filters", "type": "int32", "visible": false, "default": 1 }, + { "name": "size", "type": "int32", "default": 1 }, + { "name": "stride", "type": "int32", "default": 1 }, + { "name": "padding", "type": "int32", "default": 0 }, + { "name": "pad", "type": "int32", "default": 0 }, + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" } + ] + }, + { + "name": "loggy", + "category": "Activation" + }, + { + "name": "logistic", + "category": "Activation" + }, + { + "name": "lstm", + "category": "Layer", + "attributes": [ + { "name": "output", "type": "int32", "visible": false, "default": 1 }, + { "name": "batch_normalize", "type": "int32", "default": 0 } + ] + }, + { + "name": "maxpool", + "category": "Pool", + "attributes": [ + { "name": "stride", "type": "int32", "default": 1 }, + { "name": "stride_x", "type": "int32", "default": 1 }, + { "name": "stride_y", "type": "int32", "default": 1 }, + { "name": "size", "type": "int32", "default": 1 }, + { "name": "padding", "type": "int32", "default": 0 }, + { "name": "maxpool_depth", "type": "int32", "default": 0 }, + { "name": "out_channels", "default": 1 }, + { "name": "antialiasing", "type": "int32", "default": 0 } + ] + }, + { + "name": "mish", + "category": "Activation" + }, + { + "name": "net", + "attributes": [ + { "name": "batch", "type": "int32", "default": 1 }, + { "name": "max_batches", "type": "int32", "default": 0, "description": "Limits the maximum number of iterations" }, + { "name": "learning_rate", "type": "float32", "default": 0.001 }, + { "name": "momentum", "type": "float32", "default": 0.9 }, + { "name": "decay", "type": "float32", "default": 0.0001 }, + { "name": "subdivisions", "type": "int32", "default": 1, "description": "In concert with batch property, this greatly affect memory usage, minimal working number is recommended" }, + { 
"name": "time_steps", "type": "int32", "default": 1 }, + { "name": "notruth", "type": "int32", "default": 0 }, + { "name": "random", "type": "int32", "default": 0 }, + { "name": "adam", "type": "int32", "default": 0 }, + { "name": "B1", "type": "float32", "default": 0.9 }, + { "name": "B2", "type": "float32", "default": 0.999 }, + { "name": "eps", "type": "float32", "default": 1e-7 }, + { "name": "height", "type": "int32", "default": 0 }, + { "name": "width", "type": "int32", "default": 0 }, + { "name": "channels", "type": "int32", "default": 0 }, + { "name": "inputs", "type": "int32" }, + { "name": "max_crop", "type": "int32" }, + { "name": "min_crop", "type": "int32" }, + { "name": "max_ratio", "type": "float32" }, + { "name": "min_ratio", "type": "float32" }, + { "name": "center", "type": "int32", "default": 0 }, + { "name": "clip", "type": "int32", "default": 0 }, + { "name": "angle", "type": "float32", "default": 0 }, + { "name": "aspect", "type": "float32", "default": 1 }, + { "name": "saturation", "type": "float32", "default": 1 }, + { "name": "exposure", "type": "float32", "default": 1 }, + { "name": "hue", "type": "float32", "default": 0 }, + { "name": "power", "type": "float32", "default": 4 }, + { "name": "flip", "type": "int32", "default": 1, "description": "Enables augmentation method: horizontal flip" }, + { "name": "blur", "type": "int32", "default": 0, "description": "Enables augmentation method: backgound blurring" }, + { "name": "mixup", "type": "int32", "default": 0, "description": "Enables augmentation method: images mixup" }, + { "name": "cutmix", "type": "int32", "default": 0, "description": "Enables augmentation method: images cutmix" }, + { "name": "mosaic", "type": "int32", "default": 0, "description": "Enables augmentation method: images mosaicing" }, + { "name": "letter_box", "type": "int32", "default": 0, "description": "Enables letter-box resizing (keeping the aspect ratio)" }, + { "name": "policy", "type": "string", "default": "constant" }, + { "name": "burn_in", "type": "int32", "default": 0, "description": "Is used for MAP calculation: permit a minimal number of iteration before first MAP check" }, + { "name": "letter_box", "type": "int32", "default": 0 }, + { "name": "optimized_memory", "type": "int32", "default": 0, "description": "can offload memory from GPU into CPU at the cost of speed, 3 options are possible please look at: https://github.com/AlexeyAB/darknet/issues/4386" }, + { "name": "workspace_size_limit_MB", "type": "float32", "default": 1024 } + ] + }, + { + "name": "norm_chan", + "category": "Activation" + }, + { + "name": "norm_chan_softmax", + "category": "Activation" + }, + { + "name": "normalization", + "category": "Normalization", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0.0001 }, + { "name": "beta", "type": "float32", "default": 0.75 }, + { "name": "kappa", "type": "float32", "default": 1 }, + { "name": "size", "default": 5 } + ] + }, + { + "name": "plse", + "category": "Activation" + }, + { + "name": "ramp", + "category": "Activation" + }, + { + "name": "region", + "category": "Layer", + "attributes": [ + { "name": "classes", "type": "int32", "default": 20 }, + { "name": "coord", "default": 4 }, + { "name": "num", "default": 1 }, + { "name": "mask", "type": "string", "default": 0 }, + { "name": "jitter", "type": "float32", "default": 0.2 }, + { "name": "classfix", "type": "int32", "default": 0 }, + { "name": "coord_scale", "type": "float32", "default": 1 }, + { "name": "object_scale", "type": "float32", 
"default": 1 }, + { "name": "noobject_scale", "type": "float32", "default": 1 }, + { "name": "mask_scale", "type": "float32", "default": 1 }, + { "name": "class_scale", "type": "float32", "default": 1 }, + { "name": "bias_match", "type": "int32", "default": 0 }, + { "name": "focal_loss", "type": "int32", "default": 0 }, + { "name": "max", "type": "int32", "default": 90 }, + { "name": "softmax", "type": "int32", "default": 0 }, + { "name": "rescore", "type": "int32", "default": 0 }, + { "name": "thresh", "type": "float32", "default": 0.5 }, + { "name": "random", "type": "int32", "default": 0 }, + { "name": "map", "type": "string", "default": 0 }, + { "name": "tree", "type": "string", "default": 0 }, + { "name": "anchors", "type": "string", "default": 0 }, + { "name": "absolute", "default": 0 }, + { "name": "log", "default": 0 }, + { "name": "sqrt", "default": 0 } + ] + }, + { + "name": "relie", + "category": "Activation" + }, + { + "name": "relu", + "category": "Activation" + }, + { + "name": "relu6", + "category": "Activation" + }, + { + "name": "reorg", + "category": "Shape", + "attributes": [ + { "name": "stride", "default": 1 }, + { "name": "reverse", "type": "int32", "default": 0 } + ] + }, + { + "name": "rnn", + "category": "Layer", + "attributes": [ + { "name": "output", "type": "int32", "visible": false }, + { "name": "hidden", "visible": false, "default": 1 }, + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" }, + { "name": "groups", "type": "int32", "default": 1 }, + { "name": "xnor", "type": "int32", "default": 0 }, + { "name": "shortcut", "default": 0 }, + { "name": "logistic", "default": 0 }, + { "name": "batch_normalize", "type": "int32", "default": 0 } + ] + }, + { + "name": "route", + "category": "Tensor", + "attributes": [ + { "name": "groups_id", "type": "int32", "default": 0 }, + { "name": "groups", "type": "int32", "default": 1 } + ] + }, + { + "name": "sam", + "category": "Tensor", + "attributes": [ + { "name": "activation", "type": "string", "default": "linear", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU" }, + { "name": "from", "description": "This params link the layer to another one, the index of the layer is either positive in which case it's a direct address, if negative it's relative to the layer position" } + ] + }, + { + "name": "scale_channels", + "category": "Tensor", + "attributes": [ + { "name": "activation", "type": "string", "default": "linear", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU" }, + { "name": "scale_wh", "default": 0 }, + { "name": "from", "description": "This params link the layer to another one, the index of the layer is either positive in which case it's a direct address, if negative it's relative to the layer position" } + ] + }, + { + "name": "selu", + "category": "Activation" + }, + { + "name": "silu", + "category": "Activation" + }, + { + "name": "shortcut", + "category": "Tensor", + "attributes": [ + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" }, + { "name": "assisted_excitation", "default": 0 }, + { "name": "from", "description": "This params link the layer to 
another one; the layer index is either positive, in which case it is a direct address, or negative, in which case it is relative to the current layer's position" } + ] + }, + { + "name": "softmax", + "category": "Activation", + "attributes": [ + { "name": "groups", "type": "int32", "default": 1 }, + { "name": "temperature", "type": "float32", "default": 1 }, + { "name": "tree", "type": "string", "default": 0 }, + { "name": "spatial", "type": "int32", "default": 0 }, + { "name": "noloss", "type": "int32", "default": 0 } + ] + }, + { + "name": "stair", + "category": "Activation" + }, + { + "name": "swish", + "category": "Activation" + }, + { + "name": "tanh", + "category": "Activation" + }, + { + "name": "upsample", + "category": "Data", + "attributes": [ + { "name": "stride", "type": "int32", "default": 2 }, + { "name": "scale", "type": "float32", "default": 1 } + ] + }, + { + "name": "yolo", + "category": "Layer", + "attributes": [ + { "name": "classes", "type": "int32", "default": 20 }, + { "name": "num", "type": "int32", "default": 1 }, + { "name": "mask", "type": "int32[]", "default": 0 }, + { "name": "jitter", "type": "float32", "default": 0.2 }, + { "name": "label_smooth_eps", "type": "float32", "default": 0 }, + { "name": "scale_x_y", "type": "float32", "default": 1 }, + { "name": "iou_normalizer", "type": "float32", "default": 0.75 }, + { "name": "cls_normalizer", "type": "float32", "default": 1 }, + { "name": "iou_loss", "type": "string", "default": "mse", "description": "options are: mse, giou, diou, and ciou" }, + { "name": "focal_loss", "type": "int32", "default": 0 }, + { "name": "max", "type": "int32", "default": 90 }, + { "name": "ignore_thresh", "type": "float32", "default": 0.5 }, + { "name": "truth_thresh", "type": "float32", "default": 1 }, + { "name": "iou_thresh", "type": "float32", "default": 1, "description": "recommended to use iou_thresh=0.213" }, + { "name": "random", "type": "int32", "default": 0 }, + { "name": "map", "type": "string", "default": 0 }, + { "name": "nms_kind", "type": "string", "default": "default", "description": "options are: greedynms, diounms, cornersnms, or defaultnms" }, + { "name": "anchors", "type": "int32[]", "default": 0 } + ] + } +] \ No newline at end of file diff --git a/darknet.js b/darknet.js new file mode 100644 index 00000000000..436eb898324 --- /dev/null +++ b/darknet.js @@ -0,0 +1,1104 @@ + +import * as text from './text.js'; + +const darknet = {}; + +darknet.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'weights') { + const weights = darknet.Weights.open(context); + if (weights) { + return { name: 'darknet.weights', value: weights }; + } + return undefined; + } + try { + const reader = text.Reader.open(context.stream, 65536); + for (let line = reader.read(); line !== undefined; line = reader.read()) { + const content = line.trim(); + if (content.length > 0 && !content.startsWith('#')) { + if (content.startsWith('[') && content.endsWith(']')) { + return { name: 'darknet.model', value: context.stream }; + } + return undefined; + } + } + } catch (err) { + // continue regardless of error + } + return undefined; + } + + async open(context, target) { + const metadata = await context.metadata('darknet-metadata.json'); + const identifier = context.identifier; + const parts = identifier.split('.'); + parts.pop(); + const basename = parts.join('.'); + switch (target.name) { + case 'darknet.weights': { + const weights = target.value; + const 
name = `${basename}.cfg`; + const content = await context.fetch(name); + const reader = new darknet.Reader(content.stream, content.identifier); + return new darknet.Model(metadata, reader, weights); + } + case 'darknet.model': { + try { + const name = `${basename}.weights`; + const content = await context.fetch(name); + const weights = darknet.Weights.open(content); + const reader = new darknet.Reader(context.stream, context.identifier); + return new darknet.Model(metadata, reader, weights); + } catch (error) { + const reader = new darknet.Reader(context.stream, context.identifier); + return new darknet.Model(metadata, reader, null); + } + } + default: { + throw new darknet.Error(`Unsupported Darknet format '${target}'.`); + } + } + } +}; + +darknet.Model = class { + + constructor(metadata, reader, weights) { + this._graphs = [ new darknet.Graph(metadata, reader, weights) ]; + } + + get format() { + return 'Darknet'; + } + + get graphs() { + return this._graphs; + } +}; + +darknet.Graph = class { + + constructor(metadata, reader, weights) { + this._inputs = []; + this._outputs = []; + this._nodes = []; + const params = {}; + const sections = reader.read(); + const globals = new Map(); + const net = sections.shift(); + const option_find_int = (options, key, defaultValue) => { + let value = options[key]; + if (typeof value === 'string' && value.startsWith('$')) { + const key = value.substring(1); + value = globals.has(key) ? globals.get(key) : value; + } + if (value !== undefined) { + const number = parseInt(value, 10); + if (!Number.isInteger(number)) { + throw new darknet.Error(`Invalid int option '${JSON.stringify(options[key])}'.`); + } + return number; + } + return defaultValue; + }; + const option_find_str = (options, key, defaultValue) => { + const value = options[key]; + return value !== undefined ? value : defaultValue; + }; + const make_shape = (dimensions, source) => { + if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) { + throw new darknet.Error(`Invalid tensor shape '${JSON.stringify(dimensions)}' in '${source}'.`); + } + return new darknet.TensorShape(dimensions); + }; + const load_weights = (name, shape, visible) => { + const data = weights ? weights.read(4 * shape.reduce((a, b) => a * b, 1)) : null; + const type = new darknet.TensorType('float32', make_shape(shape, 'load_weights')); + const initializer = new darknet.Tensor(type, data); + const value = new darknet.Value('', null, initializer); + return new darknet.Argument(name, visible === false ? 
false : true, [ value ]); + }; + const load_batch_normalize_weights = (layer, prefix, size) => { + layer.weights.push(load_weights(`${prefix}scale`, [ size ], prefix === '')); + layer.weights.push(load_weights(`${prefix}mean`, [ size ], prefix === '')); + layer.weights.push(load_weights(`${prefix}variance`, [ size ], prefix === '')); + }; + const make_convolutional_layer = (layer, prefix, w, h, c, n, groups, size, stride_x, stride_y, padding, batch_normalize) => { + layer.out_w = Math.floor((w + 2 * padding - size) / stride_x) + 1; + layer.out_h = Math.floor((h + 2 * padding - size) / stride_y) + 1; + layer.out_c = n; + layer.out = layer.out_w * layer.out_h * layer.out_c; + layer.weights.push(load_weights(`${prefix}biases`, [ n ], prefix === '')); + if (batch_normalize) { + if (prefix) { + load_batch_normalize_weights(layer, prefix, n); + } else { + const batchnorm_layer = { weights: [] }; + load_batch_normalize_weights(batchnorm_layer, prefix, n); + layer.chain.push({ type: 'batchnorm', layer: batchnorm_layer }); + } + } + layer.weights.push(load_weights(`${prefix}weights`, [ Math.floor(c / groups), n, size, size ], prefix === '')); + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'make_convolutional_layer')); + }; + const make_connected_layer = (layer, prefix, inputs, outputs, batch_normalize) => { + layer.out_h = 1; + layer.out_w = 1; + layer.out_c = outputs; + layer.out = outputs; + layer.weights.push(load_weights(`${prefix}biases`, [ outputs ], prefix === '')); + if (batch_normalize) { + if (prefix) { + load_batch_normalize_weights(layer, prefix, outputs); + } else { + const batchnorm_layer = { weights: [] }; + load_batch_normalize_weights(batchnorm_layer, prefix, outputs); + layer.chain.push({ type: 'batchnorm', layer: batchnorm_layer }); + } + } + layer.weights.push(load_weights(`${prefix}weights`, [ inputs, outputs ], prefix === '')); + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'make_connected_layer')); + }; + if (sections.length === 0) { + throw new darknet.Error('Config file has no sections.'); + } + switch (net.type) { + case 'net': + case 'network': { + params.h = option_find_int(net.options, 'height', 0); + params.w = option_find_int(net.options, 'width', 0); + params.c = option_find_int(net.options, 'channels', 0); + params.inputs = option_find_int(net.options, 'inputs', params.h * params.w * params.c); + for (const key of Object.keys(net.options)) { + globals.set(key, net.options[key]); + } + break; + } + default: { + throw new darknet.Error(`Unexpected '[${net.type}]' section. First section must be [net] or [network].`); + } + } + const inputType = params.w && params.h && params.c ? 
+ new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'params-if')) : + new darknet.TensorType('float32', make_shape([ params.inputs ], 'params-else')); + const inputName = 'input'; + params.value = [ new darknet.Value(inputName, inputType, null) ]; + this._inputs.push(new darknet.Argument(inputName, true, params.value)); + for (let i = 0; i < sections.length; i++) { + const section = sections[i]; + section.name = i.toString(); + section.layer = { + inputs: [], + weights: [], + outputs: [ new darknet.Value(section.name, null, null) ], + chain: [] + }; + } + let infer = true; + for (let i = 0; i < sections.length; i++) { + const section = sections[i]; + const options = section.options; + const layer = section.layer; + layer.inputs.push(...params.value); + switch (section.type) { + case 'shortcut': { + let remove = true; + const from = options.from ? options.from.split(',').map((item) => Number.parseInt(item.trim(), 10)) : []; + for (const route of from) { + const index = route < 0 ? i + route : route; + const exists = index >= 0 && index < sections.length; + remove = exists && remove; + if (exists) { + const source = sections[index].layer; + layer.inputs.push(source.outputs[0]); + } + } + if (remove) { + delete options.from; + } + break; + } + case 'sam': + case 'scale_channels': { + const from = option_find_int(options, 'from', 0); + const index = from < 0 ? i + from : from; + if (index >= 0 && index < sections.length) { + const source = sections[index].layer; + layer.from = source; + layer.inputs.push(source.outputs[0]); + delete options.from; + } + break; + } + case 'route': { + layer.inputs = []; + layer.layers = []; + let remove = true; + const routes = options.layers ? options.layers.split(',').map((route) => Number.parseInt(route.trim(), 10)) : []; + for (const route of routes) { + const index = route < 0 ? i + route : route; + const exists = index >= 0 && index < sections.length; + remove = exists && remove; + if (exists) { + const source = sections[index].layer; + layer.inputs.push(source.outputs[0]); + layer.layers.push(source); + } + } + if (remove) { + delete options.layers; + } + break; + } + default: + break; + } + if (infer) { + switch (section.type) { + case 'conv': + case 'convolutional': + case 'deconvolutional': { + const shape = layer.inputs[0].type.shape.dimensions; + if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) { + throw new darknet.Error('Layer before convolutional layer must output image.'); + } + const size = option_find_int(options, 'size', 1); + const n = option_find_int(options, 'filters', 1); + const pad = option_find_int(options, 'pad', 0); + const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0); + let stride_x = option_find_int(options, 'stride_x', -1); + let stride_y = option_find_int(options, 'stride_y', -1); + if (stride_x < 1 || stride_y < 1) { + const stride = option_find_int(options, 'stride', 1); + stride_x = stride_x < 1 ? stride : stride_x; + stride_y = stride_y < 1 ? 
stride : stride_y; + } + const groups = option_find_int(options, 'groups', 1); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + const activation = option_find_str(options, 'activation', 'logistic'); + make_convolutional_layer(layer, '', params.w, params.h, params.c, n, groups, size, stride_x, stride_y, padding, batch_normalize); + if (activation !== 'logistic' && activation !== 'none') { + layer.chain.push({ type: activation }); + } + break; + } + case 'connected': { + const outputs = option_find_int(options, 'output', 1); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + const activation = option_find_str(options, 'activation', 'logistic'); + make_connected_layer(layer, '', params.inputs, outputs, batch_normalize); + if (activation !== 'logistic' && activation !== 'none') { + layer.chain.push({ type: activation }); + } + break; + } + case 'local': { + const shape = layer.inputs[0].type.shape.dimensions; + if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) { + throw new darknet.Error('Layer before local layer must output image.'); + } + const n = option_find_int(options, 'filters', 1); + const size = option_find_int(options, 'size', 1); + const stride = option_find_int(options, 'stride', 1); + const pad = option_find_int(options, 'pad', 0); + const activation = option_find_str(options, 'activation', 'logistic'); + layer.out_h = Math.floor((params.h - (pad ? 1 : size)) / stride) + 1; + layer.out_w = Math.floor((params.w - (pad ? 1 : size)) / stride) + 1; + layer.out_c = n; + layer.out = layer.out_w * layer.out_h * layer.out_c; + layer.weights.push(load_weights('weights', [ params.c, n, size, size, layer.out_h * layer.out_w ])); + layer.weights.push(load_weights('biases', [ layer.out_w * layer.out_h * layer.out_c ])); + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'local')); + if (activation !== 'logistic' && activation !== 'none') { + layer.chain.push({ type: activation }); + } + break; + } + case 'batchnorm': { + layer.out_h = params.h; + layer.out_w = params.w; + layer.out_c = params.c; + layer.out = params.inputs; + load_batch_normalize_weights(layer, '', layer.out_c); + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'batchnorm')); + break; + } + case 'activation': { + layer.out_h = params.h; + layer.out_w = params.w; + layer.out_c = params.c; + layer.out = params.inputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'activation')); + break; + } + case 'max': + case 'maxpool': { + const shape = layer.inputs[0].type.shape.dimensions; + if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) { + throw new darknet.Error('Layer before maxpool layer must output image.'); + } + const antialiasing = option_find_int(options, 'antialiasing', 0); + const stride = option_find_int(options, 'stride', 1); + const blur_stride_x = option_find_int(options, 'stride_x', stride); + const blur_stride_y = option_find_int(options, 'stride_y', stride); + const stride_x = antialiasing ? 1 : blur_stride_x; + const stride_y = antialiasing ? 
1 : blur_stride_y; + const size = option_find_int(options, 'size', stride); + const padding = option_find_int(options, 'padding', size - 1); + const out_channels = option_find_int(options, 'out_channels', 1); + const maxpool_depth = option_find_int(options, 'maxpool_depth', 0); + if (maxpool_depth) { + layer.out_c = out_channels; + layer.out_w = params.w; + layer.out_h = params.h; + } else { + layer.out_w = Math.floor((params.w + padding - size) / stride_x) + 1; + layer.out_h = Math.floor((params.h + padding - size) / stride_y) + 1; + layer.out_c = params.c; + } + if (antialiasing) { + const blur_size = antialiasing === 2 ? 2 : 3; + const blur_pad = antialiasing === 2 ? 0 : Math.floor(blur_size / 3); + layer.input_layer = { weights: [], outputs: layer.outputs, chain: [] }; + make_convolutional_layer(layer.input_layer, '', layer.out_h, layer.out_w, layer.out_c, layer.out_c, layer.out_c, blur_size, blur_stride_x, blur_stride_y, blur_pad, 0); + layer.out_w = layer.input_layer.out_w; + layer.out_h = layer.input_layer.out_h; + layer.out_c = layer.input_layer.out_c; + } else { + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'maxpool')); + } + layer.out = layer.out_w * layer.out_h * layer.out_c; + break; + } + case 'avgpool': { + const shape = layer.inputs[0].type.shape.dimensions; + if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) { + throw new darknet.Error('Layer before avgpool layer must output image.'); + } + layer.out_w = 1; + layer.out_h = 1; + layer.out_c = params.c; + layer.out = layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'avgpool')); + break; + } + case 'crnn': { + const size = option_find_int(options, 'size', 3); + const stride = option_find_int(options, 'stride', 1); + const output_filters = option_find_int(options, 'output', 1); + const hidden_filters = option_find_int(options, 'hidden', 1); + const groups = option_find_int(options, 'groups', 1); + const pad = option_find_int(options, 'pad', 0); + const padding = pad ? 
(size >> 1) : option_find_int(options, 'padding', 0); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + layer.input_layer = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.input_layer, 'input_', params.h, params.w, params.c, hidden_filters, groups, size, stride, stride, padding, batch_normalize); + layer.self_layer = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.self_layer, 'self_', params.h, params.w, hidden_filters, hidden_filters, groups, size, stride, stride, padding, batch_normalize); + layer.output_layer = { weights: [], outputs: layer.outputs, chain: [] }; + make_convolutional_layer(layer.output_layer, 'output_', params.h, params.w, hidden_filters, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.weights = layer.weights.concat(layer.input_layer.weights); + layer.weights = layer.weights.concat(layer.self_layer.weights); + layer.weights = layer.weights.concat(layer.output_layer.weights); + layer.out_h = layer.output_layer.out_h; + layer.out_w = layer.output_layer.out_w; + layer.out_c = output_filters; + layer.out = layer.output_layer.out; + break; + } + case 'rnn': { + const outputs = option_find_int(options, 'output', 1); + const hidden = option_find_int(options, 'hidden', 1); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + const inputs = params.inputs; + layer.input_layer = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.input_layer, 'input_', inputs, hidden, batch_normalize); + layer.self_layer = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.self_layer, 'self_', hidden, hidden, batch_normalize); + layer.output_layer = { weights: [], outputs: layer.outputs, chain: [] }; + make_connected_layer(layer.output_layer, 'output_', hidden, outputs, batch_normalize); + layer.weights = layer.weights.concat(layer.input_layer.weights); + layer.weights = layer.weights.concat(layer.self_layer.weights); + layer.weights = layer.weights.concat(layer.output_layer.weights); + layer.out_w = 1; + layer.out_h = 1; + layer.out_c = outputs; + layer.out = outputs; + break; + } + case 'gru': { + const inputs = params.inputs; + const outputs = option_find_int(options, 'output', 1); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + layer.input_z_layer = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.input_z_layer, 'input_z', inputs, outputs, batch_normalize); + layer.state_z_layer = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.state_z_layer, 'state_z', outputs, outputs, batch_normalize); + layer.input_r_layer = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.input_r_layer, 'input_r', inputs, outputs, batch_normalize); + layer.state_r_layer = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.state_r_layer, 'state_r', outputs, outputs, batch_normalize); + layer.input_h_layer = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.input_h_layer, 'input_h', inputs, outputs, batch_normalize); + layer.state_h_layer = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + 
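// Note: the GRU is decomposed into six inner connected layers (update gate z, reset gate r, candidate h); load_weights inside make_connected_layer reads the .weights stream sequentially, so parameters are consumed in construction order: input_z, state_z, input_r, state_r, input_h, state_h. + 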
make_connected_layer(layer.state_h_layer, 'state_h', outputs, outputs, batch_normalize); + layer.weights = layer.weights.concat(layer.input_z_layer.weights); + layer.weights = layer.weights.concat(layer.state_z_layer.weights); + layer.weights = layer.weights.concat(layer.input_r_layer.weights); + layer.weights = layer.weights.concat(layer.state_r_layer.weights); + layer.weights = layer.weights.concat(layer.input_h_layer.weights); + layer.weights = layer.weights.concat(layer.state_h_layer.weights); + layer.out = outputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'gru')); + break; + } + case 'lstm': { + const inputs = params.inputs; + const outputs = option_find_int(options, 'output', 1); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + layer.uf = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.uf, 'uf_', inputs, outputs, batch_normalize); + layer.ui = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.ui, 'ui_', inputs, outputs, batch_normalize); + layer.ug = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.ug, 'ug_', inputs, outputs, batch_normalize); + layer.uo = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.uo, 'uo_', inputs, outputs, batch_normalize); + layer.wf = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.wf, 'wf_', outputs, outputs, batch_normalize); + layer.wi = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.wi, 'wi_', outputs, outputs, batch_normalize); + layer.wg = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.wg, 'wg_', outputs, outputs, batch_normalize); + layer.wo = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_connected_layer(layer.wo, 'wo_', outputs, outputs, batch_normalize); + layer.weights = layer.weights.concat(layer.uf.weights); + layer.weights = layer.weights.concat(layer.ui.weights); + layer.weights = layer.weights.concat(layer.ug.weights); + layer.weights = layer.weights.concat(layer.uo.weights); + layer.weights = layer.weights.concat(layer.wf.weights); + layer.weights = layer.weights.concat(layer.wi.weights); + layer.weights = layer.weights.concat(layer.wg.weights); + layer.weights = layer.weights.concat(layer.wo.weights); + layer.out_w = 1; + layer.out_h = 1; + layer.out_c = outputs; + layer.out = outputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'lstm')); + weights = null; + break; + } + case 'conv_lstm': { + const size = option_find_int(options, "size", 3); + const stride = option_find_int(options, "stride", 1); + const output_filters = option_find_int(options, "output", 1); + const groups = option_find_int(options, "groups", 1); + const pad = option_find_int(options, "pad", 0); + const padding = pad ? 
(size >> 1) : option_find_int(options, 'padding', 0); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + const bottleneck = option_find_int(options, "bottleneck", 0); + const peephole = option_find_int(options, "peephole", 0); + layer.uf = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.uf, 'uf_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.ui = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.ui, 'ui_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.ug = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.ug, 'ug_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.uo = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.uo, 'uo_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.weights = layer.weights.concat(layer.uf.weights); + layer.weights = layer.weights.concat(layer.ui.weights); + layer.weights = layer.weights.concat(layer.ug.weights); + layer.weights = layer.weights.concat(layer.uo.weights); + if (bottleneck) { + layer.wf = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.wf, 'wf_', params.h, params.w, output_filters * 2, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.weights = layer.weights.concat(layer.wf.weights); + } else { + layer.wf = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.wf, 'wf_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.wi = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.wi, 'wi_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.wg = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.wg, 'wg_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.wo = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.wo, 'wo_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.weights = layer.weights.concat(layer.wf.weights); + layer.weights = layer.weights.concat(layer.wi.weights); + layer.weights = layer.weights.concat(layer.wg.weights); + layer.weights = layer.weights.concat(layer.wo.weights); + } + if (peephole) { + layer.vf = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.vf, 'vf_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.vi = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] }; + make_convolutional_layer(layer.vi, 'vi_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.vo = { weights: [], outputs: [ new darknet.Value('', null, null) ], chain: [] 
}; + make_convolutional_layer(layer.vo, 'vo_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.weights = layer.weights.concat(layer.vf.weights); + layer.weights = layer.weights.concat(layer.vi.weights); + layer.weights = layer.weights.concat(layer.vo.weights); + } + layer.out_h = layer.uo.out_h; + layer.out_w = layer.uo.out_w; + layer.out_c = output_filters; + layer.out = layer.out_h * layer.out_w * layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'conv_lstm')); + break; + } + case 'softmax': { + layer.out_w = params.w; + layer.out_h = params.h; + layer.out_c = params.c; + layer.out = params.inputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'softmax')); + break; + } + case 'dropout': { + layer.out_w = params.w; + layer.out_h = params.h; + layer.out_c = params.c; + layer.out = params.inputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'dropout')); + break; + } + case 'upsample': { + const stride = option_find_int(options, 'stride', 2); + layer.out_w = params.w * stride; + layer.out_h = params.h * stride; + layer.out_c = params.c; + layer.out = layer.out_w * layer.out_h * layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'upsample')); + break; + } + case 'crop': { + const shape = layer.inputs[0].type.shape.dimensions; + if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) { + throw new darknet.Error('Layer before crop layer must output image.'); + } + const crop_height = option_find_int(options, 'crop_height', 1); + const crop_width = option_find_int(options, 'crop_width', 1); + layer.out_w = crop_width; + layer.out_h = crop_height; + layer.out_c = params.c; + layer.out = layer.out_w * layer.out_h * layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'crop')); + break; + } + case 'yolo': { + const classes = option_find_int(options, 'classes', 20); + const n = option_find_int(options, 'num', 1); + layer.out_h = params.h; + layer.out_w = params.w; + layer.out_c = n * (classes + 4 + 1); + layer.out = layer.out_h * layer.out_w * layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'yolo')); + break; + } + case 'Gaussian_yolo': { + const classes = option_find_int(options, 'classes', 20); + const n = option_find_int(options, 'num', 1); + layer.out_h = params.h; + layer.out_w = params.w; + layer.out_c = n * (classes + 8 + 1); + layer.out = layer.out_h * layer.out_w * layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'Gaussian_yolo')); + break; + } + case 'region': { + const coords = option_find_int(options, 'coords', 4); + const classes = option_find_int(options, 'classes', 20); + const num = option_find_int(options, 'num', 1); + layer.out = params.h * params.w * num * (classes + coords + 1); + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.h, params.w, num, (classes + coords + 1) ], 'region')); + break; + } + case 'cost': { + layer.out = params.inputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'cost')); + break; + } + case 'reorg': { + const 
stride = option_find_int(options, 'stride', 1); + const reverse = option_find_int(options, 'reverse', 0); + const extra = option_find_int(options, 'extra', 0); + if (reverse) { + layer.out_w = params.w * stride; + layer.out_h = params.h * stride; + layer.out_c = Math.floor(params.c / (stride * stride)); + layer.out = layer.out_h * layer.out_w * layer.out_c; + } else { + layer.out_w = Math.floor(params.w / stride); + layer.out_h = Math.floor(params.h / stride); + layer.out_c = params.c * (stride * stride); + layer.out = layer.out_h * layer.out_w * layer.out_c; + } + if (extra) { + layer.out_w = 0; + layer.out_h = 0; + layer.out_c = 0; + layer.out = (params.h * params.w * params.c) + extra; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'reorg')); + } else { + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'reorg')); + } + break; + } + case 'route': { + const layers = [].concat(layer.layers); + const groups = option_find_int(options, 'groups', 1); + layer.out = 0; + for (const next of layers) { + layer.out += next.out / groups; + } + if (layers.length > 0) { + const first = layers.shift(); + layer.out_w = first.out_w; + layer.out_h = first.out_h; + layer.out_c = first.out_c / groups; + while (layers.length > 0) { + const next = layers.shift(); + if (next.out_w === first.out_w && next.out_h === first.out_h) { + layer.out_c += next.out_c; + continue; + } + infer = false; + break; + } + if (infer) { + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'route')); + } + } else { + infer = false; + } + if (!infer) { + layer.out_h = 0; + layer.out_w = 0; + layer.out_c = 0; + } + break; + } + case 'sam': + case 'scale_channels': { + const activation = option_find_str(options, 'activation', 'linear'); + const from = layer.from; + if (from) { + layer.out_w = from.out_w; + layer.out_h = from.out_h; + layer.out_c = from.out_c; + layer.out = layer.out_w * layer.out_h * layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'shortcut|scale_channels|sam')); + } + if (activation !== 'linear' && activation !== 'none') { + layer.chain.push({ type: activation }); + } + break; + } + case 'shortcut': { + const activation = option_find_str(options, 'activation', 'linear'); + layer.out_w = params.w; + layer.out_h = params.h; + layer.out_c = params.c; + layer.out = params.w * params.h * params.c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'shortcut|scale_channels|sam')); + if (activation !== 'linear' && activation !== 'none') { + layer.chain.push({ type: activation }); + } + break; + } + case 'detection': { + layer.out_w = params.w; + layer.out_h = params.h; + layer.out_c = params.c; + layer.out = params.inputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'detection')); + break; + } + default: { + infer = false; + break; + } + } + params.h = layer.out_h; + params.w = layer.out_w; + params.c = layer.out_c; + params.inputs = layer.out; + params.last = section; + } + params.value = layer.outputs; + } + + for (let i = 0; i < sections.length; i++) { + this._nodes.push(new darknet.Node(metadata, net, sections[i])); + } + + if (weights) { + weights.validate(); + } + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { 
return this._nodes; + } +}; + +darknet.Argument = class { + + constructor(name, visible, value) { + this._name = name; + this._visible = visible; + this._value = value; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get value() { + return this._value; + } +}; + +darknet.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new darknet.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this._name = name; + this._type = type; + this._initializer = initializer; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + set type(value) { + if (this._type) { + throw new darknet.Error('Invalid argument type set operation.'); + } + this._type = value; + } + + get initializer() { + return this._initializer; + } +}; + +darknet.Node = class { + + constructor(metadata, net, section) { + this._name = section.name || ''; + this._location = section.line !== undefined ? section.line.toString() : undefined; + this._attributes = []; + this._inputs = []; + this._outputs = []; + this._chain = []; + const type = section.type; + this._type = metadata.type(type) || { name: type }; + const layer = section.layer; + if (layer && layer.inputs && layer.inputs.length > 0) { + this._inputs.push(new darknet.Argument(layer.inputs.length <= 1 ? 'input' : 'inputs', true, layer.inputs)); + } + if (layer && layer.weights && layer.weights.length > 0) { + this._inputs = this._inputs.concat(layer.weights); + } + if (layer && layer.outputs && layer.outputs.length > 0) { + this._outputs.push(new darknet.Argument(layer.outputs.length <= 1 ? 'output' : 'outputs', true, layer.outputs)); + } + if (layer && layer.chain) { + for (const chain of layer.chain) { + this._chain.push(new darknet.Node(metadata, net, chain, '')); + } + } + const options = section.options; + if (options) { + for (const key of Object.keys(options)) { + this._attributes.push(new darknet.Attribute(metadata.attribute(type, key), key, options[key])); + } + } + } + + get name() { + return this._name; + } + + get location() { + return this._location; + } + + get type() { + return this._type; + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get chain() { + return this._chain; + } +}; + +darknet.Attribute = class { + + constructor(metadata, name, value) { + this._name = name; + this._value = value; + if (metadata) { + this._type = metadata.type || ''; + switch (this._type) { + case '': + case 'string': { + break; + } + case 'int32': { + const number = parseInt(this._value, 10); + if (Number.isInteger(number)) { + this._value = number; + } + break; + } + case 'float32': { + const number = parseFloat(this._value); + if (!isNaN(number)) { + this._value = number; + } + break; + } + case 'int32[]': { + const numbers = this._value.split(',').map((item) => parseInt(item.trim(), 10)); + if (numbers.every((number) => Number.isInteger(number))) { + this._value = numbers; + } + break; + } + default: { + throw new darknet.Error(`Unsupported attribute type '${this._type}'.`); + } + } + if (metadata && metadata.visible === false) { + this._visible = false; + } else if (Object.prototype.hasOwnProperty.call(metadata, 'default')) { + if (this._value == metadata.default) { + this._visible = false; + } + } + } + } + + get name() { + return this._name; + } + + get type() { + 
return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +darknet.Tensor = class { + + constructor(type, data) { + this._type = type; + this._values = data; + } + + get name() { + return ''; + } + + get type() { + return this._type; + } + + get values() { + return this._values; + } +}; + +darknet.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return (this._dataType || '?') + this._shape.toString(); + } +}; + +darknet.TensorShape = class { + + constructor(dimensions) { + if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) { + throw new darknet.Error(`Invalid tensor shape '${JSON.stringify(dimensions)}'.`); + } + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (this._dimensions) { + if (this._dimensions.length == 0) { + return ''; + } + return `[${this._dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } + return ''; + } +}; + +darknet.Reader = class { + + constructor(stream, identifier) { + this.stream = stream; + this.identifier = identifier; + } + + read() { + // read_cfg + const sections = []; + let section = null; + const reader = text.Reader.open(this.stream); + let lineNumber = 0; + const setup = /^setup.*\.cfg$/.test(this.identifier); + for (let content = reader.read(); content !== undefined; content = reader.read()) { + lineNumber++; + const line = content.replace(/\s/g, ''); + if (line.length > 0) { + switch (line[0]) { + case '#': + case ';': + break; + case '[': { + const type = line[line.length - 1] === ']' ? line.substring(1, line.length - 1) : line.substring(1); + if (setup) { + if (type === 'metadata' || type === 'global' || type === 'wheel' || + type === 'isort' || type === 'flake8' || type === 'build_ext' || + type.startsWith('bdist_') || type.startsWith('tool:') || type.startsWith('coverage:')) { + throw new darknet.Error('Invalid file content. File contains Python setup configuration data.'); + } + } + section = { + line: lineNumber, + type: type, + options: {} + }; + sections.push(section); + break; + } + default: { + if (!section || line.charCodeAt(0) < 0x20 || line.charCodeAt(0) > 0x7E) { + throw new darknet.Error(`Invalid cfg '${content.replace(/[^\x20-\x7E]+/g, '?').trim()}' at line ${lineNumber}.`); + } + const index = line.indexOf('='); + if (index < 0) { + throw new darknet.Error(`Invalid cfg '${content.replace(/[^\x20-\x7E]+/g, '?').trim()}' at line ${lineNumber}.`); + } + const key = line.substring(0, index); + const value = line.substring(index + 1); + section.options[key] = value; + break; + } + } + } + } + return sections; + } +}; + +darknet.Weights = class { + + static open(context) { + const reader = context.reader; + if (reader && reader.length >= 20) { + const major = reader.int32(); + const minor = reader.int32(); + reader.int32(); // revision + const transpose = (major > 1000) || (minor > 1000); + if (!transpose) { + reader.skip((major * 10 + minor) >= 2 ? 
8 : 4); + return new darknet.Weights(reader); + } + reader.seek(0); + } + return null; + } + + constructor(reader) { + this._reader = reader; + } + + read(size) { + return this._reader.read(size); + } + + validate() { + if (this._reader.position != this._reader.length) { + throw new darknet.Error('Invalid weights size.'); + } + } +}; + +darknet.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Darknet model.'; + } +}; + +export const ModelFactory = darknet.ModelFactory; + diff --git a/dl4j-metadata.json b/dl4j-metadata.json new file mode 100644 index 00000000000..ac1ca3e4f72 --- /dev/null +++ b/dl4j-metadata.json @@ -0,0 +1,85 @@ +[ + { + "name": "BatchNormalization", + "category": "Normalization", + "attributes": [ + { "name": "eps" }, + { "name": "gamma" }, + { "name": "decay" } + ] + }, + { + "name": "Convolution", + "category": "Layer", + "attributes": [ + { "name": "dilation" }, + { "name": "kernelSize" }, + { "name": "padding" } + ] + }, + { + "name": "Dense", + "category": "Layer", + "attributes": [] + }, + { + "name": "Dropout", + "category": "Dropout", + "attributes": [] + }, + { + "name": "GlobalPooling", + "category": "Pool", + "attributes": [] + }, + { + "name": "LReLU", + "category": "Activation", + "attributes": [] + }, + { + "name": "Merge", + "category": "Tensor", + "attributes": [] + }, + { + "name": "Output", + "category": "Layer", + "attributes": [] + }, + { + "name": "ReLU", + "category": "Activation", + "attributes": [] + }, + { + "name": "SeparableConvolution2D", + "category": "Layer", + "attributes": [] + }, + { + "name": "Sigmoid", + "category": "Activation", + "attributes": [] + }, + { + "name": "Softmax", + "category": "Activation", + "attributes": [] + }, + { + "name": "Subsampling", + "category": "Layer", + "attributes": [] + }, + { + "name": "TanH", + "category": "Activation", + "attributes": [] + }, + { + "name": "Upsampling2D", + "category": "Layer", + "attributes": [] + } +] \ No newline at end of file diff --git a/dl4j.js b/dl4j.js new file mode 100644 index 00000000000..df5d3c49c95 --- /dev/null +++ b/dl4j.js @@ -0,0 +1,402 @@ + +// Experimental + +import * as base from './base.js'; + +const dl4j = {}; + +dl4j.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + if (identifier === 'configuration.json') { + const obj = context.peek('json'); + if (obj && (obj.confs || obj.vertices)) { + return 'dl4j.configuration'; + } + } + if (identifier === 'coefficients.bin') { + const signature = [ 0x00, 0x07, 0x4A, 0x41, 0x56, 0x41, 0x43, 0x50, 0x50 ]; // JAVACPP + const stream = context.stream; + if (signature.length <= stream.length && stream.peek(signature.length).every((value, index) => value === signature[index])) { + return 'dl4j.coefficients'; + } + } + return undefined; + } + + async open(context, target) { + const metadata = await context.metadata('dl4j-metadata.json'); + switch (target) { + case 'dl4j.configuration': { + const obj = context.peek('json'); + try { + const content = await context.fetch('coefficients.bin'); + const buffer = content.stream.peek(); + return new dl4j.Model(metadata, obj, buffer); + } catch (error) { + return new dl4j.Model(metadata, obj, null); + } + } + case 'dl4j.coefficients': { + const content = await context.fetch('configuration.json'); + const obj = content.read('json'); + return new dl4j.Model(metadata, obj, context.stream.peek()); + } + default: { + throw new dl4j.Error(`Unsupported Deeplearning4j format '${target}'.`); + } + } + } +}; + 
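+// A Deeplearning4j model pairs a JSON topology (configuration.json) with JavaCPP-serialized weights (coefficients.bin); open() above accepts either file as the entry point, and a configuration whose coefficients cannot be fetched degrades to a weight-less graph. + 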
+dl4j.Model = class { + + constructor(metadata, configuration, coefficients) { + this.format = 'Deeplearning4j'; + this.graphs = [ new dl4j.Graph(metadata, configuration, coefficients) ]; + } +}; + +dl4j.Graph = class { + + constructor(metadata, configuration, coefficients) { + this.inputs = []; + this.outputs = []; + this.nodes = []; + coefficients = coefficients ? new dl4j.NDArray(coefficients) : null; + const dataType = coefficients ? coefficients.dataType : '?'; + const values = new Map(); + const value = (name, type, tensor) => { + if (name.length === 0 && tensor) { + return new dl4j.Value(name, type || null, tensor); + } + if (!values.has(name)) { + values.set(name, new dl4j.Value(name, type || null, tensor || null)); + } else if (type || tensor) { + throw new dl4j.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + if (configuration.networkInputs) { + for (const input of configuration.networkInputs) { + const argument = new dl4j.Argument(input, [ value(input) ]); + this.inputs.push(argument); + } + } + if (configuration.networkOutputs) { + for (const output of configuration.networkOutputs) { + const argument = new dl4j.Argument(output, [ value(output) ]); + this.outputs.push(argument); + } + } + let inputs = null; + // Computation Graph + if (configuration.vertices) { + for (const name in configuration.vertices) { + const vertex = dl4j.Node._object(configuration.vertices[name]); + inputs = configuration.vertexInputs[name]; + let variables = []; + let layer = null; + switch (vertex.__type__) { + case 'LayerVertex': + layer = dl4j.Node._object(vertex.layerConf.layer); + variables = vertex.layerConf.variables; + break; + case 'MergeVertex': + layer = { __type__: 'Merge', layerName: name }; + break; + case 'ElementWiseVertex': + layer = { __type__: 'ElementWise', layerName: name, op: vertex.op }; + break; + case 'PreprocessorVertex': + layer = { __type__: 'Preprocessor', layerName: name }; + break; + default: + throw new dl4j.Error(`Unsupported vertex class '${vertex.__type__}'.`); + } + this.nodes.push(new dl4j.Node(metadata, layer, inputs, dataType, variables, value)); + } + } + // Multi Layer Network + if (configuration.confs) { + inputs = [ 'input' ]; + this.inputs.push(new dl4j.Argument('input', [ value('input') ])); + for (const conf of configuration.confs) { + const layer = dl4j.Node._object(conf.layer); + this.nodes.push(new dl4j.Node(metadata, layer, inputs, dataType, conf.variables, value)); + inputs = [ layer.layerName ]; + } + this.outputs.push(new dl4j.Argument('output', [ value(inputs[0]) ])); + } + } +}; + +dl4j.Argument = class { + + constructor(name, value, visible) { + this.name = name; + this.value = value; + if (visible === false) { + this.visible = false; + } + } +}; + +dl4j.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new dl4j.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = initializer ? initializer.type : type; + this.initializer = initializer; + } +}; + +dl4j.Node = class { + + constructor(metadata, layer, inputs, dataType, variables, value) { + this.name = layer.layerName || ''; + this.inputs = []; + this.outputs = []; + this.attributes = []; + const type = layer.__type__; + this.type = metadata.type(type) || { name: type }; + if (inputs && inputs.length > 0) { + const values = inputs.map((input) => value(input)); + const argument = new dl4j.Argument(values.length < 2 ? 
'input' : 'inputs', values); + this.inputs.push(argument); + } + if (variables) { + for (const variable of variables) { + let tensor = null; + switch (type) { + case 'Convolution': + switch (variable) { + case 'W': + tensor = new dl4j.Tensor(dataType, layer.kernelSize.concat([ layer.nin, layer.nout ])); + break; + case 'b': + tensor = new dl4j.Tensor(dataType, [ layer.nout ]); + break; + default: + throw new dl4j.Error(`Unsupported '${type}' variable '${variable}'.`); + } + break; + case 'SeparableConvolution2D': + switch (variable) { + case 'W': + tensor = new dl4j.Tensor(dataType, layer.kernelSize.concat([ layer.nin, layer.nout ])); + break; + case 'pW': + tensor = new dl4j.Tensor(dataType, [ layer.nout ]); + break; + default: + throw new dl4j.Error(`Unsupported '${type}' variable '${variable}'.`); + } + break; + case 'Output': + case 'Dense': + switch (variable) { + case 'W': + tensor = new dl4j.Tensor(dataType, [ layer.nout, layer.nin ]); + break; + case 'b': + tensor = new dl4j.Tensor(dataType, [ layer.nout ]); + break; + default: + throw new dl4j.Error(`Unsupported '${type}' variable '${variable}'.`); + } + break; + case 'BatchNormalization': + tensor = new dl4j.Tensor(dataType, [ layer.nin ]); + break; + default: + throw new dl4j.Error(`Unsupported '${type}' variable '${variable}'.`); + } + const argument = new dl4j.Argument(variable, [ value('', null, tensor) ]); + this.inputs.push(argument); + } + } + if (this.name) { + const argument = new dl4j.Argument('output', [ value(this.name) ]); + this.outputs.push(argument); + } + let attributes = layer; + if (layer.activationFn) { + const activation = dl4j.Node._object(layer.activationFn); + if (activation.__type__ !== 'ActivationIdentity' && activation.__type__ !== 'Identity') { + if (activation.__type__.startsWith('Activation')) { + activation.__type__ = activation.__type__.substring('Activation'.length); + } + if (type === 'Activation') { + this.type = metadata.type(activation.__type__) || { name: activation.__type__ }; + attributes = activation; + } else { + this.chain = this.chain || []; + this.chain.push(new dl4j.Node(metadata, activation, [], null, null, value)); + } + } + } + for (const [name, value] of Object.entries(attributes)) { + switch (name) { + case '__type__': + case 'constraints': + case 'layerName': + case 'activationFn': + case 'idropout': + case 'hasBias': + continue; + default: + break; + } + const definition = metadata.attribute(type, name); + const visible = definition && definition.visible === false ? 
false : true; + const attribute = new dl4j.Argument(name, value, visible); + this.attributes.push(attribute); + } + if (layer.idropout) { + const dropout = dl4j.Node._object(layer.idropout); + if (dropout.p !== 1.0) { + throw new dl4j.Error("Layer 'idropout' not implemented."); + } + } + } + + static _object(value) { + let result = {}; + if (value['@class']) { + result = value; + let type = value['@class'].split('.').pop(); + if (type.endsWith('Layer')) { + type = type.substring(0, type.length - 5); + } + delete value['@class']; + result.__type__ = type; + } else { + let [key] = Object.keys(value); + result = value[key]; + if (key.length > 0) { + key = key[0].toUpperCase() + key.substring(1); + } + result.__type__ = key; + } + return result; + } +}; + +dl4j.Tensor = class { + + constructor(dataType, shape) { + this.type = new dl4j.TensorType(dataType, new dl4j.TensorShape(shape)); + } +}; + +dl4j.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType; + this.shape = shape; + } + + toString() { + return (this.dataType || '?') + this.shape.toString(); + } +}; + +dl4j.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + if (this.dimensions) { + if (this.dimensions.length == 0) { + return ''; + } + return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } + return ''; + } +}; + +dl4j.NDArray = class { + + constructor(buffer) { + const reader = new dl4j.BinaryReader(buffer); + const readHeader = (reader) => { + const alloc = reader.string(); + let length = 0; + switch (alloc) { + case 'DIRECT': + case 'HEAP': + case 'JAVACPP': + length = reader.int32(); + break; + case 'LONG_SHAPE': + case 'MIXED_DATA_TYPES': + length = reader.int64(); + break; + default: + throw new dl4j.Error(`Unsupported header alloc '${alloc}'.`); + } + const type = reader.string(); + return [ alloc, length, type ]; + }; + const headerShape = readHeader(reader); + if (headerShape[2] !== 'INT') { + throw new dl4j.Error(`Unsupported header shape type '${headerShape[2]}'.`); + } + const shapeInfo = new Array(headerShape[1]); + for (let i = 0; i < shapeInfo.length; i++) { + shapeInfo[i] = reader.int32(); + } + const [rank] = shapeInfo; + const shapeInfoLength = rank * 2 + 4; + this.shape = shapeInfo.slice(1, 1 + rank); + this.strides = shapeInfo.slice(1 + rank, 1 + (rank * 2)); + this.order = shapeInfo[shapeInfoLength - 1]; + const headerData = readHeader(reader); + const dataTypes = new Map([ + [ 'INT', [ 'int32', 4 ] ], + [ 'FLOAT', [ 'float32', 4 ] ], + [ 'DOUBLE', [ 'float64', 8 ] ] + ]); + if (!dataTypes.has(headerData[2])) { + throw new dl4j.Error(`Unsupported header data type '${headerData[2]}'.`); + } + const [dataType, itemSize] = dataTypes.get(headerData[2]); + this.dataType = dataType; + const size = headerData[1] * itemSize; + if ((reader.position + size) <= reader.length) { + this.data = reader.read(size); + } + } +}; + +dl4j.BinaryReader = class extends base.BinaryReader { + + constructor(buffer) { + super(buffer, false); + } + + string() { + const size = this.uint16(); + const buffer = this.read(size); + this._decoder = this._decoder || new TextDecoder('ascii'); + return this._decoder.decode(buffer); + } +}; + +dl4j.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Deeplearning4j model.'; + } +}; + +export const ModelFactory = dl4j.ModelFactory; diff --git a/dlc-metadata.json b/dlc-metadata.json new file mode 100644 index 00000000000..e5cc3806c93 --- 
/dev/null +++ b/dlc-metadata.json @@ -0,0 +1,141 @@ +[ + { + "name": "Convolutional:v3", + "category": "Layer" + }, + { + "name": "Deconvolution:v3", + "category": "Layer" + }, + { + "name": "BatchNorm:v3", + "category": "Normalization" + }, + { + "name": "Concat:v3", + "category": "Tensor" + }, + { + "name": "Gather:v3", + "category": "Transform" + }, + { + "name": "FullyConnected:v3", + "category": "Layer" + }, + { + "name": "Neuron:v3", + "category": "Activation", + "attributes": [ + { "name": "type", "type": "Activation" } + ] + }, + { + "name": "Reshape:v3", + "category": "Shape" + }, + { + "name": "Permute:v3", + "category": "Shape" + }, + { + "name": "Pooling:v3", + "category": "Pool" + }, + { + "name": "SoftMax:v3", + "category": "Activation" + }, + { + "name": "StridedSlice:v3", + "category": "Tensor" + }, + { + "name": "Slice:v3", + "category": "Tensor" + }, + { + "name": "Pool:v4", + "category": "Pool" + }, + { + "name": "Softmax:v4", + "category": "Activation" + }, + { + "name": "Sigmoid:v4", + "category": "Activation" + }, + { + "name": "Conv2d:v4", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "DepthWiseConv2d:v4", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "FullyConnected:v4", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "Concat:v4", + "category": "Tensor" + }, + { + "name": "Split:v4", + "category": "Tensor" + }, + { + "name": "Reshape:v4", + "category": "Shape" + }, + { + "name": "Neuron:v4", + "category": "Activation" + }, + { + "name": "Transpose:v4", + "category": "Transform" + }, + { + "name": "StridedSlice:v4", + "category": "Tensor" + }, + { + "name": "Pad:v4", + "category": "Shape" + }, + { + "name": "Gather:v4", + "category": "Transform", + "inputs": [ + { "name": "input" }, + { "name": "indices" } + ] + }, + { + "name": "Batchnorm:v4", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "mean" }, + { "name": "variance" } + ] + } +] \ No newline at end of file diff --git a/dlc-schema.js b/dlc-schema.js new file mode 100644 index 00000000000..b1f62704202 --- /dev/null +++ b/dlc-schema.js @@ -0,0 +1,289 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const $root = flatbuffers.get('dlc'); + +$root.dlc = $root.dlc || {}; + +$root.dlc.v3 = $root.dlc.v3 || {}; + +$root.dlc.v3.Model = class Model { + + static decode(reader, position) { + const $ = new $root.dlc.v3.Model(); + $.unk1 = reader.int32_(position, 4, 0); + $.nodes = reader.tableArray(position, 6, $root.dlc.v3.Node.decode); + $.unk2 = reader.typedArray(position, 8, Int32Array); + $.unk3 = reader.typedArray(position, 10, Int32Array); + $.attributes = reader.tableArray(position, 12, $root.dlc.v3.Attribute.decode); + return $; + } +}; + +$root.dlc.v3.Node = class Node { + + static decode(reader, position) { + const $ = new $root.dlc.v3.Node(); + $.index = reader.int32_(position, 4, 0); + $.name = reader.string_(position, 6, null); + $.type = reader.string_(position, 8, null); + $.inputs = reader.strings_(position, 10); + $.outputs = reader.strings_(position, 12); + $.attributes = reader.tableArray(position, 14, $root.dlc.v3.Attribute.decode); + return $; + } +}; + +$root.dlc.v3.Tensor = class Tensor { + + static decode(reader, position) { + const $ = new $root.dlc.v3.Tensor(); + $.name = reader.string_(position, 4, null); + $.shape = 
reader.typedArray(position, 6, Int32Array); + $.data = reader.table(position, 8, $root.dlc.v3.TensorData.decode); + $.attributes = reader.tableArray(position, 10, $root.dlc.v3.Attribute.decode); + return $; + } +}; + +$root.dlc.v3.TensorData = class TensorData { + + static decode(reader, position) { + const $ = new $root.dlc.v3.TensorData(); + $.dtype = reader.uint8_(position, 4, 0); + $.bytes = reader.typedArray(position, 6, Uint8Array); + $.floats = reader.typedArray(position, 8, Float32Array); + return $; + } +}; + +$root.dlc.v3.Attribute = class Attribute { + + static decode(reader, position) { + const $ = new $root.dlc.v3.Attribute(); + $.name = reader.string_(position, 4, null); + $.type = reader.uint8_(position, 6, 0); + $.bool_value = reader.bool_(position, 8, false); + $.int32_value = reader.int32_(position, 10, 0); + $.uint32_value = reader.uint32_(position, 12, 0); + $.float32_value = reader.float32_(position, 14, 0); + $.string_value = reader.string_(position, 16, null); + $.unk6 = reader.typedArray(position, 18, Int8Array); + $.byte_list = reader.typedArray(position, 20, Int8Array); + $.int32_list = reader.typedArray(position, 22, Int32Array); + $.float32_list = reader.typedArray(position, 24, Float32Array); + $.unk10 = reader.typedArray(position, 26, Int8Array); + $.attributes = reader.tableArray(position, 28, $root.dlc.v3.Attribute.decode); + return $; + } +}; + +$root.dlc.v3.Activation = { + ReLU: 1, + Sigmoid: 3 +}; + +$root.dlc.v3.ModelParameters = class ModelParameters { + + static decode(reader, position) { + const $ = new $root.dlc.v3.ModelParameters(); + $.nodes = reader.tableArray(position, 4, $root.dlc.v3.NodeParameters.decode); + return $; + } +}; + +$root.dlc.v3.NodeParameters = class NodeParameters { + + static decode(reader, position) { + const $ = new $root.dlc.v3.NodeParameters(); + $.name = reader.string_(position, 4, null); + $.weights = reader.tableArray(position, 6, $root.dlc.v3.Tensor.decode); + return $; + } +}; + +$root.dlc = $root.dlc || {}; + +$root.dlc.v4 = $root.dlc.v4 || {}; + +$root.dlc.v4.Model = class Model { + + static decode(reader, position) { + const $ = new $root.dlc.v4.Model(); + $.graphs = reader.tableArray(position, 4, $root.dlc.v4.Graph.decode); + return $; + } +}; + +$root.dlc.v4.Graph = class Graph { + + static decode(reader, position) { + const $ = new $root.dlc.v4.Graph(); + $.name = reader.string_(position, 4, null); + $.nodes = reader.tableArray(position, 6, $root.dlc.v4.Node.decode); + $.tensors = reader.tableArray(position, 8, $root.dlc.v4.Tensor.decode); + return $; + } +}; + +$root.dlc.v4.Node = class Node { + + static decode(reader, position) { + const $ = new $root.dlc.v4.Node(); + $.name = reader.string_(position, 4, null); + $.type = reader.string_(position, 6, null); + $.inputs = reader.strings_(position, 8); + $.outputs = reader.strings_(position, 10); + $.attributes = reader.tableArray(position, 12, $root.dlc.v4.Attribute.decode); + return $; + } +}; + +$root.dlc.v4.Attribute = class Attribute { + + static decode(reader, position) { + const $ = new $root.dlc.v4.Attribute(); + $.name = reader.string_(position, 4, null); + $.kind = reader.int32_(position, 6, 0); + $.flag = reader.uint8_(position, 8, 0); + $.value = reader.table(position, 10, $root.dlc.v4.Value.decode); + $.tensor = reader.table(position, 12, $root.dlc.v4.Tensor.decode); + return $; + } +}; + +$root.dlc.v4.Value = class Value { + + static decode(reader, position) { + const $ = new $root.dlc.v4.Value(); + $.kind = reader.int32_(position, 4, 0); + 
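// 'kind' is a type tag for the value fields that follow; dlc.js + // (Container._model4) maps it to a data type, e.g. 0x0132 -> int32, + // 0x0232 -> float32, 0x0508 -> boolean, 0x7fffffff -> string. + 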
$.int32_value = reader.int32_(position, 6, 0); + $.float32_value = reader.float32_(position, 8, 0); + $.string_value = reader.string_(position, 10, null); + return $; + } +}; + +$root.dlc.v4.Tensor = class Tensor { + + static decode(reader, position) { + const $ = new $root.dlc.v4.Tensor(); + $.unk1 = reader.uint32_(position, 4, 0); + $.name = reader.string_(position, 6, null); + $.location = reader.int32_(position, 8, 0); + $.shape = reader.typedArray(position, 10, Int32Array); + $.unk2 = reader.int32_(position, 12, 0); + $.info = reader.table(position, 14, $root.dlc.v4.TensorInfo.decode); + $.dtype = reader.int32_(position, 16, 0); + $.output_dtype = reader.int32_(position, 18, 0); + $.unk6 = reader.uint8_(position, 20, 0); + return $; + } +}; + +$root.dlc.v4.TensorInfo = class TensorInfo { + + static decode(reader, position) { + const $ = new $root.dlc.v4.TensorInfo(); + $.i1 = reader.int32_(position, 4, 0); + $.b1 = reader.uint8_(position, 6, 0); + $.a = reader.table(position, 8, $root.dlc.v4.TensorInfo1.decode); + $.b = reader.table(position, 10, $root.dlc.v4.TensorInfo2.decode); + return $; + } +}; + +$root.dlc.v4.TensorInfo1 = class TensorInfo1 { + + static decode(reader, position) { + const $ = new $root.dlc.v4.TensorInfo1(); + $.i1 = reader.int32_(position, 4, 0); + $.f1 = reader.float32_(position, 6, 0); + $.f2 = reader.float32_(position, 8, 0); + $.f3 = reader.float32_(position, 10, 0); + $.i2 = reader.int32_(position, 12, 0); + return $; + } +}; + +$root.dlc.v4.TensorInfo2 = class TensorInfo2 { + + static decode(reader, position) { + const $ = new $root.dlc.v4.TensorInfo2(); + $.i1 = reader.int32_(position, 4, 0); + $.l = reader.tableArray(position, 6, $root.dlc.v4.TensorInfo3.decode); + return $; + } +}; + +$root.dlc.v4.TensorInfo3 = class TensorInfo3 { + + static decode(reader, position) { + const $ = new $root.dlc.v4.TensorInfo3(); + $.i1 = reader.int32_(position, 4, 0); + $.f1 = reader.float32_(position, 6, 0); + $.f2 = reader.float32_(position, 8, 0); + $.f3 = reader.float32_(position, 10, 0); + $.i2 = reader.int32_(position, 12, 0); + $.b1 = reader.uint8_(position, 14, 0); + return $; + } +}; + +$root.dlc.v4.ModelParameters64 = class ModelParameters64 { + + static decode(reader, position) { + const $ = new $root.dlc.v4.ModelParameters64(); + $.buffers = reader.tableArray(position, 4, $root.dlc.v4.Buffer.decode); + $.params = reader.typedArray(position, 6, Uint8Array); + return $; + } +}; + +$root.dlc.v4.ModelParameters = class ModelParameters { + + static decode(reader, position) { + const $ = new $root.dlc.v4.ModelParameters(); + $.graphs = reader.tableArray(position, 4, $root.dlc.v4.GraphParameters.decode); + return $; + } +}; + +$root.dlc.v4.GraphParameters = class GraphParameters { + + static decode(reader, position) { + const $ = new $root.dlc.v4.GraphParameters(); + $.name = reader.string_(position, 4, null); + $.tensors = reader.tableArray(position, 6, $root.dlc.v4.TensorData.decode); + $.nodes = reader.tableArray(position, 8, $root.dlc.v4.NodeParameters.decode); + return $; + } +}; + +$root.dlc.v4.NodeParameters = class NodeParameters { + + static decode(reader, position) { + const $ = new $root.dlc.v4.NodeParameters(); + $.tensors = reader.tableArray(position, 4, $root.dlc.v4.TensorData.decode); + return $; + } +}; + +$root.dlc.v4.TensorData = class TensorData { + + static decode(reader, position) { + const $ = new $root.dlc.v4.TensorData(); + $.name = reader.string_(position, 4, null); + $.bytes = reader.typedArray(position, 6, Uint8Array); + return $; + } +}; + 
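// Usage sketch (illustrative only, not part of the generated schema): a DLC + // v4 stream carries an 8-byte signature before the FlatBuffers payload, so + // dlc.js decodes the root table along these lines: + // + // const reader = flatbuffers.BinaryReader.open(stream.peek().subarray(8)); + // const model = $root.dlc.v4.Model.decode(reader, reader.root); + // for (const graph of model.graphs) { /* graph.nodes, graph.tensors */ } + 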
+$root.dlc.v4.Buffer = class Buffer { + + static decode(reader, position) { + const $ = new $root.dlc.v4.Buffer(); + $.bytes = reader.typedArray(position, 4, Uint8Array); + return $; + } +}; diff --git a/dlc.js b/dlc.js new file mode 100644 index 00000000000..1a155995478 --- /dev/null +++ b/dlc.js @@ -0,0 +1,672 @@ + +import * as text from './text.js'; +import * as flatbuffers from './flatbuffers.js'; + +const dlc = {}; + +dlc.ModelFactory = class { + + match(context) { + return dlc.Container.open(context); + } + + async open(context, target) { + await context.require('./dlc-schema'); + dlc.schema = flatbuffers.get('dlc').dlc; + await target.read(); + const metadata = await context.metadata('dlc-metadata.json'); + return new dlc.Model(metadata, target); + } +}; + +dlc.Model = class { + + constructor(metadata, target) { + this.format = target.format; + this.metadata = new Map(); + if (target.metadata.size > 0) { + const version = target.metadata.get('model-version'); + if (version) { + this.version = version; + } + const converter = target.metadata.get('converter-command'); + if (converter) { + const source = converter.split(' ').shift().trim(); + if (source.length > 0) { + const version = target.metadata.get('converter-version'); + this.metadata.set('source', version ? `${source} v${version}` : source); + } + } + } + this.graphs = target.graphs.map((graph) => new dlc.Graph(metadata, target.version, graph)); + } +}; + +dlc.Graph = class { + + constructor(metadata, version, graph) { + this.name = graph.name; + this.inputs = []; + this.outputs = []; + const values = new Map(); + switch (version) { + case 3: { + for (const node of graph.nodes) { + for (const name of node.inputs) { + if (!values.has(name)) { + values.set(name, {}); + } + } + for (const name of node.outputs) { + if (!values.has(name)) { + values.set(name, {}); + } + } + let shapes = new Array(node.outputs.length); + for (const attribute of node.attributes) { + if (attribute.name === 'OutputDims' && + Array.isArray(attribute.attributes) && attribute.attributes.length > 0) { + shapes = attribute.data; + break; + } + } + for (let i = 0; i < node.outputs.length; i++) { + const name = node.outputs[i]; + const value = values.get(name); + if (!value.shape && i < shapes.length) { + value.shape = shapes[i]; + } + } + } + break; + } + case 4: { + for (const tensor of graph.tensors) { + values.set(tensor.name, tensor); + } + break; + } + default: { + break; + } + } + for (const [name, tensor] of values) { + const type = tensor.shape ? new dlc.TensorType(tensor.dtype, tensor.shape) : null; + const initializer = tensor.data ? 
new dlc.Tensor(type, tensor.data) : null; + const value = new dlc.Value(name, type, initializer); + values.set(name, value); + } + const value = (name) => { + if (!values.has(name)) { + values.set(name, new dlc.Value(name)); + } + return values.get(name); + }; + this.nodes = []; + for (const node of graph.nodes) { + if (node.type === 'Input') { + this.inputs.push(new dlc.Argument(node.name, node.inputs.map((input) => value(input)))); + continue; + } + this.nodes.push(new dlc.Node(metadata, version, node, value)); + } + } +}; + +dlc.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +dlc.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new dlc.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type; + this.initializer = initializer; + } +}; + +dlc.Node = class { + + constructor(metadata, version, node, value) { + const type = `${node.type}:v${version}`; + this.type = Object.assign({}, metadata.type(type)); + this.type.name = node.type; + this.name = node.name; + this.inputs = []; + this.outputs = []; + this.attributes = []; + const inputs = Array.isArray(node.inputs) ? Array.from(node.inputs).map((input) => value(input)) : []; + if (Array.isArray(this.type.inputs) && inputs.length === this.type.inputs.length) { + for (let i = 0; i < inputs.length; i++) { + const argument = new dlc.Argument(this.type.inputs[i].name, [ inputs[i] ]); + this.inputs.push(argument); + } + } else if (inputs.length > 0) { + const argument = new dlc.Argument(inputs.length === 1 ? 'input' : 'inputs', inputs); + this.inputs.push(argument); + } + const outputs = Array.isArray(node.outputs) ? Array.from(node.outputs).map((output) => value(output)) : []; + if (Array.isArray(this.type.outputs) && outputs.length === this.type.outputs.length) { + for (let i = 0; i < outputs.length; i++) { + const argument = new dlc.Argument(this.type.outputs[i].name, [ outputs[i] ]); + this.outputs.push(argument); + } + } else if (outputs.length > 0) { + const argument = new dlc.Argument(outputs.length === 1 ? 
'output' : 'outputs', outputs); + this.outputs.push(argument); + } + if (node.attributes) { + for (const attr of node.attributes) { + if (attr.name === 'OutputDims') { + continue; + } + const attribute = new dlc.Attribute(metadata.attribute(type, attr.name), version, attr); + this.attributes.push(attribute); + } + } + if (node.weights) { + for (const tensor of node.weights) { + const type = new dlc.TensorType(tensor.data.dtype, tensor.shape); + const value = new dlc.Value('', type, new dlc.Tensor(type, tensor.data)); + this.inputs.push(new dlc.Argument(tensor.name, [ value ])); + } + } + } +}; + +dlc.Attribute = class { + + constructor(metadata, version, attribute) { + this.name = attribute.name; + this.type = attribute.type; + switch (this.type) { + case 'tensor': { + const tensor = attribute.data; + const type = new dlc.TensorType(tensor.dtype, tensor.shape); + const data = tensor.data; + this.value = new dlc.Tensor(type, data); + break; + } + default: { + this.value = attribute.data; + } + } + if (metadata && metadata.type) { + this.type = metadata.type; + this.value = dlc.Utility.enum(version, this.type, this.value); + } + } +}; + +dlc.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType || '?'; + this.shape = new dlc.TensorShape(shape); + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +dlc.TensorShape = class { + + constructor(dimensions) { + this.dimensions = Array.from(dimensions); + } + + toString() { + if (Array.isArray(this.dimensions) && this.dimensions.length > 0) { + return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } + return ''; + } +}; + +dlc.Tensor = class { + + constructor(type, data) { + this.type = type; + if (data instanceof Uint8Array) { + this.encoding = '<'; + this.values = data; + } else { + this.encoding = '|'; + switch (type.dataType) { + case 'uint8': this.values = data.bytes; break; + case 'float32': this.values = data.floats; break; + default: throw new dlc.Error(`Unsupported tensor data type '${type.dataType}'.`); + } + } + } +}; + +dlc.Container = class { + + static open(context) { + const entries = context.peek('zip'); + if (entries instanceof Map) { + if (entries.has('model') || entries.has('model.params')) { + return new dlc.Container(context, entries.get('model'), entries.get('model.params'), entries.get('dlc.metadata')); + } + } + const stream = context.stream; + switch (dlc.Container._signature(stream).split('.').pop()) { + case 'NETD': + return new dlc.Container(context, stream, undefined, undefined); + case 'NETP': + return new dlc.Container(context, undefined, stream, undefined); + case 'NR64': + return new dlc.Container(context, undefined, stream, undefined); + default: + return null; + } + } + + constructor(context, model, params, metadata) { + this._context = context; + this._model = model; + this._params = params; + this._metadata = metadata; + } + + async read() { + if (this._model === undefined) { + this._model = await this._fetch('model'); + } + if (this._params === undefined) { + this._params = await this._fetch('model.params'); + } + if (this._metadata === undefined) { + this._metadata = await this._fetch('dlc.metadata'); + } + delete this._context; + this.graphs = []; + this.metadata = new Map(); + if (this._model) { + this.format = 'DLC'; + const stream = this._model; + delete this._model; + const signature = dlc.Container._signature(stream); + switch (signature) { + case '2': { + throw new dlc.Error("File contains undocumented DLC v2 data."); + 
} + case '3.NETD': + case 'NETD': { + this.version = 3; + this.graph = dlc.Container._model3(stream, signature); + this.graphs = [ this.graph ]; + break; + } + case '4.NETD': { + this.version = 4; + this.graphs = dlc.Container._model4(stream); + break; + } + default: { + const buffer = stream.peek(Math.min(stream.length, 16)); + const content = Array.from(buffer).map((c) => (c < 16 ? '0' : '') + c.toString(16)).join(''); + throw new dlc.Error(`File contains undocumented '${content}' data.`); + } + } + } + if (this._params) { + this.format = this.format || 'DLC Weights'; + const stream = this._params; + delete this._params; + const signature = dlc.Container._signature(stream); + switch (signature) { + case '2': { + throw new dlc.Error("File contains undocumented DLC v2 data."); + } + case '3.NETP': + case 'NETP': { + this.version = this.graphs.length > 0 ? this.version : 3; + this.graph = dlc.Container._params3(stream, signature, this.graph); + this.graphs = [ this.graph ]; + break; + } + case '4.NETP': + case '4.NR64': { + dlc.Container._params4(stream, this.graphs, signature); + break; + } + default: { + const buffer = stream.peek(Math.min(stream.length, 16)); + const content = Array.from(buffer).map((c) => (c < 16 ? '0' : '') + c.toString(16)).join(''); + throw new dlc.Error(`File contains undocumented '${content}' data.`); + } + } + } + if (this._metadata) { + const stream = this._metadata; + delete this._metadata; + const reader = text.Reader.open(stream); + for (;;) { + const line = reader.read(); + if (line === undefined) { + break; + } + const index = line.indexOf('='); + if (index === -1) { + break; + } + const key = line.substring(0, index); + const value = line.substring(index + 1); + this.metadata.set(key, value); + } + } + } + + static _model3(stream, signature) { + let model = null; + try { + const buffer = new Uint8Array(signature === 'NETD' ? stream.peek() : stream.peek().subarray(8)); + const reader = flatbuffers.BinaryReader.open(buffer); + model = dlc.schema.v3.Model.decode(reader, reader.root); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new dlc.Error(`File format is not dlc.v3.NETD (${message.replace(/\.$/, '')}).`); + } + model.tensors = []; + const updateAttribute = (attr) => { + switch (attr.type) { + case 1: return [ 'boolean', attr.bool_value ]; + case 2: return [ 'int32', attr.int32_value ]; + case 3: return [ 'uint32', attr.uint32_value ]; + case 4: return [ 'float32', attr.float32_value ]; + case 5: return [ 'string', attr.string_value ]; + case 7: return [ 'byte[]', Array.from(attr.byte_list) ]; + case 8: return [ 'int32[]', Array.from(attr.int32_list) ]; + case 9: return [ 'float32[]', Array.from(attr.float32_list) ]; + case 11: { + const obj = {}; + let index = 0; + let list = true; + for (const attribute of attr.attributes) { + const name = attribute.name; + const [, data] = updateAttribute(attribute); + obj[name] = data; + list = list && index.toString() === attribute.name; + index++; + } + return list ? 
[ '', Object.values(obj) ] : [ '', obj ]; + } + default: + throw new dlc.Error(`Unsupported attribute type '${attr.type}'.`); + } + }; + for (const node of model.nodes) { + for (const attribute of node.attributes) { + const [type, data] = updateAttribute(attribute); + attribute.type = type; + attribute.data = data; + } + } + return model; + } + + static _model4(stream) { + let model = null; + try { + const buffer = new Uint8Array(stream.peek().subarray(8)); + const reader = flatbuffers.BinaryReader.open(buffer); + model = dlc.schema.v4.Model.decode(reader, reader.root); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new dlc.Error(`File format is not dlc.v4.NETD (${message.replace(/\.$/, '')}).`); + } + const dataType = (value) => { + switch (value) { + case 0x0008: return 'int8'; + case 0x0016: return 'int16'; + case 0x0032: return 'int32'; + case 0x0108: return 'int8'; + case 0x0132: return 'int32'; + case 0x0216: return 'float16'; + case 0x0232: return 'float32'; + case 0x0308: return 'qint8'; + case 0x0332: return 'qint32'; + case 0x0408: return 'uint8'; + case 0x0416: return 'uint16'; + case 0x0508: return 'boolean'; + default: throw new dlc.Error(`Unsupported data type '${JSON.stringify(value)}'.`); + } + }; + const updateTensor = (tensor) => { + tensor.dtype = dataType(tensor.dtype); + tensor.output_dtype = dataType(tensor.output_dtype); + }; + for (const graph of model.graphs) { + for (const node of graph.nodes) { + for (const attribute of node.attributes) { + switch (attribute.kind) { + case 0: { + const value = attribute.value; + switch (value.kind) { + case 0x7fffffff: + attribute.data = value.string_value; + attribute.type = 'string'; + break; + case 0x0032: + attribute.data = value.int32_value; + break; + case 0x0108: + attribute.data = value.int32_value; + attribute.type = 'int8'; + break; + case 0x0132: + attribute.data = value.int32_value; + attribute.type = 'int32'; + break; + case 0x0232: + attribute.data = value.float32_value; + attribute.type = 'float32'; + break; + case 0x0508: + attribute.data = value.int32_value !== 0; + attribute.type = 'boolean'; + break; + default: + throw new dlc.Error(`Unknown attribute value kind '${value.kind}'.`); + } + break; + } + case 1: { + const tensor = attribute.tensor; + updateTensor(tensor); + attribute.type = 'tensor'; + attribute.data = tensor; + break; + } + default: { + throw new dlc.Error(`Unknown attribute kind '${attribute.kind}'.`); + } + } + } + } + for (const tensor of graph.tensors) { + updateTensor(tensor); + } + } + return model.graphs; + } + + static _params3(stream, signature, graph) { + let params = null; + try { + const buffer = new Uint8Array(signature === 'NETP' ? stream.peek() : stream.peek().subarray(8)); + const reader = flatbuffers.BinaryReader.open(buffer); + params = dlc.schema.v3.ModelParameters.decode(reader, reader.root); + } catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new dlc.Error(`File format is not dlc.v3.NETP (${message.replace(/\.$/, '')}).`); + } + if (!graph) { + graph = new dlc.schema.v3.ModelParameters(); + graph.nodes = new Array(params.nodes.length); + graph.tensors = []; + for (let i = 0; i < graph.nodes.length; i++) { + const node = new dlc.schema.v3.Node(); + node.type = 'Weights'; + node.name = params.nodes[i].name; + node.inputs = []; + node.outputs = []; + node.attributes = []; + graph.nodes[i] = node; + } + } + const dataType = (value) => { + switch (value) { + case null: return '?'; + case 6: return 'uint8'; + case 9: return 'float32'; + default: + throw new dlc.Error(`Unsupported data type '${JSON.stringify(value)}'.`); + } + }; + const weights = new Map(params.nodes.map((node) => [ node.name, node.weights ])); + for (const node of graph.nodes) { + if (weights.has(node.name)) { + const tensors = weights.get(node.name); + for (const tensor of tensors) { + tensor.data.dtype = dataType(tensor.data.dtype); + } + node.weights = tensors; + } + } + return graph; + } + + static _params4(stream, graphs, signature) { + let buffer = stream.peek().subarray(8); + let buffers = null; + if (signature === '4.NR64') { + try { + const reader = flatbuffers.BinaryReader.open(buffer); + const nr64 = dlc.schema.v4.ModelParameters64.decode(reader, reader.root); + buffers = nr64.buffers; + buffer = nr64.params; + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new dlc.Error(`File format is not dlc.v4.NR64 (${message.replace(/\.$/, '')}).`); + } + } + let params = null; + try { + const reader = flatbuffers.BinaryReader.open(buffer); + params = dlc.schema.v4.ModelParameters.decode(reader, reader.root); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new dlc.Error(`File format is not dlc.v4.NETP (${message.replace(/\.$/, '')}).`); + } + if (graphs.length === 0) { + throw new dlc.Error('Model definition not available.'); + } + const weights = new Map(params.graphs.map((graph) => [ graph.name, graph ])); + for (const graph of graphs) { + const params = weights.get(graph.name); + const tensors = new Map(params.tensors.map((tensor) => [ tensor.name, tensor ])); + let index = 0; + graph.tensors.sort((a, b) => a.name.localeCompare(b.name)); + for (const tensor of graph.tensors) { + if (tensor.location === 4) { + tensor.data = buffers ? buffers[index++].bytes : tensors.get(tensor.name).bytes; + } + } + for (let i = 0; i < graph.nodes.length; i++) { + const node = graph.nodes[i]; + const tensors = new Map(params.nodes[i].tensors.map((tensor) => [ tensor.name, tensor ])); + for (const attribute of node.attributes) { + const tensor = attribute.tensor; + if (tensor) { + tensor.data = buffers ? 
buffers[index++].bytes : tensors.get(tensor.name).bytes; + } + } + } + } + } + + async _fetch(name) { + try { + const context = await this._context.fetch(name); + return context.stream; + } catch (error) { + return null; + } + } + + static _signature(stream) { + if (stream) { + const buffer = stream.peek(Math.min(stream.length, 16)); + const match = (signature) => buffer.length >= signature.length && signature.every((value, index) => value === buffer[index]); + if (match([ 0xD5, 0x0A, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00 ]) && buffer.length >= 16) { + const reader = flatbuffers.BinaryReader.open(buffer.slice(8)); + return `4.${reader.identifier}`; + } + if (match([ 0xD5, 0x0A, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00 ]) && buffer.length >= 16) { + const reader = flatbuffers.BinaryReader.open(buffer.slice(8)); + return `3.${reader.identifier}`; + } + if (match([ 0xD5, 0x0A, 0x02, 0x00 ])) { + return '2'; + } + if (buffer.length >= 8) { + const reader = flatbuffers.BinaryReader.open(buffer); + return reader.identifier; + } + } + return ''; + } +}; + +dlc.Utility = class { + + static enum(version, name, value) { + switch (version) { + case 3: version = 'v3'; break; + case 4: version = 'v4'; break; + default: version = ''; + } + const schema = dlc.schema[version]; + if (schema && name) { + const type = schema[name]; + if (type) { + dlc.Utility[version] = dlc.Utility[version] || new Map(); + const enums = dlc.Utility[version]; + if (!enums.has(name)) { + const entries = new Map(Object.entries(type).map(([key, value]) => [ value, key ])); + enums.set(name, entries); + } + const values = enums.get(name); + if (values.has(value)) { + return values.get(value); + } + } + } + return value; + } +}; + +dlc.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading DLC model.'; + } +}; + +export const ModelFactory = dlc.ModelFactory; + diff --git a/dnn-metadata.json b/dnn-metadata.json new file mode 100644 index 00000000000..2e0f4b2a434 --- /dev/null +++ b/dnn-metadata.json @@ -0,0 +1,109 @@ +[ + { + "name": "batchnorm", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "scale" }, + { "name": "bias" } + ] + }, + { + "name": "concat", + "category": "Tensor", + "inputs": [ + { "name": "input", "option": "variadic" } + ] + }, + { + "name": "const_v2", + "category": "Constant" + }, + { + "name": "conv", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "deconv", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "depthdeconv", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "eltwise", + "inputs": [ + { "name": "input", "option": "variadic" } + ] + }, + { + "name": "linear", + "category": "Activation", + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "pad", + "category": "Shape" + }, + { + "name": "pool", + "category": "Pool", + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "prelu", + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "slope" } + ] + }, + { + "name": "relu", + "category": "Activation", + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "relu6", + "category": "Activation", + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "sigmoid", + "category": "Activation", + "inputs": [ + { "name": "input" } + ] + }, + { + "name": 
"softmax", + "category": "Activation", + "inputs": [ + { "name": "input" } + ] + } +] \ No newline at end of file diff --git a/dnn-proto.js b/dnn-proto.js new file mode 100644 index 00000000000..d8a61626ecb --- /dev/null +++ b/dnn-proto.js @@ -0,0 +1,369 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('dnn'); + +$root.dnn = {}; + +$root.dnn.Model = class Model { + + constructor() { + this.input_shape = []; + this.input_name = []; + this.node = []; + this.input = []; + this.output = []; + } + + static decode(reader, length) { + const message = new $root.dnn.Model(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.version = reader.int32(); + break; + case 4: + message.input_shape = reader.array(message.input_shape, () => reader.int32(), tag); + break; + case 7: + message.input_name.push(reader.string()); + break; + case 10: + message.node.push($root.dnn.Node.decode(reader, reader.uint32())); + break; + case 12: + message.input.push($root.dnn.Parameter.decode(reader, reader.uint32())); + break; + case 13: + message.output.push($root.dnn.Parameter.decode(reader, reader.uint32())); + break; + case 14: + message.a014 = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.dnn.Model.prototype.name = ""; +$root.dnn.Model.prototype.version = 0; +$root.dnn.Model.prototype.a014 = 0; + +$root.dnn.Parameter = class Parameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.dnn.Parameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.shape = $root.dnn.Shape.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.dnn.Parameter.prototype.name = ""; +$root.dnn.Parameter.prototype.shape = null; + +$root.dnn.Shape = class Shape { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.dnn.Shape(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dim0 = reader.int32(); + break; + case 2: + message.dim1 = reader.int32(); + break; + case 3: + message.dim2 = reader.int32(); + break; + case 4: + message.dim3 = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.dnn.Shape.prototype.dim0 = 0; +$root.dnn.Shape.prototype.dim1 = 0; +$root.dnn.Shape.prototype.dim2 = 0; +$root.dnn.Shape.prototype.dim3 = 0; + +$root.dnn.Node = class Node { + + constructor() { + this.input = []; + this.output = []; + } + + static decode(reader, length) { + const message = new $root.dnn.Node(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.layer = $root.dnn.Layer.decode(reader, reader.uint32()); + break; + case 2: + message.input.push(reader.string()); + break; + case 3: + message.output.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.dnn.Node.prototype.layer = null; + +$root.dnn.Layer = class Layer { + + constructor() { + this.weight = []; + } + + static decode(reader, length) { + const message = new $root.dnn.Layer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.filters = reader.int32(); + break; + case 7: + message.a007 = reader.int32(); + break; + case 8: + message.a008 = reader.int32(); + break; + case 9: + message.groups = reader.int32(); + break; + case 10: + message.a010 = reader.int32(); + break; + case 11: + message.a011 = reader.int32(); + break; + case 14: + message.slope = reader.float(); + break; + case 15: + message.intercept = reader.float(); + break; + case 50: + message.weight.push($root.dnn.Tensor.decode(reader, reader.uint32())); + break; + case 72: + message.operation = reader.int32(); + break; + case 65: + message.axis = reader.int32(); + break; + case 77: + message.a077 = reader.int32(); + break; + case 79: + message.scale = reader.float(); + break; + case 80: + message.pad_1 = reader.int32(); + break; + case 81: + message.pad_2 = reader.int32(); + break; + case 82: + message.pad_3 = reader.int32(); + break; + case 83: + message.pad_4 = reader.int32(); + break; + case 84: + message.pad_5 = reader.int32(); + break; + case 85: + message.a085 = reader.int32(); + break; + case 90: + message.a090 = reader.int32(); + break; + case 101: + message.is_quantized = reader.bool(); + break; + case 104: + message.quantization = $root.dnn.Buffer.decode(reader, reader.uint32()); + break; + case 109: + message.stride_w = reader.int32(); + break; + case 110: + message.stride_h = reader.int32(); + break; + case 111: + message.kernel_w = reader.int32(); + break; + case 112: + message.kernel_h = reader.int32(); + break; + case 115: + message.a115 = reader.int32(); + break; + case 116: + message.a116 = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.dnn.Layer.prototype.name = ""; +$root.dnn.Layer.prototype.type = ""; +$root.dnn.Layer.prototype.filters = 0; +$root.dnn.Layer.prototype.a007 = 0; +$root.dnn.Layer.prototype.a008 = 0; +$root.dnn.Layer.prototype.groups = 0; +$root.dnn.Layer.prototype.a010 = 0; +$root.dnn.Layer.prototype.a011 = 0; +$root.dnn.Layer.prototype.slope = 0; +$root.dnn.Layer.prototype.intercept = 0; +$root.dnn.Layer.prototype.operation = 0; +$root.dnn.Layer.prototype.axis = 0; +$root.dnn.Layer.prototype.a077 = 0; +$root.dnn.Layer.prototype.scale = 0; +$root.dnn.Layer.prototype.pad_1 = 0; +$root.dnn.Layer.prototype.pad_2 = 0; +$root.dnn.Layer.prototype.pad_3 = 0; +$root.dnn.Layer.prototype.pad_4 = 0; +$root.dnn.Layer.prototype.pad_5 = 0; +$root.dnn.Layer.prototype.a085 = 0; +$root.dnn.Layer.prototype.a090 = 0; +$root.dnn.Layer.prototype.is_quantized = false; +$root.dnn.Layer.prototype.quantization = null; +$root.dnn.Layer.prototype.stride_w = 0; 
+$root.dnn.Layer.prototype.stride_h = 0; +$root.dnn.Layer.prototype.kernel_w = 0; +$root.dnn.Layer.prototype.kernel_h = 0; +$root.dnn.Layer.prototype.a115 = 0; +$root.dnn.Layer.prototype.a116 = 0; + +$root.dnn.Buffer = class Buffer { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.dnn.Buffer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 5: + message.data = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.dnn.Buffer.prototype.data = new Uint8Array([]); + +$root.dnn.Tensor = class Tensor { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.dnn.Tensor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dim0 = reader.int32(); + break; + case 2: + message.dim1 = reader.int32(); + break; + case 3: + message.dim2 = reader.int32(); + break; + case 4: + message.dim3 = reader.int32(); + break; + case 5: + message.data = reader.bytes(); + break; + case 6: + message.quantized_data = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } +}; + +$root.dnn.Tensor.prototype.dim0 = 0; +$root.dnn.Tensor.prototype.dim1 = 0; +$root.dnn.Tensor.prototype.dim2 = 0; +$root.dnn.Tensor.prototype.dim3 = 0; +$root.dnn.Tensor.prototype.data = new Uint8Array([]); +$root.dnn.Tensor.prototype.quantized_data = new Uint8Array([]); diff --git a/dnn.js b/dnn.js new file mode 100644 index 00000000000..907435ea3df --- /dev/null +++ b/dnn.js @@ -0,0 +1,261 @@ + +// Experimental + +import * as protobuf from './protobuf.js'; + +const dnn = {}; + +dnn.ModelFactory = class { + + match(context) { + const tags = context.tags('pb'); + if (tags.get(4) == 0 && tags.get(10) == 2) { + return { name: 'dnn' }; + } + return undefined; + } + + async open(context) { + await context.require('./dnn-proto'); + let model = null; + try { + dnn.proto = protobuf.get('dnn').dnn; + const stream = context.stream; + const reader = protobuf.BinaryReader.open(stream); + model = dnn.proto.Model.decode(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new dnn.Error(`File format is not dnn.Graph (${message.replace(/\.$/, '')}).`); + } + const metadata = await context.metadata('dnn-metadata.json'); + return new dnn.Model(metadata, model); + } +}; + +dnn.Model = class { + + constructor(metadata, model) { + this.name = model.name || ''; + this.format = `SnapML${model.version ? ` v${model.version}` : ''}`; + this.graphs = [ new dnn.Graph(metadata, model) ]; + } +}; + +dnn.Graph = class { + + constructor(metadata, model) { + this.inputs = []; + this.outputs = []; + this.nodes = []; + const scope = {}; + let index = 0; + for (const node of model.node) { + node.input = node.input.map((input) => scope[input] ? scope[input] : input); + node.output = node.output.map((output) => { + scope[output] = scope[output] ? 
`${output}\n${index}` : output; // custom argument id + return scope[output]; + }); + index++; + } + const values = new Map(); + values.map = (name, type) => { + if (!values.has(name)) { + values.set(name, new dnn.Value(name, type)); + } + return values.get(name); + }; + for (const input of model.input) { + const shape = input.shape; + const type = new dnn.TensorType('float32', new dnn.TensorShape([ shape.dim0, shape.dim1, shape.dim2, shape.dim3 ])); + const argument = new dnn.Argument(input.name, [ values.map(input.name, type) ]); + this.inputs.push(argument); + } + for (const output of model.output) { + const shape = output.shape; + const type = new dnn.TensorType('float32', new dnn.TensorShape([ shape.dim0, shape.dim1, shape.dim2, shape.dim3 ])); + const argument = new dnn.Argument(output.name, [ values.map(output.name, type) ]); + this.outputs.push(argument); + } + if (this.inputs.length === 0 && model.input_name && model.input_shape && model.input_shape.length === model.input_name.length * 4) { + for (let i = 0; i < model.input_name.length; i++) { + const name = model.input_name[i]; + const shape = model.input_shape.slice(i * 4, (i * 4 + 4)); + const type = new dnn.TensorType('float32', new dnn.TensorShape([ shape[1], shape[3], shape[2], shape[0] ])); + const argument = new dnn.Argument(name, [ values.map(name, type) ]); + this.inputs.push(argument); + } + } + if (this.inputs.length === 0 && model.input_shape && model.input_shape.length === 4 && model.node.length > 0 && model.node[0].input.length > 0) { + /* eslint-disable prefer-destructuring */ + const name = model.node[0].input[0]; + /* eslint-enable prefer-destructuring */ + const shape = model.input_shape; + const type = new dnn.TensorType('float32', new dnn.TensorShape([ shape[1], shape[3], shape[2], shape[0] ])); + const argument = new dnn.Argument(name, [ values.map(name, type) ]); + this.inputs.push(argument); + } + + for (const node of model.node) { + this.nodes.push(new dnn.Node(metadata, node, values)); + } + } +}; + +dnn.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +dnn.Value = class { + + constructor(name, type, initializer, quantization) { + if (typeof name !== 'string') { + throw new dnn.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type || null; + this.initializer = initializer || null; + if (quantization) { + this.quantization = { + type: 'lookup', + value: new Map(quantization.map((value, index) => [ index, value ])) + }; + } + } +}; + +dnn.Node = class { + + constructor(metadata, node, values) { + const layer = node.layer; + this.name = layer.name; + const type = layer.type; + this.type = metadata.type(type) || { name: type }; + this.attributes = []; + this.inputs = []; + this.outputs = []; + const inputs = node.input.map((input) => values.map(input)); + for (const weight of layer.weight) { + let quantization = null; + if (layer.is_quantized && weight === layer.weight[0] && layer.quantization && layer.quantization.data) { + const data = layer.quantization.data; + quantization = new Array(data.length >> 2); + const view = new DataView(data.buffer, data.byteOffset, data.byteLength); + for (let i = 0; i < quantization.length; i++) { + quantization[i] = view.getFloat32(i << 2, true); + } + } + const initializer = new dnn.Tensor(weight, quantization); + inputs.push(new dnn.Value('', initializer.type, initializer, quantization)); + } + const outputs = node.output.map((output) => values.map(output)); + if (inputs 
&& inputs.length > 0) { + let inputIndex = 0; + if (this.type && this.type.inputs) { + for (const inputSchema of this.type.inputs) { + if (inputIndex < inputs.length || inputSchema.option != 'optional') { + const inputCount = (inputSchema.option == 'variadic') ? (node.input.length - inputIndex) : 1; + const inputArguments = inputs.slice(inputIndex, inputIndex + inputCount); + this.inputs.push(new dnn.Argument(inputSchema.name, inputArguments)); + inputIndex += inputCount; + } + } + } + this.inputs.push(...inputs.slice(inputIndex).map((input, index) => { + const inputName = ((inputIndex + index) == 0) ? 'input' : (inputIndex + index).toString(); + return new dnn.Argument(inputName, [ input ]); + })); + } + if (outputs.length > 0) { + this.outputs = outputs.map((output, index) => { + const inputName = (index == 0) ? 'output' : index.toString(); + return new dnn.Argument(inputName, [ output ]); + }); + } + for (const key of Object.keys(layer)) { + switch (key) { + case 'name': + case 'type': + case 'weight': + case 'is_quantized': + case 'quantization': + break; + default: { + const attribute = new dnn.Attribute(metadata.attribute(type, key), key, layer[key]); + this.attributes.push(attribute); + break; + } + } + } + } +}; + +dnn.Attribute = class { + + constructor(metadata, name, value) { + this.name = name; + this.value = value; + } +}; + +dnn.Tensor = class { + + constructor(weight, quantization) { + const shape = new dnn.TensorShape([ weight.dim0, weight.dim1, weight.dim2, weight.dim3 ]); + this.values = quantization ? weight.quantized_data : weight.data; + const size = shape.dimensions.reduce((a, b) => a * b, 1); + const itemSize = Math.floor(this.values.length / size); + const remainder = this.values.length - (itemSize * size); + if (remainder < 0 || remainder > itemSize) { + throw new dnn.Error('Invalid tensor data size.'); + } + let dataType = '?'; + switch (itemSize) { + case 1: dataType = 'int8'; break; + case 2: dataType = 'float16'; break; + case 4: dataType = 'float32'; break; + default: dataType = '?'; break; + } + this.type = new dnn.TensorType(dataType, shape); + } +}; + +dnn.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType; + this.shape = shape; + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +dnn.TensorShape = class { + + constructor(shape) { + this.dimensions = shape; + } + + toString() { + if (!this.dimensions || this.dimensions.length == 0) { + return ''; + } + return `[${this.dimensions.join(',')}]`; + } +}; + +dnn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading SnapML model.'; + } +}; + +export const ModelFactory = dnn.ModelFactory; + diff --git a/favicon.ico b/favicon.ico new file mode 100644 index 00000000000..c30ac5a9b11 Binary files /dev/null and b/favicon.ico differ diff --git a/flatbuffers.js b/flatbuffers.js new file mode 100644 index 00000000000..ea0316f0cf8 --- /dev/null +++ b/flatbuffers.js @@ -0,0 +1,385 @@ + +const flatbuffers = {}; + +flatbuffers.get = (name) => { + flatbuffers._roots = flatbuffers._roots || new Map(); + const roots = flatbuffers._roots; + if (!roots.has(name)) { + roots.set(name, {}); + } + return roots.get(name); +}; + +flatbuffers.BinaryReader = class { + + static open(data) { + return data ? new flatbuffers.BinaryReader(data) : null; + } + + constructor(data) { + const buffer = data instanceof Uint8Array ? 
data : data.peek(); + this._buffer = buffer; + this._position = 0; + this._dataView = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + } + + get root() { + return this.int32(this._position) + this._position; + } + + get identifier() { + if (this._buffer.length >= 8) { + const buffer = this._buffer.slice(4, 8); + if (buffer.every((c) => c >= 32 && c <= 128)) { + return String.fromCharCode(...buffer); + } + } + return ''; + } + + bool(offset) { + return !!this.int8(offset); + } + + bool_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? this.bool(position + offset) : defaultValue; + } + + int8(offset) { + return this.uint8(offset) << 24 >> 24; + } + + int8_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? this.int8(position + offset) : defaultValue; + } + + uint8(offset) { + return this._buffer[offset]; + } + + uint8_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? this.uint8(position + offset) : defaultValue; + } + + int16(offset) { + return this._dataView.getInt16(offset, true); + } + + int16_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? this.int16(position + offset) : defaultValue; + } + + uint16(offset) { + return this._dataView.getUint16(offset, true); + } + + uint16_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? this.uint16(position + offset) : defaultValue; + } + + int32(offset) { + return this._dataView.getInt32(offset, true); + } + + int32_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? this.int32(position + offset) : defaultValue; + } + + uint32(offset) { + return this._dataView.getUint32(offset, true); + } + + uint32_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? this.uint32(position + offset) : defaultValue; + } + + int64(offset) { + return this._dataView.getInt64(offset, true); + } + + int64_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? this.int64(position + offset) : defaultValue; + } + + uint64(offset) { + return this._dataView.getUint64(offset, true); + } + + uint64_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? this.uint64(position + offset) : defaultValue; + } + + float32(offset) { + return this._dataView.getFloat32(offset, true); + } + + float32_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? this.float32(position + offset) : defaultValue; + } + + float64(offset) { + return this._dataView.getFloat64(offset, true); + } + + float64_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? 
this.float64(position + offset) : defaultValue; + } + + string(offset, encoding) { + offset += this.int32(offset); + const length = this.int32(offset); + let result = ''; + let i = 0; + offset += 4; + if (encoding === 1) { + return this._buffer.subarray(offset, offset + length); + } + while (i < length) { + let codePoint; + // Decode UTF-8 + const a = this.uint8(offset + i++); + if (a < 0xC0) { + codePoint = a; + } else { + const b = this.uint8(offset + i++); + if (a < 0xE0) { + codePoint = ((a & 0x1F) << 6) | (b & 0x3F); + } else { + const c = this.uint8(offset + i++); + if (a < 0xF0) { + codePoint = ((a & 0x0F) << 12) | ((b & 0x3F) << 6) | (c & 0x3F); + } else { + const d = this.uint8(offset + i++); + codePoint = ((a & 0x07) << 18) | ((b & 0x3F) << 12) | ((c & 0x3F) << 6) | (d & 0x3F); + } + } + } + // Encode UTF-16 + if (codePoint < 0x10000) { + result += String.fromCharCode(codePoint); + } else { + codePoint -= 0x10000; + result += String.fromCharCode((codePoint >> 10) + 0xD800, (codePoint & ((1 << 10) - 1)) + 0xDC00); + } + } + + return result; + } + + string_(position, offset, defaultValue) { + offset = this._offset(position, offset); + return offset ? this.string(position + offset) : defaultValue; + } + + bools_(position, offset) { + offset = this._offset(position, offset); + if (offset) { + const length = this._length(position + offset); + offset = this._vector(position + offset); + const array = new Array(length); + for (let i = 0; i < length; i++) { + // _vector(...) already skips the 4-byte length prefix + array[i] = this.uint8(offset + i) ? true : false; + } + return array; + } + return []; + } + + int64s_(position, offset) { + offset = this._offset(position, offset); + if (offset) { + const length = this._length(position + offset); + offset = this._vector(position + offset); + const array = new Array(length); + for (let i = 0; i < length; i++) { + array[i] = this.int64(offset + (i << 3)); + } + return array; + } + return []; + } + + uint64s_(position, offset) { + offset = this._offset(position, offset); + if (offset) { + const length = this._length(position + offset); + offset = this._vector(position + offset); + const array = new Array(length); + for (let i = 0; i < length; i++) { + array[i] = this.uint64(offset + (i << 3)); + } + return array; + } + return []; + } + + strings_(position, offset) { + offset = this._offset(position, offset); + if (offset) { + const length = this._length(position + offset); + offset = this._vector(position + offset); + const array = new Array(length); + for (let i = 0; i < length; i++) { + array[i] = this.string(offset + i * 4); + } + return array; + } + return []; + } + + struct(position, offset, decode) { + offset = this._offset(position, offset); + return offset ? decode(this, position + offset) : null; + } + + table(position, offset, decode) { + offset = this._offset(position, offset); + return offset ? decode(this, this._indirect(position + offset)) : null; + } + + union(position, offset, decode) { + const type_offset = this._offset(position, offset); + const type = type_offset ? this.uint8(position + type_offset) : 0; + offset = this._offset(position, offset + 2); + return offset ? decode(this, this._union(position + offset), type) : null; + } + + typedArray(position, offset, type) { + offset = this._offset(position, offset); + return offset ? 
new type(this._buffer.buffer, this._buffer.byteOffset + this._vector(position + offset), this._length(position + offset)) : new type(0); + } + + unionArray(/* position, offset, decode */) { + throw new flatbuffers.Error('Not implemented.'); + } + + structArray(position, offset, decode) { + offset = this._offset(position, offset); + const length = offset ? this._length(position + offset) : 0; + const list = new Array(length); + for (let i = 0; i < length; i++) { + list[i] = decode(this, this._vector(position + offset) + i * 8); + } + return list; + } + + tableArray(position, offset, decode) { + offset = this._offset(position, offset); + const length = offset ? this._length(position + offset) : 0; + const list = new Array(length); + for (let i = 0; i < length; i++) { + list[i] = decode(this, this._indirect(this._vector(position + offset) + i * 4)); + } + return list; + } + + _offset(bb_pos, vtableOffset) { + const vtable = bb_pos - this.int32(bb_pos); + return vtableOffset < this.int16(vtable) ? this.int16(vtable + vtableOffset) : 0; + } + + _indirect(offset) { + return offset + this.int32(offset); + } + + _vector(offset) { + return offset + this.int32(offset) + 4; + } + + _length(offset) { + return this.int32(offset + this.int32(offset)); + } + + _union(offset) { + return offset + this.int32(offset); + } +}; + +flatbuffers.TextReader = class { + + static open(obj) { + return new flatbuffers.TextReader(obj); + } + + constructor(obj) { + this._root = obj; + } + + get root() { + return this._root; + } + + value(obj, defaultValue) { + return obj !== undefined ? obj : defaultValue; + } + + object(obj, decode) { + return obj !== undefined ? decode(this, obj) : obj; + } + + array(obj) { + if (Array.isArray(obj)) { + const target = new Array(obj.length); + for (let i = 0; i < obj.length; i++) { + target[i] = obj[i]; + } + return target; + } + if (!obj) { + return []; + } + throw new flatbuffers.Error('Invalid value array.'); + } + + typedArray(obj, type) { + if (Array.isArray(obj)) { + const target = new type(obj.length); + for (let i = 0; i < obj.length; i++) { + target[i] = obj[i]; + } + return target; + } + if (!obj) { + return new type(0); + } + throw new flatbuffers.Error('Invalid typed array.'); + } + + objectArray(obj, decode) { + if (Array.isArray(obj)) { + const target = new Array(obj.length); + for (let i = 0; i < obj.length; i++) { + target[i] = decode(this, obj[i]); + } + return target; + } + if (!obj) { + return []; + } + throw new flatbuffers.Error('Invalid object array.'); + } +}; + +flatbuffers.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'FlatBuffers Error'; + this.message = message; + } +}; + +export const BinaryReader = flatbuffers.BinaryReader; +export const TextReader = flatbuffers.TextReader; +export const get = flatbuffers.get; diff --git a/flax.js b/flax.js new file mode 100644 index 00000000000..5ce83e2d5f0 --- /dev/null +++ b/flax.js @@ -0,0 +1,218 @@ + +// Experimental + +import * as python from './python.js'; + +const flax = {}; + +flax.ModelFactory = class { + + match(context) { + const stream = context.stream; + if (stream.length > 4) { + const buffer = stream.peek(1); + if (buffer[0] === 0xDE || buffer[0] === 0xDF || ((buffer[0] & 0x80) === 0x80)) { + return 'msgpack.map'; + } + } + return null; + } + + async open(context) { + const stream = context.stream; + const packed = stream.peek(); + const execution = new python.Execution(); + // https://github.com/google/flax/blob/main/flax/serialization.py + const ext_hook = (code, 
data) => { + switch (code) { + case 1: { // _MsgpackExtType.ndarray + const tuple = execution.invoke('msgpack.unpackb', [ data ]); + const dtype = execution.invoke('numpy.dtype', [ tuple[1] ]); + dtype.byteorder = '<'; + return execution.invoke('numpy.ndarray', [ tuple[0], dtype, tuple[2] ]); + } + default: { + throw new flax.Error(`Unsupported MessagePack extension '${code}'.`); + } + } + }; + const obj = execution.invoke('msgpack.unpackb', [ packed, ext_hook ]); + return new flax.Model(obj); + } +}; + +flax.Model = class { + + constructor(obj) { + this.format = 'Flax'; + this.graphs = [ new flax.Graph(obj) ]; + } +}; + +flax.Graph = class { + + constructor(obj) { + this.inputs = []; + this.outputs = []; + const layers = new Map(); + const layer = (path) => { + const name = path.join('.'); + if (!layers.has(name)) { + layers.set(name, {}); + } + return layers.get(name); + }; + const flatten = (path, obj) => { + for (const [name, value] of Object.entries(obj)) { + if (flax.Utility.isTensor(value)) { + const obj = layer(path); + obj[name] = value; + } else if (Array.isArray(value)) { + const obj = layer(path); + obj[name] = value; + } else if (Object(value) === value) { + flatten(path.concat(name), value); + } else { + const obj = layer(path); + obj[name] = value; + } + } + }; + if (Array.isArray(obj)) { + layer([]).value = obj; + } else { + flatten([], obj); + } + this.nodes = Array.from(layers).map(([name, value]) => new flax.Node(name, value)); + } +}; + +flax.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +flax.Value = class { + + constructor(name, initializer) { + if (typeof name !== 'string') { + throw new flax.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = initializer ? initializer.type : null; + this.initializer = initializer || null; + } +}; + +flax.Node = class { + + constructor(name, layer) { + this.name = name; + this.type = { name: 'Module' }; + this.attributes = []; + this.inputs = []; + this.outputs = []; + for (const [name, value] of Object.entries(layer)) { + if (flax.Utility.isTensor(value)) { + const tensor = new flax.Tensor(value); + const argument = new flax.Argument(name, [ new flax.Value('', tensor) ]); + this.inputs.push(argument); + } else if (Array.isArray(value)) { + const attribute = new flax.Attribute(name, value); + this.attributes.push(attribute); + } else { + const attribute = new flax.Attribute(name, value); + this.attributes.push(attribute); + } + } + } +}; + +flax.Attribute = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +flax.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType || '?'; + this.shape = shape; + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +flax.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + return (Array.isArray(this.dimensions) && this.dimensions.length > 0) ? + `[${this.dimensions.join(',')}]` : ''; + } +}; + +flax.Tensor = class { + + constructor(array) { + this.type = new flax.TensorType(array.dtype.__name__, new flax.TensorShape(array.shape)); + const dataType = this.type.dataType; + this.encoding = dataType === 'string' || dataType === 'object' ? 
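+        // numpy-style encoding flag: '|' means byte order is irrelevant (strings,
+        // objects); otherwise the dtype's own byte order is reported, which the
+        // msgpack ext hook above has already forced to '<' (little-endian).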
'|' : array.dtype.byteorder; + this._data = array.tobytes(); + this._itemsize = array.dtype.itemsize; + } + + get values() { + switch (this.type.dataType) { + case 'string': { + if (this._data instanceof Uint8Array) { + const data = this._data; + const decoder = new TextDecoder('utf-8'); + const size = this.type.shape.dimensions.reduce((a, b) => a * b, 1); + this._data = new Array(size); + let offset = 0; + for (let i = 0; i < size; i++) { + const buffer = data.subarray(offset, offset + this._itemsize); + const index = buffer.indexOf(0); + this._data[i] = decoder.decode(index >= 0 ? buffer.subarray(0, index) : buffer); + offset += this._itemsize; + } + } + return this._data; + } + default: + return this._data; + } + } +}; + +flax.Utility = class { + + static isTensor(obj) { + return obj && obj.__class__ && obj.__class__.__module__ === 'numpy' && obj.__class__.__name__ === 'ndarray'; + } +}; + +flax.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Flax model.'; + } +}; + +export const ModelFactory = flax.ModelFactory; + + + diff --git a/flexbuffers.js b/flexbuffers.js new file mode 100644 index 00000000000..7fcdc5f8f51 --- /dev/null +++ b/flexbuffers.js @@ -0,0 +1,196 @@ + +const flexbuffers = {}; + +flexbuffers.BinaryReader = class { + + static open(buffer) { + const length = buffer.length; + if (length >= 3) { + const byteWidth = buffer[length - 1]; + if (byteWidth <= 8) { + const packedType = buffer[length - 2]; + return new flexbuffers.BinaryReader(buffer, length - 2 - byteWidth, byteWidth, 1 << (packedType & 3), packedType >> 2); + } + } + return null; + } + + constructor(buffer, offset, parentWidth, byteWidth, type) { + this._buffer = buffer; + this._length = buffer.length; + this._view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._utf8Decoder = new TextDecoder('utf-8'); + this._root = new flexbuffers.Reference(this, offset, parentWidth, byteWidth, type); + } + + read() { + return this._root.read(); + } + + get length() { + return this._length; + } + + int(offset, size) { + switch (size) { + case 1: return this._view.getInt8(offset); + case 2: return this._view.getInt16(offset, true); + case 4: return this._view.getInt32(offset, true); + case 8: return this._view.getInt64(offset, true); + default: throw new flexbuffers.Error(`Invalid int size '${size}'.`); + } + } + + uint(offset, size) { + switch (size) { + case 1: return this._view.getUint8(offset); + case 2: return this._view.getUint16(offset, true); + case 4: return this._view.getUint32(offset, true); + case 8: return this._view.getUint64(offset, true); + default: throw new flexbuffers.Error(`Invalid uint size '${size}'.`); + } + } + + float(offset, size) { + switch (size) { + case 4: return this._view.getFloat32(offset, true); + case 8: return this._view.getFloat64(offset, true); + default: throw new flexbuffers.Error(`Invalid float size '${size}'.`); + } + } + + string(offset, size) { + let end = size === undefined ? this._buffer.indexOf(0, offset) : offset + size; + end = end === -1 ? 
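+        // FlexBuffers keys are null-terminated, so no size is passed for them;
+        // strings carry an explicit length prefix. If no terminator is found the
+        // read falls back to the end of the buffer.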
this._buffer.length : end;
+        const bytes = this._buffer.subarray(offset, end);
+        return this._utf8Decoder.decode(bytes);
+    }
+
+    bytes(offset, size) {
+        return this._buffer.slice(offset, offset + size);
+    }
+};
+
+flexbuffers.Reference = class {
+
+    constructor(reader, offset, parentWidth, byteWidth, type) {
+        this._reader = reader;
+        this._offset = offset;
+        this._parentWidth = parentWidth;
+        this._byteWidth = byteWidth;
+        this._type = type;
+    }
+
+    read() {
+        switch (this._type) {
+            case 0x00: // null
+                return null;
+            case 0x01: // int
+                return this._reader.int(this._offset, this._parentWidth);
+            case 0x02: // uint
+                return this._reader.uint(this._offset, this._parentWidth);
+            case 0x03: // float
+                return this._reader.float(this._offset, this._parentWidth);
+            case 0x04: { // key
+                return this._reader.string(this._indirect());
+            }
+            case 0x05: { // string
+                const offset = this._indirect();
+                const size = this._reader.uint(offset - this._byteWidth, this._byteWidth);
+                return this._reader.string(offset, size);
+            }
+            case 0x06: // indirect int
+                return this._reader.int(this._indirect(), this._byteWidth);
+            case 0x07: // indirect uint
+                return this._reader.uint(this._indirect(), this._byteWidth);
+            case 0x08: // indirect float
+                return this._reader.float(this._indirect(), this._byteWidth);
+            case 0x09: { // map
+                const offset = this._indirect();
+                const keysOffset = offset - (this._byteWidth * 3);
+                const keysVectorOffset = keysOffset - this._reader.uint(keysOffset, this._byteWidth);
+                const keysByteWidth = this._reader.uint(keysOffset + this._byteWidth, this._byteWidth);
+                const keys = this._typedVector(keysVectorOffset, keysByteWidth, 0x04);
+                const values = this._vector(offset, this._byteWidth);
+                const map = {};
+                for (let i = 0; i < keys.length; i++) {
+                    map[keys[i]] = values[i];
+                }
+                return map;
+            }
+            case 0x0a: { // vector
+                return this._vector(this._indirect(), this._byteWidth);
+            }
+            case 0x0b: // vector int
+            case 0x0c: // vector uint
+            case 0x0d: // vector float
+            case 0x0e: // vector key
+            case 0x0f: // vector string deprecated
+            case 0x24: { // vector bool
+                return this._typedVector(this._indirect(), this._byteWidth, this._type - 0x0b + 0x01);
+            }
+            case 0x10: // vector int2
+            case 0x11: // vector uint2
+            case 0x12: // vector float2
+            case 0x13: // vector int3
+            case 0x14: // vector uint3
+            case 0x15: // vector float3
+            case 0x16: // vector int4
+            case 0x17: // vector uint4
+            case 0x18: { // vector float4
+                const offset = this._indirect();
+                const size = (((this._type - 0x10) / 3) >> 0) + 2;
+                const type = ((this._type - 0x10) % 3) + 0x01;
+                return this._typedVector(offset, this._byteWidth, type, size);
+            }
+            case 0x19: { // blob
+                const offset = this._indirect();
+                const size = this._reader.uint(offset - this._byteWidth, this._byteWidth);
+                return this._reader.bytes(offset, size);
+            }
+            case 0x1a: { // bool
+                return this._reader.uint(this._offset, this._parentWidth) !== 0;
+            }
+            default: {
+                throw new flexbuffers.Error(`Unsupported reference type '${this._type}'.`);
+            }
+        }
+    }
+
+    _indirect() {
+        return this._offset - this._reader.uint(this._offset, this._parentWidth);
+    }
+
+    _vector(offset, byteWidth) {
+        const size = this._reader.uint(offset - byteWidth, byteWidth);
+        const packedTypeOffset = offset + (size * byteWidth);
+        const vector = new Array(size);
+        for (let i = 0; i < size; i++) {
+            const packedType = this._reader.uint(packedTypeOffset + i, 1);
+            const reference = new flexbuffers.Reference(this._reader, offset + (i * byteWidth), byteWidth, 1 << (packedType & 3),
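+            // Packed type byte: the low two bits encode the element byte width as a
+            // power of two; the remaining bits carry the FlexBuffers type code.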
packedType >> 2); + vector[i] = reference.read(); + } + return vector; + } + + _typedVector(offset, byteWidth, type, size) { + size = size === undefined ? this._reader.uint(offset - byteWidth, byteWidth) : size; + const vector = new Array(size); + for (let i = 0; i < size; i++) { + const reference = new flexbuffers.Reference(this._reader, offset + (i * byteWidth), byteWidth, 1, type); + vector[i] = reference.read(); + } + return vector; + } +}; + +flexbuffers.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'FlexBuffers Error'; + this.message = message; + } +}; + +export const BinaryReader = flexbuffers.BinaryReader; diff --git a/flux-metadata.json b/flux-metadata.json new file mode 100644 index 00000000000..32960f8ced3 --- /dev/null +++ b/flux-metadata.json @@ -0,0 +1,2 @@ +[ +] \ No newline at end of file diff --git a/flux.js b/flux.js new file mode 100644 index 00000000000..bc7a986dad7 --- /dev/null +++ b/flux.js @@ -0,0 +1,84 @@ + +// Experimental + +import * as json from './json.js'; + +const flux = {}; + +flux.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + const stream = context.stream; + if (stream && extension === 'bson') { + return 'flux.bson'; + } + return null; + } + + async open(context) { + let root = null; + try { + const stream = context.stream; + const reader = json.BinaryReader.open(stream); + root = reader.read(); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new flux.Error(`File format is not Flux BSON (${message.replace(/\.$/, '')}).`); + } + const metadata = context.metadata('flux-metadata.json'); + const backref = (obj, root) => { + if (Array.isArray(obj)) { + for (let i = 0; i < obj.length; i++) { + obj[i] = backref(obj[i], root); + } + } else if (obj === Object(obj)) { + if (obj.tag == 'backref' && obj.ref) { + if (!root._backrefs[obj.ref - 1]) { + throw new flux.Error(`Invalid backref '${obj.ref}'.`); + } + obj = root._backrefs[obj.ref - 1]; + } + for (const key of Object.keys(obj)) { + if (obj !== root || key !== '_backrefs') { + obj[key] = backref(obj[key], root); + } + } + } + return obj; + }; + const obj = backref(root, root); + const model = obj.model; + if (!model) { + throw new flux.Error('File does not contain Flux model.'); + } + return new flux.Model(metadata, model); + } +}; + +flux.Model = class { + + constructor(/* root */) { + this._format = 'Flux'; + this._graphs = []; + } + + get format() { + return this._format; + } + + get graphs() { + return this._graphs; + } +}; + +flux.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Flux Error'; + } +}; + +export const ModelFactory = flux.ModelFactory; diff --git a/gguf.js b/gguf.js new file mode 100644 index 00000000000..e4146385ca2 --- /dev/null +++ b/gguf.js @@ -0,0 +1,392 @@ + +import * as base from './base.js'; + +const gguf = {}; + +gguf.ModelFactory = class { + + match(context) { + return gguf.Reader.open(context.stream); + } + + async open(context, target) { + target.read(); + return new gguf.Model(target); + } +}; + +gguf.Model = class { + + constructor(target) { + this.format = target.format; + this.metadata = new Map(); + const layers = new Map(); + for (const [name, tensor] of target.tensors) { + const [key, param] = name.match(/^(.*)\.(.*?)$/).slice(1); + if (!layers.has(key)) { + layers.set(key, { name: key, type: 'weights', metadata: new Map(), weights: new 
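+                    // GGUF tensor names follow a '<layer path>.<parameter>' convention
+                    // (e.g. 'blk.0.attn_q.weight', illustrative); the regex above splits
+                    // on the last dot so all parameters of a layer share one weights node.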
Map() }); + } + const layer = layers.get(key); + layer.weights.set(param, tensor); + } + const metadata = new Map(); + let architecture = '?'; + for (const [name, value] of target.metadata) { + switch (name) { + case 'general.name': this.name = value; break; + case 'general.architecture': architecture = value; break; + case 'general.description': this.description = value; break; + case 'general.author': this.metadata.set('author', value); break; + case 'general.license': this.metadata.set('license', value); break; + case 'general.file_type': + case 'general.quantization_version': + break; + default: + metadata.set(name, value); + break; + } + } + const tokenizer = { type: 'tokenizer', metadata: new Map(), layers: [] }; + const model = { type: architecture, metadata: new Map(), layers: Array.from(layers.values()) }; + for (const [name, value] of metadata) { + if (name.startsWith('tokenizer.')) { + const [, param] = name.match(/^(.*)\.(.*?)$/).slice(1); + tokenizer.metadata.set(param, value); + } else if (architecture && name.startsWith(`${architecture}.`)) { + model.metadata.set(name, value); + } else { + this.metadata.set(name, value); + } + } + const graph = { layers: [ model ] }; + if (tokenizer.metadata.size > 0) { + graph.layers.push(tokenizer); + } + this.graphs = [ new gguf.Graph(graph) ]; + } +}; + +gguf.Graph = class { + + constructor(graph) { + this.name = graph.type; + this.nodes = []; + this.inputs = []; + this.outputs = []; + for (const layer of graph.layers) { + const node = new gguf.Node(layer); + this.nodes.push(node); + } + } +}; + +gguf.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +gguf.Value = class { + + constructor(name, tensor) { + this.name = name; + this.type = tensor.type; + this.quantization = tensor.quantization; + this.initializer = tensor; + } +}; + +gguf.Node = class { + + constructor(layer) { + this.type = Array.isArray(layer.layers) && layer.layers.length > 0 ? 
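+        // A layer that carries sub-layers (the architecture group assembled above)
+        // renders as a nested graph; a leaf layer gets a plain type name.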
new gguf.Graph(layer) : { name: layer.type }; + this.name = layer.name || ''; + this.inputs = []; + this.outputs = []; + this.attributes = []; + if (layer.weights) { + for (const [name, weight] of layer.weights) { + const tensor = new gguf.Tensor(weight); + const value = new gguf.Value(weight.name, tensor); + const argument = new gguf.Argument(name, [ value ]); + this.inputs.push(argument); + } + } + if (layer.metadata) { + for (const [name, value] of layer.metadata) { + const attribute = new gguf.Attribute(name, value); + this.attributes.push(attribute); + } + } + } +}; + +gguf.Attribute = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +gguf.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType; + this.shape = shape; + } + + toString() { + return (this.dataType || '?') + this.shape.toString(); + } +}; + +gguf.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } +}; + +gguf.Tensor = class { + + constructor(tensor) { + const shape = new gguf.TensorShape(tensor.ne); + this.type = new gguf.TensorType(tensor.dtype, shape); + if (tensor.type !== gguf.QuantizationType.F32 && tensor.type !== gguf.QuantizationType.F16) { + this.quantization = { + type: gguf.Utility.enum(gguf.QuantizationType, tensor.type).toLowerCase() + }; + } + if (tensor.dtype === 'float32' || tensor.dtype === 'float16' || + tensor.dtype === 'int8' || tensor.dtype === 'int16' || tensor.dtype === 'int32') { + this.encoding = '<'; + this._data = tensor.data; + } + } + + get values() { + if (this._data) { + return this._data.peek(); + } + return null; + } +}; + + +gguf.Reader = class { + + static open(stream) { + if (stream && stream.length > 4) { + const signature = String.fromCharCode.apply(null, stream.peek(4)); + if (signature === 'GGUF') { + return new gguf.Reader(stream); + } + } + return null; + } + + constructor(stream) { + this._stream = stream; + const QK_K = 256; + gguf.Reader.GGML_QUANT_SIZES = gguf.Reader.GGML_QUANT_SIZES || new Map([ + [ gguf.QuantizationType.F32, [ 1, 4, 'float32' ] ], + [ gguf.QuantizationType.F16, [ 1, 2, 'float16' ] ], + [ gguf.QuantizationType.Q4_0, [ 32, 2 + 16, '' ] ], + [ gguf.QuantizationType.Q4_1, [ 32, 2 + 2 + 16, '' ] ], + [ gguf.QuantizationType.Q5_0, [ 32, 2 + 4 + 16, '' ] ], + [ gguf.QuantizationType.Q5_1, [ 32, 2 + 2 + 4 + 16, '' ] ], + [ gguf.QuantizationType.Q8_0, [ 32, 2 + 32, ''] ], + [ gguf.QuantizationType.Q8_1, [ 32, 4 + 4 + 32, ''] ], + [ gguf.QuantizationType.Q2_K, [ 256, 2 + 2 + Math.floor(QK_K / 16) + Math.floor(QK_K / 4), '' ] ], + [ gguf.QuantizationType.Q3_K, [ 256, 2 + Math.floor(QK_K / 4) + Math.floor(QK_K / 8) + 12, '' ] ], + [ gguf.QuantizationType.Q4_K, [ 256, 2 + 2 + Math.floor(QK_K / 2) + 12, '' ] ], + [ gguf.QuantizationType.Q5_K, [ 256, 2 + 2 + Math.floor(QK_K / 2) + Math.floor(QK_K / 8) + 12, '' ] ], + [ gguf.QuantizationType.Q6_K, [ 256, 2 + Math.floor(QK_K / 2) + Math.floor(QK_K / 4) + Math.floor(QK_K / 16), '' ] ], + [ gguf.QuantizationType.Q8_K, [ 256, 4 + QK_K + Math.floor(QK_K / 8), '' ] ], + [ gguf.QuantizationType.I8, [ 1, 4, 'int8' ] ], + [ gguf.QuantizationType.I16, [ 1, 2, 'int16' ] ], + [ gguf.QuantizationType.I32, [ 1, 4, 'int32' ] ] + ]); + } + + read() { + const reader = new gguf.StreamReader(this._stream); + this.tensors = new Map(); + this.metadata = new Map(); + const context = {}; + context.header = {}; + context.header.magic = 
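+        // GGUF layout: 4-byte magic 'GGUF', uint32 version, then (for v2+) a uint64
+        // tensor count and uint64 key/value count; metadata entries and tensor
+        // descriptors follow, with tensor data aligned per 'general.alignment'.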
String.fromCharCode.apply(null, reader.read(4));
+        context.header.version = reader.uint32();
+        this.format = `GGUF v${context.header.version}`;
+        if (context.header.version >= 2) {
+            context.header.n_tensors = reader.uint64();
+            context.header.n_kv = reader.uint64();
+            for (let i = 0; i < context.header.n_kv; i++) {
+                const entry = reader.entry();
+                this.metadata.set(entry.name, entry.value);
+            }
+            const tensors = context.header.n_tensors;
+            if (tensors > 0) {
+                for (let i = 0; i < tensors; i++) {
+                    const tensor = reader.tensor();
+                    this.tensors.set(tensor.name, tensor);
+                }
+                context.alignment = this.metadata.get('general.alignment') || 32;
+                const offset_pad = reader.position % context.alignment;
+                if (offset_pad != 0) {
+                    reader.skip(context.alignment - offset_pad);
+                }
+                context.offset = reader.position;
+                if (context.offset < this._stream.length) {
+                    for (const tensor of this.tensors.values()) {
+                        reader.seek(context.offset + tensor.offset);
+                        if (!gguf.Reader.GGML_QUANT_SIZES.has(tensor.type)) {
+                            throw new gguf.Error(`Unsupported tensor quantization type '${tensor.type}'.`);
+                        }
+                        const [block_size, type_size, dtype] = gguf.Reader.GGML_QUANT_SIZES.get(tensor.type);
+                        const n_elems = tensor.ne.reduce((a, b) => a * b, 1);
+                        const n_bytes = Math.floor(n_elems * type_size / block_size);
+                        tensor.dtype = dtype || '?';
+                        tensor.data = reader.stream(n_bytes);
+                    }
+                }
+            }
+        }
+        this._stream.seek(0);
+        delete this._stream;
+    }
+};
+
+gguf.StreamReader = class extends base.StreamReader {
+
+    constructor(stream) {
+        super(stream);
+    }
+
+    string() {
+        const size = this.uint64();
+        const buffer = this.read(size);
+        return String.fromCharCode.apply(null, buffer);
+    }
+
+    value(type) {
+        switch (type) {
+            case gguf.Type.UINT32: {
+                return this.uint32();
+            }
+            case gguf.Type.INT32: {
+                return this.int32();
+            }
+            case gguf.Type.FLOAT32: {
+                return this.float32();
+            }
+            case gguf.Type.BOOL: {
+                return this.byte() !== 0;
+            }
+            case gguf.Type.STRING: {
+                return this.string();
+            }
+            case gguf.Type.ARRAY: {
+                const type = this.uint32();
+                const size = this.uint64();
+                const value = new Array(size);
+                for (let i = 0; i < size; i++) {
+                    value[i] = this.value(type);
+                }
+                return value;
+            }
+            default: {
+                throw new gguf.Error(`Unsupported GGUF type '${type}'.`);
+            }
+        }
+    }
+
+    entry() {
+        const name = this.string();
+        const type = this.uint32();
+        const value = this.value(type);
+        return { name: name, value: value, type: type };
+    }
+
+    tensor() {
+        const tensor = {};
+        tensor.name = this.string();
+        const n_dims = this.uint32();
+        tensor.ne = new Array(n_dims);
+        for (let i = 0; i < n_dims; i++) {
+            tensor.ne[i] = this.uint64();
+        }
+        tensor.type = this.uint32();
+        tensor.offset = this.uint64();
+        return tensor;
+    }
+};
+
+gguf.Type = {
+    UINT8: 0,
+    INT8: 1,
+    UINT16: 2,
+    INT16: 3,
+    UINT32: 4,
+    INT32: 5,
+    FLOAT32: 6,
+    BOOL: 7,
+    STRING: 8,
+    ARRAY: 9,
+    UINT64: 10,
+    INT64: 11,
+    FLOAT64: 12,
+};
+
+gguf.QuantizationType = {
+    F32: 0,
+    F16: 1,
+    Q4_0: 2,
+    Q4_1: 3,
+    Q5_0: 6,
+    Q5_1: 7,
+    Q8_0: 8,
+    Q8_1: 9,
+    Q2_K: 10,
+    Q3_K: 11,
+    Q4_K: 12,
+    Q5_K: 13,
+    Q6_K: 14,
+    Q8_K: 15,
+    I8: 16,
+    I16: 17,
+    I32: 18,
+};
+
+gguf.Utility = class {
+
+    static enum(type, value) {
+        gguf.Utility._enums = gguf.Utility._enums || new Map();
+        if (!gguf.Utility._enums.has(type)) {
+            const entries = new Map(Object.entries(type).map(([key, value]) => [ value, key ]));
+            gguf.Utility._enums.set(type, entries);
+        }
+        const entries = gguf.Utility._enums.get(type);
+        if (entries.has(value)) {
+            return entries.get(value);
+        }
+        return value;
+    }
+};
+
+gguf.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'GGML Error';
+    }
+};
+
+export const ModelFactory = gguf.ModelFactory;
diff --git a/grapher.css b/grapher.css
new file mode 100644
index 00000000000..31a7536ef79
--- /dev/null
+++ b/grapher.css
@@ -0,0 +1,142 @@
+
+.node path { stroke: #333; fill: none; stroke-width: 1px; }
+.node line { stroke: #333; fill: none; stroke-width: 1px; }
+
+.node-item path { stroke-width: 0; stroke: #000; fill: #fff; }
+.node-item text { font-family: -apple-system, BlinkMacSystemFont, "Segoe WPC", "Segoe UI", "Ubuntu", "Droid Sans", sans-serif, "PingFang SC"; font-size: 11px; text-rendering: geometricPrecision; user-select: none; }
+
+.node-item-function path { fill: #fff; }
+.node-item-function text { fill: #000; }
+.node-item-function:hover { cursor: pointer; }
+.node-item-function:hover path { fill: #eee; }
+
+.node-item-type path { fill: #000; }
+.node-item-type text { fill: #fff; }
+.node-item-type:hover { cursor: pointer; }
+.node-item-type:hover path { fill: #fff; }
+.node-item-type:hover text { fill: #000; }
+
+.node-item-type-constant path { fill: #eee; }
+.node-item-type-constant text { fill: #000; }
+.node-item-type-constant:hover path { fill: #fff; }
+
+.node-item-type-control path { fill: #eee; }
+.node-item-type-control text { fill: #000; }
+.node-item-type-control:hover path { fill: #fff; }
+
+.node-item-type-layer path { fill: rgb(51, 85, 136); }
+.node-item-type-wrapper path { fill: rgb(238, 238, 238); }
+.node-item-type-wrapper text { fill: rgb(0, 0, 0); }
+.node-item-type-activation path { fill: rgb(112, 41, 33); }
+.node-item-type-pool path { fill: rgb(51, 85, 51); }
+.node-item-type-normalization path { fill: rgb(51, 85, 68); }
+.node-item-type-dropout path { fill: rgb(69, 71, 112); }
+.node-item-type-shape path { fill: rgb(108, 79, 71); }
+.node-item-type-tensor path { fill: rgb(89, 66, 59); }
+.node-item-type-transform path { fill: rgb(51, 85, 68); }
+.node-item-type-data path { fill: rgb(85, 85, 85); }
+.node-item-type-quantization path { fill: rgb(80, 40, 0); }
+.node-item-type-custom path { fill: rgb(128, 128, 128); }
+
+.node-item-input path { fill: #fff; }
+.node-item-input:hover { cursor: pointer; }
+.node-item-input:hover path { fill: #fff; }
+
+.node-item-constant path { fill: #eee; }
+.node-item-constant:hover { cursor: pointer; }
+.node-item-constant:hover path { fill: #fff; }
+
+.node-item-undefined path { fill: #f00; }
+.node-item-undefined:hover { cursor: pointer; }
+.node-item-undefined:hover path { fill: #fff; }
+
+.node-attribute-list > path { fill: #fff; stroke-width: 0; stroke: #000; }
+.node-attribute-list:hover { cursor: pointer; }
+.node-attribute-list:hover > path { fill: #f6f6f6; }
+.node-attribute > text { font-family: -apple-system, BlinkMacSystemFont, "Segoe WPC", "Segoe UI", "Ubuntu", "Droid Sans", sans-serif, "PingFang SC"; font-size: 9px; font-weight: normal; text-rendering: geometricPrecision; user-select: none; }
+
+.graph-item-input path { fill: #eee; }
+.graph-item-input:hover { cursor: pointer; }
+.graph-item-input:hover path { fill: #fff; }
+
+.graph-item-output path { fill: #eee; }
+.graph-item-output:hover { cursor: pointer; }
+.graph-item-output:hover path { fill: #fff; }
+
+#arrowhead { fill: #000; }
+#arrowhead-select { fill: #e00; }
+
+.edge-path { stroke: #000; stroke-width: 1px; fill: none; marker-end: url("#arrowhead"); }
+.edge-path-hit-test { pointer-events: stroke; stroke-width: 0.5em; fill: none;
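+/* The hit-test path is drawn wide (0.5em) and almost fully transparent so the
+   pointer target of an edge is much larger than its 1px rendered stroke. */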
stroke: #000; stroke-opacity: 0.001; }
+
+.select > .node.node-border { stroke: #e00; stroke-width: 2px; }
+.select.edge-path { stroke: #e00; stroke-width: 1px; marker-end: url("#arrowhead-select"); }
+
+.edge-label { font-family: -apple-system, BlinkMacSystemFont, "Segoe WPC", "Segoe UI", "Ubuntu", "Droid Sans", sans-serif, "PingFang SC"; font-size: 10px; }
+.edge-path-control-dependency { stroke-dasharray: 3, 2; }
+
+.cluster rect { stroke: #000; fill: #000; fill-opacity: 0.02; stroke-opacity: 0.06; stroke-width: 1px; }
+
+@keyframes pulse { from { stroke-dashoffset: 100px; } to { stroke-dashoffset: 0; } }
+
+@media (prefers-color-scheme: dark) {
+
+    .edge-path { stroke: #888; }
+
+    .node path { stroke: #1d1d1d; }
+    .node line { stroke: #1d1d1d; }
+
+    .select > .node.node-border { stroke: #b00; }
+    .select.edge-path { stroke: #b00; }
+
+    #arrowhead { fill: #888; }
+    #arrowhead-hover { fill: #b00; }
+    #arrowhead-select { fill: #b00; }
+
+    .edge-label { fill: #b2b2b2; }
+
+    .node-item-function path { fill: #404040; }
+    .node-item-function text { fill: #dfdfdf; }
+    .node-item-function:hover { cursor: pointer; }
+    .node-item-function:hover path { fill: #666666; }
+
+    .node-item-type path { fill: #303030; }
+    .node-item-type text { fill: #dfdfdf; }
+    .node-item-type:hover { cursor: pointer; }
+    .node-item-type:hover path { fill: #808080; }
+    .node-item-type:hover text { fill: #dfdfdf; }
+
+    .node-item path { stroke: #fff; }
+    .node-item text { fill: #dfdfdf; }
+
+    .node-attribute > text { fill: #b2b2b2; }
+    .node-attribute-list > path { fill: #2d2d2d; }
+    .node-attribute-list:hover > path { fill: #666666; }
+
+    .graph-item-input path { fill: #404040; }
+    .graph-item-input:hover { cursor: pointer; }
+    .graph-item-input:hover path { fill: #666666; }
+
+    .graph-item-output path { fill: #404040; }
+    .graph-item-output:hover { cursor: pointer; }
+    .graph-item-output:hover path { fill: #666666; }
+
+    .node-item-input path { fill: #404040; }
+    .node-item-input:hover path { fill: #666666; }
+    .node-item-constant path { fill: #4b4b4b; }
+    .node-item-constant:hover path { fill: #666666; }
+
+    .node-item-type-layer path { fill: rgba(51, 85, 136, 0.7); }
+    .node-item-type-activation path { fill: rgba(75, 27, 22, 0.7); }
+    .node-item-type-pool path { fill: rgba(51, 85, 51, 0.7); }
+    .node-item-type-normalization path { fill: rgba(51, 85, 68, 0.7); }
+    .node-item-type-dropout path { fill: rgba(69, 71, 112, 0.7); }
+    .node-item-type-shape path { fill: rgba(108, 79, 71, 0.7); }
+    .node-item-type-tensor path { fill: rgba(89, 66, 59, 0.7); }
+    .node-item-type-transform path { fill: rgba(51, 85, 68, 0.7); }
+    .node-item-type-data path { fill: rgba(85, 85, 85, 0.7); }
+    .node-item-type-quantization path { fill: rgba(80, 40, 0, 0.7); }
+    .node-item-type-custom path { fill: rgba(64, 64, 64, 0.7); }
+}
diff --git a/grapher.js b/grapher.js
new file mode 100644
index 00000000000..08247121ab5
--- /dev/null
+++ b/grapher.js
@@ -0,0 +1,884 @@
+
+import * as dagre from './dagre.js';
+
+const grapher = {};
+
+grapher.Graph = class {
+
+    constructor(compound, layout) {
+        this._layout = layout;
+        this._isCompound = compound;
+        this._nodes = new Map();
+        this._edges = new Map();
+        this._children = {};
+        this._children['\x00'] = {};
+        this._parent = {};
+    }
+
+    setNode(node) {
+        const key = node.name;
+        const value = this._nodes.get(key);
+        if (value) {
+            value.label = node;
+        } else {
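+            // First registration: in a compound graph every new node starts as a
+            // child of the virtual root '\x00' until setParent reassigns it.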
this._nodes.set(key, { v: key, label: node });
+            if (this._isCompound) {
+                this._parent[key] = '\x00';
+                this._children[key] = {};
+                this._children['\x00'][key] = true;
+            }
+        }
+    }
+
+    setEdge(edge) {
+        if (!this._nodes.has(edge.v)) {
+            throw new grapher.Error(`Invalid edge '${JSON.stringify(edge.v)}'.`);
+        }
+        if (!this._nodes.has(edge.w)) {
+            throw new grapher.Error(`Invalid edge '${JSON.stringify(edge.w)}'.`);
+        }
+        const key = `${edge.v}:${edge.w}`;
+        if (!this._edges.has(key)) {
+            this._edges.set(key, { v: edge.v, w: edge.w, label: edge });
+        }
+    }
+
+    setParent(node, parent) {
+        if (!this._isCompound) {
+            throw new Error("Cannot set parent in a non-compound graph");
+        }
+        parent += "";
+        for (let ancestor = parent; ancestor; ancestor = this.parent(ancestor)) {
+            if (ancestor === node) {
+                throw new Error(`Setting ${parent} as parent of ${node} would create a cycle`);
+            }
+        }
+        delete this._children[this._parent[node]][node];
+        this._parent[node] = parent;
+        this._children[parent][node] = true;
+        return this;
+    }
+
+    get nodes() {
+        return this._nodes;
+    }
+
+    hasNode(key) {
+        return this._nodes.has(key);
+    }
+
+    node(key) {
+        return this._nodes.get(key);
+    }
+
+    get edges() {
+        return this._edges;
+    }
+
+    parent(key) {
+        if (this._isCompound) {
+            const parent = this._parent[key];
+            if (parent !== '\x00') {
+                return parent;
+            }
+        }
+        return null;
+    }
+
+    children(key) {
+        key = key === undefined ? '\x00' : key;
+        if (this._isCompound) {
+            const children = this._children[key];
+            if (children) {
+                return Object.keys(children);
+            }
+        } else if (key === '\x00') {
+            return this.nodes.keys();
+        } else if (this.hasNode(key)) {
+            return [];
+        }
+        return null;
+    }
+
+    build(document, origin) {
+        const createGroup = (name) => {
+            const element = document.createElementNS('http://www.w3.org/2000/svg', 'g');
+            element.setAttribute('id', name);
+            element.setAttribute('class', name);
+            origin.appendChild(element);
+            return element;
+        };
+
+        const clusterGroup = createGroup('clusters');
+        const edgePathGroup = createGroup('edge-paths');
+        const edgeLabelGroup = createGroup('edge-labels');
+        const nodeGroup = createGroup('nodes');
+
+        const edgePathGroupDefs = document.createElementNS('http://www.w3.org/2000/svg', 'defs');
+        edgePathGroup.appendChild(edgePathGroupDefs);
+        const marker = (id) => {
+            const element = document.createElementNS('http://www.w3.org/2000/svg', 'marker');
+            element.setAttribute('id', id);
+            element.setAttribute('viewBox', '0 0 10 10');
+            element.setAttribute('refX', 9);
+            element.setAttribute('refY', 5);
+            element.setAttribute('markerUnits', 'strokeWidth');
+            element.setAttribute('markerWidth', 8);
+            element.setAttribute('markerHeight', 6);
+            element.setAttribute('orient', 'auto');
+            const markerPath = document.createElementNS('http://www.w3.org/2000/svg', 'path');
+            markerPath.setAttribute('d', 'M 0 0 L 10 5 L 0 10 L 4 5 z');
+            markerPath.style.setProperty('stroke-width', 1);
+            element.appendChild(markerPath);
+            return element;
+        };
+        edgePathGroupDefs.appendChild(marker("arrowhead"));
+        edgePathGroupDefs.appendChild(marker("arrowhead-select"));
+        edgePathGroupDefs.appendChild(marker("arrowhead-hover"));
+        for (const nodeId of this.nodes.keys()) {
+            const entry = this.node(nodeId);
+            const node = entry.label;
+            if (this.children(nodeId).length == 0) {
+                node.build(document, nodeGroup);
+            } else {
+                // cluster
+                node.rectangle = document.createElementNS('http://www.w3.org/2000/svg', 'rect');
+                if (node.rx) {
+                    node.rectangle.setAttribute('rx', node.rx);
+                }
+                if (node.ry) {
+                    node.rectangle.setAttribute('ry', node.ry);
+                }
+                node.element = document.createElementNS('http://www.w3.org/2000/svg', 'g');
+                node.element.setAttribute('class', 'cluster');
+                node.element.appendChild(node.rectangle);
+                clusterGroup.appendChild(node.element);
+            }
+        }
+
+        for (const edge of this.edges.values()) {
+            edge.label.build(document, edgePathGroup, edgeLabelGroup);
+        }
+    }
+
+    measure() {
+        for (const key of this.nodes.keys()) {
+            const entry = this.node(key);
+            if (this.children(key).length == 0) {
+                const node = entry.label;
+                node.measure();
+            }
+        }
+    }
+
+    layout() {
+        dagre.layout(this, this._layout);
+        for (const key of this.nodes.keys()) {
+            const entry = this.node(key);
+            if (this.children(key).length == 0) {
+                const node = entry.label;
+                node.layout();
+            }
+        }
+    }
+
+    update() {
+        for (const nodeId of this.nodes.keys()) {
+            if (this.children(nodeId).length == 0) {
+                // node
+                const entry = this.node(nodeId);
+                const node = entry.label;
+                node.update();
+            } else {
+                // cluster
+                const entry = this.node(nodeId);
+                const node = entry.label;
+                node.element.setAttribute('transform', `translate(${node.x},${node.y})`);
+                node.rectangle.setAttribute('x', - node.width / 2);
+                node.rectangle.setAttribute('y', - node.height / 2);
+                node.rectangle.setAttribute('width', node.width);
+                node.rectangle.setAttribute('height', node.height);
+            }
+        }
+        for (const edge of this.edges.values()) {
+            edge.label.update();
+        }
+    }
+};
+
+grapher.Node = class {
+
+    constructor() {
+        this._blocks = [];
+    }
+
+    header() {
+        const block = new grapher.Node.Header();
+        this._blocks.push(block);
+        return block;
+    }
+
+    list() {
+        const block = new grapher.Node.List();
+        this._blocks.push(block);
+        return block;
+    }
+
+    canvas() {
+        const block = new grapher.Node.Canvas();
+        this._blocks.push(block);
+        return block;
+    }
+
+    build(document, parent) {
+        this.element = document.createElementNS('http://www.w3.org/2000/svg', 'g');
+        if (this.id) {
+            this.element.setAttribute('id', this.id);
+        }
+        this.element.setAttribute('class', this.class ?
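+        // A node is a vertical stack of blocks (header, attribute list, canvas);
+        // build() renders them, measure()/layout() size and place them, and
+        // update() writes the final geometry back to the SVG elements.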
`node ${this.class}` : 'node'); + this.element.style.opacity = 0; + parent.appendChild(this.element); + this.border = document.createElementNS('http://www.w3.org/2000/svg', 'path'); + this.border.setAttribute('class', 'node node-border'); + for (let i = 0; i < this._blocks.length; i++) { + const block = this._blocks[i]; + block.first = i === 0; + block.last = i === this._blocks.length - 1; + block.build(document, this.element); + } + this.element.appendChild(this.border); + } + + measure() { + this.height = 0; + for (const block of this._blocks) { + block.measure(); + this.height = this.height + block.height; + } + this.width = Math.max(...this._blocks.map((block) => block.width)); + for (const block of this._blocks) { + block.width = this.width; + } + } + + layout() { + let y = 0; + for (const block of this._blocks) { + block.x = 0; + block.y = y; + block.width = this.width; + block.layout(); + y += block.height; + } + } + + update() { + for (const block of this._blocks) { + block.update(); + } + this.border.setAttribute('d', grapher.Node.roundedRect(0, 0, this.width, this.height, true, true, true, true)); + this.element.setAttribute('transform', `translate(${this.x - (this.width / 2)},${this.y - (this.height / 2)})`); + this.element.style.removeProperty('opacity'); + } + + select() { + if (this.element) { + this.element.classList.add('select'); + return [ this.element ]; + } + return []; + } + + deselect() { + if (this.element) { + this.element.classList.remove('select'); + } + } + + static roundedRect(x, y, width, height, r1, r2, r3, r4) { + const radius = 5; + r1 = r1 ? radius : 0; + r2 = r2 ? radius : 0; + r3 = r3 ? radius : 0; + r4 = r4 ? radius : 0; + return `M${x + r1},${y + }h${width - r1 - r2 + }a${r2},${r2} 0 0 1 ${r2},${r2 + }v${height - r2 - r3 + }a${r3},${r3} 0 0 1 ${-r3},${r3 + }h${r3 + r4 - width + }a${r4},${r4} 0 0 1 ${-r4},${-r4 + }v${-height + r4 + r1 + }a${r1},${r1} 0 0 1 ${r1},${-r1 + }z`; + } +}; + +grapher.Node.Header = class { + + constructor() { + this._entries = []; + } + + add(id, classList, content, tooltip, handler) { + const entry = new grapher.Node.Header.Entry(id, classList, content, tooltip, handler); + this._entries.push(entry); + return entry; + } + + build(document, parent) { + this._document = document; + for (const entry of this._entries) { + entry.build(document, parent); + } + if (!this.first) { + this.line = document.createElementNS('http://www.w3.org/2000/svg', 'line'); + parent.appendChild(this.line); + } + for (let i = 0; i < this._entries.length; i++) { + const entry = this._entries[i]; + if (i != 0) { + entry.line = document.createElementNS('http://www.w3.org/2000/svg', 'line'); + parent.appendChild(entry.line); + } + } + } + + measure() { + this.width = 0; + this.height = 0; + for (const entry of this._entries) { + entry.measure(); + this.height = Math.max(this.height, entry.height); + this.width += entry.width; + } + } + + layout() { + let x = this.width; + for (let i = this._entries.length - 1; i >= 0; i--) { + const entry = this._entries[i]; + if (i > 0) { + x -= entry.width; + entry.x = x; + } else { + entry.x = 0; + entry.width = x; + } + } + } + + update() { + for (let i = 0; i < this._entries.length; i++) { + const entry = this._entries[i]; + entry.element.setAttribute('transform', `translate(${entry.x},${this.y})`); + const r1 = i == 0 && this.first; + const r2 = i == this._entries.length - 1 && this.first; + const r3 = i == this._entries.length - 1 && this.last; + const r4 = i == 0 && this.last; + entry.path.setAttribute('d', 
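+                // Round only the outer corners: the first/last entry of the first/last
+                // block keeps the node's rounded silhouette, inner seams stay square.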
grapher.Node.roundedRect(0, 0, entry.width, entry.height, r1, r2, r3, r4)); + entry.text.setAttribute('x', 6); + entry.text.setAttribute('y', entry.ty); + } + for (let i = 1; i < this._entries.length; i++) { + const entry = this._entries[i]; + const line = entry.line; + line.setAttribute('class', 'node'); + line.setAttribute('x1', entry.x); + line.setAttribute('x2', entry.x); + line.setAttribute('y1', this.y); + line.setAttribute('y2', this.y + this.height); + } + if (this.line) { + this.line.setAttribute('class', 'node'); + this.line.setAttribute('x1', 0); + this.line.setAttribute('x2', this.width); + this.line.setAttribute('y1', this.y); + this.line.setAttribute('y2', this.y); + } + } +}; + +grapher.Node.Header.Entry = class { + + constructor(id, classList, content, tooltip, handler) { + this.id = id; + this.classList = classList; + this.content = content; + this.tooltip = tooltip; + this.handler = handler; + this._events = {}; + } + + on(event, callback) { + this._events[event] = this._events[event] || []; + this._events[event].push(callback); + } + + emit(event, data) { + if (this._events && this._events[event]) { + for (const callback of this._events[event]) { + callback(this, data); + } + } + } + + build(document, parent) { + this.element = document.createElementNS('http://www.w3.org/2000/svg', 'g'); + parent.appendChild(this.element); + this.path = document.createElementNS('http://www.w3.org/2000/svg', 'path'); + this.text = document.createElementNS('http://www.w3.org/2000/svg', 'text'); + this.element.appendChild(this.path); + this.element.appendChild(this.text); + const classList = [ 'node-item' ]; + if (this.classList) { + classList.push(...this.classList); + } + this.element.setAttribute('class', classList.join(' ')); + if (this.id) { + this.element.setAttribute('id', this.id); + } + if (this._events.click) { + this.element.addEventListener('click', (e) => { + e.stopPropagation(); + this.emit('click'); + }); + } + if (this.tooltip) { + const title = document.createElementNS('http://www.w3.org/2000/svg', 'title'); + title.textContent = this.tooltip; + this.element.appendChild(title); + } + this.text.textContent = this.content || '\u00A0'; + } + + measure() { + const yPadding = 4; + const xPadding = 7; + const boundingBox = this.text.getBBox(); + this.width = boundingBox.width + xPadding + xPadding; + this.height = boundingBox.height + yPadding + yPadding; + this.tx = xPadding; + this.ty = yPadding - boundingBox.y; + } + + layout() { + } +}; + +grapher.Node.List = class { + + constructor() { + this._items = []; + this._events = {}; + } + + add(name, value, tooltip, separator) { + const item = new grapher.Node.List.Item(name, value, tooltip, separator); + this._items.push(item); + return item; + } + + on(event, callback) { + this._events[event] = this._events[event] || []; + this._events[event].push(callback); + } + + emit(event, data) { + if (this._events && this._events[event]) { + for (const callback of this._events[event]) { + callback(this, data); + } + } + } + + build(document, parent) { + this._document = document; + this.element = document.createElementNS('http://www.w3.org/2000/svg', 'g'); + this.element.setAttribute('class', 'node-attribute-list'); + if (this._events.click) { + this.element.addEventListener('click', (e) => { + e.stopPropagation(); + this.emit('click'); + }); + } + this.background = document.createElementNS('http://www.w3.org/2000/svg', 'path'); + this.element.appendChild(this.background); + parent.appendChild(this.element); + for (const item of 
this._items) { + const group = document.createElementNS('http://www.w3.org/2000/svg', 'g'); + group.setAttribute('class', 'node-attribute'); + const text = document.createElementNS('http://www.w3.org/2000/svg', 'text'); + text.setAttribute('xml:space', 'preserve'); + if (item.tooltip) { + const title = document.createElementNS('http://www.w3.org/2000/svg', 'title'); + title.textContent = item.tooltip; + text.appendChild(title); + } + const colon = item.type === 'node' || item.type === 'node[]'; + const name = document.createElementNS('http://www.w3.org/2000/svg', 'tspan'); + name.textContent = colon ? `${item.name}:` : item.name; + if (item.separator.trim() !== '=' && !colon) { + name.style.fontWeight = 'bold'; + } + text.appendChild(name); + group.appendChild(text); + this.element.appendChild(group); + item.group = group; + item.text = text; + if (item.type === 'node') { + const node = item.value; + node.build(document, item.group); + } else if (item.type === 'node[]') { + for (const node of item.value) { + node.build(document, item.group); + } + } else { + const tspan = document.createElementNS('http://www.w3.org/2000/svg', 'tspan'); + tspan.textContent = item.separator + item.value; + item.text.appendChild(tspan); + } + } + if (!this.first) { + this.line = document.createElementNS('http://www.w3.org/2000/svg', 'line'); + this.line.setAttribute('class', 'node'); + this.element.appendChild(this.line); + } + } + + measure() { + this.width = 75; + this.height = 3; + const yPadding = 1; + const xPadding = 6; + for (let i = 0; i < this._items.length; i++) { + const item = this._items[i]; + const size = item.text.getBBox(); + item.width = xPadding + size.width + xPadding; + item.height = yPadding + size.height + yPadding; + item.offset = size.y; + this.height += item.height; + if (item.type === 'node') { + const node = item.value; + node.measure(); + this.width = Math.max(150, this.width, node.width + (2 * xPadding)); + this.height += node.height + yPadding + yPadding + yPadding + yPadding; + if (i === this._items.length - 1) { + this.height += 3; + } + } else if (item.type === 'node[]') { + for (const node of item.value) { + node.measure(); + this.width = Math.max(150, this.width, node.width + (2 * xPadding)); + this.height += node.height + yPadding + yPadding + yPadding + yPadding; + } + if (i === this._items.length - 1) { + this.height += 3; + } + } + this.width = Math.max(this.width, item.width); + } + this.height += 3; + } + + layout() { + const yPadding = 1; + const xPadding = 6; + let y = 3; + for (const item of this._items) { + item.x = this.x + xPadding; + item.y = y + yPadding - item.offset; + y += item.height; + if (item.type === 'node') { + const node = item.value; + node.width = this.width - xPadding - xPadding; + node.layout(); + node.x = this.x + xPadding + (node.width / 2); + node.y = y + (node.height / 2) + yPadding + yPadding; + y += node.height + yPadding + yPadding + yPadding + yPadding; + } else if (item.type === 'node[]') { + for (const node of item.value) { + node.width = this.width - xPadding - xPadding; + node.layout(); + node.x = this.x + xPadding + (node.width / 2); + node.y = y + (node.height / 2) + yPadding + yPadding; + y += node.height + yPadding + yPadding + yPadding + yPadding; + } + } + } + } + + update() { + this.element.setAttribute('transform', `translate(${this.x},${this.y})`); + this.background.setAttribute('d', grapher.Node.roundedRect(0, 0, this.width, this.height, this.first, this.first, this.last, this.last)); + for (const item of this._items) { + 
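+            // Position each attribute row; rows holding nested nodes are updated
+            // recursively so their geometry follows the list's layout pass.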
const text = item.text; + text.setAttribute('x', item.x); + text.setAttribute('y', item.y); + if (item.type === 'node') { + const node = item.value; + node.update(); + } else if (item.type === 'node[]') { + for (const node of item.value) { + node.update(); + } + } + } + if (this.line) { + this.line.setAttribute('x1', 0); + this.line.setAttribute('x2', this.width); + this.line.setAttribute('y1', 0); + this.line.setAttribute('y2', 0); + } + for (const item of this._items) { + if (item.value instanceof grapher.Node) { + const node = item.value; + node.update(); + } + } + } +}; + +grapher.Node.List.Item = class { + + constructor(name, value, tooltip, separator) { + this.name = name; + this.value = value; + this.tooltip = tooltip; + this.separator = separator; + if (value instanceof grapher.Node) { + this.type = 'node'; + } else if (Array.isArray(value) && value.every((value) => value instanceof grapher.Node)) { + this.type = 'node[]'; + } + } +}; + +grapher.Node.Canvas = class { + + constructor() { + this.width = 0; + this.height = 80; + } + + build(/* document, parent */) { + } + + update(/* parent, top, width , first, last */) { + } +}; + +grapher.Edge = class { + + constructor(from, to) { + this.from = from; + this.to = to; + } + + build(document, edgePathGroupElement, edgeLabelGroupElement) { + const createElement = (name) => { + return document.createElementNS('http://www.w3.org/2000/svg', name); + }; + this.element = createElement('path'); + if (this.id) { + this.element.setAttribute('id', this.id); + } + this.element.setAttribute('class', this.class ? `edge-path ${this.class}` : 'edge-path'); + edgePathGroupElement.appendChild(this.element); + this.hitTest = createElement('path'); + this.hitTest.setAttribute('class', 'edge-path-hit-test'); + this.hitTest.addEventListener('pointerover', () => this.emit('pointerover')); + this.hitTest.addEventListener('pointerleave', () => this.emit('pointerleave')); + this.hitTest.addEventListener('click', () => this.emit('click')); + edgePathGroupElement.appendChild(this.hitTest); + if (this.label) { + const tspan = createElement('tspan'); + tspan.setAttribute('xml:space', 'preserve'); + tspan.setAttribute('dy', '1em'); + tspan.setAttribute('x', '1'); + tspan.appendChild(document.createTextNode(this.label)); + this.labelElement = createElement('text'); + this.labelElement.appendChild(tspan); + this.labelElement.style.opacity = 0; + this.labelElement.setAttribute('class', 'edge-label'); + if (this.id) { + this.labelElement.setAttribute('id', `edge-label-${this.id}`); + } + edgeLabelGroupElement.appendChild(this.labelElement); + const edgeBox = this.labelElement.getBBox(); + this.width = edgeBox.width; + this.height = edgeBox.height; + } + } + + update() { + const intersectRect = (node, point) => { + const x = node.x; + const y = node.y; + const dx = point.x - x; + const dy = point.y - y; + let h = node.height / 2; + let w = node.width / 2; + if (Math.abs(dy) * w > Math.abs(dx) * h) { + if (dy < 0) { + h = -h; + } + return { x: x + (dy === 0 ? 0 : h * dx / dy), y: y + h }; + } + if (dx < 0) { + w = -w; + } + return { x: x + w, y: y + (dx === 0 ? 
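+                // Clip the endpoint to the node's bounding rectangle so the edge
+                // meets the border rather than the center; the branch picks the
+                // side the direction vector exits through.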
0 : w * dy / dx) }; + }; + const curvePath = (edge, tail, head) => { + const points = edge.points.slice(1, edge.points.length - 1); + points.unshift(intersectRect(tail, points[0])); + points.push(intersectRect(head, points[points.length - 1])); + return new grapher.Edge.Curve(points).path.data; + }; + const edgePath = curvePath(this, this.from, this.to); + this.element.setAttribute('d', edgePath); + this.hitTest.setAttribute('d', edgePath); + if (this.labelElement) { + this.labelElement.setAttribute('transform', `translate(${this.x - (this.width / 2)},${this.y - (this.height / 2)})`); + this.labelElement.style.opacity = 1; + } + } + + select() { + if (this.element) { + if (!this.element.classList.contains('select')) { + const path = this.element; + path.classList.add('select'); + this.element = path.cloneNode(true); + path.parentNode.replaceChild(this.element, path); + } + return [ this.element ]; + } + return []; + } + + deselect() { + if (this.element && this.element.classList.contains('select')) { + const path = this.element; + path.classList.remove('select'); + this.element = path.cloneNode(true); + path.parentNode.replaceChild(this.element, path); + } + } +}; + +grapher.Edge.Curve = class { + + constructor(points) { + this._path = new grapher.Edge.Path(); + this._x0 = NaN; + this._x1 = NaN; + this._y0 = NaN; + this._y1 = NaN; + this._state = 0; + for (let i = 0; i < points.length; i++) { + const point = points[i]; + this.point(point.x, point.y); + if (i === points.length - 1) { + switch (this._state) { + case 3: + this.curve(this._x1, this._y1); + this._path.lineTo(this._x1, this._y1); + break; + case 2: + this._path.lineTo(this._x1, this._y1); + break; + default: + break; + } + if (this._line || (this._line !== 0 && this._point === 1)) { + this._path.closePath(); + } + this._line = 1 - this._line; + } + } + } + + get path() { + return this._path; + } + + point(x, y) { + x = +x; + y = +y; + switch (this._state) { + case 0: + this._state = 1; + if (this._line) { + this._path.lineTo(x, y); + } else { + this._path.moveTo(x, y); + } + break; + case 1: + this._state = 2; + break; + case 2: + this._state = 3; + this._path.lineTo((5 * this._x0 + this._x1) / 6, (5 * this._y0 + this._y1) / 6); + this.curve(x, y); + break; + default: + this.curve(x, y); + break; + } + this._x0 = this._x1; + this._x1 = x; + this._y0 = this._y1; + this._y1 = y; + } + + curve(x, y) { + this._path.bezierCurveTo( + (2 * this._x0 + this._x1) / 3, + (2 * this._y0 + this._y1) / 3, + (this._x0 + 2 * this._x1) / 3, + (this._y0 + 2 * this._y1) / 3, + (this._x0 + 4 * this._x1 + x) / 6, + (this._y0 + 4 * this._y1 + y) / 6 + ); + } +}; + +grapher.Edge.Path = class { + + constructor() { + this._x0 = null; + this._y0 = null; + this._x1 = null; + this._y1 = null; + this._data = ''; + } + + moveTo(x, y) { + this._data += `M${this._x0 = this._x1 = +x},${this._y0 = this._y1 = +y}`; + } + + lineTo(x, y) { + this._data += `L${this._x1 = +x},${this._y1 = +y}`; + } + + bezierCurveTo(x1, y1, x2, y2, x, y) { + this._data += `C${+x1},${+y1},${+x2},${+y2},${this._x1 = +x},${this._y1 = +y}`; + } + + closePath() { + if (this._x1 !== null) { + this._x1 = this._x0; + this._y1 = this._y0; + this._data += "Z"; + } + } + + get data() { + return this._data; + } +}; + +export const { Graph, Node, Edge } = grapher; diff --git a/hailo-metadata.json b/hailo-metadata.json new file mode 100644 index 00000000000..ade44731c9f --- /dev/null +++ b/hailo-metadata.json @@ -0,0 +1,1025 @@ +[ + { + "name": "input_layer", + "description": "Represents an input 
of the model", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + } + ] + }, + { + "name": "output_layer", + "description": "Represents an output of the model", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "postprocess", + "description": "Represents a whole post-processing function of some meta-architecture", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "max_proposals_per_class", + "type": "int64", + "description": "Maximum number of proposals per class", + "visible": false + }, + { + "name": "iou_th", + "type": "float32", + "visible": false, + "description": "Intersection over union overlap threshold, used in the NMS iterative elimination process where potential duplicates of detected items are ignored" + }, + { + "name": "meta_arch", + "type": "string", + "visible": false, + "description": "Postprocessing meta-architecture name" + }, + { + "name": "max_total_output_proposals", + "type": "int64", + "visible": false, + "description": "Maximum number of bounding box proposals" + }, + { + "name": "postprocess_type", + "type": "string", + "visible": false, + "description": "Postprocessing type name" + } + ] + }, + { + "name": "conv", + "category": "Layer", + "description": "Convolution layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "kernel_shape", + "type": "int64[]", + "label": "kernel", + "description": "Shape of the kernel in Tensorflow convention (kernel height, kernel width, features in, features out)", + "visible": true + }, + { + "name": "strides", + "type": "int64[]", + "description": "Stride along each axis (batch, height, width, features)" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "Dilation value along each axis (batch, height, width, features)" + }, + { + "name": "padding", + "type": "string", + "description": "Padding mode, either VALID, SAME (symmetric, Caffe-like), SAME_TENSORFLOW, or DECONV" + }, + { + "name": "groups", + "type": "int64", + "description": "Number of groups input channels and output channels are divided into" + }, + { + "name": "batch_norm", + "type": "boolean", + "description": "Whether batch normalization is folded into the layer" + }, + { + "name": "elementwise_add", + "type": "boolean", + "description": "Whether elementwise addition is folded into the layer", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + }, + { + "name": "pre_layer_batch_norm", + "type": "boolean", + "description": "Whether batch normalization is folded into the layer, before the operation itself", + "visible": false + }, + { + "name": "transpose_output_width_features", + "type": "boolean", + "description": "Whether to transpose the width and the features axes of the layer's output tensor", + "visible": false + }, + { + "name": "spatial_flatten_output", + "type": "boolean", + "description": "Whether to flatten the layer's output to one row", + "visible": 
false + } + ] + }, + { + "name": "relu", + "category": "Activation" + }, + { + "name": "activation", + "category": "Activation", + "description": "Activation function", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "batch_norm", + "type": "boolean", + "description": "Whether batch normalization is folded into the layer", + "visible": false + }, + { + "name": "elementwise_add", + "type": "boolean", + "description": "Whether elementwise addition is folded into the layer", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + } + ] + }, + { + "name": "argmax", + "description": "Argmax layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "avgpool", + "category": "Pool", + "description": "Average pooling layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "kernel_shape", + "type": "int64[]", + "label": "kernel", + "description": "Shape of the kernel in Tensorflow convention (kernel height, kernel width, features in, features out)", + "visible": true + }, + { + "name": "strides", + "type": "int64[]", + "description": "Stride along each axis (batch, height, width, features)", + "visible": false + }, + { + "name": "padding", + "type": "string", + "description": "Padding mode, either VALID, SAME (symmetric, Caffe-like), SAME_TENSORFLOW, or DECONV", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + } + ] + }, + { + "name": "batch_norm", + "category": "Normalization", + "description": "Batch normalization layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "elementwise_add", + "type": "boolean", + "description": "Whether elementwise addition is folded into the layer", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + } + ] + }, + { + "name": "bbox_decoder", + "description": "Bounding box decoding layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + } + ] + }, + { + "name": "deconv", + "category": "Layer", + "description": "Deconvolution (transposed convolution) layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "kernel_shape", + "type": "int64[]", + "label": "kernel", + "description": "Shape of the kernel in Tensorflow convention (kernel height, kernel width, features in, features out)", + "visible": true + }, + { + "name": "strides", + "type": "int64[]", + "description": "Stride along each axis (batch, height, width, features)", + "visible": false + }, + { + "name": "dilations", + "type": "int64[]", + 
"description": "Dilation value along each axis (batch, height, width, features)", + "visible": false + }, + { + "name": "padding", + "type": "string", + "description": "Padding mode, either VALID, SAME (symmetric, Caffe-like), SAME_TENSORFLOW, or DECONV", + "visible": false + }, + { + "name": "groups", + "type": "int64", + "description": "Number of groups input channels and output channels are divided into", + "visible": false + }, + { + "name": "batch_norm", + "type": "boolean", + "description": "Whether batch normalization is folded into the layer", + "visible": false + }, + { + "name": "elementwise_add", + "type": "boolean", + "description": "Whether elementwise addition is folded into the layer", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + } + ] + }, + { + "name": "dense", + "category": "Layer", + "description": "Dense (fully connected) layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "kernel_shape", + "type": "int64[]", + "label": "kernel", + "description": "Shape of the kernel in Tensorflow convention (kernel height, kernel width, features in, features out)", + "visible": true + }, + { + "name": "batch_norm", + "type": "boolean", + "description": "Whether batch normalization is folded into the layer", + "visible": false + } + ] + }, + { + "name": "depth_to_space", + "description": "Depth to space layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "block_sizes", + "type": "int64[]", + "description": "Block size along each spatial axis", + "visible": false + }, + { + "name": "depth_to_space_type", + "type": "string", + "description": "Depth to space variant, either dcr (depth-column-row) or crd (column-row-depth)", + "visible": false + } + ] + }, + { + "name": "dw", + "description": "Depthwise convolution layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "kernel_shape", + "type": "int64[]", + "label": "kernel", + "description": "Shape of the kernel in Tensorflow convention (kernel height, kernel width, features in, features out)", + "visible": true + }, + { + "name": "strides", + "type": "int64[]", + "description": "Stride along each axis (batch, height, width, features)", + "visible": false + }, + { + "name": "dilations", + "type": "int64[]", + "description": "Dilation value along each axis (batch, height, width, features)", + "visible": false + }, + { + "name": "padding", + "type": "string", + "description": "Padding mode, either VALID, SAME (symmetric, Caffe-like), SAME_TENSORFLOW, or DECONV", + "visible": false + }, + { + "name": "groups", + "type": "int64", + "description": "Number of groups input channels and output channels are divided into", + "visible": false + }, + { + "name": "batch_norm", + "type": "boolean", + "description": "Whether batch normalization is folded into the layer", + "visible": false + }, + { + "name": "elementwise_add", + "type": "boolean", + "description": "Whether elementwise addition is folded into the layer", + "visible": false + }, + { + "name": "activation", + "type": 
"string", + "description": "Activation function name", + "visible": false + }, + { + "name": "transpose_output_width_features", + "type": "string", + "description": "Whether to transpose the width and the features axes of the layer's output tensor", + "visible": false + }, + { + "name": "dynamic_weights", + "type": "boolean", + "description": "Whether the layer's weights are data driven", + "visible": false + } + ] + }, + { + "name": "external_pad", + "description": "Padding layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "external_pad_params", + "type": "int64[]", + "description": "Padding value in pixels in each edge (top, bottom, left, right)", + "visible": false + } + ] + }, + { + "name": "feature_interleave", + "description": "Feature interleave layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "feature_multiplier", + "description": "Elementwise feature multiplication layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "feature_multiplier_type", + "type": "string", + "description": "Feature multiplier variant, either square (to multiply each value by itself), or user_specified", + "visible": false + } + ] + }, + { + "name": "feature_shuffle", + "description": "Feature shuffle layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "format_conversion", + "description": "Reshapes the input tensor between different memory layouts", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "expand_spatial_sizes", + "type": "int64[]", + "description": "New output tensor dimensions after the reshape (height, width)", + "visible": false + }, + { + "name": "conversion_type", + "type": "string", + "visible": false, + "description": "Format conversion variant" + } + ] + }, + { + "name": "global_avg_pool", + "category": "Pool", + "description": "Global average pooling layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "maxpool", + "category": "Pool", + "description": "Maximum pooling layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "kernel_shape", + "type": "int64[]", + "label": "kernel", + "description": "Shape of the kernel in Tensorflow convention (kernel height, kernel width, features in, features out)", + "visible": true + }, + { + "name": "strides", + "type": "int64[]", + "description": "Stride along each axis (batch, height, width, features)", + "visible": false + }, + { + "name": "padding", + "type": "string", + "description": "Padding mode, either VALID, SAME 
(symmetric, Caffe-like), SAME_TENSORFLOW, or DECONV", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + } + ] + }, + { + "name": "nms", + "description": "Non-maximum suppression layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "scores_threshold", + "type": "float32", + "description": "Confidence threshold for NMS filtering", + "visible": false + }, + { + "name": "iou_threshold", + "type": "float32", + "description": "Intersection over union overlap threshold, used in the NMS iterative elimination process where potential duplicates of detected items are ignored", + "visible": false + }, + { + "name": "classes", + "type": "int64", + "description": "Number of NMS classes", + "visible": false + }, + { + "name": "max_output_size", + "type": "int64", + "description": "Maximum number of proposals per class", + "visible": false + } + ] + }, + { + "name": "normalization", + "category": "Normalization", + "description": "Normalization layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "elementwise_add", + "type": "boolean", + "description": "Whether elementwise addition is folded into the layer", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + } + ] + }, + { + "name": "proposal_generator", + "description": "Proposal generator layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "reduce_l2", + "description": "Reduce layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "reduce_max", + "description": "Reduce Max layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "groups", + "type": "int64", + "description": "Number of groups input channels and output channels are divided into", + "visible": false + } + ] + }, + { + "name": "reduce_sum", + "description": "Reduce Sum layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "groups", + "type": "int64", + "description": "Number of groups input channels and output channels are divided into", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + }, + { + "name": "reduce_axes", + "type": "int64[]", + "description": "List of axes to reduce", + "visible": false + } + ] + }, + { + "name": "resize", + "category": "Tensor", + "description": "Resize layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + 
{ + "name": "resize_h_ratio_list", + "type": "float32[]", + "visible": true + }, + { + "name": "resize_w_ratio_list", + "type": "float32[]", + "visible": true + }, + { + "name": "resize_f_ratio_list", + "type": "float32[]", + "visible": true + }, + { + "name": "method", + "type": "string", + "description": "Resize method, either bilinear or nearest_neighbor", + "visible": false + }, + { + "name": "resize_bilinear_pixels_mode", + "type": "string", + "description": "Bilinear resize variant, either half_pixels, align_corners, or disabled (where both align_corners and half_pixels are false)", + "visible": false + } + ] + }, + { + "name": "shortcut", + "description": "Shortcut layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "slice", + "description": "Slice layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "height_slice", + "type": "int64[]", + "visible": false, + "description": "Slice in the height axis (start, stop, step)" + }, + { + "name": "width_slice", + "type": "int64[]", + "visible": false, + "description": "Slice in the width axis (start, stop, step)" + }, + { + "name": "features_slice", + "type": "int64[]", + "visible": false, + "description": "Slice in the features axis (start, stop, step)" + } + ] + }, + { + "name": "softmax", + "category": "Activation", + "description": "Softmax layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "groups", + "type": "int64", + "description": "Number of groups input channels and output channels are divided into", + "visible": false + } + ] + }, + { + "name": "space_to_depth", + "description": "Space to depth layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "block_sizes", + "type": "int64[]", + "description": "Block size along each spatial axis", + "visible": false + }, + { + "name": "space_to_depth_type", + "type": "string", + "description": "Space to depth variant, either classic_dcr (depth-column-row) classic_crd (column-row-depth), serial (used by Transformers patchify function), or focus (Yolov5-like)", + "visible": false + }, + { + "name": "spatial_flatten_output", + "type": "boolean", + "description": "Whether to flatten the layer's output to one row", + "visible": false + } + ] + }, + { + "name": "output_mux", + "description": "Output muxer layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "concat", + "category": "Tensor", + "description": "Concatenation layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "concat_axis", + "type": "int64", + "description": "Axis to concatenate along, either features or spatial_w (which means the width axis)", + "visible": false + }, + { 
+ "name": "spatial_w_concat", + "type": "boolean", + "description": "Whether the concat operation is in the width dimension", + "visible": false + } + ] + }, + { + "name": "matmul", + "description": "Matrix multiplication layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "kernel_shape", + "type": "int64[]", + "label": "kernel", + "description": "Shape of the kernel in Tensorflow convention (kernel height, kernel width, features in, features out)", + "visible": true + }, + { + "name": "dynamic_weights", + "type": "boolean", + "description": "Whether the layer's weights are data driven", + "visible": false + }, + { + "name": "transpose_matmul_input", + "type": "boolean", + "description": "Whether to transpose the width and the features axes of the layer's second input tensor", + "visible": false + } + ] + }, + { + "name": "ew_add", + "description": "Elementwise addition layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + } + ] + }, + { + "name": "ew_div", + "description": "Elementwise division layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + } + ] + }, + { + "name": "ew_mult", + "description": "Elementwise multiplication layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + } + ] + }, + { + "name": "ew_sub", + "description": "Elementwise subtraction layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }, + { + "name": "activation", + "type": "string", + "description": "Activation function name", + "visible": false + } + ] + }, + { + "name": "demux", + "description": "Demuxer layer", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "row_splitter", + "description": "Splits the input tensor along the height axis", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "feature_splitter", + "description": "Splits the input tensor along the features axis", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + }, + { + "name": "const_input", + "description": "Constant input", + "attributes": [ + { + "name": "original_names", + "type": "string[]", + "description": "Name of 
this layer in the original framework, such as Pytorch or Tensorflow", + "visible": false + }] + } +] + diff --git a/hailo.js b/hailo.js new file mode 100644 index 00000000000..7e2f67627fe --- /dev/null +++ b/hailo.js @@ -0,0 +1,328 @@ +// Experimental + +const hailo = {}; + +hailo.ModelFactory = class { + + match(context) { + return hailo.Container.open(context); + } + + async open(context, target) { + const metadata = await context.metadata('hailo-metadata.json'); + await target.read(); + return new hailo.Model(metadata, target); + } +}; + +hailo.Model = class { + + constructor(metadata, container) { + const configuration = container.configuration; + this.graphs = [ new hailo.Graph(metadata, configuration, container.weights) ]; + this.name = configuration && configuration.name || ""; + this.format = container.format + (container.metadata && container.metadata.sdk_version ? ` v${container.metadata.sdk_version}` : ''); + this.metadata = new Map(); + if (container.metadata && container.metadata.state) { + this.metadata.set('state', container.metadata.state); + } + } +}; + +hailo.Graph = class { + + constructor(metadata, configuration, weights) { + this.inputs = []; + this.outputs = []; + this.nodes = []; + const values = new Map(); + values.map = (name, type, tensor) => { + if (name.length === 0 && tensor) { + return new hailo.Value(name, type || null, tensor); + } + if (!values.has(name)) { + values.set(name, new hailo.Value(name, type || null, tensor || null)); + } else if (tensor) { + throw new hailo.Error(`Duplicate value '${name}'.`); + } else if (type && !type.equals(values.get(name).type)) { + throw new hailo.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + const layers = Object.entries(configuration.layers || {}).map(([ name, value ]) => { + value.name = name; + return value; + }); + for (const layer of layers) { + switch (layer.type) { + case 'input_layer': + case 'const_input': { + const shape = Array.isArray(layer.output_shapes) && layer.output_shapes.length > 0 ? layer.output_shapes[0] : null; + const type = shape ? new hailo.TensorType('?', new hailo.TensorShape(shape)) : null; + const argument = new hailo.Argument('input', [ values.map(layer.name, type) ]); + this.inputs.push(argument); + break; + } + case 'output_layer': { + for (let i = 0; i < layer.input.length; i++) { + const shape = Array.isArray(layer.input_shapes) && layer.input_shapes.length > 0 ? layer.input_shapes[i] : null; + const type = shape ? new hailo.TensorType('?', new hailo.TensorShape(shape)) : null; + const argument = new hailo.Argument('output', [ values.map(layer.input[i], type) ]); + this.outputs.push(argument); + } + break; + } + default: { + const node = new hailo.Node(metadata, layer, values, weights.get(layer.name)); + this.nodes.push(node); + break; + } + } + } + } +}; + +hailo.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +hailo.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new hailo.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = initializer ? 
initializer.type : type; + this.initializer = initializer; + } +}; + +hailo.Node = class { + + constructor(metadata, layer, values, weights) { + weights = weights || new Map(); + this.name = layer.name || ''; + this.type = metadata.type(layer.type); + if (layer.type === 'activation') { + this.type = Object.assign({}, this.type, { name: layer.params.activation || layer.name || '' }); + } + this.inputs = layer.input.map((name, index) => { + const shape = layer.input_shapes ? layer.input_shapes[index] : null; + const type = shape ? new hailo.TensorType('?', new hailo.TensorShape(shape)) : null; + return new hailo.Argument("input", [ values.map(name, type) ]); + }); + const layer_params = layer.params ? Object.entries(layer.params) : []; + const params_list = layer_params.reduce((acc, [ name, value ]) => { + const schema = metadata.attribute(layer.type, name) || {}; + if (schema.visible) { + const label = schema.label ? schema.label : name; + if (!weights.has(label)) { + // no npz entry for this param: expose a placeholder tensor built from its declared value + const array = weights.get(label); + const tensor = new hailo.Tensor(array, value); + acc.push(new hailo.Argument(label, [ values.map('', tensor.type, tensor) ])); + } + } + return acc; + }, []); + const params_from_npz = Array.from(weights).filter(([, value]) => value).map(([ name, value ]) => { + const tensor = new hailo.Tensor(value); + return new hailo.Argument(name, [ values.map('', tensor.type, tensor) ]); + }); + this.inputs = this.inputs.concat(params_list).concat(params_from_npz); + this.outputs = (layer.output || []).map((_, index) => { + const shape = layer.output_shapes ? layer.output_shapes[index] : null; + const type = shape ? new hailo.TensorType('?', new hailo.TensorShape(shape)) : null; + return new hailo.Argument("output", [ values.map(layer.name, type) ]); + }); + const attrs = Object.assign(layer.params || {}, { original_names: layer.original_names || [] }); + this.attributes = Object.entries(attrs).map(([name, value]) => new hailo.Attribute(metadata.attribute(layer.type, name), name, value)); + this.chain = []; + if (layer && layer.params && layer.params.activation && layer.params.activation !== 'linear' && layer.type !== 'activation') { + const activation = { + type: layer.params.activation, + name: layer.params.activation, + input: [], + output: [] + }; + const node = new hailo.Node(metadata, activation, values); + this.chain.push(node); + } + } +}; + +hailo.Attribute = class { + + constructor(metadata, name, value) { + this.name = name; + this.value = value; + this.type = metadata && metadata.type ? metadata.type : ''; + if (metadata && metadata.visible === false) { + this.visible = false; + } + if (name === 'original_names') { + this.visible = false; + } + } +}; + +hailo.Tensor = class { + + constructor(array, shape) { + const dataType = array && array.dtype ? array.dtype.__name__ : '?'; + shape = array && array.shape ? array.shape : shape; + this.type = new hailo.TensorType(dataType, new hailo.TensorShape(shape)); + if (array) { + this.stride = array.strides.map((stride) => stride / array.itemsize); + this.layout = this.type.dataType == 'string' || this.type.dataType == 'object' ? '|' : array.dtype.byteorder; + this.values = this.type.dataType == 'string' || this.type.dataType == 'object' ? array.tolist() : array.tobytes(); + } + } +}; +
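+// Note: the type/shape comparisons below are structural; TensorShape.equals also
+// tolerates squeezed interior singleton dimensions, e.g. [ 4, 1, 3 ] compares equal to [ 4, 3 ].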
+hailo.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType; + this.shape = shape; + } + + equals(obj) { + return obj && this.dataType === obj.dataType && this.shape && this.shape.equals(obj.shape); + } + + toString() { + return (this.dataType || '?') + this.shape.toString(); + } +}; + +hailo.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + equals(obj) { + if (obj && Array.isArray(obj.dimensions) && Array.isArray(this.dimensions)) { + if (this.dimensions.length === obj.dimensions.length) { + return obj.dimensions.every((value, index) => this.dimensions[index] === value); + } + const a = this.dimensions.filter((value, index) => index === 0 || index === this.dimensions.length - 1 || value !== 1); + const b = obj.dimensions.filter((value, index) => index === 0 || index === obj.dimensions.length - 1 || value !== 1); + if (a.length === b.length) { + return a.every((value, index) => b[index] === value); + } + } + return false; + } + + toString() { + if (this.dimensions && this.dimensions.length > 0) { + return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } + return ''; + } +}; + +hailo.Container = class { + + static open(context) { + const identifier = context.identifier; + const basename = identifier.split('.'); + basename.pop(); + if (identifier.toLowerCase().endsWith('.hn')) { + if (basename.length > 1 && (basename[basename.length - 1] === 'native' || basename[basename.length - 1] === 'fp')) { + basename.pop(); + } + const configuration = context.peek('json'); + if (configuration && configuration.name && configuration.net_params && configuration.layers) { + return new hailo.Container(context, basename.join('.'), configuration, null); + } + } else if (identifier.toLowerCase().endsWith('.metadata.json')) { + basename.pop(); + const metadata = context.peek('json'); + if (metadata && metadata.state && metadata.hn) { + return new hailo.Container(context, basename.join('.'), null, metadata); + } + } + return null; + } + + constructor(context, basename, configuration, metadata) { + this._context = context; + this._basename = basename; + this.configuration = configuration; + this.metadata = metadata; + } + + async _request(name, type) { + try { + const content = await this._context.fetch(name); + if (content) { + return content.read(type); + } + } catch (error) { + // continue regardless of error + } + return null; + } + + async read() { + this.format = 'Hailo NN'; + this.weights = new Map(); + if (!this.metadata) { + this.metadata = await this._request(`${this._basename}.metadata.json`, 'json'); + } + if (this.metadata) { + this.format = 'Hailo Archive'; + this.configuration = await this._request(this.metadata.hn, 'json'); + if (!this.configuration) { + throw new hailo.Error("Archive does not contain '.hn' configuration."); + } + let extension = undefined; + switch (this.metadata.state) { + case 'fp_optimized_model': extension = '.fpo.npz'; break; + case 'quantized_model': extension = '.q.npz'; break; + case 'compiled_model': extension = '.q.npz'; break; + default: extension = '.npz'; break; + } + const entries = await this._request(this._basename + extension, 'npz'); + if (entries && entries.size > 0) { + const inputs = new Set([ + 'kernel', 'bias', + 'input_activation_bits', 'output_activation_bits', 'weight_bits', 'bias_decomposition' + ]); + for (const [name, value] of entries) { + const key = name.split('.').slice(0, -1).join('.');
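+ // Assumption: npz entry names follow '<scope>/<layer>/<param>:<index>.npy';
+ // 'key' drops the '.npy' suffix and the match below drops the ':<index>' tail.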
+ const match = key.match(/.*?(?=:[0-9])/); + if (match) { + const path = match[0].split('/'); + if (inputs.has(path[2])) { + const layer = `${path[0]}/${path[1]}`; + if (!this.weights.has(layer)) { + this.weights.set(layer, new Map()); + } + const weights = this.weights.get(layer); + weights.set(path[2], value); + } + } + } + } + } + } +}; + +hailo.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Hailo model.'; + } +}; + +export const ModelFactory = hailo.ModelFactory; + diff --git a/hdf5.js b/hdf5.js new file mode 100755 index 00000000000..d7e63aa3f7d --- /dev/null +++ b/hdf5.js @@ -0,0 +1,1634 @@ + +// Experimental HDF5 reader + +import * as zip from './zip.js'; + +const hdf5 = {}; + +hdf5.File = class { + + static open(data) { + if (data && data.length >= 8) { + const buffer = data instanceof Uint8Array ? data : data.peek(8); + const signature = [ 0x89, 0x48, 0x44, 0x46, 0x0D, 0x0A, 0x1A, 0x0A ]; // \x89HDF\r\n\x1A\n + if (signature.every((value, index) => value === buffer[index])) { + return new hdf5.File(data); + } + } + return null; + } + + constructor(data) { + this.data = data; + } + + read() { + if (this.data) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html + const data = this.data; + delete this.data; + const reader = data instanceof Uint8Array ? new hdf5.BinaryReader(data) : (data.length < 0x10000000 ? new hdf5.BinaryReader(data.peek()) : new hdf5.StreamReader(data)); + reader.skip(8); + this._globalHeap = new hdf5.GlobalHeap(reader); + const version = reader.byte(); + switch (version) { + case 0: + case 1: { + this._freeSpaceStorageVersion = reader.byte(); + this._rootGroupEntryVersion = reader.byte(); + reader.skip(1); + this._sharedHeaderMessageVersionFormat = reader.byte(); + reader.initialize(); + reader.skip(1); + this._groupLeafNodeK = reader.uint16(); // 0x04? + this._groupInternalNodeK = reader.uint16(); // 0x10? + reader.skip(4); + if (version > 0) { + this._indexedStorageInternalNodeK = reader.uint16(); + reader.skip(2); // Reserved + } + this._baseAddress = reader.offset(); + reader.offset(); // Address of File Free space Info + this._endOfFileAddress = reader.offset(); + reader.offset(); // Driver Information Block Address + if (this._baseAddress != 0) { + throw new hdf5.Error('Base address is not zero.'); + } + const rootGroupEntry = new hdf5.SymbolTableEntry(reader); + this._rootGroup = new hdf5.Group(reader, rootGroupEntry, null, this._globalHeap, '', ''); + break; + } + case 2: + case 3: { + reader.initialize(); + reader.byte(); + this._baseAddress = reader.offset(); + this._superBlockExtensionAddress = reader.offset(); + this._endOfFileAddress = reader.offset(); + const rootGroupObjectHeader = new hdf5.DataObjectHeader(reader.at(reader.offset())); + this._rootGroup = new hdf5.Group(reader, null, rootGroupObjectHeader, this._globalHeap, '', ''); + break; + } + default: + throw new hdf5.Error(`Unsupported Superblock version ${version}.`); + } + this._rootGroup.attributes; // touch attributes to force decoding of the root group + } + return this._rootGroup; + } +}; + +hdf5.Group = class { + + constructor(reader, entry, objectHeader, globalHeap, parentPath, name) { + this._reader = reader; + this._entry = entry; + this._dataObjectHeader = objectHeader; + this._globalHeap = globalHeap; + this._name = name; + this._path = parentPath == '/' ?
(parentPath + name) : (`${parentPath}/${name}`); + } + + get name() { + return this._name; + } + + get path() { + return this._path; + } + + group(path) { + this._decodeGroups(); + if (this._groups.has(path)) { + return this._groups.get(path); + } + const index = path.indexOf('/'); + if (index !== -1) { + const group = this.group(path.substring(0, index)); + if (group) { + return group.group(path.substring(index + 1)); + } + } + return null; + } + + get groups() { + this._decodeGroups(); + return this._groups; + } + + get attributes() { + this._decodeDataObject(); + return this._attributes; + } + + get value() { + this._decodeDataObject(); + return this._value; + } + + _decodeDataObject() { + if (!this._dataObjectHeader) { + const reader = this._reader.at(this._entry.objectHeaderAddress); + this._dataObjectHeader = new hdf5.DataObjectHeader(reader); + } + if (!this._attributes) { + this._attributes = new Map(); + for (const attribute of this._dataObjectHeader.attributes) { + const name = attribute.name; + const value = attribute.decodeValue(this._globalHeap); + this._attributes.set(name, value); + } + this._value = null; + const datatype = this._dataObjectHeader.datatype; + const dataspace = this._dataObjectHeader.dataspace; + const dataLayout = this._dataObjectHeader.dataLayout; + const filterPipeline = this._dataObjectHeader.filterPipeline; + if (datatype && dataspace && dataLayout) { + this._value = new hdf5.Variable(this._reader, this._globalHeap, datatype, dataspace, dataLayout, filterPipeline); + } + } + } + + _decodeGroups() { + if (!this._groups) { + this._groups = new Map(); + if (this._entry) { + if (this._entry.treeAddress || this._entry.heapAddress) { + const heap = new hdf5.Heap(this._reader.at(this._entry.heapAddress)); + const tree = new hdf5.Tree(this._reader.at(this._entry.treeAddress)); + for (const node of tree.nodes) { + for (const entry of node.entries) { + const name = heap.getString(entry.linkNameOffset); + const group = new hdf5.Group(this._reader, entry, null, this._globalHeap, this._path, name); + this._groups.set(name, group); + } + } + } + } else { + this._decodeDataObject(); + for (const link of this._dataObjectHeader.links) { + if (Object.prototype.hasOwnProperty.call(link, 'objectHeaderAddress')) { + const name = link.name; + const objectHeader = new hdf5.DataObjectHeader(this._reader.at(link.objectHeaderAddress)); + const linkGroup = new hdf5.Group(this._reader, null, objectHeader, this._globalHeap, this._path, name); + this._groups.set(name, linkGroup); + } + } + } + } + } +}; + +hdf5.Variable = class { + + constructor(reader, globalHeap, datatype, dataspace, dataLayout, filterPipeline) { + this._reader = reader; + this._globalHeap = globalHeap; + this._datatype = datatype; + this._dataspace = dataspace; + this._dataLayout = dataLayout; + this._filterPipeline = filterPipeline; + } + + get type () { + return this._datatype.type; + } + + get littleEndian() { + return this._datatype.littleEndian; + } + + get shape() { + return this._dataspace.shape; + } + + get value() { + const data = this.data; + if (data) { + const reader = data instanceof hdf5.BinaryReader ? 
data : new hdf5.BinaryReader(data); + const array = this._dataspace.read(this._datatype, reader); + return this._dataspace.decode(this._datatype, array, this._globalHeap); + } + return null; + } + + get data() { + switch (this._dataLayout.layoutClass) { + case 1: // Contiguous + if (this._dataLayout.address) { + return this._reader.at(this._dataLayout.address).stream(this._dataLayout.size); + } + break; + case 2: { // Chunked + const dimensionality = this._dataLayout.dimensionality; + const tree = new hdf5.Tree(this._reader.at(this._dataLayout.address), dimensionality); + const item_size = this._dataLayout.datasetElementSize; + const chunk_shape = this._dataLayout.dimensionSizes; + const data_shape = this._dataspace.shape; + const chunk_size = chunk_shape.reduce((a, b) => a * b, 1); + const data_size = data_shape.reduce((a, b) => a * b, 1); + const max_dim = data_shape.length - 1; + let data_stride = 1; + const data_strides = data_shape.slice().reverse().map((d2) => { + const s = data_stride; + data_stride *= d2; + return s; + }).reverse(); + const data = new Uint8Array(data_size * item_size); + for (const node of tree.nodes) { + if (node.filterMask !== 0) { + return null; + } + let chunk = node.data; + if (this._filterPipeline) { + for (const filter of this._filterPipeline.filters) { + chunk = filter.decode(chunk); + } + } + const chunk_offset = node.fields; + const data_pos = chunk_offset.slice(); + const chunk_pos = data_pos.map(() => 0); + for (let chunk_index = 0; chunk_index < chunk_size; chunk_index++) { + for (let i = max_dim; i >= 0; i--) { + if (chunk_pos[i] >= chunk_shape[i]) { + chunk_pos[i] = 0; + data_pos[i] = chunk_offset[i]; + if (i > 0) { + chunk_pos[i - 1]++; + data_pos[i - 1]++; + } + } else { + break; + } + } + let index = 0; + let inbounds = true; + const length = data_pos.length - 1; + for (let i = 0; i < length; i++) { + const pos = data_pos[i]; + inbounds = inbounds && pos < data_shape[i]; + index += pos * data_strides[i]; + } + if (inbounds) { + let chunk_offset = chunk_index * item_size; + let target_offset = index * item_size; + const target_end = target_offset + item_size; + while (target_offset < target_end) { + data[target_offset++] = chunk[chunk_offset++]; + } + } + chunk_pos[max_dim]++; + data_pos[max_dim]++; + } + } + return data; + } + default: { + throw new hdf5.Error(`Unsupported data layout class '${this._dataLayout.layoutClass}'.`); + } + } + return null; + } +}; + +hdf5.Reader = class { + + constructor() { + } + + initialize() { + this._offsetSize = this.byte(); + this._lengthSize = this.byte(); + } + + int8() { + const position = this.take(1); + return this._view.getInt8(position); + } + + byte() { + const position = this.take(1); + return this._view.getUint8(position); + } + + int16() { + const position = this.take(2); + return this._view.getInt16(position, true); + } + + uint16() { + const position = this.take(2); + return this._view.getUint16(position, true); + } + + int32() { + const position = this.take(4); + return this._view.getInt32(position, true); + } + + uint32() { + const position = this.take(4); + return this._view.getUint32(position, true); + } + + int64() { + const position = this.take(8); + return this._view.getInt64(position, true).toNumber(); + } + + uint64() { + const position = this.take(8); + return this._view.getUint64(position, true).toNumber(); + } + + uint(size) { + switch (size) { + case 0: return this.byte(); + case 1: return this.uint16(); + case 2: return this.uint32(); + case 3: return this.uint64(); + default: throw new hdf5.Error(`Unsupported uint size '${size}'.`); + } + } +
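+ // IEEE 754 half precision: 1 sign bit, 5 exponent bits (bias 15), 10 fraction bits;
+ // e.g. 0x3C00 decodes to 1.0, 0xC000 to -2.0, and exponent 0x1F to Infinity or NaN.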
+ float16() { + const position = this.take(2); + const value = this._view.getUint16(position, true); + // decode float16 value + const s = (value & 0x8000) >> 15; + const e = (value & 0x7C00) >> 10; + const f = value & 0x03FF; + if (e == 0) { + return (s ? -1 : 1) * Math.pow(2, -14) * (f / Math.pow(2, 10)); + } else if (e == 0x1F) { + return f ? NaN : ((s ? -1 : 1) * Infinity); + } + return (s ? -1 : 1) * Math.pow(2, e-15) * (1 + (f / Math.pow(2, 10))); + } + + float32() { + const position = this.take(4); + return this._view.getFloat32(position, true); + } + + float64() { + const position = this.take(8); + return this._view.getFloat64(position, true); + } + + offset() { + switch (this._offsetSize) { + case 8: { + const position = this.take(8); + const value = this._view.getUint64(position, true); + if (value.low === -1 && value.high === -1) { + return undefined; + } + return value.toNumber(); + } + case 4: { + const value = this.uint32(); + if (value === 0xffffffff) { + return undefined; + } + return value; + } + default: { + throw new hdf5.Error(`Unsupported offset size '${this._offsetSize}'.`); + } + } + } + + length() { + switch (this._lengthSize) { + case 8: { + const position = this.take(8); + const value = this._view.getUint64(position, true); + if (value.low === -1 && value.high === -1) { + return undefined; + } + return value.toNumber(); + } + case 4: { + const value = this.uint32(); + if (value === 0xffffffff) { + return undefined; + } + return value; + } + default: { + throw new hdf5.Error(`Unsupported length size '${this._lengthSize}'.`); + } + } + } + + string(size, encoding) { + if (!size || size == -1) { + size = this.size(0x00); + } + const data = this.read(size); + if (encoding == 'utf-8') { + hdf5.Reader._utf8Decoder = hdf5.Reader._utf8Decoder || new TextDecoder('utf-8'); + return hdf5.Reader._utf8Decoder.decode(data).replace(/\0/g, ''); + } + hdf5.Reader._asciiDecoder = hdf5.Reader._asciiDecoder || new TextDecoder('ascii'); + return hdf5.Reader._asciiDecoder.decode(data).replace(/\0/g, ''); + } + + match(text) { + if (this.position + text.length > this._length) { + return false; + } + const buffer = this.read(text.length); + for (let i = 0; i < text.length; i++) { + if (text.charCodeAt(i) != buffer[i]) { + this.skip(-text.length); + return false; + } + } + return true; + } +}; + +hdf5.BinaryReader = class extends hdf5.Reader { + + constructor(buffer, view, offset, position, offsetSize, lengthSize) { + super(); + this._buffer = buffer; + this._view = view || new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._offset = offset || 0; + this._position = position || 0; + this._offsetSize = offsetSize; + this._lengthSize = lengthSize; + } + + get position() { + return this._position + this._offset; + } + + take(offset) { + const position = this._offset + this._position; + this.skip(offset); + return position; + } + + skip(offset) { + this._position += offset; + if (this._offset + this._position > this._buffer.length) { + throw new hdf5.Error(`Unexpected end of file. Expected ${this._offset + this._position - this._buffer.length} more bytes. The file might be corrupted.`); + } + } + + align(mod) { + if (this._position % mod != 0) { + this._position = (Math.floor(this._position / mod) + 1) * mod; + } + } +
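+ // peek() returns upcoming bytes without consuming them: it reads ahead and restores the position.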
+ peek(length) { + const position = this._offset + this._position; + length = length !== undefined ? length : this._buffer.length - position; + this.take(length); + const buffer = this._buffer.subarray(position, position + length); + this._position = position - this._offset; + return buffer; + } + + read(length) { + const position = this.take(length); + return this._buffer.subarray(position, position + length); + } + + stream(length) { + const position = this.take(length); + const buffer = this._buffer.subarray(position, position + length); + return new hdf5.BinaryReader(buffer); + } + + size(terminator) { + let position = this._offset + this._position; + while (this._buffer[position] !== terminator) { + position++; + } + return position - this._offset - this._position + 1; + } + + at(offset) { + return new hdf5.BinaryReader(this._buffer, this._view, offset, 0, this._offsetSize, this._lengthSize); + } + + clone() { + return new hdf5.BinaryReader(this._buffer, this._view, this._offset, this._position, this._offsetSize, this._lengthSize); + } +}; + +hdf5.StreamReader = class extends hdf5.Reader { + + constructor(stream, view, window, offset, position, offsetSize, lengthSize) { + super(); + this._stream = stream; + this._length = stream.length; + this._view = view; + this._window = window || 0; + this._offset = offset || 0; + this._position = position || 0; + this._offsetSize = offsetSize; + this._lengthSize = lengthSize; + } + + get position() { + return this._offset + this._position; + } + + skip(offset) { + this._position += offset; + if (this._position > this._length) { + throw new hdf5.Error(`Unexpected end of file. Expected ${this._position - this._length} more bytes. The file might be corrupted.`); + } + } + + align(mod) { + if (this._position % mod != 0) { + this._position = (Math.floor(this._position / mod) + 1) * mod; + } + } + + read(length) { + this._stream.seek(this._offset + this._position); + this.skip(length); + return this._stream.read(length); + } + + stream(length) { + this._stream.seek(this._offset + this._position); + this.skip(length); + return this._stream.stream(length); + } + + byte() { + const position = this.take(1); + return this._view.getUint8(position); + } + + uint16() { + const position = this.take(2); + return this._view.getUint16(position, true); + } + + int32() { + const position = this.take(4); + return this._view.getInt32(position, true); + } + + uint32() { + const position = this.take(4); + return this._view.getUint32(position, true); + } + + int64() { + const position = this.take(8); + return this._view.getInt64(position, true).toNumber(); + } + + float32() { + const position = this.take(4); + return this._view.getFloat32(position, true); + } + + float64() { + const position = this.take(8); + return this._view.getFloat64(position, true); + } + + at(offset) { + return new hdf5.StreamReader(this._stream, this._view, this._window, offset, 0, this._offsetSize, this._lengthSize); + } + + clone() { + return new hdf5.StreamReader(this._stream, this._view, this._window, this._offset, this._position, this._offsetSize, this._lengthSize); + } + + size(terminator) { + const position = this._position; + let size = 0; + while (this.byte() != terminator) { + size++; + } + this._position = position; + return size; + } + + + take(length) { + const position = this.position; + if (position + length > this._length) { + throw new hdf5.Error(`Expected ${position + length - this._length} more bytes. The file might be corrupted. 
Unexpected end of file.`); + } + if (!this._buffer || position < this._window || position + length > this._window + this._buffer.length) { + this._window = position; + this._stream.seek(this._window); + this._buffer = this._stream.read(Math.min(0x1000, this._length - this._window)); + this._view = new DataView(this._buffer.buffer, this._buffer.byteOffset, this._buffer.byteLength); + } + this._position += length; + return position - this._window; + } +}; + +hdf5.SymbolTableNode = class { + + constructor(reader) { + if (!reader.match('SNOD')) { + throw new hdf5.Error("Not a valid 'SNOD' block."); + } + const version = reader.byte(); + if (version == 1) { + reader.skip(1); + const entriesUsed = reader.uint16(); + this.entries = []; + for (let i = 0; i < entriesUsed; i++) { + const entry = new hdf5.SymbolTableEntry(reader); + this.entries.push(entry); + } + } else { + throw new hdf5.Error(`Unsupported symbol table node version '${version}'.`); + } + } +}; + +hdf5.SymbolTableEntry = class { + + constructor(reader) { + this.linkNameOffset = reader.offset(); + this.objectHeaderAddress = reader.offset(); + const cacheType = reader.uint32(); + reader.skip(4); // Reserved + switch (cacheType) { + case 0: + break; + case 1: { + const scratchReader = reader.clone(); + this.treeAddress = scratchReader.offset(); + this.heapAddress = scratchReader.offset(); + break; + } + default: + throw new hdf5.Error(`Unsupported cache type '${cacheType}'.`); + } + reader.skip(16); // Scratch-pad space + } +}; + +hdf5.DataObjectHeader = class { + + constructor(reader) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#ObjectHeader + this.attributes = []; + this.links = []; + this.continuations = []; + reader.match('OHDR'); + const version = reader.byte(); + switch (version) { + case 1: { + reader.skip(1); + const count = reader.uint16(); + reader.uint32(); + const objectHeaderSize = reader.uint32(); + reader.align(8); + let end = reader.position + objectHeaderSize; + for (let i = 0; i < count; i++) { + const type = reader.uint16(); + const size = reader.uint16(); + const flags = reader.byte(); + reader.skip(3); + reader.align(8); + const next = this._readMessage(reader, type, size, flags); + if ((!next || reader.position >= end) && this.continuations.length > 0) { + const continuation = this.continuations.shift(); + reader = reader.at(continuation.offset); + end = continuation.offset + continuation.length; + } else { + reader.align(8); + } + } + break; + } + case 2: { + const flags = reader.byte(); + if ((flags & 0x20) != 0) { + reader.uint32(); // access time + reader.uint32(); // modification time + reader.uint32(); // change time + reader.uint32(); // birth time + } + if ((flags & 0x10) != 0) { + reader.uint16(); // max compact attributes + reader.uint16(); // min compact attributes + } + const order = (flags & 0x04) != 0; + const size = reader.uint(flags & 0x03); + let next = true; + let end = reader.position + size; + while (next && reader.position < end) { + const type = reader.byte(); + const size = reader.uint16(); + const flags = reader.byte(); + if (reader.position < end) { + if (order) { + reader.uint16(); // creation order + } + next = this._readMessage(reader, type, size, flags); + } + if ((!next || reader.position >= end) && this.continuations.length > 0) { + const continuation = this.continuations.shift(); + reader = reader.at(continuation.offset); + end = continuation.offset + continuation.length; + if (!reader.match('OCHK')) { + throw new hdf5.Error('Invalid continuation block signature.'); + 
} + next = true; + } + } + break; + } + default: { + throw new hdf5.Error(`Unsupported data object header version '${version}'.`); + } + } + } + + _readMessage(reader, type, size, flags) { + switch (type) { + case 0x0000: // NIL + return false; + case 0x0001: // Dataspace + this.dataspace = (size != 4 || flags != 1) ? new hdf5.Dataspace(reader.clone()) : null; + break; + case 0x0002: // Link Info + this.linkInfo = new hdf5.LinkInfo(reader.clone()); + break; + case 0x0003: // Datatype + this.datatype = new hdf5.Datatype(reader.clone()); + break; + case 0x0004: + case 0x0005: // Fill Value + this.fillValue = new hdf5.FillValue(reader.clone(), type); + break; + case 0x0006: // Link + this.links.push(new hdf5.Link(reader.clone())); + break; + case 0x0008: // Data Layout + this.dataLayout = new hdf5.DataLayout(reader.clone()); + break; + case 0x000A: // Group Info + this.groupInfo = new hdf5.GroupInfo(reader.clone()); + break; + case 0x000B: // Filter Pipeline + this.filterPipeline = new hdf5.FilterPipeline(reader.clone()); + break; + case 0x000C: // Attribute + this.attributes.push(new hdf5.Attribute(reader.clone())); + break; + case 0x000D: // Object Comment Message + this.comment = reader.string(-1, 'ascii'); + break; + case 0x0010: // Object Header Continuation + this.continuations.push(new hdf5.ObjectHeaderContinuation(reader.clone())); + break; + case 0x0011: // Symbol Table + this.symbolTable = new hdf5.SymbolTable(reader.clone()); + break; + case 0x000E: // Object Modification Time (Old) + case 0x0012: // Object Modification Time + this.objectModificationTime = new hdf5.ObjectModificationTime(reader.clone(), type); + break; + case 0x0015: // Attribute Info + this.attributeInfo = new hdf5.AttributeInfo(reader.clone()); + break; + default: + throw new hdf5.Error(`Unsupported message type '${type}'.`); + } + reader.skip(size); + return true; + } +}; + +hdf5.Message = class { + + constructor(type, data, flags) { + this._type = type; + this._data = data; + this._flags = flags; + } +}; + +hdf5.Dataspace = class { + + constructor(reader) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#DataspaceMessage + this._sizes = []; + const version = reader.byte(); + switch (version) { + case 1: + this._dimensions = reader.byte(); + this._flags = reader.byte(); + reader.skip(1); + reader.skip(4); + for (let i = 0; i < this._dimensions; i++) { + this._sizes.push(reader.length()); + } + if ((this._flags & 0x01) != 0) { + this._maxSizes = []; + for (let j = 0; j < this._dimensions; j++) { + this._maxSizes.push(reader.length()); + if (this._maxSizes[j] != this._sizes[j]) { + throw new hdf5.Error('Max size is not supported.'); + } + } + } + if ((this._flags & 0x02) != 0) { + throw new hdf5.Error('Permutation indices not supported.'); + } + break; + case 2: + this._dimensions = reader.byte(); + this._flags = reader.byte(); + this._type = reader.byte(); // 0 scalar, 1 simple, 2 null + for (let k = 0; k < this._dimensions; k++) { + this._sizes.push(reader.length()); + } + if ((this._flags & 0x01) != 0) { + this._maxSizes = []; + for (let l = 0; l < this._dimensions; l++) { + this._maxSizes.push(reader.length()); + } + } + break; + default: + throw new hdf5.Error(`Unsupported dataspace message version '${version}'.`); + + } + } + + get shape() { + return this._sizes; + } + + read(datatype, reader) { + if (this._dimensions == 0) { + return datatype.read(reader); + } + return this._readArray(datatype, reader, this._sizes, 0); + } + + _readArray(datatype, reader, shape, dimension) { + const array = []; + 
const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + array.push(datatype.read(reader)); + } + } else { + for (let j = 0; j < size; j++) { + array.push(this._readArray(datatype, reader, shape, dimension + 1)); + } + } + return array; + } + + decode(datatype, data, globalHeap) { + if (this._dimensions == 0) { + return datatype.decode(data, globalHeap); + } + return this._decodeArray(datatype, data, globalHeap, this._sizes, 0); + } + + _decodeArray(datatype, data, globalHeap, shape, dimension) { + const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + data[i] = datatype.decode(data[i], globalHeap); + } + } else { + for (let j = 0; j < size; j++) { + data[j] = this._decodeArray(datatype, data[j], globalHeap, shape, dimension + 1); + } + } + return data; + } +}; + +hdf5.LinkInfo = class { + + constructor(reader) { + const version = reader.byte(); + switch (version) { + case 0: { + const flags = reader.byte(); + if ((flags & 1) != 0) { + this.maxCreationIndex = reader.uint64(); + } + this.fractalHeapAddress = reader.offset(); + this.nameIndexTreeAddress = reader.offset(); + if ((flags & 2) != 0) { + this.creationOrderIndexTreeAddress = reader.offset(); + } + break; + } + default: + throw new hdf5.Error(`Unsupported link info message version '${version}'.`); + } + } +}; + +hdf5.Datatype = class { + + constructor(reader) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#DatatypeMessage + const format = reader.byte(); + const version = format >> 4; + this._class = format & 0xf; + switch (version) { + case 1: + case 2: { + this._flags = reader.byte() | reader.byte() << 8 | reader.byte() << 16; + this._size = reader.uint32(); + switch (this._class) { + case 0: { // fixed-point + this._bitOffset = reader.uint16(); + this._bitPrecision = reader.uint16(); + break; + } + case 8: { // enumerated + this._base = new hdf5.Datatype(reader); + this._names = []; + this._values = []; + const count = this._flags & 0xffff; + for (let i = 0; i < count; i++) { + const name = reader.clone().string(-1, 'ascii'); + this._names.push(name); + reader.skip(Math.ceil((name.length + 1) / 8) * 8); + } + for (let i = 0; i < count; i++) { + this._values.push(this._base.read(reader)); + } + break; + } + default: { + break; + } + } + break; + } + default: { + throw new hdf5.Error(`Unsupported datatype version '${version}'.`); + } + } + } + + get type() { + switch (this._class) { + case 0: // fixed-point + if ((this._flags & 0xfff6) === 0) { + if ((this._flags & 0x08) !== 0) { + switch (this._size) { + case 1: return 'int8'; + case 2: return 'int16'; + case 4: return 'int32'; + case 8: return 'int64'; + default: throw new hdf5.Error(`Unsupported int size '${this._size}'.`); + } + } else { + switch (this._size) { + case 1: return 'uint8'; + case 2: return 'uint16'; + case 4: return 'uint32'; + case 8: return 'uint64'; + default: throw new hdf5.Error(`Unsupported uint size '${this._size}'.`); + } + } + } + break; + case 1: // floating-point + if (this._size == 2 && this._flags == 0x0f20) { + return 'float16'; + } else if (this._size == 4 && this._flags == 0x1f20) { + return 'float32'; + } else if (this._size == 8 && this._flags == 0x3f20) { + return 'float64'; + } + break; + case 3: // string + return 'string'; + case 5: // opaque + return 'uint8[]'; + case 6: // compound + return 'compound'; + case 8: // enumerated + if (this._base.type === 'int8' && + this._names.length === 2 && this._names[0] === 'FALSE' && this._names[1] === 'TRUE' && + this._values.length === 2 && this._values[0] === 0 && this._values[1] === 1) { + return 'boolean'; + } + break;
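+ // Class 9 (variable-length) values live in the global heap: read() below captures a
+ // heap reference that decode() later resolves against the global heap collections.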
+ case 9: // variable-length + if ((this._flags & 0x0f) == 1) { // type + return 'char[]'; + } + break; + default: + break; + } + throw new hdf5.Error(`Unsupported datatype class '${this._class}'.`); + } + + get littleEndian() { + switch (this._class) { + case 0: // fixed-point + case 1: // floating-point + return (this._flags & 0x01) == 0; + default: + return true; + } + } + + read(reader) { + switch (this._class) { + case 0: // fixed-point + if (this._size == 1) { + return ((this._flags & 0x8) != 0) ? reader.int8() : reader.byte(); + } else if (this._size == 2) { + return ((this._flags & 0x8) != 0) ? reader.int16() : reader.uint16(); + } else if (this._size == 4) { + return ((this._flags & 0x8) != 0) ? reader.int32() : reader.uint32(); + } else if (this._size == 8) { + return ((this._flags & 0x8) != 0) ? reader.int64() : reader.uint64(); + } + throw new hdf5.Error('Unsupported fixed-point datatype.'); + case 1: // floating-point + if (this._size == 2 && this._flags == 0x0f20) { + return reader.float16(); + } else if (this._size == 4 && this._flags == 0x1f20) { + return reader.float32(); + } else if (this._size == 8 && this._flags == 0x3f20) { + return reader.float64(); + } + throw new hdf5.Error('Unsupported floating-point datatype.'); + case 3: // string + switch ((this._flags >> 8) & 0x0f) { // character set + case 0: + return reader.string(this._size, 'ascii'); + case 1: + return reader.string(this._size, 'utf-8'); + default: + throw new hdf5.Error('Unsupported character encoding.'); + } + case 5: // opaque + return reader.read(this._size); + case 8: // enumerated + return reader.read(this._size); + case 9: // variable-length + return { + length: reader.uint32(), + globalHeapID: new hdf5.GlobalHeapID(reader) + }; + default: + throw new hdf5.Error(`Unsupported datatype class '${this._class}'.`); + } + } + + decode(data, globalHeap) { + switch (this._class) { + case 0: // fixed-point + return data; + case 1: // floating-point + return data; + case 3: // string + return data; + case 5: // opaque + return data; + case 8: // enumerated + return data; + case 9: { // variable-length + const globalHeapObject = globalHeap.get(data.globalHeapID); + if (globalHeapObject != null) { + const characterSet = (this._flags >> 8) & 0x0f; + const reader = globalHeapObject.reader(); + switch (characterSet) { + case 0: + return reader.string(reader.length(), 'ascii'); + case 1: + return reader.string(reader.length(), 'utf-8'); + default: + throw new hdf5.Error('Unsupported character encoding.'); + } + } + break; + } + default: + throw new hdf5.Error(`Unsupported datatype class '${this._class}'.`); + } + return null; + } +}; + +hdf5.FillValue = class { + + constructor(reader, type) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#FillValueMessage + switch (type) { + case 0x0004: { + const size = reader.uint32(); + this.data = reader.read(size); + break; + } + case 0x0005: + default: { + const version = reader.byte(); + switch (version) { + case 1: + case 2: { + reader.byte(); + reader.byte(); + const valueDefined = reader.byte(); + if (version === 1 || valueDefined === 1) { + const size = reader.uint32(); + this.data = reader.read(size); + } + break; + } + case 3: { + const flags = reader.byte(); + if ((flags & 0x20) !== 0) { + const size = reader.uint32(); + this.data = reader.read(size); + } + break; + } + default: + throw new hdf5.Error(`Unsupported fill value version '${version}'.`); + } + break; + } + } + } +}; +
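+// Only hard links (type 0) carry an object header address this reader can follow;
+// soft links (type 1) are parsed but left unresolved.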
+hdf5.FillValue = class {
+
+    constructor(reader, type) {
+        // https://support.hdfgroup.org/HDF5/doc/H5.format.html#FillValueMessage
+        switch (type) {
+            case 0x0004: {
+                const size = reader.uint32();
+                this.data = reader.read(size);
+                break;
+            }
+            case 0x0005:
+            default: {
+                const version = reader.byte();
+                switch (version) {
+                    case 1:
+                    case 2: {
+                        reader.byte();
+                        reader.byte();
+                        const valueDefined = reader.byte();
+                        if (version === 1 || valueDefined === 1) {
+                            const size = reader.uint32();
+                            this.data = reader.read(size);
+                        }
+                        break;
+                    }
+                    case 3: {
+                        const flags = reader.byte();
+                        if ((flags & 0x20) !== 0) {
+                            const size = reader.uint32();
+                            this.data = reader.read(size);
+                        }
+                        break;
+                    }
+                    default:
+                        throw new hdf5.Error(`Unsupported fill value version '${version}'.`);
+                }
+                break;
+            }
+        }
+    }
+};
+
+hdf5.Link = class {
+
+    constructor(reader) {
+        // https://support.hdfgroup.org/HDF5/doc/H5.format.html#LinkMessage
+        const version = reader.byte();
+        switch (version) {
+            case 1: {
+                const flags = reader.byte();
+                this.type = (flags & 0x08) != 0 ? reader.byte() : 0;
+                if ((flags & 0x04) != 0) {
+                    this.creationOrder = reader.uint32();
+                }
+                const encoding = ((flags & 0x10) != 0 && reader.byte() == 1) ? 'utf-8' : 'ascii';
+                this.name = reader.string(reader.uint(flags & 0x03), encoding);
+                switch (this.type) {
+                    case 0: // hard link
+                        this.objectHeaderAddress = reader.offset();
+                        break;
+                    case 1: // soft link
+                        break;
+                    default:
+                        throw new hdf5.Error(`Unsupported link message type '${this.type}'.`);
+                }
+                break;
+            }
+            default:
+                throw new hdf5.Error(`Unsupported link message version '${version}'.`);
+        }
+    }
+};
+
+hdf5.DataLayout = class {
+
+    constructor(reader) {
+        // https://support.hdfgroup.org/HDF5/doc/H5.format.html#LayoutMessage
+        const version = reader.byte();
+        switch (version) {
+            case 1:
+            case 2: {
+                this.dimensionality = reader.byte();
+                this.layoutClass = reader.byte();
+                reader.skip(5);
+                switch (this.layoutClass) {
+                    case 1: // Contiguous
+                        this.address = reader.offset();
+                        this.dimensionSizes = [];
+                        for (let i = 0; i < this.dimensionality - 1; i++) {
+                            this.dimensionSizes.push(reader.int32());
+                        }
+                        break;
+                    case 2: // Chunked
+                        this.address = reader.offset();
+                        this.dimensionSizes = [];
+                        for (let i = 0; i < this.dimensionality - 1; i++) {
+                            this.dimensionSizes.push(reader.int32());
+                        }
+                        this.datasetElementSize = reader.int32();
+                        break;
+                    default:
+                        throw new hdf5.Error(`Unsupported data layout class '${this.layoutClass}'.`);
+                }
+                break;
+            }
+            case 3: {
+                this.layoutClass = reader.byte();
+                switch (this.layoutClass) {
+                    case 0: // Compact
+                        this.size = reader.uint16();
+                        reader.skip(2);
+                        this.address = reader.position;
+                        break;
+                    case 1: // Contiguous
+                        this.address = reader.offset();
+                        this.size = reader.length();
+                        break;
+                    case 2: // Chunked
+                        this.dimensionality = reader.byte();
+                        this.address = reader.offset();
+                        this.dimensionSizes = [];
+                        for (let i = 0; i < this.dimensionality - 1; i++) {
+                            this.dimensionSizes.push(reader.int32());
+                        }
+                        this.datasetElementSize = reader.int32();
+                        break;
+                    default:
+                        throw new hdf5.Error(`Unsupported data layout class '${this.layoutClass}'.`);
+                }
+                break;
+            }
+            default: {
+                throw new hdf5.Error(`Unsupported data layout version '${version}'.`);
+            }
+        }
+    }
+};
+
+hdf5.GroupInfo = class {
+
+    constructor(reader) {
+        const version = reader.byte();
+        switch (version) {
+            case 0: {
+                const flags = reader.byte();
+                if ((flags & 0x01) != 0) {
+                    this.maxCompactLinks = reader.uint16();
+                    this.minDenseLinks = reader.uint16();
+                }
+                if ((flags & 0x02) != 0) {
+                    this.estimatedEntriesNumber = reader.uint16();
+                    this.estimatedLinkNameLengthEntries = reader.uint16();
+                }
+                break;
+            }
+            default:
+                throw new hdf5.Error(`Unsupported group info version '${version}'.`);
+        }
+    }
+};
+
+hdf5.FilterPipeline = class {
+
+    constructor(reader) {
+        // https://support.hdfgroup.org/HDF5/doc/H5.format.html#FilterMessage
+        const version = reader.byte();
+        switch (version) {
+            case 1: {
+                this.filters = [];
+                const numberOfFilters = reader.byte();
+                reader.skip(2);
+                reader.skip(4);
+                for (let i = 0; i < numberOfFilters; i++) {
+                    this.filters.push(new hdf5.Filter(reader));
+                    reader.align(8);
+                }
+                break;
+            }
+            default:
+                throw new hdf5.Error(`Unsupported filter pipeline message version '${version}'.`);
+        }
+    }
+};
+
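+// Filter ids follow the HDF5 filter registry; only gzip/deflate (id 1) is
+// implemented below. A hypothetical chunk would be decompressed roughly as
+// follows (filters are undone in reverse of the order they were applied
+// when the file was written):
+//
+//   let data = chunk.data;
+//   for (const filter of pipeline.filters.slice().reverse()) {
+//       data = filter.decode(data);
+//   }
+//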
+hdf5.Filter = class { + + constructor(reader) { + this.id = reader.int16(); + const nameLength = reader.int16(); + this.flags = reader.int16(); + const clientDataSize = reader.int16(); + this.name = reader.string(nameLength, 'ascii'); + this.clientData = reader.read(clientDataSize * 4); + } + + decode(data) { + switch (this.id) { + case 1: { // gzip + const archive = zip.Archive.open(data); + return archive.entries.get('').peek(); + } + default: { + throw new hdf5.Error(`Unsupported filter '${this.name}'.`); + } + } + } +}; + +hdf5.Attribute = class { + + constructor(reader) { + const version = reader.byte(); + switch (version) { + case 1: { + reader.skip(1); + const nameSize = reader.uint16(); + const datatypeSize = reader.uint16(); + const dataspaceSize = reader.uint16(); + this.name = reader.string(nameSize, 'utf-8'); + reader.align(8); + this._datatype = new hdf5.Datatype(reader.clone()); + reader.skip(datatypeSize); + reader.align(8); + this._dataspace = new hdf5.Dataspace(reader.clone()); + reader.skip(dataspaceSize); + reader.align(8); + this._data = this._dataspace.read(this._datatype, reader); + break; + } + case 3: { + reader.byte(); + const nameSize = reader.uint16(); + const datatypeSize = reader.uint16(); + const dataspaceSize = reader.uint16(); + const encoding = reader.byte() == 1 ? 'utf-8' : 'ascii'; + this.name = reader.string(nameSize, encoding); + this._datatype = new hdf5.Datatype(reader.clone()); + reader.skip(datatypeSize); + this._dataspace = new hdf5.Dataspace(reader.clone()); + reader.skip(dataspaceSize); + this._data = this._dataspace.read(this._datatype, reader); + break; + } + default: + throw new hdf5.Error(`Unsupported attribute message version '${version}'.`); + } + } + + decodeValue(globalHeap) { + if (this._data) { + return this._dataspace.decode(this._datatype, this._data, globalHeap); + } + return null; + } +}; + +hdf5.ObjectHeaderContinuation = class { + + constructor(reader) { + this.offset = reader.offset(); + this.length = reader.length(); + } +}; + +hdf5.SymbolTable = class { + + constructor(reader) { + this.treeAddress = reader.offset(); // hdf5.Tree pointer + this.heapAddress = reader.offset(); // hdf5.Heap pointer + } +}; + +hdf5.ObjectModificationTime = class { + + constructor(reader, type) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#ModificationTimeMessage + switch (type) { + case 0x000E: { + this.year = reader.uint32(); + this.month = reader.uint16(); + this.day = reader.uint16(); + this.hour = reader.uint16(); + this.minute = reader.uint16(); + this.second = reader.uint16(); + reader.skip(2); + break; + } + case 0x0012: { + const version = reader.byte(); + reader.skip(3); + switch (version) { + case 1: + this.timestamp = reader.uint32(); + break; + default: + throw new hdf5.Error(`Unsupported object modification time message version '${version}'.`); + } + break; + } + default: { + throw new hdf5.Error(`Unsupported object modification time message type '${type}'.`); + } + } + } +}; + +hdf5.AttributeInfo = class { + + constructor(reader) { + const version = reader.byte(); + switch (version) { + case 0: { + const flags = reader.byte(); + if ((flags & 1) != 0) { + this.maxCreationIndex = reader.uint64(); + } + this.fractalHeapAddress = reader.offset(); + this.attributeNameTreeAddress = reader.offset(); + if ((flags & 2) != 0) { + this.attributeCreationOrderTreeAddress = reader.offset(); + } + break; + } + default: + throw new hdf5.Error(`Unsupported attribute info message version '${version}'.`); + } + } +}; + +hdf5.Tree = class { 
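+    // Version 1 B-tree node: type 0 stores group entries, type 1 stores raw
+    // data chunk entries. Non-leaf levels recurse and flatten their children
+    // into `nodes`.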
+ + constructor(reader, dimensionality) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#V1Btrees + if (!reader.match('TREE')) { + throw new hdf5.Error("Not a valid 'TREE' block."); + } + this.type = reader.byte(); + this.level = reader.byte(); + const entries = reader.uint16(); + reader.offset(); // address of left sibling + reader.offset(); // address of right sibling + this.nodes = []; + switch (this.type) { + case 0: { // Group nodes + for (let i = 0; i < entries; i++) { + reader.length(); + const childPointer = reader.offset(); + if (this.level == 0) { + const node = new hdf5.SymbolTableNode(reader.at(childPointer)); + this.nodes.push(node); + } else { + const tree = new hdf5.Tree(reader.at(childPointer)); + this.nodes.push(...tree.nodes); + } + } + break; + } + case 1: { // Raw data chunk nodes + for (let i = 0; i < entries; i++) { + const size = reader.int32(); + const filterMask = reader.int32(); + const fields = []; + for (let j = 0; j < dimensionality; j++) { + fields.push(reader.uint64()); + } + const childPointer = reader.offset(); + if (this.level == 0) { + const data = reader.at(childPointer).read(size); + this.nodes.push({ data: data, fields: fields, filterMask: filterMask }); + } else { + const tree = new hdf5.Tree(reader.at(childPointer), dimensionality); + this.nodes.push(...tree.nodes); + } + } + break; + } + default: { + throw new hdf5.Error(`Unsupported B-Tree node type '${this.type}'.`); + } + } + } +}; + +hdf5.Heap = class { + + constructor(reader) { + this._reader = reader; + if (!reader.match('HEAP')) { + throw new hdf5.Error("Not a valid 'HEAP' block."); + } + const version = reader.byte(); + switch (version) { + case 0: { + reader.skip(3); + this._dataSize = reader.length(); + this._offsetToHeadOfFreeList = reader.length(); + this._dataAddress = reader.offset(); + break; + } + default: { + throw new hdf5.Error(`Unsupported Local Heap version '${version}'.`); + } + } + } + + getString(offset) { + const reader = this._reader.at(this._dataAddress + offset); + return reader.string(-1, 'utf-8'); + } +}; + +hdf5.GlobalHeap = class { + + constructor(reader) { + this._reader = reader; + this._collections = new Map(); + } + + get(globalHeapID) { + const address = globalHeapID.address; + if (!this._collections.has(address)) { + this._collections.set(address, new hdf5.GlobalHeapCollection(this._reader.at(address))); + } + return this._collections.get(globalHeapID.address).getObject(globalHeapID.objectIndex); + } +}; + +hdf5.GlobalHeapCollection = class { + + constructor(reader) { + const startPosition = reader.position; + if (!reader.match('GCOL')) { + throw new hdf5.Error("Not a valid 'GCOL' block."); + } + const version = reader.byte(); + switch (version) { + case 1: { + reader.skip(3); + this._objects = new Map(); + const size = reader.length(); + const endPosition = startPosition + size; + while (reader.position < endPosition) { + const index = reader.uint16(); + if (index == 0) { + break; + } + this._objects.set(index, new hdf5.GlobalHeapObject(reader)); + reader.align(8); + } + break; + } + default: { + throw new hdf5.Error(`Unsupported global heap collection version '${version}'.`); + } + } + } + + getObject(objectIndex) { + if (this._objects.has(objectIndex)) { + return this._objects.get(objectIndex); + } + return null; + } +}; + +hdf5.GlobalHeapObject = class { + + constructor(reader) { + reader.uint16(); + reader.skip(4); + this._position = reader.position; + this._reader = reader; + const length = reader.length(); + reader.skip(length); + } + + 
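+    // Returns a reader positioned at this object's data; callers read the
+    // length prefix first (see hdf5.Datatype.decode).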
reader() { + return this._reader.at(this._position); + } +}; + +hdf5.GlobalHeapID = class { + + constructor(reader) { + this.address = reader.offset(); + this.objectIndex = reader.uint32(); + } +}; + +hdf5.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'HDF5 Error'; + } +}; + +export const File = hdf5.File; diff --git a/hickle.js b/hickle.js new file mode 100644 index 00000000000..0018df2d409 --- /dev/null +++ b/hickle.js @@ -0,0 +1,168 @@ + +const hickle = {}; + +hickle.ModelFactory = class { + + match(context) { + const group = context.peek('hdf5'); + if (group && group.attributes && group.attributes.get('CLASS') === 'hickle') { + return group; + } + return null; + } + + async open(context, target) { + return new hickle.Model(target); + } +}; + +hickle.Model = class { + + constructor(group) { + this.format = 'Hickle Weights'; + this.graphs = [ new hickle.Graph(group) ]; + } +}; + +hickle.Graph = class { + + constructor(group) { + this.inputs = []; + this.outputs = []; + const deserialize = (group) => { + if (group && group.attributes.has('type')) { + const type = group.attributes.get('type'); + if (Array.isArray(type) && type.length && typeof type[0] === 'string') { + switch (type[0]) { + case 'hickle': + case 'dict_item': { + if (group.groups.size == 1) { + return deserialize(group.groups.values().next().value); + } + throw new hickle.Error(`Invalid Hickle type value '${type[0]}'.`); + } + case 'dict': { + const dict = new Map(); + for (const [name, obj] of group.groups) { + const value = deserialize(obj); + dict.set(name, value); + } + return dict; + } + case 'ndarray': { + return group.value; + } + default: { + throw new hickle.Error(`Unsupported Hickle type '${type[0]}'`); + } + } + } + throw new hickle.Error(`Unsupported Hickle type '${JSON.stringify(type)}'`); + } + throw new hickle.Error('Unsupported Hickle group.'); + }; + const obj = deserialize(group); + const layers = new Map(); + if (obj && obj instanceof Map && Array.from(obj.values()).every((value) => value.type && value.shape)) { + for (const [key, value] of obj) { + const tensor = new hickle.Tensor(key, value.shape, value.type, value.littleEndian, value.type === 'string' ? value.value : value.data); + const bits = key.split('.'); + const parameter = bits.pop(); + const layer = bits.join('.'); + if (!layers.has(layer)) { + layers.set(layer, []); + } + layers.get(layer).push({ name: parameter, value: tensor }); + } + } + this.nodes = Array.from(layers).map(([name, value]) => new hickle.Node(name, value)); + } +}; + +hickle.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +hickle.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new hickle.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name= name; + this.type = type ? type : initializer ? initializer.type : null; + this.initializer = initializer || null; + } +}; + +hickle.Node = class { + + constructor(name, parameters) { + this.type = { name: 'Weights' }; + this.name = name; + this.inputs = parameters.map((parameter) => { + return new hickle.Argument(parameter.name, [ + new hickle.Value(parameter.value.name, null, parameter.value) + ]); + }); + this.outputs = []; + this.attributes = []; + } +}; + +hickle.Tensor = class { + + constructor(name, shape, type, littleEndian, data) { + this.name = name; + this.type = new hickle.TensorType(type, new hickle.TensorShape(shape)); + this.encoding = littleEndian ? 
'<' : '>'; + this._data = data; + } + + get values() { + if (Array.isArray(this._data) || this._data === null) { + return null; + } + if (this._data instanceof Uint8Array) { + return this._data; + } + return this._data.peek(); + } +}; + +hickle.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType; + this.shape = shape; + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +hickle.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + return this.dimensions ? (`[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`) : ''; + } +}; + +hickle.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Hickle model.'; + } +}; + +export const ModelFactory = hickle.ModelFactory; diff --git a/icon.png b/icon.png new file mode 100644 index 00000000000..75a1a0a6a04 Binary files /dev/null and b/icon.png differ diff --git a/imgdnn.js b/imgdnn.js new file mode 100644 index 00000000000..6a3dfe270be --- /dev/null +++ b/imgdnn.js @@ -0,0 +1,66 @@ + +const imgdnn = {}; + +imgdnn.ModelFactory = class { + + match(context) { + const stream = context.stream; + const signature = [ 0x49, 0x4d, 0x47, 0x44, 0x4e, 0x4e ]; // IMGDNN + if (stream && stream.length >= signature.length && stream.peek(6).every((value, index) => value === signature[index])) { + return 'imgdnn'; + } + return null; + } + + open(/* context */) { + throw new imgdnn.Error('Invalid file content. File contains undocumented IMGDNN data.'); + } +}; + +imgdnn.Model = class { + + constructor(metadata, model) { + this._format = 'IMGDNN'; + this._graphs = [ new imgdnn.Graph(metadata, model) ]; + } + + get format() { + return this._format; + } + + get graphs() { + return this._graphs; + } +}; + +imgdnn.Graph = class { + + constructor(/* metadata, model */) { + this._inputs = []; + this._outputs = []; + this._nodes = []; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +imgdnn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading IMGDNN model.'; + } +}; + +export const ModelFactory = imgdnn.ModelFactory; + diff --git a/index.html b/index.html new file mode 100644 index 00000000000..9e17b8c6584 --- /dev/null +++ b/index.html @@ -0,0 +1,504 @@ + + + + + + + + + +Netron + + + + + + + + + + +
+ + + + \ No newline at end of file diff --git a/index.js b/index.js new file mode 100644 index 00000000000..2f7c26b74d6 --- /dev/null +++ b/index.js @@ -0,0 +1,112 @@ + +/* eslint-env es2015 */ + +/* eslint-disable no-var */ + +if (window.location.hostname.endsWith('.github.io')) { + window.location.replace('https://netron.app'); +} + +window.exports = {}; + +window.exports.require = function(id, callback) { + var url = new URL(`${id}.js`, window.location.href).href; + var scripts = document.head.getElementsByTagName('script'); + for (var i = 0; i < scripts.length; i++) { + if (url === scripts[i].getAttribute('src')) { + throw new Error(`Duplicate import of '${url}'.`); + } + } + var script = document.createElement('script'); + script.setAttribute('id', id); + script.setAttribute('type', 'module'); + /* eslint-disable no-use-before-define */ + var loadHandler = function() { + script.removeEventListener('load', loadHandler); + script.removeEventListener('error', errorHandler); + callback(); + }; + var errorHandler = function(e) { + script.removeEventListener('load', loadHandler); + script.removeEventListener('error', errorHandler); + callback(null, new Error(`The script '${e.target.src}' failed to load.`)); + }; + /* eslint-enable no-use-before-define */ + script.addEventListener('load', loadHandler, false); + script.addEventListener('error', errorHandler, false); + script.setAttribute('src', url); + document.head.appendChild(script); +}; + +window.exports.preload = function(callback) { + var modules = [ + [ './view' ], + [ './json', './xml', './protobuf', './hdf5', './grapher', './browser' ], + [ './base', './text', './flatbuffers', './flexbuffers', './zip', './tar', './python', './dagre' ] + ]; + var next = function() { + if (modules.length === 0) { + callback(); + return; + } + var ids = modules.pop(); + var resolved = ids.length; + for (var i = 0; i < ids.length; i++) { + window.exports.require(ids[i], function(module, error) { + if (error) { + callback(null, error); + return; + } + resolved--; + if (resolved === 0) { + next(); + } + }, true); + } + }; + next(); +}; + +window.exports.terminate = function(message, action, callback) { + document.getElementById('message-text').innerText = message; + var button = document.getElementById('message-button'); + if (action) { + button.style.removeProperty('display'); + button.innerText = action; + button.onclick = function() { + callback(); + }; + button.focus(); + } else { + button.style.display = 'none'; + button.onclick = null; + } + if (window.__view__) { + try { + window.__view__.show('welcome message'); + } catch (error) { + // continue regardless of error + } + } + document.body.setAttribute('class', 'welcome message'); +}; + +window.addEventListener('error', function (event) { + var error = event instanceof ErrorEvent && event.error && event.error instanceof Error ? event.error : new Error(event && event.message ? 
event.message : JSON.stringify(event)); + window.exports.terminate(error.message); +}); + +window.addEventListener('load', function() { + if (!Symbol || !Symbol.asyncIterator) { + throw new Error('Your browser is not supported.'); + } + window.exports.preload(function(value, error) { + if (error) { + window.exports.terminate(error.message); + } else { + var host = new window.exports.browser.BrowserHost(); + window.__view__ = new window.exports.view.View(host); + window.__view__.start(); + } + }); +}); diff --git a/json.js b/json.js new file mode 100755 index 00000000000..36ddf73d3d7 --- /dev/null +++ b/json.js @@ -0,0 +1,564 @@ + +import * as text from './text.js'; + +const json = {}; +const bson = {}; + +json.TextReader = class { + + static open(data) { + const decoder = text.Decoder.open(data); + let state = ''; + for (let i = 0; i < 0x1000; i++) { + const c = decoder.decode(); + if (state === 'match') { + break; + } + if (c === undefined || c === '\0') { + if (state === '') { + return null; + } + break; + } + if (c <= ' ') { + if (c !== ' ' && c !== '\n' && c !== '\r' && c !== '\t') { + return null; + } + continue; + } + switch (state) { + case '': + if (c === '{') { + state = '{}'; + } else if (c === '[') { + state = '[]'; + } else { + return null; + } + break; + case '[]': + if (c !== '"' && c !== '-' && c !== '+' && c !== '{' && c !== '[' && (c < '0' || c > '9')) { + return null; + } + state = 'match'; + break; + case '{}': + if (c !== '"') { + return null; + } + state = 'match'; + break; + default: + break; + } + } + return new json.TextReader(decoder); + } + + constructor(decoder) { + this._decoder = decoder; + this._decoder.position = 0; + this._escape = { '"': '"', '\\': '\\', '/': '/', b: '\b', f: '\f', n: '\n', r: '\r', t: '\t' }; + } + + read() { + const stack = []; + this._decoder.position = 0; + this._position = 0; + this._char = this._decoder.decode(); + this._whitespace(); + let obj = undefined; + let first = true; + for (;;) { + if (Array.isArray(obj)) { + this._whitespace(); + let c = this._char; + if (c === ']') { + this._next(); + this._whitespace(); + if (stack.length > 0) { + obj = stack.pop(); + first = false; + continue; + } + if (this._char !== undefined) { + this._unexpected(); + } + return obj; + } + if (!first) { + if (this._char !== ',') { + this._unexpected(); + } + this._next(); + this._whitespace(); + c = this._char; + } + first = false; + switch (c) { + case '{': { + this._next(); + stack.push(obj); + const item = {}; + obj.push(item); + obj = item; + first = true; + break; + } + case '[': { + this._next(); + stack.push(obj); + const item = []; + obj.push(item); + obj = item; + first = true; + break; + } + default: { + obj.push(c === '"' ? 
this._string() : this._literal()); + break; + } + } + } else if (obj instanceof Object) { + this._whitespace(); + let c = this._char; + if (c === '}') { + this._next(); + this._whitespace(); + if (stack.length > 0) { + obj = stack.pop(); + first = false; + continue; + } + if (this._char !== undefined) { + this._unexpected(); + } + return obj; + } + if (!first) { + if (this._char !== ',') { + this._unexpected(); + } + this._next(); + this._whitespace(); + c = this._char; + } + first = false; + if (c === '"') { + const key = this._string(); + switch (key) { + case '__proto__': + case 'constructor': + case 'prototype': + throw new json.Error(`Invalid key '${key}' ${this._location()}`); + default: + break; + } + this._whitespace(); + if (this._char !== ':') { + this._unexpected(); + } + this._next(); + this._whitespace(); + c = this._char; + switch (c) { + case '{': { + this._next(); + stack.push(obj); + const value = {}; + obj[key] = value; + obj = value; + first = true; + break; + } + case '[': { + this._next(); + stack.push(obj); + const value = []; + obj[key] = value; + obj = value; + first = true; + break; + } + default: { + obj[key] = c === '"' ? this._string() : this._literal(); + break; + } + } + this._whitespace(); + continue; + } + this._unexpected(); + } else { + const c = this._char; + switch (c) { + case '{': { + this._next(); + this._whitespace(); + obj = {}; + first = true; + break; + } + case '[': { + this._next(); + this._whitespace(); + obj = []; + first = true; + break; + } + default: { + const value = c === '"' ? this._string() : c >= '0' && c <= '9' ? this._number() : this._literal(); + this._whitespace(); + if (this._char !== undefined) { + this._unexpected(); + } + return value; + } + } + } + } + } + + _next() { + if (this._char === undefined) { + this._unexpected(); + } + this._position = this._decoder.position; + this._char = this._decoder.decode(); + } + + _whitespace() { + while (this._char === ' ' || this._char === '\n' || this._char === '\r' || this._char === '\t') { + this._next(); + } + } + + _literal() { + const c = this._char; + if (c >= '0' && c <= '9') { + return this._number(); + } + switch (c) { + case 't': this._expect('true'); return true; + case 'f': this._expect('false'); return false; + case 'n': this._expect('null'); return null; + case 'N': this._expect('NaN'); return NaN; + case 'I': this._expect('Infinity'); return Infinity; + case '-': return this._number(); + default: this._unexpected(); + } + return null; + } + + _number() { + let value = ''; + if (this._char === '-') { + value = '-'; + this._next(); + } + if (this._char === 'I') { + this._expect('Infinity'); + return -Infinity; + } + const c = this._char; + if (c < '0' || c > '9') { + this._unexpected(); + } + value += c; + this._next(); + if (c === '0') { + const n = this._char; + if (n >= '0' && n <= '9') { + this._unexpected(); + } + } + while (this._char >= '0' && this._char <= '9') { + value += this._char; + this._next(); + } + if (this._char === '.') { + value += '.'; + this._next(); + const n = this._char; + if (n < '0' || n > '9') { + this._unexpected(); + } + while (this._char >= '0' && this._char <= '9') { + value += this._char; + this._next(); + } + } + if (this._char === 'e' || this._char === 'E') { + value += this._char; + this._next(); + const s = this._char; + if (s === '-' || s === '+') { + value += this._char; + this._next(); + } + const c = this._char; + if (c < '0' || c > '9') { + this._unexpected(); + } + value += this._char; + this._next(); + while (this._char >= '0' && 
this._char <= '9') {
+                value += this._char;
+                this._next();
+            }
+        }
+        return +value;
+    }
+
+    _string() {
+        let value = '';
+        this._next();
+        while (this._char != '"') {
+            if (this._char === '\\') {
+                this._next();
+                if (this._char === 'u') {
+                    this._next();
+                    let uffff = 0;
+                    for (let i = 0; i < 4; i++) {
+                        const hex = parseInt(this._char, 16);
+                        if (!isFinite(hex)) {
+                            this._unexpected();
+                        }
+                        this._next();
+                        uffff = uffff * 16 + hex;
+                    }
+                    value += String.fromCharCode(uffff);
+                } else if (this._escape[this._char]) {
+                    value += this._escape[this._char];
+                    this._next();
+                } else {
+                    this._unexpected();
+                }
+            } else if (this._char < ' ') {
+                this._unexpected();
+            } else {
+                value += this._char;
+                this._next();
+            }
+        }
+        this._next();
+        return value;
+    }
+
+    _expect(value) {
+        for (let i = 0; i < value.length; i++) {
+            if (value[i] !== this._char) {
+                this._unexpected();
+            }
+            this._next();
+        }
+    }
+
+    _unexpected() {
+        let c = this._char;
+        if (c === undefined) {
+            throw new json.Error('Unexpected end of JSON input.');
+        } else if (c === '"') {
+            c = 'string';
+        } else if ((c >= '0' && c <= '9') || c === '-') {
+            c = 'number';
+        } else {
+            if (c < ' ' || c > '\x7F') {
+                const name = Object.keys(this._escape).filter((key) => this._escape[key] === c);
+                c = (name.length === 1) ? `\\${name}` : `\\u${(`000${c.charCodeAt(0).toString(16)}`).slice(-4)}`;
+            }
+            c = `token '${c}'`;
+        }
+        throw new json.Error(`Unexpected ${c} ${this._location()}`);
+    }
+
+    _location() {
+        let line = 1;
+        let column = 1;
+        this._decoder.position = 0;
+        let c;
+        do {
+            if (this._decoder.position === this._position) {
+                return `at ${line}:${column}.`;
+            }
+            c = this._decoder.decode();
+            if (c === '\n') {
+                line++;
+                column = 1;
+            } else {
+                column++;
+            }
+        }
+        while (c !== undefined);
+        return `at ${line}:${column}.`;
+    }
+};
+
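+// Usage sketch (illustrative only): `open` returns null when the buffer does
+// not look like JSON, so callers can probe before committing to a parse:
+//
+//   const reader = json.TextReader.open(data);
+//   const value = reader ? reader.read() : undefined;
+//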
+json.BinaryReader = class {
+
+    static open(data) {
+        return data ? new json.BinaryReader(data) : null;
+    }
+
+    constructor(data) {
+        this._buffer = data instanceof Uint8Array ? data : data.peek();
+    }
+
+    read() {
+        const buffer = this._buffer;
+        const length = buffer.length;
+        const view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
+        const asciiDecoder = new TextDecoder('ascii');
+        const utf8Decoder = new TextDecoder('utf-8');
+        let position = 0;
+        const skip = (offset) => {
+            position += offset;
+            if (position > length) {
+                throw new bson.Error(`Expected ${position - length} more bytes. The file might be corrupted. Unexpected end of file.`);
+            }
+        };
+        const header = () => {
+            const start = position;
+            skip(4);
+            const size = view.getInt32(start, true);
+            if (size < 5 || start + size > length || buffer[start + size - 1] != 0x00) {
+                throw new bson.Error('Invalid file size.');
+            }
+        };
+        header();
+        const stack = [];
+        let obj = {};
+        for (;;) {
+            skip(1);
+            const type = buffer[position - 1];
+            if (type == 0x00) {
+                if (stack.length === 0) {
+                    break;
+                }
+                obj = stack.pop();
+                continue;
+            }
+            const start = position;
+            position = buffer.indexOf(0x00, start) + 1;
+            const key = asciiDecoder.decode(buffer.subarray(start, position - 1));
+            let value = null;
+            switch (type) {
+                case 0x01: { // float64
+                    const start = position;
+                    skip(8);
+                    value = view.getFloat64(start, true);
+                    break;
+                }
+                case 0x02: { // string
+                    skip(4);
+                    const size = view.getInt32(position - 4, true);
+                    const start = position;
+                    skip(size);
+                    value = utf8Decoder.decode(buffer.subarray(start, position - 1));
+                    if (buffer[position - 1] !== 0x00) {
+                        throw new bson.Error('String missing terminal 0.');
+                    }
+                    break;
+                }
+                case 0x03: { // object
+                    header();
+                    value = {};
+                    break;
+                }
+                case 0x04: { // array
+                    header();
+                    value = [];
+                    break;
+                }
+                case 0x05: { // bytes
+                    const start = position;
+                    skip(5);
+                    const size = view.getInt32(start, true);
+                    const subtype = buffer[start + 4];
+                    if (subtype !== 0x00) {
+                        throw new bson.Error(`Unsupported binary subtype '${subtype}'.`);
+                    }
+                    skip(size);
+                    value = buffer.subarray(start + 5, position);
+                    break;
+                }
+                case 0x08: { // boolean
+                    skip(1);
+                    value = buffer[position - 1];
+                    if (value > 1) {
+                        throw new bson.Error(`Invalid boolean value '${value}'.`);
+                    }
+                    value = value === 1 ? true : false;
+                    break;
+                }
+                case 0x0A: // null
+                    value = null;
+                    break;
+                case 0x10: { // int32
+                    const start = position;
+                    skip(4);
+                    value = view.getInt32(start, true);
+                    break;
+                }
+                case 0x11: { // uint64
+                    const start = position;
+                    skip(8);
+                    value = view.getUint64(start, true).toNumber();
+                    break;
+                }
+                case 0x12: { // int64
+                    const start = position;
+                    skip(8);
+                    value = view.getInt64(start, true).toNumber();
+                    break;
+                }
+                default: {
+                    throw new bson.Error(`Unsupported value type '${type}'.`);
+                }
+            }
+            if (Array.isArray(obj)) {
+                if (obj.length !== parseInt(key, 10)) {
+                    throw new bson.Error(`Invalid array index '${key}'.`);
+                }
+                obj.push(value);
+            } else {
+                switch (key) {
+                    case '__proto__':
+                    case 'constructor':
+                    case 'prototype':
+                        throw new bson.Error(`Invalid key '${key}' at ${position}.`);
+                    default:
+                        break;
+                }
+                obj[key] = value;
+            }
+            if (type === 0x03 || type === 0x04) {
+                stack.push(obj);
+                obj = value;
+            }
+        }
+        if (position !== length) {
+            throw new bson.Error(`Unexpected data at '${position}'.`);
+        }
+        return obj;
+    }
+};
+
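+// Note on the layout parsed above (BSON): a document is a length-prefixed
+// sequence of `type byte | key cstring | value` elements terminated by 0x00;
+// nested documents (0x03) and arrays (0x04) are walked iteratively via a
+// stack rather than recursion.
+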
+json.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'JSON Error';
+    }
+};
+
+bson.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'BSON Error';
+    }
+};
+
+export const TextReader = json.TextReader;
+export const BinaryReader = json.BinaryReader;
diff --git a/keras-metadata.json b/keras-metadata.json
new file mode 100644
index 00000000000..36db0a5288b
--- /dev/null
+++ b/keras-metadata.json
@@ -0,0 +1,4758 @@
+[
+  {
+    "name": "Activation",
+    "module": "keras.layers",
+    "category": "Activation",
+    "description": "Applies an activation function to an output.",
+    "attributes": [
+      {
+        "description": "Activation function. It could be a callable, or the name of\n an activation from the `keras.activations` namespace.",
+        "name": "activation"
+      },
+      {
+        "name": "**kwargs",
+        "description": "Base layer keyword arguments, such as `name` and `dtype`."
+      }
+    ],
+    "inputs": [
+      {
+        "description": "Arbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the batch axis)\nwhen using this layer as the first layer in a model.",
+        "name": "input"
+      }
+    ],
+    "outputs": [
+      {
+        "description": "Same shape as input.",
+        "name": "output"
+      }
+    ],
+    "examples": [
+      {
+        "code": ">>> layer = keras.layers.Activation('relu')\n>>> layer([-3.0, -1.0, 0.0, 2.0])\n[0.0, 0.0, 0.0, 2.0]\n>>> layer = keras.layers.Activation(keras.activations.relu)\n>>> layer([-3.0, -1.0, 0.0, 2.0])\n[0.0, 0.0, 0.0, 2.0]"
+      }
+    ]
+  },
+  {
+    "name": "ActivityRegularization",
+    "module": "keras.layers",
+    "description": "Layer that applies an update to the cost function based on input activity.",
+    "attributes": [
+      {
+        "description": "L1 regularization factor (positive float).",
+        "name": "l1"
+      },
+      {
+        "description": "L2 regularization factor (positive float).",
+        "name": "l2"
+      }
+    ],
+    "inputs": [
+      {
+        "description": "Arbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.",
+        "name": "input"
+      }
+    ],
+    "outputs": [
+      {
+        "description": "Same shape as input.",
+        "name": "output"
+      }
+    ]
+  },
+  {
+    "name": "Add",
+    "module": "keras.layers",
+    "description": "Performs an elementwise addition operation.\n\nIt takes as input a list of tensors, all of the same shape,\nand returns a single tensor (also of the same shape).",
+    "inputs": [
+      {
+        "name": "inputs",
+        "list": true
+      }
+    ],
+    "outputs": [
+      {
+        "name": "output"
+      }
+    ],
+    "examples": [
+      {
+        "code": ">>> input_shape = (2, 3, 4)\n>>> x1 = np.random.rand(*input_shape)\n>>> x2 = np.random.rand(*input_shape)\n>>> y = keras.layers.Add()([x1, x2])"
+      },
+      {
+        "summary": "Usage in a Keras model:",
+        "code": ">>> input1 = keras.layers.Input(shape=(16,))\n>>> x1 = keras.layers.Dense(8, activation='relu')(input1)\n>>> input2 = keras.layers.Input(shape=(32,))\n>>> x2 = keras.layers.Dense(8, activation='relu')(input2)\n>>> # equivalent to `added = keras.layers.add([x1, x2])`\n>>> added = keras.layers.Add()([x1, x2])\n>>> out = keras.layers.Dense(4)(added)\n>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)"
+      }
+    ]
+  },
+  {
+    "name": "Attention",
+    "module": "keras.layers",
+    "description": "Dot-product attention layer, a.k.a. Luong-style attention.\n\nInputs are a list with 2 or 3 elements:\n1. A `query` tensor of shape `(batch_size, Tq, dim)`.\n2. A `value` tensor of shape `(batch_size, Tv, dim)`.\n3. An optional `key` tensor of shape `(batch_size, Tv, dim)`. If none is\n supplied, `value` will be used as a `key`.\n\nThe calculation follows the steps:\n1. Calculate attention scores using `query` and `key` with shape\n `(batch_size, Tq, Tv)`.\n2. Use scores to calculate a softmax distribution with shape\n `(batch_size, Tq, Tv)`.\n3. Use the softmax distribution to create a linear combination of `value`\n with shape `(batch_size, Tq, dim)`.",
+    "attributes": [
+      {
+        "description": "If `True`, will create a scalar variable to scale the\n attention scores.",
+        "name": "use_scale"
+      },
+      {
+        "description": "Boolean. Set to `True` for decoder self-attention. Adds a mask\n such that position `i` cannot attend to positions `j > i`.
This prevents\n the flow of information from the future towards the past. Defaults to\n `False`.",
+        "name": "causal"
+      },
+      {
+        "description": "Float between 0 and 1. Fraction of the units to drop for the\n attention scores. Defaults to `0.0`.",
+        "name": "dropout"
+      },
+      {
+        "description": "List of the following tensors:\n - `query`: Query tensor of shape `(batch_size, Tq, dim)`.\n - `value`: Value tensor of shape `(batch_size, Tv, dim)`.\n - `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If\n not given, will use `value` for both `key` and `value`, which is\n the most common case.",
+        "name": "inputs"
+      },
+      {
+        "description": "List of the following tensors:\n - `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.\n If given, the output will be zero at the positions where\n `mask==False`.\n - `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.\n If given, will apply the mask such that values at positions\n where `mask==False` do not contribute to the result.",
+        "name": "mask"
+      },
+      {
+        "description": "bool, if `True`, returns the attention scores\n (after masking and softmax) as an additional output argument.",
+        "name": "return_attention_scores"
+      },
+      {
+        "description": "Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout).",
+        "name": "training"
+      },
+      {
+        "name": "score_mode",
+        "description": "Function to use to compute attention scores, one of\n `{\"dot\", \"concat\"}`. `\"dot\"` refers to the dot product between the\n query and key vectors. `\"concat\"` refers to the hyperbolic tangent\n of the concatenation of the `query` and `key` vectors.\n\nCall Args:"
+      },
+      {
+        "name": "use_causal_mask",
+        "description": "Boolean. Set to `True` for decoder self-attention. Adds\n a mask such that position `i` cannot attend to positions `j > i`.\n This prevents the flow of information from the future towards the\n past. Defaults to `False`.\n\nOutput:\n Attention outputs of shape `(batch_size, Tq, dim)`.\n (Optional) Attention scores after masking and softmax with shape\n `(batch_size, Tq, Tv)`."
+      },
+      {
+        "name": "seed",
+        "description": "A Python integer to use as random seed in case of `dropout`."
+      }
+    ]
+  },
+  {
+    "name": "Average",
+    "module": "keras.layers",
+    "category": "Tensor",
+    "description": "Averages a list of inputs element-wise.\n\nIt takes as input a list of tensors, all of the same shape,\nand returns a single tensor (also of the same shape).",
+    "inputs": [
+      {
+        "name": "inputs",
+        "list": true
+      }
+    ],
+    "outputs": [
+      {
+        "name": "output"
+      }
+    ],
+    "examples": [
+      {
+        "code": ">>> input_shape = (2, 3, 4)\n>>> x1 = np.random.rand(*input_shape)\n>>> x2 = np.random.rand(*input_shape)\n>>> y = keras.layers.Average()([x1, x2])"
+      },
+      {
+        "summary": "Usage in a Keras model:",
+        "code": ">>> input1 = keras.layers.Input(shape=(16,))\n>>> x1 = keras.layers.Dense(8, activation='relu')(input1)\n>>> input2 = keras.layers.Input(shape=(32,))\n>>> x2 = keras.layers.Dense(8, activation='relu')(input2)\n>>> # equivalent to `y = keras.layers.average([x1, x2])`\n>>> y = keras.layers.Average()([x1, x2])\n>>> out = keras.layers.Dense(4)(y)\n>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)"
+      }
+    ]
+  },
+  {
+    "name": "AveragePooling1D",
+    "module": "keras.layers",
+    "category": "Pool",
+    "description": "Average pooling for temporal data.\n\nDownsamples the input representation by taking the average value over the\nwindow defined by `pool_size`.
The window is shifted by `strides`. The\nresulting output when using the \"valid\" padding option has a shape of:\n`output_shape = (input_shape - pool_size + 1) / strides`\n\nThe resulting output shape when using the \"same\" padding option is:\n`output_shape = input_shape / strides`",
+    "attributes": [
+      {
+        "description": "int, size of the average pooling window.",
+        "name": "pool_size"
+      },
+      {
+        "description": "int or None. Specifies how much the pooling window moves\n for each pooling step. If None, it will default to `pool_size`.",
+        "name": "strides"
+      },
+      {
+        "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.",
+        "name": "padding"
+      },
+      {
+        "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, steps, features)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, features, steps)`. It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.",
+        "name": "data_format"
+      }
+    ],
+    "inputs": [
+      {
+        "description": "- If `data_format=\"channels_last\"`:\n 3D tensor with shape `(batch_size, steps, features)`.\n- If `data_format=\"channels_first\"`:\n 3D tensor with shape `(batch_size, features, steps)`.",
+        "name": "input"
+      }
+    ],
+    "outputs": [
+      {
+        "description": "- If `data_format=\"channels_last\"`:\n 3D tensor with shape `(batch_size, downsampled_steps, features)`.\n- If `data_format=\"channels_first\"`:\n 3D tensor with shape `(batch_size, features, downsampled_steps)`.",
+        "name": "output"
+      }
+    ],
+    "examples": [
+      {
+        "summary": "`strides=1` and `padding=\"valid\"`:",
+        "code": ">>> x = np.array([1., 2., 3., 4., 5.])\n>>> x = np.reshape(x, [1, 5, 1])\n>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,\n... strides=1, padding=\"valid\")\n>>> avg_pool_1d(x)"
+      },
+      {
+        "summary": "`strides=2` and `padding=\"valid\"`:",
+        "code": ">>> x = np.array([1., 2., 3., 4., 5.])\n>>> x = np.reshape(x, [1, 5, 1])\n>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,\n... strides=2, padding=\"valid\")\n>>> avg_pool_1d(x)"
+      },
+      {
+        "summary": "`strides=1` and `padding=\"same\"`:",
+        "code": ">>> x = np.array([1., 2., 3., 4., 5.])\n>>> x = np.reshape(x, [1, 5, 1])\n>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,\n...
strides=1, padding=\"same\")\n>>> avg_pool_1d(x)" + } + ] + }, + { + "name": "AveragePooling2D", + "module": "keras.layers", + "category": "Pool", + "description": "Average pooling operation for 2D spatial data.\n\nDownsamples the input along its spatial dimensions (height and width)\nby taking the average value over an input window\n(of size defined by `pool_size`) for each channel of the input.\nThe window is shifted by `strides` along each dimension.\n\nThe resulting output when using the `\"valid\"` padding option has a spatial\nshape (number of rows or columns) of:\n`output_shape = math.floor((input_shape - pool_size) / strides) + 1`\n(when `input_shape >= pool_size`)\n\nThe resulting output shape when using the `\"same\"` padding option is:\n`output_shape = math.floor((input_shape - 1) / strides) + 1`", + "attributes": [ + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.", + "name": "data_format" + }, + { + "description": "int or tuple of 2 integers, factors by which to downscale\n (dim1, dim2). If only one integer is specified, the same\n window length will be used for all dimensions.", + "name": "pool_size" + }, + { + "description": "int or tuple of 2 integers, or None. Strides values. If None,\n it will default to `pool_size`. If only one int is specified, the\n same stride size will be used for all dimensions.", + "name": "strides" + }, + { + "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.", + "name": "padding" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 4D tensor with shape `(batch_size, height, width, channels)`.\n- If `data_format=\"channels_first\"`:\n 4D tensor with shape `(batch_size, channels, height, width)`.", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 4D tensor with shape\n `(batch_size, pooled_height, pooled_width, channels)`.\n- If `data_format=\"channels_first\"`:\n 4D tensor with shape\n `(batch_size, channels, pooled_height, pooled_width)`.", + "name": "output" + } + ], + "examples": [ + { + "summary": "`strides=(1, 1)` and `padding=\"valid\"`:", + "code": ">>> x = np.array([[1., 2., 3.],\n... [4., 5., 6.],\n... [7., 8., 9.]])\n>>> x = np.reshape(x, [1, 3, 3, 1])\n>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),\n... strides=(1, 1), padding=\"valid\")\n>>> avg_pool_2d(x)" + }, + { + "summary": "`strides=(2, 2)` and `padding=\"valid\"`:", + "code": ">>> x = np.array([[1., 2., 3., 4.],\n... [5., 6., 7., 8.],\n... [9., 10., 11., 12.]])\n>>> x = np.reshape(x, [1, 3, 4, 1])\n>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),\n... strides=(2, 2), padding=\"valid\")\n>>> avg_pool_2d(x)" + }, + { + "summary": "`stride=(1, 1)` and `padding=\"same\"`:", + "code": ">>> x = np.array([[1., 2., 3.],\n... [4., 5., 6.],\n... 
[7., 8., 9.]])\n>>> x = np.reshape(x, [1, 3, 3, 1])\n>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),\n... strides=(1, 1), padding=\"same\")\n>>> avg_pool_2d(x)" + } + ] + }, + { + "name": "AveragePooling3D", + "module": "keras.layers", + "description": "Average pooling operation for 3D data (spatial or spatio-temporal).\n\nDownsamples the input along its spatial dimensions (depth, height, and\nwidth) by taking the average value over an input window (of size defined by\n`pool_size`) for each channel of the input. The window is shifted by\n`strides` along each dimension.", + "attributes": [ + { + "description": "int or tuple of 3 integers, factors by which to downscale\n (dim1, dim2, dim3). If only one integer is specified, the same\n window length will be used for all dimensions.", + "name": "pool_size" + }, + { + "description": "int or tuple of 3 integers, or None. Strides values. If None,\n it will default to `pool_size`. If only one int is specified, the\n same stride size will be used for all dimensions.", + "name": "strides" + }, + { + "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.", + "name": "padding" + }, + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while\n `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your Keras\n config file at `~/.keras/keras.json`. 
If you never set it, then it\n will be `\"channels_last\"`.", + "name": "data_format" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n- If `data_format=\"channels_first\"`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 5D tensor with shape:\n `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`\n- If `data_format=\"channels_first\"`:\n 5D tensor with shape:\n `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`", + "name": "output" + } + ], + "examples": [ + { + "code": "depth = 30\nheight = 30\nwidth = 30\nchannels = 3\n\ninputs = keras.layers.Input(shape=(depth, height, width, channels))\nlayer = keras.layers.AveragePooling3D(pool_size=3)\noutputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)" + } + ] + }, + { + "name": "BatchNorm", + "category": "Normalization", + "attributes": [ + { + "default": -1, + "name": "axis" + }, + { + "default": 0.001, + "name": "epsilon" + }, + { + "default": 0.99, + "name": "momentum" + }, + { + "default": true, + "name": "scale" + }, + { + "default": true, + "name": "center" + }, + { + "default": { + "class_name": "Ones", + "config": {} + }, + "name": "gamma_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "name": "moving_mean_initializer", + "visible": false + }, + { + "default": { + "class_name": "Ones", + "config": {} + }, + "name": "moving_variance_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "name": "beta_initializer", + "visible": false + }, + { + "name": "beta_regularizer", + "visible": false + }, + { + "name": "gamma_regularizer", + "visible": false + }, + { + "name": "beta_constraint" + }, + { + "name": "gamma_constraint" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "gamma" + }, + { + "name": "beta" + }, + { + "name": "running_mean" + }, + { + "name": "running_std" + } + ], + "outputs": [ + { + "name": "output" + } + ] + }, + { + "name": "BatchNormalization", + "module": "keras.layers", + "category": "Normalization", + "description": "Layer that normalizes its inputs.\n\nBatch normalization applies a transformation that maintains the mean output\nclose to 0 and the output standard deviation close to 1.\n\nImportantly, batch normalization works differently during training and\nduring inference.\n\n**During training** (i.e. when using `fit()` or when calling the layer/model\nwith the argument `training=True`), the layer normalizes its output using\nthe mean and standard deviation of the current batch of inputs. That is to\nsay, for each channel being normalized, the layer returns\n`gamma * (batch - mean(batch)) / sqrt(var(batch) + epsilon) + beta`, where:\n\n- `epsilon` is small constant (configurable as part of the constructor\narguments)\n- `gamma` is a learned scaling factor (initialized as 1), which\ncan be disabled by passing `scale=False` to the constructor.\n- `beta` is a learned offset factor (initialized as 0), which\ncan be disabled by passing `center=False` to the constructor.\n\n**During inference** (i.e. 
when using `evaluate()` or `predict()` or when\ncalling the layer/model with the argument `training=False` (which is the\ndefault)), the layer normalizes its output using a moving average of the\nmean and standard deviation of the batches it has seen during training. That\nis to say, it returns\n`gamma * (batch - self.moving_mean) / sqrt(self.moving_var+epsilon) + beta`.\n\n`self.moving_mean` and `self.moving_var` are non-trainable variables that\nare updated each time the layer is called in training mode, as such:\n\n- `moving_mean = moving_mean * momentum + mean(batch) * (1 - momentum)`\n- `moving_var = moving_var * momentum + var(batch) * (1 - momentum)`\n\nAs such, the layer will only normalize its inputs during inference\n*after having been trained on data that has similar statistics as the\ninference data*.",
+    "attributes": [
+      {
+        "default": -1,
+        "description": "Integer, the axis that should be normalized\n (typically the features axis). For instance, after a `Conv2D` layer\n with `data_format=\"channels_first\"`, use `axis=1`.",
+        "name": "axis"
+      },
+      {
+        "default": 0.001,
+        "description": "Small float added to variance to avoid dividing by zero.",
+        "name": "epsilon"
+      },
+      {
+        "default": 0.99,
+        "description": "Momentum for the moving average.",
+        "name": "momentum"
+      },
+      {
+        "default": true,
+        "description": "If `True`, multiply by `gamma`. If `False`, `gamma` is not used.\n When the next layer is linear this can be disabled\n since the scaling will be done by the next layer.",
+        "name": "scale",
+        "type": "boolean"
+      },
+      {
+        "default": true,
+        "description": "If `True`, add offset of `beta` to normalized tensor.\n If `False`, `beta` is ignored.",
+        "name": "center",
+        "type": "boolean"
+      },
+      {
+        "default": {
+          "class_name": "Ones",
+          "config": {}
+        },
+        "description": "Initializer for the gamma weight.",
+        "name": "gamma_initializer",
+        "visible": false
+      },
+      {
+        "default": {
+          "class_name": "Zeros",
+          "config": {}
+        },
+        "description": "Initializer for the moving mean.",
+        "name": "moving_mean_initializer",
+        "visible": false
+      },
+      {
+        "default": {
+          "class_name": "Ones",
+          "config": {}
+        },
+        "description": "Initializer for the moving variance.",
+        "name": "moving_variance_initializer",
+        "visible": false
+      },
+      {
+        "default": {
+          "class_name": "Zeros",
+          "config": {}
+        },
+        "description": "Initializer for the beta weight.",
+        "name": "beta_initializer",
+        "visible": false
+      },
+      {
+        "description": "Optional regularizer for the beta weight.",
+        "name": "beta_regularizer",
+        "visible": false
+      },
+      {
+        "description": "Optional regularizer for the gamma weight.",
+        "name": "gamma_regularizer",
+        "visible": false
+      },
+      {
+        "description": "Optional constraint for the beta weight.",
+        "name": "beta_constraint"
+      },
+      {
+        "description": "Optional constraint for the gamma weight.",
+        "name": "gamma_constraint"
+      },
+      {
+        "description": "Whether to use [Batch Renormalization](\n https://arxiv.org/abs/1702.03275). This adds extra variables during\n training. The inference is the same for either value of this parameter.",
+        "name": "renorm"
+      },
+      {
+        "description": "A dictionary that may map keys 'rmax', 'rmin', 'dmax' to\n scalar `Tensors` used to clip the renorm correction. The correction `(r,\n d)` is used as `corrected_value = normalized_value * r + d`, with `r`\n clipped to [rmin, rmax], and `d` to [-dmax, dmax].
Missing rmax, rmin,\n dmax are set to inf, 0, inf, respectively.",
+        "name": "renorm_clipping"
+      },
+      {
+        "description": "Momentum used to update the moving means and standard\n deviations with renorm. Unlike `momentum`, this affects training and\n should be neither too small (which would add noise) nor too large (which\n would give stale estimates). Note that `momentum` is still applied to get\n the means and variances for inference.",
+        "name": "renorm_momentum"
+      },
+      {
+        "description": "if `True`, use a faster, fused implementation, or raise a ValueError\n if the fused implementation cannot be used. If `None`, use the faster\n implementation if possible. If `False`, do not use the fused\n implementation.",
+        "name": "fused"
+      },
+      {
+        "description": "Boolean, if `True` the variables will be marked as trainable.",
+        "name": "trainable"
+      },
+      {
+        "description": "An `int`. By default, `virtual_batch_size` is `None`,\n which means batch normalization is performed across the whole batch. When\n `virtual_batch_size` is not `None`, instead perform \"Ghost Batch\n Normalization\", which creates virtual sub-batches which are each\n normalized separately (with shared gamma, beta, and moving statistics).\n Must divide the actual batch size during execution.",
+        "name": "virtual_batch_size"
+      },
+      {
+        "description": "A function taking the `Tensor` containing the (dynamic) shape of\n the input tensor and returning a pair (scale, bias) to apply to the\n normalized values (before gamma and beta), only during training. For\n example, if axis==-1,\n `adjustment = lambda shape: (\n tf.random.uniform(shape[-1:], 0.93, 1.07),\n tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized\n value by up to 7% up or down, then shift the result by up to 0.1\n (with independent scaling and bias for each feature but shared\n across all examples), and finally apply gamma and/or beta. If\n `None`, no adjustment is applied. Cannot be specified if\n virtual_batch_size is specified.",
+        "name": "adjustment"
+      },
+      {
+        "name": "synchronized",
+        "description": "Only applicable with the TensorFlow backend.\n If `True`, synchronizes the global batch statistics (mean and\n variance) for the layer across all devices at each training step\n in a distributed training strategy.\n If `False`, each replica uses its own local batch statistics."
+      },
+      {
+        "name": "**kwargs",
+        "description": "Base layer keyword arguments (e.g. `name` and `dtype`)."
+      }
+    ],
+    "inputs": [
+      {
+        "description": "Arbitrary. Use the keyword argument `input_shape` (tuple of\nintegers, does not include the samples axis) when using this layer as the\nfirst layer in a model.",
+        "name": "input"
+      },
+      {
+        "name": "gamma"
+      },
+      {
+        "name": "beta"
+      },
+      {
+        "name": "moving_mean"
+      },
+      {
+        "name": "moving_variance"
+      }
+    ],
+    "outputs": [
+      {
+        "description": "Same shape as input.\n\nReference:\n - [Ioffe and Szegedy, 2015](https://arxiv.org/abs/1502.03167).\n\n**About setting `layer.trainable = False` on a `BatchNormalization` layer:**\n\nThe meaning of setting `layer.trainable = False` is to freeze the layer,\ni.e. its internal state will not change during training:\nits trainable weights will not be updated\nduring `fit()` or `train_on_batch()`, and its state updates will not be run.\n\nUsually, this does not necessarily mean that the layer is run in inference\nmode (which is normally controlled by the `training` argument that can\nbe passed when calling a layer).
\"Frozen state\" and \"inference mode\"\nare two separate concepts.\n\nHowever, in the case of the `BatchNormalization` layer, **setting\n`trainable = False` on the layer means that the layer will be\nsubsequently run in inference mode** (meaning that it will use\nthe moving mean and the moving variance to normalize the current batch,\nrather than using the mean and variance of the current batch).\n\nThis behavior has been introduced in TensorFlow 2.0, in order\nto enable `layer.trainable = False` to produce the most commonly\nexpected behavior in the convnet fine-tuning use case.\n\nNote that:\n - Setting `trainable` on an model containing other layers will\n recursively set the `trainable` value of all inner layers.\n - If the value of the `trainable`\n attribute is changed after calling `compile()` on a model,\n the new value doesn't take effect for this model\n until `compile()` is called again.", + "name": "output" + } + ], + "references": [ + { + "description": "[Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)" + } + ] + }, + { + "name": "Bidirectional", + "module": "keras.layers", + "category": "Wrapper", + "description": "Bidirectional wrapper for RNNs.", + "attributes": [ + { + "default": "concat", + "description": "Mode by which outputs of the forward and backward RNNs\n will be combined. One of `{\"sum\", \"mul\", \"concat\", \"ave\", None}`.\n If `None`, the outputs will not be combined,\n they will be returned as a list. Defaults to `\"concat\"`.", + "name": "merge_mode" + }, + { + "description": "`keras.layers.RNN` instance, such as\n `keras.layers.LSTM` or `keras.layers.GRU`.\n It could also be a `keras.layers.Layer` instance\n that meets the following criteria:\n 1. Be a sequence-processing layer (accepts 3D+ inputs).\n 2. Have a `go_backwards`, `return_sequences` and `return_state`\n attribute (with the same semantics as for the `RNN` class).\n 3. Have an `input_spec` attribute.\n 4. Implement serialization via `get_config()` and `from_config()`.\n Note that the recommended way to create new RNN layers is to write a\n custom RNN cell and use it with `keras.layers.RNN`, instead of\n subclassing `keras.layers.Layer` directly.\n When `return_sequences` is `True`, the output of the masked\n timestep will be zero regardless of the layer's original\n `zero_output_for_mask` value.", + "name": "layer" + }, + { + "description": "Initial weights to load in the Bidirectional model\n", + "name": "weights" + }, + { + "description": "Optional `keras.layers.RNN`,\n or `keras.layers.Layer` instance to be used to handle\n backwards input processing.\n If `backward_layer` is not provided, the layer instance passed\n as the `layer` argument will be used to generate the backward layer\n automatically.\n Note that the provided `backward_layer` layer should have properties\n matching those of the `layer` argument, in particular\n it should have the same values for `stateful`, `return_states`,\n `return_sequences`, etc. 
In addition, `backward_layer`\n and `layer` should have different `go_backwards` argument values.\n A `ValueError` will be raised if these requirements are not met.", + "name": "backward_layer" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "examples": [ + { + "code": "model = Sequential([\n Input(shape=(5, 10)),\n Bidirectional(LSTM(10, return_sequences=True)),\n Bidirectional(LSTM(10)),\n Dense(5, activation=\"softmax\"),\n])\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n# With custom backward layer\nforward_layer = LSTM(10, return_sequences=True)\nbackward_layer = LSTM(10, activation='relu', return_sequences=True,\n go_backwards=True)\nmodel = Sequential([\n Input(shape=(5, 10)),\n Bidirectional(forward_layer, backward_layer=backward_layer),\n Dense(5, activation=\"softmax\"),\n])\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop')" + } + ] + }, + { + "name": "Concatenate", + "module": "keras.layers", + "category": "Tensor", + "description": "Concatenates a list of inputs.\n\nIt takes as input a list of tensors, all of the same shape except\nfor the concatenation axis, and returns a single tensor that is the\nconcatenation of all inputs.", + "attributes": [ + { + "description": "Axis along which to concatenate.", + "name": "axis" + }, + { + "description": "Standard layer keyword arguments.", + "name": "**kwargs" + } + ], + "inputs": [ + { + "name": "inputs", + "list": true + } + ], + "outputs": [ + { + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = np.arange(20).reshape(2, 2, 5)\n>>> y = np.arange(20, 30).reshape(2, 1, 5)\n>>> keras.layers.Concatenate(axis=1)([x, y])" + }, + { + "summary": "Usage in a Keras model:", + "code": ">>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n>>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n>>> y = keras.layers.Concatenate()([x1, x2])" + } + ] + }, + { + "name": "Conv1D", + "module": "keras.layers", + "category": "Layer", + "description": "1D convolution layer (e.g. temporal convolution).\n\nThis layer creates a convolution kernel that is convolved with the layer\ninput over a single spatial (or temporal) dimension to produce a tensor of\noutputs. If `use_bias` is True, a bias vector is created and added to the\noutputs. Finally, if `activation` is not `None`, it is applied to the\noutputs as well.", + "attributes": [ + { + "default": "linear", + "description": "Activation function. If `None`, no activation is applied.", + "name": "activation" + }, + { + "default": "valid", + "description": "string, `\"valid\"`, `\"same\"` or `\"causal\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input. When `padding=\"same\"` and\n `strides=1`, the output has the same size as the input.\n `\"causal\"` results in causal (dilated) convolutions, e.g. `output[t]`\n does not depend on `input[t+1:]`. Useful when modeling temporal data\n where the model should not violate the temporal order.\n See [WaveNet: A Generative Model for Raw Audio, section 2.1](\n https://arxiv.org/abs/1609.03499).", + "name": "padding" + }, + { + "default": true, + "description": "bool, if `True`, bias will be added to the output.", + "name": "use_bias", + "visible": false + }, + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.
`\"channels_last\"`\n corresponds to inputs with shape `(batch, steps, features)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, features, steps)`. It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.", + "name": "data_format" + }, + { + "default": [ + 1 + ], + "description": "int or tuple/list of 1 integer, specifying the stride length\n of the convolution. `strides > 1` is incompatible with\n `dilation_rate > 1`.", + "name": "strides" + }, + { + "default": [ + 1 + ], + "description": "int or tuple/list of 1 integers, specifying the dilation\n rate to use for dilated convolution.", + "name": "dilation_rate" + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector. If `None`, the\n default initializer (`\"zeros\"`) will be used.", + "name": "bias_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the convolution kernel. If `None`,\n the default initializer (`\"glorot_uniform\"`) will be used.", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "int, the dimension of the output space (the number of filters\n in the convolution).", + "name": "filters" + }, + { + "description": "int or tuple/list of 1 integer, specifying the size of the\n convolution window.", + "name": "kernel_size" + }, + { + "description": "Optional regularizer for the convolution kernel.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Optional regularizer for the bias vector.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Optional regularizer function for the output.", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The\n function must take as input the unprojected variable and must return\n the projected variable (which must have the same shape). Constraints\n are not safe to use when doing asynchronous distributed training.", + "name": "kernel_constraint" + }, + { + "description": "Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.", + "name": "bias_constraint" + }, + { + "description": "A positive int specifying the number of groups in which the\n input is split along the channel axis. Each group is convolved\n separately with `filters // groups` filters. 
The output is the\n concatenation of all the `groups` results along the channel axis.\n Input channels and `filters` must both be divisible by `groups`.", + "name": "groups" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 3D tensor with shape: `(batch_shape, steps, channels)`\n- If `data_format=\"channels_first\"`:\n A 3D tensor with shape: `(batch_shape, channels, steps)`", + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 3D tensor with shape: `(batch_shape, new_steps, filters)`\n- If `data_format=\"channels_first\"`:\n A 3D tensor with shape: `(batch_shape, filters, new_steps)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> # The inputs are 128-length vectors with 10 timesteps, and the\n>>> # batch size is 4.\n>>> x = np.random.rand(4, 10, 128)\n>>> y = keras.layers.Conv1D(32, 3, activation='relu')(x)\n>>> print(y.shape)\n(4, 8, 32)" + } + ] + }, + { + "name": "Conv2D", + "module": "keras.layers", + "category": "Layer", + "description": "2D convolution layer.\n\nThis layer creates a convolution kernel that is convolved with the layer\ninput over two spatial dimensions (height and width) to produce a tensor of\noutputs. If `use_bias` is True, a bias vector is created and added to the\noutputs. Finally, if `activation` is not `None`, it is applied to the\noutputs as well.", + "attributes": [ + { + "default": "linear", + "description": "Activation function. If `None`, no activation is applied.", + "name": "activation" + }, + { + "default": "valid", + "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input. When `padding=\"same\"` and\n `strides=1`, the output has the same size as the input.", + "name": "padding" + }, + { + "default": true, + "description": "bool, if `True`, bias will be added to the output.", + "name": "use_bias", + "type": "boolean", + "visible": false + }, + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape\n `(batch_size, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch_size, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.", + "name": "data_format" + }, + { + "default": [ + 1, + 1 + ], + "description": "int or tuple/list of 2 integers, specifying the stride length\n of the convolution. `strides > 1` is incompatible with\n `dilation_rate > 1`.", + "name": "strides" + }, + { + "default": [ + 1, + 1 + ], + "description": "int or tuple/list of 2 integers, specifying the dilation\n rate to use for dilated convolution.", + "name": "dilation_rate" + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector. If `None`, the\n default initializer (`\"zeros\"`) will be used.", + "name": "bias_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the convolution kernel.
If `None`,\n the default initializer (`\"glorot_uniform\"`) will be used.", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "int, the dimension of the output space (the number of filters\n in the convolution).", + "name": "filters" + }, + { + "description": "int or tuple/list of 2 integers, specifying the size of the\n convolution window.", + "name": "kernel_size" + }, + { + "description": "Optional regularizer for the convolution kernel.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Optional regularizer for the bias vector.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Optional regularizer function for the output.", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The\n function must take as input the unprojected variable and must return\n the projected variable (which must have the same shape). Constraints\n are not safe to use when doing asynchronous distributed training.", + "name": "kernel_constraint", + "visible": false + }, + { + "description": "Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.", + "name": "bias_constraint", + "visible": false + }, + { + "description": "A positive int specifying the number of groups in which the\n input is split along the channel axis. Each group is convolved\n separately with `filters // groups` filters. The output is the\n concatenation of all the `groups` results along the channel axis.\n Input channels and `filters` must both be divisible by `groups`.", + "name": "groups" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, height, width, channels)`\n- If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, channels, height, width)`", + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, new_height, new_width, filters)`\n- If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, filters, new_height, new_width)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = np.random.rand(4, 10, 10, 128)\n>>> y = keras.layers.Conv2D(32, 3, activation='relu')(x)\n>>> print(y.shape)\n(4, 8, 8, 32)" + } + ] + }, + { + "name": "Conv2DTranspose", + "module": "keras.layers", + "category": "Layer", + "description": "2D transposed convolution layer.\n\nThe need for transposed convolutions generally arises from the desire to use\na transformation going in the opposite direction of a normal convolution,\ni.e., from something that has the shape of the output of some convolution\nto something that has the shape of its input while maintaining a\nconnectivity pattern that is compatible with said convolution.", + "attributes": [ + { + "description": "int, the dimension of the output space (the number of filters\n in the transposed convolution).", + "name": "filters" + }, + { + "description": "int or tuple/list of 2 integers, specifying the size of the\n transposed convolution window.", + "name": "kernel_size" + }, + { + "description": "int or tuple/list of 2 integers, specifying the stride length\n of the transposed convolution.
`strides > 1` is incompatible with\n `dilation_rate > 1`.", + "name": "strides" + }, + { + "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input. When `padding=\"same\"` and\n `strides=1`, the output has the same size as the input.", + "name": "padding" + }, + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape\n `(batch_size, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch_size, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.", + "name": "data_format" + }, + { + "description": "int or tuple/list of 2 integers, specifying the dilation\n rate to use for dilated transposed convolution.", + "name": "dilation_rate" + }, + { + "description": "Activation function. If `None`, no activation is applied.", + "name": "activation" + }, + { + "default": true, + "description": "bool, if `True`, bias will be added to the output.", + "name": "use_bias", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the convolution kernel. If `None`,\n the default initializer (`\"glorot_uniform\"`) will be used.", + "name": "kernel_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector. If `None`, the\n default initializer (`\"zeros\"`) will be used.", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Optional regularizer for the convolution kernel.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Optional regularizer for the bias vector.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Optional regularizer function for the output.", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The\n function must take as input the unprojected variable and must return\n the projected variable (which must have the same shape).
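A built-in\n example (for illustration) is `keras.constraints.MaxNorm(3)`, which caps\n the norm of the incoming weight vectors at 3.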
Constraints\n are not safe to use when doing asynchronous distributed training.", + "name": "kernel_constraint" + }, + { + "description": "Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.", + "name": "bias_constraint" + }, + { + "description": "An integer or tuple/list of 2 integers,\n specifying the amount of padding along the height and width\n of the output tensor.\n Can be a single integer to specify the same value for all\n spatial dimensions.\n The amount of output padding along a given dimension must be\n lower than the stride along that same dimension.\n If set to `None` (default), the output shape is inferred.", + "name": "output_padding" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, height, width, channels)`\n- If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, channels, height, width)`", + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, new_height, new_width, filters)`\n- If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, filters, new_height, new_width)`", + "name": "output" + } + ], + "references": [ + { + "description": "[A guide to convolution arithmetic for deep learning]( https://arxiv.org/abs/1603.07285v1)" + }, + { + "description": "[Deconvolutional Networks]( https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)" + } + ], + "examples": [ + { + "code": ">>> x = np.random.rand(4, 10, 8, 128)\n>>> y = keras.layers.Conv2DTranspose(32, 2, 2, activation='relu')(x)\n>>> print(y.shape)\n(4, 20, 16, 32)" + } + ] + }, + { + "name": "Conv3D", + "module": "keras.layers", + "category": "Layer", + "description": "3D convolution layer.\n\nThis layer creates a convolution kernel that is convolved with the layer\ninput over three spatial dimensions to produce a tensor of\noutputs. If `use_bias` is True, a bias vector is created and added to the\noutputs. Finally, if `activation` is not `None`, it is applied to the\noutputs as well.", + "attributes": [ + { + "description": "int, the dimension of the output space (the number of filters\n in the convolution).", + "name": "filters" + }, + { + "description": "int or tuple/list of 3 integers, specifying the size of the\n convolution window.", + "name": "kernel_size" + }, + { + "description": "int or tuple/list of 3 integers, specifying the stride length\n of the convolution. `strides > 1` is incompatible with\n `dilation_rate > 1`.", + "name": "strides" + }, + { + "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input. When `padding=\"same\"` and\n `strides=1`, the output has the same size as the input.", + "name": "padding" + }, + { + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your Keras\n config file at `~/.keras/keras.json`.
If you never set it, then it\n will be `\"channels_last\"`.", + "name": "data_format" + }, + { + "description": "int or tuple/list of 3 integers, specifying the dilation\n rate to use for dilated convolution.", + "name": "dilation_rate" + }, + { + "description": "Activation function. If `None`, no activation is applied.", + "name": "activation" + }, + { + "default": true, + "description": "bool, if `True`, bias will be added to the output.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the convolution kernel. If `None`,\n the default initializer (`\"glorot_uniform\"`) will be used.", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector. If `None`, the\n default initializer (`\"zeros\"`) will be used.", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Optional regularizer for the convolution kernel.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Optional regularizer for the bias vector.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Optional regularizer function for the output.", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The\n function must take as input the unprojected variable and must return\n the projected variable (which must have the same shape). Constraints\n are not safe to use when doing asynchronous distributed training.", + "name": "kernel_constraint" + }, + { + "description": "Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.", + "name": "bias_constraint" + }, + { + "description": "A positive int specifying the number of groups in which the\n input is split along the channel axis. Each group is convolved\n separately with `filters // groups` filters. 
The output is the\n concatenation of all the `groups` results along the channel axis.\n Input channels and `filters` must both be divisible by `groups`.", + "name": "groups" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n- If `data_format=\"channels_first\"`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 5D tensor with shape:\n `(batch_size, new_spatial_dim1, new_spatial_dim2, new_spatial_dim3,\n filters)`\n- If `data_format=\"channels_first\"`:\n 5D tensor with shape:\n `(batch_size, filters, new_spatial_dim1, new_spatial_dim2,\n new_spatial_dim3)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = np.random.rand(4, 10, 10, 10, 128)\n>>> y = keras.layers.Conv3D(32, 3, activation='relu')(x)\n>>> print(y.shape)\n(4, 8, 8, 8, 32)" + } + ] + }, + { + "name": "ConvLSTM2D", + "module": "keras.layers", + "description": "2D Convolutional LSTM.\n\nSimilar to an LSTM layer, but the input transformations\nand recurrent transformations are both convolutional.", + "attributes": [ + { + "description": "int, the dimension of the output space (the number of filters\n in the convolution).", + "name": "filters" + }, + { + "description": "int or tuple/list of 2 integers, specifying the size of the\n convolution window.", + "name": "kernel_size" + }, + { + "description": "int or tuple/list of 2 integers, specifying the stride length\n of the convolution. `strides > 1` is incompatible with\n `dilation_rate > 1`.", + "name": "strides" + }, + { + "description": "string, `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.", + "name": "padding" + }, + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, time, rows, cols, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, time, channels, rows, cols)`. It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.", + "name": "data_format" + }, + { + "description": "int or tuple/list of 2 integers, specifying the dilation\n rate to use for dilated convolution.", + "name": "dilation_rate" + }, + { + "description": "Activation function to use.
By default, the hyperbolic tangent\n activation function is applied (`tanh(x)`).", + "name": "activation" + }, + { + "description": "Activation function to use for the recurrent step.", + "name": "recurrent_activation" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the `recurrent_kernel` weights\n matrix, used for the linear transformation of the recurrent state.", + "name": "recurrent_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector.", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Boolean. If `True`, add 1 to the bias of the forget\n gate at initialization.\n Use in combination with `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al., 2015](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).", + "name": "unit_forget_bias" + }, + { + "description": "Regularizer function applied to the `kernel` weights\n matrix.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the\n `recurrent_kernel` weights matrix.", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the `kernel` weights\n matrix.", + "name": "kernel_constraint", + "visible": false + }, + { + "description": "Constraint function applied to the\n `recurrent_kernel` weights matrix.", + "name": "recurrent_constraint", + "visible": false + }, + { + "description": "Constraint function applied to the bias vector.", + "name": "bias_constraint", + "visible": false + }, + { + "description": "Boolean. Whether to return the last output\n in the output sequence, or the full sequence. Default: `False`.", + "name": "return_sequences" + }, + { + "description": "Boolean (default: `False`).\n If `True`, process the input sequence backwards and return the\n reversed sequence.", + "name": "go_backwards" + }, + { + "description": "Boolean (default False). If `True`, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.", + "name": "stateful" + }, + { + "default": 0, + "description": "Float between 0 and 1. Fraction of the units to drop for the\n linear transformation of the inputs.", + "name": "dropout" + }, + { + "description": "Float between 0 and 1. Fraction of the units to drop\n for the linear transformation of the recurrent state.", + "name": "recurrent_dropout" + }, + { + "description": "Boolean. Whether to return the last state in addition\n to the output. Default: `False`.", + "name": "return_state" + }, + { + "name": "seed", + "description": "Random seed for dropout." + }, + { + "name": "unroll", + "description": "Boolean (default: `False`).\n If `True`, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed up an RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences."
+ } + ], + "inputs": [ + { + "description": "- If `data_format='channels_first'`:\n 5D tensor with shape: `(samples, time, channels, rows, cols)`\n- If `data_format='channels_last'`:\n 5D tensor with shape: `(samples, time, rows, cols, channels)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `return_state`: a list of tensors. The first tensor is the output.\n The remaining tensors are the last states,\n each 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if\n `data_format='channels_first'`\n or shape: `(samples, new_rows, new_cols, filters)` if\n `data_format='channels_last'`. `rows` and `cols` values might have\n changed due to padding.\n- If `return_sequences`: 5D tensor with shape: `(samples, timesteps,\n filters, new_rows, new_cols)` if data_format='channels_first'\n or shape: `(samples, timesteps, new_rows, new_cols, filters)` if\n `data_format='channels_last'`.\n- Else, 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if\n `data_format='channels_first'`\n or shape: `(samples, new_rows, new_cols, filters)` if\n `data_format='channels_last'`.", + "name": "output" + } + ], + "examples": [ + { + "code": "steps = 10\nheight = 32\nwidth = 32\ninput_channels = 3\noutput_channels = 6\n\ninputs = tf.keras.Input(shape=(steps, height, width, input_channels))\nlayer = tf.keras.layers.ConvLSTM2D(filters=output_channels, kernel_size=3)\noutputs = layer(inputs)" + } + ], + "references": [ + { + "description": " " + }, + { + "description": "[Shi et al., 2015](http://arxiv.org/abs/1506.04214v1) (the current implementation does not include the feedback loop on the cells output)." + } + ] + }, + { + "name": "Convolution2D", + "module": "keras.layers", + "category": "Layer", + "description": "2D convolution layer.\n\nThis layer creates a convolution kernel that is convolved with the layer\ninput over a single spatial (or temporal) dimension to produce a tensor of\noutputs. If `use_bias` is True, a bias vector is created and added to the\noutputs. Finally, if `activation` is not `None`, it is applied to the\noutputs as well.", + "attributes": [ + { + "default": "linear", + "description": "Activation function. If `None`, no activation is applied.", + "name": "activation" + }, + { + "default": "valid", + "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input. When `padding=\"same\"` and\n `strides=1`, the output has the same size as the input.", + "name": "padding" + }, + { + "default": true, + "description": "bool, if `True`, bias will be added to the output.", + "name": "use_bias", + "visible": false + }, + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape\n `(batch_size, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch_size, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.", + "name": "data_format" + }, + { + "default": [ + 1, + 1 + ], + "description": "int or tuple/list of 2 integer, specifying the stride length\n of the convolution. 
`strides > 1` is incompatible with\n `dilation_rate > 1`.", + "name": "strides" + }, + { + "default": [ + 1, + 1 + ], + "description": "int or tuple/list of 2 integers, specifying the dilation\n rate to use for dilated convolution.", + "name": "dilation_rate" + }, + { + "default": 1, + "name": "depth_multiplier" + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector. If `None`, the\n default initializer (`\"zeros\"`) will be used.", + "name": "bias_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the convolution kernel. If `None`,\n the default initializer (`\"glorot_uniform\"`) will be used.", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "int, the dimension of the output space (the number of filters\n in the convolution).", + "name": "filters" + }, + { + "description": "int or tuple/list of 2 integers, specifying the size of the\n convolution window.", + "name": "kernel_size" + }, + { + "description": "Optional regularizer for the convolution kernel.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Optional regularizer for the bias vector.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Optional regularizer function for the output.", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The\n function must take as input the unprojected variable and must return\n the projected variable (which must have the same shape). Constraints\n are not safe to use when doing asynchronous distributed training.", + "name": "kernel_constraint" + }, + { + "description": "Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.", + "name": "bias_constraint" + }, + { + "description": "A positive int specifying the number of groups in which the\n input is split along the channel axis. Each group is convolved\n separately with `filters // groups` filters. The output is the\n concatenation of all the `groups` results along the channel axis.\n Input channels and `filters` must both be divisible by `groups`.", + "name": "groups" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, height, width, channels)`\n- If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, channels, height, width)`", + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, new_height, new_width, filters)`\n- If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, filters, new_height, new_width)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = np.random.rand(4, 10, 10, 128)\n>>> y = keras.layers.Conv2D(32, 3, activation='relu')(x)\n>>> print(y.shape)\n(4, 8, 8, 32)" + } + ] + }, + { + "name": "Cropping1D", + "module": "keras.layers", + "category": "Shape", + "description": "Cropping layer for 1D input (e.g.
temporal sequence).\n\nIt crops along the time dimension (axis 1).", + "attributes": [ + { + "description": "Int, or tuple of int (length 2), or dictionary.\n - If int: how many units should be trimmed off at the beginning and\n end of the cropping dimension (axis 1).\n - If tuple of 2 ints: how many units should be trimmed off at the\n beginning and end of the cropping dimension\n (`(left_crop, right_crop)`).", + "name": "cropping" + } + ], + "inputs": [ + { + "description": "3D tensor with shape `(batch_size, axis_to_crop, features)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "3D tensor with shape `(batch_size, cropped_axis, features)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> input_shape = (2, 3, 2)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> x\n[[[ 0 1]\n [ 2 3]\n [ 4 5]]\n [[ 6 7]\n [ 8 9]\n [10 11]]]\n>>> y = keras.layers.Cropping1D(cropping=1)(x)\n>>> y\n[[[2 3]]\n [[8 9]]]" + } + ] + }, + { + "name": "Cropping2D", + "module": "keras.layers", + "category": "Shape", + "description": "Cropping layer for 2D input (e.g. picture).\n\nIt crops along spatial dimensions, i.e. height and width.", + "attributes": [ + { + "description": "Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\n - If int: the same symmetric cropping is applied to height and\n width.\n - If tuple of 2 ints: interpreted as two different symmetric\n cropping values for height and width:\n `(symmetric_height_crop, symmetric_width_crop)`.\n - If tuple of 2 tuples of 2 ints: interpreted as\n `((top_crop, bottom_crop), (left_crop, right_crop))`.", + "name": "cropping" + }, + { + "description": "A string, one of `\"channels_last\"` (default) or\n `\"channels_first\"`. The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n When unspecified, uses `image_data_format` value found in your Keras\n config file at `~/.keras/keras.json` (if exists). Defaults to\n `\"channels_last\"`.", + "name": "data_format" + } + ], + "inputs": [ + { + "description": "4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, height, width, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, height, width)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, cropped_height, cropped_width, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, cropped_height, cropped_width)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> input_shape = (2, 28, 28, 3)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> y = keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x)\n>>> y.shape\n(2, 24, 20, 3)" + } + ] + }, + { + "name": "Cropping3D", + "module": "keras.layers", + "category": "Shape", + "description": "Cropping layer for 3D data (e.g. 
spatial or spatio-temporal).", + "attributes": [ + { + "description": "Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\n - If int: the same symmetric cropping is applied to depth, height,\n and width.\n - If tuple of 3 ints: interpreted as three different symmetric\n cropping values for depth, height, and width:\n `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.\n - If tuple of 3 tuples of 2 ints: interpreted as\n `((left_dim1_crop, right_dim1_crop), (left_dim2_crop,\n right_dim2_crop), (left_dim3_crop, right_dim3_crop))`.", + "name": "cropping" + }, + { + "description": "A string, one of `\"channels_last\"` (default) or\n `\"channels_first\"`. The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n When unspecified, uses `image_data_format` value found in your Keras\n config file at `~/.keras/keras.json` (if exists). Defaults to\n `\"channels_last\"`.", + "name": "data_format" + } + ], + "inputs": [ + { + "description": "5D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, first_axis_to_crop, second_axis_to_crop,\n third_axis_to_crop, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, first_axis_to_crop, second_axis_to_crop,\n third_axis_to_crop)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "5D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, first_cropped_axis, second_cropped_axis,\n third_cropped_axis, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, first_cropped_axis, second_cropped_axis,\n third_cropped_axis)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> input_shape = (2, 28, 28, 10, 3)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> y = keras.layers.Cropping3D(cropping=(2, 4, 2))(x)\n>>> y.shape\n(2, 24, 20, 6, 3)" + } + ] + }, + { + "name": "CuDNNGRU", + "description": "Fast GRU implementation backed by [CuDNN](https://developer.nvidia.com/cudnn).\n\nCan only be run on GPU, with the TensorFlow backend.\n", + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](https://keras.io/initializers)).", + "name": "recurrent_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n 
(see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).", + "name": "bias_constraint" + }, + { + "description": "Boolean. Whether to return the last output\n in the output sequence, or the full sequence.", + "name": "return_sequences" + }, + { + "description": "Boolean. Whether to return the last state\n in addition to the output.", + "name": "return_state" + }, + { + "description": "Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n", + "name": "stateful" + } + ] + }, + { + "name": "CuDNNLSTM", + "description": "Fast LSTM implementation with [CuDNN](https://developer.nvidia.com/cudnn).\n\nCan only be run on GPU, with the TensorFlow backend.\n", + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer" + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](https://keras.io/initializers)).", + "name": "recurrent_initializer" + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer" + }, + { + "description": "Boolean.\n If `True`, add 1 to the bias of the forget gate at initialization.\n Setting it to `True` will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.
(2015)](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).", + "name": "unit_forget_bias" + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer" + }, + { + "description": "Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "recurrent_regularizer" + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer" + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer" + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).", + "name": "bias_constraint" + }, + { + "description": "Boolean. Whether to return the last output\n in the output sequence, or the full sequence.", + "name": "return_sequences" + }, + { + "description": "Boolean. Whether to return the last state\n in addition to the output.", + "name": "return_state" + }, + { + "description": "Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n", + "name": "stateful" + } + ] + }, + { + "name": "Dense", + "module": "keras.layers", + "category": "Layer", + "description": "Just your regular densely-connected NN layer.\n\n`Dense` implements the operation:\n`output = activation(dot(input, kernel) + bias)`\nwhere `activation` is the element-wise activation function\npassed as the `activation` argument, `kernel` is a weights matrix\ncreated by the layer, and `bias` is a bias vector created by the layer\n(only applicable if `use_bias` is `True`).\n\nNote: If the input to the layer has a rank greater than 2, `Dense`\ncomputes the dot product between the `inputs` and the `kernel` along the\nlast axis of the `inputs` and axis 0 of the `kernel` (using `tf.tensordot`).\nFor example, if input has dimensions `(batch_size, d0, d1)`, then we create\na `kernel` with shape `(d1, units)`, and the `kernel` operates along axis 2\nof the `input`, on every sub-tensor of shape `(1, 1, d1)` (there are\n`batch_size * d0` such sub-tensors). The output in this case will have\nshape `(batch_size, d0, units)`.", + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "default": "linear", + "description": "Activation function to use.\n If you don't specify anything, no activation is applied\n (i.e.
\"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "type": "boolean" + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the `kernel` weights matrix.", + "name": "kernel_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector.", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix.", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the bias vector.", + "name": "bias_constraint" + }, + { + "name": "lora_rank", + "description": "Optional integer. If set, the layer's forward pass\n will implement LoRA (Low-Rank Adaptation)\n with the provided rank. LoRA sets the layer's kernel\n to non-trainable and replaces it with a delta over the\n original kernel, obtained via multiplying two lower-rank\n trainable matrices. This can be useful to reduce the\n computation cost of fine-tuning large dense layers.\n You can also enable LoRA on an existing\n `Dense` layer by calling `layer.enable_lora(rank)`." 
+ } + ], + "inputs": [ + { + "description": "N-D tensor with shape: `(batch_size, ..., input_dim)`.\nThe most common situation would be\na 2D input with shape `(batch_size, input_dim)`.", + "name": "input", + "type": "T" + }, + { + "name": "kernel", + "type": "T" + }, + { + "name": "bias", + "type": "T" + } + ], + "outputs": [ + { + "description": "N-D tensor with shape: `(batch_size, ..., units)`.\nFor instance, for a 2D input with shape `(batch_size, input_dim)`,\nthe output would have shape `(batch_size, units)`.", + "name": "output", + "type": "T" + } + ], + "examples": [ + { + "code": ">>> # Create a `Sequential` model and add a Dense layer as the first layer.\n>>> model = tf.keras.models.Sequential()\n>>> model.add(tf.keras.Input(shape=(16,)))\n>>> model.add(tf.keras.layers.Dense(32, activation='relu'))\n>>> # Now the model will take as input arrays of shape (None, 16)\n>>> # and output arrays of shape (None, 32).\n>>> # Note that after the first layer, you don't need to specify\n>>> # the size of the input anymore:\n>>> model.add(tf.keras.layers.Dense(32))\n>>> model.output_shape\n(None, 32)" + } + ] + }, + { + "name": "DepthwiseConv2D", + "category": "Layer", + "attributes": [ + { + "default": "linear", + "name": "activation" + }, + { + "default": "valid", + "name": "padding" + }, + { + "default": true, + "name": "use_bias", + "type": "boolean", + "visible": false + }, + { + "default": "channels_last", + "name": "data_format" + }, + { + "default": [ + 1, + 1 + ], + "name": "strides" + }, + { + "default": [ + 1, + 1 + ], + "name": "dilation_rate" + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "name": "bias_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "name": "depthwise_initializer", + "visible": false + }, + { + "default": 1, + "name": "depth_multiplier" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ] + }, + { + "name": "Dot", + "module": "keras.layers", + "description": "Computes element-wise dot product of two tensors.\n\nIt takes a list of inputs of size 2, and the axes\ncorresponding to each input along with the dot product\nis to be performed.\n\nLet's say `x` and `y` are the two input tensors with shapes\n`(2, 3, 5)` and `(2, 10, 3)`. The batch dimension should be\nof same size for both the inputs, and `axes` should correspond\nto the dimensions that have the same size in the corresponding\ninputs. e.g. with `axes=(1, 2)`, the dot product of `x`, and `y`\nwill result in a tensor with shape `(2, 5, 10)`", + "attributes": [ + { + "description": "Integer or tuple of integers, axis or axes along which to\n take the dot product. If a tuple, should be two integers\n corresponding to the desired axis from the first input and the\n desired axis from the second input, respectively. Note that the\n size of the two selected axes must match.", + "name": "axes" + }, + { + "description": "Whether to L2-normalize samples along the dot product axis\n before taking the dot product. 
If set to `True`, then\n the output of the dot product is the cosine proximity\n between the two samples.", + "name": "normalize" + }, + { + "description": "Standard layer keyword arguments.", + "name": "**kwargs" + } + ], + "inputs": [ + { + "name": "x" + }, + { + "name": "y" + } + ], + "outputs": [ + { + "name": "z" + } + ], + "examples": [ + { + "code": ">>> x = np.arange(10).reshape(1, 5, 2)\n>>> y = np.arange(10, 20).reshape(1, 2, 5)\n>>> keras.layers.Dot(axes=(1, 2))([x, y])" + }, + { + "summary": "Usage in a Keras model:", + "code": ">>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n>>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n>>> y = keras.layers.Dot(axes=1)([x1, x2])" + } + ] + }, + { + "name": "Dropout", + "module": "keras.layers", + "category": "Dropout", + "description": "Applies dropout to the input.\n\nThe `Dropout` layer randomly sets input units to 0 with a frequency of\n`rate` at each step during training time, which helps prevent overfitting.\nInputs not set to 0 are scaled up by `1 / (1 - rate)` such that the sum over\nall inputs is unchanged.\n\nNote that the `Dropout` layer only applies when `training` is set to `True`\nin `call()`, such that no values are dropped during inference.\nWhen using `model.fit`, `training` will be appropriately set to `True`\nautomatically. In other contexts, you can set the argument explicitly\nto `True` when calling the layer.\n\n(This is in contrast to setting `trainable=False` for a `Dropout` layer.\n`trainable` does not affect the layer's behavior, as `Dropout` does\nnot have any variables/weights that can be frozen during training.)", + "attributes": [ + { + "description": "Float between 0 and 1. Fraction of the input units to drop.", + "name": "rate" + }, + { + "description": "1D integer tensor representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)` and\n you want the dropout mask to be the same for all timesteps,\n you can use `noise_shape=(batch_size, 1, features)`.", + "name": "noise_shape" + }, + { + "description": "A Python integer to use as random seed.", + "name": "seed" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "references": [ + { + "description": "[Dropout: A Simple Way to Prevent Neural Networks from Overfitting]( http://www.jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)" + } + ] + }, + { + "name": "ELU", + "module": "keras.layers", + "category": "Activation", + "description": "Applies an Exponential Linear Unit function to an output.\n\nFormula:\n\n```\nf(x) = alpha * (exp(x) - 1.) for x < 0\nf(x) = x for x >= 0\n```", + "attributes": [ + { + "description": "float, slope of negative section. Defaults to `1.0`.", + "name": "alpha" + }, + { + "name": "**kwargs", + "description": "Base layer keyword arguments, such as `name` and `dtype`." + } + ], + "inputs": [ + { + "description": "Arbitrary. 
Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Same shape as the input.", + "name": "output" + } + ], + "references": [ + { + "description": "[Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289v1)" + } + ] + }, + { + "name": "Embedding", + "module": "keras.layers", + "category": "Transform", + "description": "Turns positive integers (indexes) into dense vectors of fixed size.\n\ne.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]`\n\nThis layer can only be used on positive integer inputs of a fixed range.", + "attributes": [ + { + "default": false, + "description": "Boolean, whether or not the input value 0 is a special\n \"padding\" value that should be masked out.\n This is useful when using recurrent layers which\n may take variable length input. If this is `True`,\n then all subsequent layers in the model need\n to support masking or an exception will be raised.\n If mask_zero is set to True, as a consequence,\n index 0 cannot be used in the vocabulary (input_dim should\n equal size of vocabulary + 1).", + "name": "mask_zero" + }, + { + "default": { + "class_name": "RandomUniform", + "config": { + "maxval": 0.05, + "minval": -0.05, + "seed": null + } + }, + "description": "Initializer for the `embeddings`\n matrix (see `keras.initializers`).", + "name": "embeddings_initializer", + "visible": false + }, + { + "description": "Integer. Size of the vocabulary,\n i.e. maximum integer index + 1.", + "name": "input_dim" + }, + { + "description": "Integer. Dimension of the dense embedding.", + "name": "output_dim" + }, + { + "description": "Regularizer function applied to\n the `embeddings` matrix (see `keras.regularizers`).", + "name": "embeddings_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the `embeddings` matrix (see `keras.constraints`).", + "name": "embeddings_constraint" + }, + { + "description": "Length of input sequences, when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).", + "name": "input_length" + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer" + }, + { + "name": "sparse", + "description": "If True, calling this layer returns a `tf.SparseTensor`. If False,\n the layer returns a dense `tf.Tensor`. For an entry with no features in\n a sparse tensor (entry with value 0), the embedding vector of index 0 is\n returned by default." + }, + { + "name": "lora_rank", + "description": "Optional integer. If set, the layer's forward pass\n will implement LoRA (Low-Rank Adaptation)\n with the provided rank. LoRA sets the layer's embeddings\n matrix to non-trainable and replaces it with a delta over the\n original matrix, obtained via multiplying two lower-rank\n trainable matrices. This can be useful to reduce the\n computation cost of fine-tuning large embedding layers.\n You can also enable LoRA on an existing\n `Embedding` layer by calling `layer.enable_lora(rank)`." 
+ } + ], + "inputs": [ + { + "description": "2D tensor with shape: `(batch_size, input_length)`.", + "name": "input" + }, + { + "name": "embeddings" + } + ], + "outputs": [ + { + "description": "3D tensor with shape: `(batch_size, input_length, output_dim)`.", + "name": "output" + } + ], + "references": [ + { + "description": "[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)" + } + ], + "examples": [ + { + "code": ">>> model = keras.Sequential()\n>>> model.add(keras.layers.Embedding(1000, 64, input_length=10))\n>>> # The model will take as input an integer matrix of size (batch,\n>>> # input_length), and the largest integer (i.e. word index) in the input\n>>> # should be no larger than 999 (vocabulary size).\n>>> # Now model.output_shape is (None, 10, 64), where `None` is the batch\n>>> # dimension.\n>>> input_array = np.random.randint(1000, size=(32, 10))\n>>> model.compile('rmsprop', 'mse')\n>>> output_array = model.predict(input_array)\n>>> print(output_array.shape)\n(32, 10, 64)" + } + ] + }, + { + "name": "Flatten", + "module": "keras.layers", + "category": "Shape", + "description": "Flattens the input. Does not affect the batch size.\n\nNote: If inputs are shaped `(batch,)` without a feature axis, then\nflattening adds an extra channel dimension and output shape is `(batch, 1)`.", + "attributes": [ + { + "default": "channels_last", + "description": "A string, one of `\"channels_last\"` (default) or\n `\"channels_first\"`. The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, ..., channels)` while `\"channels_first\"` corresponds to\n inputs with shape `(batch, channels, ...)`.\n When unspecified, uses `image_data_format` value found in your Keras\n config file at `~/.keras/keras.json` (if exists). Defaults to\n `\"channels_last\"`.", + "name": "data_format" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = keras.Input(shape=(10, 64))\n>>> y = keras.layers.Flatten()(x)\n>>> y.shape\n(None, 640)" + } + ] + }, + { + "name": "GlobalAveragePooling1D", + "module": "keras.layers", + "category": "Pool", + "description": "Global average pooling operation for temporal data.", + "attributes": [ + { + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, steps, features)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, features, steps)`. It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.", + "name": "data_format" + }, + { + "name": "keepdims", + "description": "A boolean, whether to keep the temporal dimension or not.\n If `keepdims` is `False` (default), the rank of the tensor is\n reduced for spatial dimensions. If `keepdims` is `True`, the\n temporal dimension is retained with length 1.\n The behavior is the same as for `tf.reduce_mean` or `np.mean`." 
+ } + ], + "inputs": [ + { + "description": "- If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, steps, features)`\n- If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, steps)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `keepdims=False`:\n 2D tensor with shape `(batch_size, features)`.\n- If `keepdims=True`:\n - If `data_format=\"channels_last\"`:\n 3D tensor with shape `(batch_size, 1, features)`\n - If `data_format=\"channels_first\"`:\n 3D tensor with shape `(batch_size, features, 1)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = np.random.rand(2, 3, 4)\n>>> y = keras.layers.GlobalAveragePooling1D()(x)\n>>> y.shape\n(2, 4)" + } + ] + }, + { + "name": "GlobalAveragePooling2D", + "module": "keras.layers", + "category": "Pool", + "description": "Global average pooling operation for 2D data.", + "attributes": [ + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.", + "name": "data_format" + }, + { + "name": "keepdims", + "description": "A boolean, whether to keep the spatial dimensions or not.\n If `keepdims` is `False` (default), the rank of the tensor is\n reduced for spatial dimensions. If `keepdims` is `True`, the\n spatial dimensions are retained with length 1.\n The behavior is the same as for `tf.reduce_mean` or `np.mean`." + } + ], + "inputs": [ + { + "description": "- If `data_format='channels_last'`:\n 4D tensor with shape:\n `(batch_size, height, width, channels)`\n- If `data_format='channels_first'`:\n 4D tensor with shape:\n `(batch_size, channels, height, width)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `keepdims=False`:\n 2D tensor with shape `(batch_size, channels)`.\n- If `keepdims=True`:\n - If `data_format=\"channels_last\"`:\n 4D tensor with shape `(batch_size, 1, 1, channels)`\n - If `data_format=\"channels_first\"`:\n 4D tensor with shape `(batch_size, channels, 1, 1)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = np.random.rand(2, 4, 5, 3)\n>>> y = keras.layers.GlobalAveragePooling2D()(x)\n>>> y.shape\n(2, 3)" + },
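+ { + "summary": "A minimal additional sketch (not from the upstream docstring): with `keepdims=True` the pooled spatial dimensions are retained with length 1.", + "code": ">>> x = np.random.rand(2, 4, 5, 3)\n>>> y = keras.layers.GlobalAveragePooling2D(keepdims=True)(x)\n>>> y.shape\n(2, 1, 1, 3)" + }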
+ ] + }, + { + "name": "GlobalMaxPooling1D", + "module": "keras.layers", + "category": "Pool", + "description": "Global max pooling operation for temporal data.", + "attributes": [ + { + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, steps, features)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, features, steps)`. It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.", + "name": "data_format" + }, + { + "name": "keepdims", + "description": "A boolean, whether to keep the temporal dimension or not.\n If `keepdims` is `False` (default), the rank of the tensor is\n reduced for spatial dimensions. If `keepdims` is `True`, the\n temporal dimension is retained with length 1.\n The behavior is the same as for `tf.reduce_mean` or `np.mean`." + } + ], + "inputs": [ + { + "description": "- If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, steps, features)`\n- If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, steps)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `keepdims=False`:\n 2D tensor with shape `(batch_size, features)`.\n- If `keepdims=True`:\n - If `data_format=\"channels_last\"`:\n 3D tensor with shape `(batch_size, 1, features)`\n - If `data_format=\"channels_first\"`:\n 3D tensor with shape `(batch_size, features, 1)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = np.random.rand(2, 3, 4)\n>>> y = keras.layers.GlobalMaxPooling1D()(x)\n>>> y.shape\n(2, 4)" + } + ] + }, + { + "name": "GlobalMaxPooling2D", + "module": "keras.layers", + "category": "Pool", + "description": "Global max pooling operation for 2D data.", + "attributes": [ + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.", + "name": "data_format" + }, + { + "name": "keepdims", + "description": "A boolean, whether to keep the spatial dimensions or not.\n If `keepdims` is `False` (default), the rank of the tensor is\n reduced for spatial dimensions. If `keepdims` is `True`, the\n spatial dimensions are retained with length 1.\n The behavior is the same as for `tf.reduce_mean` or `np.mean`." + } + ], + "inputs": [ + { + "description": "- If `data_format='channels_last'`:\n 4D tensor with shape:\n `(batch_size, height, width, channels)`\n- If `data_format='channels_first'`:\n 4D tensor with shape:\n `(batch_size, channels, height, width)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `keepdims=False`:\n 2D tensor with shape `(batch_size, channels)`.\n- If `keepdims=True`:\n - If `data_format=\"channels_last\"`:\n 4D tensor with shape `(batch_size, 1, 1, channels)`\n - If `data_format=\"channels_first\"`:\n 4D tensor with shape `(batch_size, channels, 1, 1)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = np.random.rand(2, 4, 5, 3)\n>>> y = keras.layers.GlobalMaxPooling2D()(x)\n>>> y.shape\n(2, 3)" + } + ] + }, + { + "name": "GRU", + "module": "keras.layers", + "category": "Layer", + "description": "Gated Recurrent Unit - Cho et al. 2014.\n\nBased on available runtime hardware and constraints, this layer\nwill choose different implementations (cuDNN-based or backend-native)\nto maximize the performance. If a GPU is available and all\nthe arguments to the layer meet the requirement of the cuDNN kernel\n(see below for details), the layer will use a fast cuDNN implementation\nwhen using the TensorFlow backend.\n\nThe requirements to use the cuDNN implementation are:\n\n1. `activation` == `tanh`\n2. `recurrent_activation` == `sigmoid`\n3. `dropout` == 0 and `recurrent_dropout` == 0\n4. `unroll` is `False`\n5. `use_bias` is `True`\n6. `reset_after` is `True`\n7. Inputs, if masking is used, are strictly right-padded.\n8. 
Eager execution is enabled in the outermost context.\n\nThere are two variants of the GRU implementation. The default one is based\non [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to\nhidden state before matrix multiplication. The other one is based on\n[original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.\n\nThe second variant is compatible with CuDNNGRU (GPU-only) and allows\ninference on CPU. Thus it has separate biases for `kernel` and\n`recurrent_kernel`. To use this variant, set `reset_after=True` and\n`recurrent_activation='sigmoid'`.\n\nFor example:\n\n```\n>>> inputs = np.random.random((32, 10, 8))\n>>> gru = keras.layers.GRU(4)\n>>> output = gru(inputs)\n>>> output.shape\n(32, 4)\n>>> gru = keras.layers.GRU(4, return_sequences=True, return_state=True)\n>>> whole_sequence_output, final_state = gru(inputs)\n>>> whole_sequence_output.shape\n(32, 10, 4)\n>>> final_state.shape\n(32, 4)\n```", + "attributes": [ + { + "default": "tanh", + "description": "Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": "hard_sigmoid", + "description": "Activation function to use\n for the recurrent step.\n Default: sigmoid (`sigmoid`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "recurrent_activation" + }, + { + "default": true, + "description": "Boolean, (default `True`), whether the layer\n should use a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `\"glorot_uniform\"`.", + "name": "kernel_initializer", + "visible": false + }, + { + "default": { + "class_name": "Orthogonal", + "config": { + "gain": 1, + "seed": null + } + }, + "description": "Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent\n state. Default: `\"orthogonal\"`.", + "name": "recurrent_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector. Default: `\"zeros\"`.", + "name": "bias_initializer", + "visible": false + }, + { + "default": 0, + "description": "Float between 0 and 1. Fraction of the units to drop for the\n linear transformation of the inputs. Default: 0.", + "name": "dropout" + }, + { + "default": 1, + "description": "Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications. Default: 2.", + "name": "implementation" + }, + { + "default": false, + "description": "Boolean. Whether to return the last output\n in the output sequence, or the full sequence. Default: `False`.", + "name": "return_sequences" + }, + { + "default": false, + "description": "Boolean. Whether to return the last state in addition\n to the output. 
Default: `False`.", + "name": "return_state" + }, + { + "default": false, + "description": "Boolean (default `False`).\n If `True`, process the input sequence backwards and return the\n reversed sequence.", + "name": "go_backwards" + }, + { + "default": false, + "description": "Boolean (default: `False`). If `True`, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.", + "name": "stateful" + }, + { + "default": false, + "description": "Boolean (default: `False`).\n If `True`, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.", + "name": "unroll" + }, + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector.\n Default: `None`.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the output of the\n layer (its \"activation\"). Default: `None`.", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector.\n Default: `None`.", + "name": "bias_constraint" + }, + { + "description": "Float between 0 and 1. Fraction of the units to drop\n for the linear transformation of the recurrent state. Default: 0.", + "name": "recurrent_dropout" + }, + { + "description": "`None`.", + "name": "Default" + }, + { + "description": "GRU convention (whether to apply reset gate after or\n before matrix multiplication). `False` is `\"before\"`,\n `True` is `\"after\"` (default and cuDNN compatible).", + "name": "reset_after" + }, + { + "description": "The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `[timesteps, batch, feature]`, whereas in the False case, it will be\n `[batch, timesteps, feature]`. Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.", + "name": "time_major" + }, + { + "name": "seed", + "description": "Random seed for dropout." 
+ } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "recurrent_kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "references": [ + { + "description": "[Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation](https://arxiv.org/abs/1406.1078)" + }, + { + "description": "[On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)" + }, + { + "description": "[Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](https://arxiv.org/abs/1412.3555v1)" + }, + { + "description": "[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/abs/1512.05287)" + } + ] + }, + { + "name": "GRUCell", + "module": "keras.layers", + "description": "Cell class for the GRU layer.\n\nThis class processes one step within the whole time sequence input, whereas\n`keras.layers.GRU` processes the whole sequence.", + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Activation function to use. Default: hyperbolic tangent\n (`tanh`). If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "description": "Activation function to use for the recurrent step.\n Default: sigmoid (`sigmoid`). If you pass `None`, no activation is\n applied (ie. \"linear\" activation: `a(x) = x`).", + "name": "recurrent_activation" + }, + { + "default": true, + "description": "Boolean, (default `True`), whether the layer\n should use a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `\"glorot_uniform\"`.", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation\n of the recurrent state. Default: `\"orthogonal\"`.", + "name": "recurrent_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector. Default: `\"zeros\"`.", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector.\n Default: `None`.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector.\n Default: `None`.", + "name": "bias_constraint" + }, + { + "default": 0, + "description": "Float between 0 and 1. Fraction of the units to drop for the\n linear transformation of the inputs. Default: 0.", + "name": "dropout" + }, + { + "default": 0, + "description": "Float between 0 and 1. Fraction of the units to drop\n for the linear transformation of the recurrent state. 
Default: 0.", + "name": "recurrent_dropout" + }, + { + "description": "Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 (default) will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications. Default: 2.", + "name": "implementation" + }, + { + "description": "`None`.", + "name": "Default" + }, + { + "description": "GRU convention (whether to apply reset gate after or\n before matrix multiplication). False = \"before\",\n True = \"after\" (default and cuDNN compatible).", + "name": "reset_after" + }, + { + "name": "seed", + "description": "Random seed for dropout." + } + ], + "examples": [ + { + "code": ">>> inputs = np.random.random((32, 10, 8))\n>>> rnn = keras.layers.RNN(keras.layers.GRUCell(4))\n>>> output = rnn(inputs)\n>>> output.shape\n(32, 4)\n>>> rnn = keras.layers.RNN(\n... keras.layers.GRUCell(4),\n... return_sequences=True,\n... return_state=True)\n>>> whole_sequence_output, final_state = rnn(inputs)\n>>> whole_sequence_output.shape\n(32, 10, 4)\n>>> final_state.shape\n(32, 4)" + } + ] + }, + { + "name": "HardSigmoid", + "category": "Activation" + }, + { + "name": "InputLayer", + "module": "keras.layers", + "category": "Data", + "description": "Layer to be used as an entry point into a Network (a graph of layers).\n\nIt can either wrap an existing tensor (pass an `input_tensor` argument)\nor create a placeholder tensor (pass arguments `input_shape`, and\noptionally, `dtype`).\n\nIt is generally recommend to use the Keras Functional model via `Input`,\n(which creates an `InputLayer`) without directly using `InputLayer`.\n\nWhen using `InputLayer` with the Keras Sequential model, it can be skipped\nby moving the `input_shape` parameter to the first layer after the\n`InputLayer`.\n\nThis class can create placeholders for `tf.Tensors`, `tf.SparseTensors`, and\n`tf.RaggedTensors` by choosing `sparse=True` or `ragged=True`. Note that\n`sparse` and `ragged` can't be configured to `True` at the same time.", + "attributes": [ + { + "description": "Shape tuple (not including the batch axis), or\n `TensorShape` instance (not including the batch axis).", + "name": "input_shape" + }, + { + "description": "Optional input batch size (integer or `None`).", + "name": "batch_size" + }, + { + "description": "Optional datatype of the input. When not provided, the Keras\n default `float` type will be used.", + "name": "dtype" + }, + { + "description": "Optional tensor to use as layer input. If set, the layer\n will use the `tf.TypeSpec` of this tensor rather\n than creating a new placeholder tensor.", + "name": "input_tensor" + }, + { + "description": "Boolean, whether the placeholder created is meant to be sparse.\n Defaults to `False`.", + "name": "sparse" + }, + { + "description": "Boolean, whether the placeholder created is meant to be ragged.\n In this case, values of `None` in the `shape` argument represent\n ragged dimensions. For more information about `tf.RaggedTensor`, see\n [this guide](https://www.tensorflow.org/guide/ragged_tensor).\n Defaults to `False`.", + "name": "ragged" + }, + { + "description": "Optional name of the layer (string).", + "name": "name" + }, + { + "description": "A `tf.TypeSpec` object to create Input from. This\n `tf.TypeSpec` represents the entire batch. 
+ ] + }, + { + "name": "HardSigmoid", + "category": "Activation" + }, + { + "name": "InputLayer", + "module": "keras.layers", + "category": "Data", + "description": "Layer to be used as an entry point into a Network (a graph of layers).\n\nIt can either wrap an existing tensor (pass an `input_tensor` argument)\nor create a placeholder tensor (pass arguments `input_shape`, and\noptionally, `dtype`).\n\nIt is generally recommended to use the Keras Functional model via `Input`\n(which creates an `InputLayer`) without directly using `InputLayer`.\n\nWhen using `InputLayer` with the Keras Sequential model, it can be skipped\nby moving the `input_shape` parameter to the first layer after the\n`InputLayer`.\n\nThis class can create placeholders for `tf.Tensors`, `tf.SparseTensors`, and\n`tf.RaggedTensors` by choosing `sparse=True` or `ragged=True`. Note that\n`sparse` and `ragged` can't be configured to `True` at the same time.", + "attributes": [ + { + "description": "Shape tuple (not including the batch axis), or\n `TensorShape` instance (not including the batch axis).", + "name": "input_shape" + }, + { + "description": "Optional input batch size (integer or `None`).", + "name": "batch_size" + }, + { + "description": "Optional datatype of the input. When not provided, the Keras\n default `float` type will be used.", + "name": "dtype" + }, + { + "description": "Optional tensor to use as layer input. If set, the layer\n will use the `tf.TypeSpec` of this tensor rather\n than creating a new placeholder tensor.", + "name": "input_tensor" + }, + { + "description": "Boolean, whether the placeholder created is meant to be sparse.\n Defaults to `False`.", + "name": "sparse" + }, + { + "description": "Boolean, whether the placeholder created is meant to be ragged.\n In this case, values of `None` in the `shape` argument represent\n ragged dimensions. For more information about `tf.RaggedTensor`, see\n [this guide](https://www.tensorflow.org/guide/ragged_tensor).\n Defaults to `False`.", + "name": "ragged" + }, + { + "description": "Optional name of the layer (string).", + "name": "name" + }, + { + "description": "A `tf.TypeSpec` object to create Input from. This\n `tf.TypeSpec` represents the entire batch. When provided, all other\n args except name must be `None`.", + "name": "type_spec" + } + ], + "examples": [ + { + "code": "# With explicit InputLayer.\nmodel = tf.keras.Sequential([\n tf.keras.layers.InputLayer(input_shape=(4,)),\n tf.keras.layers.Dense(8)])\nmodel.compile(tf.keras.optimizers.RMSprop(0.001), loss='mse')\nmodel.fit(np.zeros((10, 4)),\n np.ones((10, 8)))\n\n# Without InputLayer; let the first layer have the input_shape.\n# Keras will add an input for the model behind the scenes.\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(8, input_shape=(4,))])\nmodel.compile(tf.keras.optimizers.RMSprop(0.001), loss='mse')\nmodel.fit(np.zeros((10, 4)),\n np.ones((10, 8)))" + } + ] + }, + { + "name": "InputSpec", + "module": "keras.layers", + "category": "Data", + "description": "Specifies the rank, dtype and shape of every input to a layer.\n\nLayers can expose (if appropriate) an `input_spec` attribute:\nan instance of `InputSpec`, or a nested structure of `InputSpec` instances\n(one per input tensor). These objects enable the layer to run input\ncompatibility checks for input structure, input rank, input shape, and\ninput dtype for the first argument of `Layer.__call__`.\n\nA `None` entry in a shape is compatible with any dimension.", + "attributes": [ + { + "description": "Expected DataType of the input.", + "name": "dtype" + }, + { + "description": "Shape tuple, expected shape of the input\n (may include None for unchecked axes). Includes the batch size.", + "name": "shape" + }, + { + "description": "Integer, expected rank of the input.", + "name": "ndim" + }, + { + "description": "Integer, maximum rank of the input.", + "name": "max_ndim" + }, + { + "description": "Integer, minimum rank of the input.", + "name": "min_ndim" + }, + { + "description": "Dictionary mapping integer axes to\n a specific dimension value.", + "name": "axes" + }, + { + "description": "If True, then allow inputs of rank N+1 as long\n as the last axis of the input is 1, as well as inputs of rank N-1\n as long as the last axis of the spec is 1.", + "name": "allow_last_axis_squeeze" + }, + { + "description": "Expected key corresponding to this input when passing data as\n a dictionary.", + "name": "name" + } + ], + "examples": [ + { + "code": "class MyLayer(Layer):\n def __init__(self):\n super(MyLayer, self).__init__()\n # The layer will accept inputs with\n # shape (*, 28, 28) & (*, 28, 28, 1)\n # and raise an appropriate error message otherwise.\n self.input_spec = InputSpec(\n shape=(None, 28, 28, 1),\n allow_last_axis_squeeze=True)" + } + ] + }, + { + "name": "Lambda", + "module": "keras.layers", + "description": "Wraps arbitrary expressions as a `Layer` object.\n\nThe `Lambda` layer exists so that arbitrary expressions can be used\nas a `Layer` when constructing Sequential\nand Functional API models. `Lambda` layers are best suited for simple\noperations or quick experimentation. For more advanced use cases,\nprefer writing new subclasses of `Layer`.\n\nWARNING: `Lambda` layers have (de)serialization limitations!\n\nThe main reason to subclass `Layer` instead of using a\n`Lambda` layer is saving and inspecting a model. `Lambda` layers\nare saved by serializing the Python bytecode, which is fundamentally\nnon-portable and potentially unsafe.\nThey should only be loaded in the same environment where\nthey were saved. Subclassed layers can be saved in a more portable way\nby overriding their `get_config()` method. 
Models that rely on\nsubclassed Layers are also often easier to visualize and reason about.", + "attributes": [ + { + "description": "The function to be evaluated. Takes input tensor as first\n argument.", + "name": "function" + }, + { + "description": "Expected output shape from function. This argument\n can usually be inferred if not explicitly provided.\n Can be a tuple or function. If a tuple, it only specifies\n the first dimension onward; sample dimension is assumed\n either the same as the input:\n `output_shape = (input_shape[0], ) + output_shape` or,\n the input is `None` and the sample dimension is also `None`:\n `output_shape = (None, ) + output_shape`.\n If a function, it specifies the\n entire shape as a function of the input shape:\n `output_shape = f(input_shape)`.", + "name": "output_shape" + }, + { + "description": "Optional dictionary of keyword arguments to be passed to the\n function.", + "name": "arguments" + }, + { + "description": "Either None (indicating no masking) or a callable with the same\n signature as the `compute_mask` layer method, or a tensor\n that will be returned as output mask regardless\n of what the input is.", + "name": "mask" + } + ], + "inputs": [ + { + "description": "Arbitrary. Use the keyword argument input_shape (tuple of\nintegers, does not include the samples axis) when using this layer as the\nfirst layer in a model.", + "name": "inputs", + "list": true + } + ], + "outputs": [ + { + "description": "Specified by `output_shape` argument", + "name": "output" + } + ], + "examples": [ + { + "code": "# add a x -> x^2 layer\nmodel.add(Lambda(lambda x: x ** 2))" + } + ] + }, + { + "name": "LeakyReLU", + "module": "keras.layers", + "category": "Activation", + "description": "Leaky version of a Rectified Linear Unit activation layer.\n\nThis layer allows a small gradient when the unit is not active.\n\nFormula:\n\n``` python\nf(x) = alpha * x if x < 0\nf(x) = x if x >= 0\n```", + "attributes": [ + { + "description": "Float >= `0.`. Negative slope coefficient. Defaults to `0.3`.", + "name": "alpha" + }, + { + "name": "negative_slope", + "description": "Float >= 0.0. Negative slope coefficient.\n Defaults to `0.3`." + }, + { + "name": "**kwargs", + "description": "Base layer keyword arguments, such as\n `name` and `dtype`." + } + ], + "inputs": [ + { + "description": "Arbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the batch axis)\nwhen using this layer as the first layer in a model.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Same shape as the input.", + "name": "output" + } + ], + "references": [ + { + "description": "[Rectifier Nonlinearities Improve Neural Network Acoustic Models]( https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf)" + } + ], + "examples": [ + { + "code": "leaky_relu_layer = LeakyReLU(negative_slope=0.5)\ninput = np.array([-10, -5, 0.0, 5, 10])\nresult = leaky_relu_layer(input)\n# result = [-5. , -2.5, 0. , 5. 
, 10.]" + } + ] + }, + { + "name": "LocallyConnected1D", + "module": "keras.layers", + "category": "Layer", + "description": "Locally-connected layer for 1D inputs.\n\nThe `LocallyConnected1D` layer works similarly to\nthe `Conv1D` layer, except that weights are unshared,\nthat is, a different set of filters is applied at each different patch\nof the input.\n\nNote: layer attributes cannot be modified after the layer has been called\nonce (except the `trainable` attribute).", + "attributes": [ + { + "description": "Integer, the dimensionality of the output space (i.e. the\n number of output filters in the convolution).", + "name": "filters" + }, + { + "description": "An integer or tuple/list of a single integer, specifying\n the length of the 1D convolution window.", + "name": "kernel_size" + }, + { + "description": "An integer or tuple/list of a single integer, specifying the\n stride length of the convolution.", + "name": "strides" + }, + { + "description": "Currently only supports `\"valid\"` (case-insensitive). `\"same\"`\n may be supported in the future. `\"valid\"` means no padding.", + "name": "padding" + }, + { + "description": "Activation function to use. If you don't specify anything,\n no activation is applied (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias" + }, + { + "description": "Initializer for the `kernel` weights matrix.", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector.", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to the `kernel` weights\n matrix.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the output of the\n layer (its \"activation\")..", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the kernel matrix.", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the bias vector.", + "name": "bias_constraint" + }, + { + "default": "channels_last", + "description": "A string, one of `channels_last` (default) or\n `channels_first`. The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape `(batch, length,\n channels)` while `channels_first` corresponds to inputs with shape\n `(batch, channels, length)`. When unspecified, uses\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json` (if exists) else 'channels_last'.\n Defaults to 'channels_last'.", + "name": "data_format" + }, + { + "description": "implementation mode, either `1`, `2`, or `3`. `1` loops\n over input spatial locations to perform the forward pass. It is\n memory-efficient but performs a lot of (small) ops. `2` stores layer\n weights in a dense but sparsely-populated 2D matrix and implements the\n forward pass as a single matrix-multiply. It uses a lot of RAM but\n performs few (large) ops. `3` stores layer weights in a sparse tensor\n and implements the forward pass as a single sparse matrix-multiply.\n How to choose:\n `1`: large, dense models,\n `2`: small models,\n `3`: large, sparse models, where \"large\" stands for large\n input/output activations (i.e. 
many `filters`, `input_filters`,\n large `input_size`, `output_size`), and \"sparse\" stands for few\n connections between inputs and outputs, i.e. small ratio\n `filters * input_filters * kernel_size / (input_size * strides)`,\n where inputs to and outputs of the layer are assumed to have\n shapes `(input_size, input_filters)`, `(output_size, filters)`\n respectively. It is recommended to benchmark each in the setting\n of interest to pick the most efficient one (in terms of speed and\n memory usage). Correct choice of implementation can lead to\n dramatic speed improvements (e.g. 50X), potentially at the expense\n of RAM. Also, only `padding=\"valid\"` is supported by\n `implementation=1`.", + "name": "implementation" + } + ], + "inputs": [ + { + "description": "3D tensor with shape: `(batch_size, steps, input_dim)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "3D tensor with shape: `(batch_size, new_steps, filters)`. `steps` value\n might have changed due to padding or strides.", + "name": "output" + } + ], + "examples": [ + { + "code": " # apply an unshared weight convolution 1d of length 3 to a sequence with\n # 10 timesteps, with 64 output filters\n model = Sequential()\n model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))\n # now model.output_shape == (None, 8, 64)\n # add a new conv1d on top\n model.add(LocallyConnected1D(32, 3))\n # now model.output_shape == (None, 6, 32)" + } + ] + }, + { + "name": "LocallyConnected2D", + "module": "keras.layers", + "category": "Layer", + "description": "Locally-connected layer for 2D inputs.\n\nThe `LocallyConnected2D` layer works similarly\nto the `Conv2D` layer, except that weights are unshared,\nthat is, a different set of filters is applied at each\ndifferent patch of the input.\n\nNote: layer attributes cannot be modified after the layer has been called\nonce (except the `trainable` attribute).", + "attributes": [ + { + "description": "Integer, the dimensionality of the output space (i.e. the\n number of output filters in the convolution).", + "name": "filters" + }, + { + "description": "An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window. Can be a single integer\n to specify the same value for all spatial dimensions.", + "name": "kernel_size" + }, + { + "description": "An integer or tuple/list of 2 integers, specifying the strides\n of the convolution along the width and height. Can be a single integer\n to specify the same value for all spatial dimensions.", + "name": "strides" + }, + { + "description": "Currently only supports `\"valid\"` (case-insensitive). `\"same\"`\n will be supported in the future. `\"valid\"` means no padding.", + "name": "padding" + }, + { + "default": "channels_last", + "description": "A string, one of `channels_last` (default) or\n `channels_first`. The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape `(batch, height,\n width, channels)` while `channels_first` corresponds to inputs with\n shape\n `(batch, channels, height, width)`. When unspecified, uses\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json` (if exists) else 'channels_last'.\n Defaults to 'channels_last'.", + "name": "data_format" + }, + { + "description": "Activation function to use. If you don't specify anything,\n no activation is applied (ie. 
\"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the `kernel` weights matrix.", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector.", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to the `kernel` weights\n matrix.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the output of the\n layer (its \"activation\").", + "name": "activity_regularizer" + }, + { + "description": "Constraint function applied to the kernel matrix.", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the bias vector.", + "name": "bias_constraint" + }, + { + "description": "implementation mode, either `1`, `2`, or `3`. `1` loops\n over input spatial locations to perform the forward pass. It is\n memory-efficient but performs a lot of (small) ops. `2` stores layer\n weights in a dense but sparsely-populated 2D matrix and implements the\n forward pass as a single matrix-multiply. It uses a lot of RAM but\n performs few (large) ops. `3` stores layer weights in a sparse tensor\n and implements the forward pass as a single sparse matrix-multiply.\n How to choose:\n `1`: large, dense models,\n `2`: small models,\n `3`: large, sparse models, where \"large\" stands for large\n input/output activations (i.e. many `filters`, `input_filters`,\n large `np.prod(input_size)`, `np.prod(output_size)`), and \"sparse\"\n stands for few connections between inputs and outputs, i.e. small\n ratio `filters * input_filters * np.prod(kernel_size) /\n (np.prod(input_size) * np.prod(strides))`, where inputs to and\n outputs of the layer are assumed to have shapes `input_size +\n (input_filters,)`, `output_size + (filters,)` respectively. It is\n recommended to benchmark each in the setting of interest to pick\n the most efficient one (in terms of speed and memory usage).\n Correct choice of implementation can lead to dramatic speed\n improvements (e.g. 50X), potentially at the expense of RAM. Also,\n only `padding=\"valid\"` is supported by `implementation=1`.", + "name": "implementation" + } + ], + "inputs": [ + { + "description": "4D tensor with shape: `(samples, channels, rows, cols)` if\n data_format='channels_first'\nor 4D tensor with shape: `(samples, rows, cols, channels)` if\n data_format='channels_last'.", + "name": "input" + } + ], + "outputs": [ + { + "description": "4D tensor with shape: `(samples, filters, new_rows, new_cols)` if\n data_format='channels_first'\nor 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if\n data_format='channels_last'. 
`rows` and `cols` values might have\n changed due to padding.", + "name": "output" + } + ], + "examples": [ + { + "code": " # apply a 3x3 unshared weights convolution with 64 output filters on a\n # 32x32 image with `data_format=\"channels_last\"`:\n model = Sequential()\n model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))\n # now model.output_shape == (None, 30, 30, 64)\n # notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64\n # parameters\n\n # add a 3x3 unshared weights convolution on top, with 32 output filters:\n model.add(LocallyConnected2D(32, (3, 3)))\n # now model.output_shape == (None, 28, 28, 32)" + } + ] + }, + { + "name": "LSTM", + "module": "keras.layers", + "category": "Layer", + "description": "Long Short-Term Memory layer - Hochreiter 1997.\n\nBased on available runtime hardware and constraints, this layer\nwill choose different implementations (cuDNN-based or backend-native)\nto maximize the performance. If a GPU is available and all\nthe arguments to the layer meet the requirement of the cuDNN kernel\n(see below for details), the layer will use a fast cuDNN implementation\nwhen using the TensorFlow backend.\nThe requirements to use the cuDNN implementation are:\n\n1. `activation` == `tanh`\n2. `recurrent_activation` == `sigmoid`\n3. `dropout` == 0 and `recurrent_dropout` == 0\n4. `unroll` is `False`\n5. `use_bias` is `True`\n6. Inputs, if masking is used, are strictly right-padded.\n7. Eager execution is enabled in the outermost context.\n\nFor example:\n\n```\n>>> inputs = np.random.random((32, 10, 8))\n>>> lstm = keras.layers.LSTM(4)\n>>> output = lstm(inputs)\n>>> output.shape\n(32, 4)\n>>> lstm = keras.layers.LSTM(\n... 4, return_sequences=True, return_state=True)\n>>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs)\n>>> whole_seq_output.shape\n(32, 10, 4)\n>>> final_memory_state.shape\n(32, 4)\n>>> final_carry_state.shape\n(32, 4)\n```", + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "default": "tanh", + "description": "Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": "hard_sigmoid", + "description": "Activation function to use\n for the recurrent step.\n Default: sigmoid (`sigmoid`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "recurrent_activation" + }, + { + "description": "Boolean, (default `True`), whether the layer\n should use a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `\"glorot_uniform\"`.", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent\n state. Default: `\"orthogonal\"`.", + "name": "recurrent_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector. Default: `\"zeros\"`.", + "name": "bias_initializer", + "visible": false + }, + { + "default": true, + "description": "Boolean (default `True`). 
If `True`,\n add 1 to the bias of the forget gate at initialization.\n Setting it to `True` will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.](\n https://github.com/mlresearch/v37/blob/gh-pages/jozefowicz15.pdf)", + "name": "unit_forget_bias" + }, + { + "description": "Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector.\n Default: `None`.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the output of the\n layer (its \"activation\"). Default: `None`.", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_constraint", + "visible": false + }, + { + "description": "Constraint function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_constraint", + "visible": false + }, + { + "description": "Constraint function applied to the bias vector.\n Default: `None`.", + "name": "bias_constraint", + "visible": false + }, + { + "default": 0, + "description": "Float between 0 and 1. Fraction of the units to drop for the\n linear transformation of the inputs. Default: 0.", + "name": "dropout" + }, + { + "default": 0, + "description": "Float between 0 and 1. Fraction of the units to drop\n for the linear transformation of the recurrent state. Default: 0.", + "name": "recurrent_dropout" + }, + { + "default": 1, + "description": "Implementation mode, either 1 or 2. Mode 1 will structure\n its operations as a larger number of smaller dot products and additions,\n whereas mode 2 will batch them into fewer, larger operations. These modes\n will have different performance profiles on different hardware and for\n different applications. Default: 2.", + "name": "implementation" + }, + { + "default": false, + "description": "Boolean. Whether to return the last output\n in the output sequence, or the full sequence. Default: `False`.", + "name": "return_sequences" + }, + { + "default": false, + "description": "Boolean. Whether to return the last state in addition\n to the output. Default: `False`.", + "name": "return_state" + }, + { + "default": false, + "description": "Boolean (default: `False`).\n If `True`, process the input sequence backwards and return the\n reversed sequence.", + "name": "go_backwards" + }, + { + "default": false, + "description": "Boolean (default: `False`). 
If `True`, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.", + "name": "stateful" + }, + { + "default": false, + "description": "Boolean (default False).\n If `True`, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.", + "name": "unroll" + }, + { + "description": "The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `[timesteps, batch, feature]`, whereas in the False case, it will be\n `[batch, timesteps, feature]`. Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.", + "name": "time_major" + }, + { + "name": "seed", + "description": "Random seed for dropout." + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "recurrent_kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "references": [ + { + "description": "[Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf)" + }, + { + "description": "[Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)" + }, + { + "description": "[Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)" + }, + { + "description": "[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/abs/1512.05287)" + } + ] + }, + { + "name": "LSTMCell", + "module": "keras.layers", + "description": "Cell class for the LSTM layer.\n\nThis class processes one step within the whole time sequence input, whereas\n`keras.layers.LSTM` processes the whole sequence.", + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Activation function to use. Default: hyperbolic tangent\n (`tanh`). If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "description": "Activation function to use for the recurrent step.\n Default: sigmoid (`sigmoid`). If you pass `None`, no activation is\n applied (ie. \"linear\" activation: `a(x) = x`).", + "name": "recurrent_activation" + }, + { + "default": true, + "description": "Boolean, (default `True`), whether the layer\n should use a bias vector.", + "name": "use_bias" + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `\"glorot_uniform\"`.", + "name": "kernel_initializer" + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation\n of the recurrent state. Default: `\"orthogonal\"`.", + "name": "recurrent_initializer" + }, + { + "description": "Initializer for the bias vector. Default: `\"zeros\"`.", + "name": "bias_initializer" + }, + { + "description": "Boolean (default `True`). 
If `True`,\n add 1 to the bias of the forget gate at initialization.\n Setting it to `True` will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.](\n https://github.com/mlresearch/v37/blob/gh-pages/jozefowicz15.pdf)", + "name": "unit_forget_bias" + }, + { + "description": "Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_regularizer" + }, + { + "description": "Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_regularizer" + }, + { + "description": "Regularizer function applied to the bias vector.\n Default: `None`.", + "name": "bias_regularizer" + }, + { + "description": "Constraint function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector.\n Default: `None`.", + "name": "bias_constraint" + }, + { + "default": 0, + "description": "Float between 0 and 1. Fraction of the units to drop for the\n linear transformation of the inputs. Default: 0.", + "name": "dropout" + }, + { + "default": 0, + "description": "Float between 0 and 1. Fraction of the units to drop\n for the linear transformation of the recurrent state. Default: 0.", + "name": "recurrent_dropout" + }, + { + "description": "Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of smaller dot\n products and additions, whereas mode 2 (default) will batch them into\n fewer, larger operations. These modes will have different performance\n profiles on different hardware and for different applications. Default: 2.", + "name": "implementation" + }, + { + "name": "seed", + "description": "Random seed for dropout." + } + ], + "examples": [ + { + "code": ">>> inputs = np.random.random((32, 10, 8))\n>>> rnn = keras.layers.RNN(keras.layers.LSTMCell(4))\n>>> output = rnn(inputs)\n>>> output.shape\n(32, 4)\n>>> rnn = keras.layers.RNN(\n... keras.layers.LSTMCell(4),\n... return_sequences=True,\n... return_state=True)\n>>> whole_sequence_output, final_memory_state, final_carry_state = rnn(inputs)\n>>> whole_sequence_output.shape\n(32, 10, 4)\n>>> final_memory_state.shape\n(32, 4)\n>>> final_carry_state.shape\n(32, 4)" + } + ] + }, + { + "name": "Masking", + "module": "keras.layers", + "description": "Masks a sequence by using a mask value to skip timesteps.\n\nFor each timestep in the input tensor (dimension #1 in the tensor),\nif all values in the input tensor at that timestep\nare equal to `mask_value`, then the timestep will be masked (skipped)\nin all downstream layers (as long as they support masking).\n\nIf any downstream layer does not support masking yet receives such\nan input mask, an exception will be raised.", + "attributes": [ + { + "description": "Either None or the mask value to skip.", + "name": "mask_value" + } + ], + "examples": [
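+ { + "summary": "A minimal sketch (assuming the standard `Layer.compute_mask` API): inspect the boolean mask that `Masking` computes; masked timesteps read `False`.", + "code": ">>> x = np.array([[[1.], [0.], [2.]]])  # timestep 1 is all zeros\n>>> layer = keras.layers.Masking(mask_value=0.)\n>>> output = layer(x)\n>>> layer.compute_mask(x)  # timestep 1 reads False (masked)" + },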
+ { + "summary": "Consider a NumPy data array `x` of shape `(samples, timesteps, features)`,\nto be fed to an LSTM layer. You want to mask timesteps #3 and #5 because you\nlack data for these timesteps. You can:\n- Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`\n- Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:", + "code": "samples, timesteps, features = 32, 10, 8\ninputs = np.random.random([samples, timesteps, features]).astype(np.float32)\ninputs[:, 3, :] = 0.\ninputs[:, 5, :] = 0.\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Masking(mask_value=0.))\nmodel.add(keras.layers.LSTM(32))\noutput = model(inputs)\n# Time steps 3 and 5 will be skipped in the LSTM calculation." + } + ] + }, + { + "name": "Maximum", + "module": "keras.layers", + "category": "Tensor", + "description": "Computes element-wise maximum on a list of inputs.\n\nIt takes as input a list of tensors, all of the same shape,\nand returns a single tensor (also of the same shape).", + "inputs": [ + { + "name": "inputs", + "list": true + } + ], + "outputs": [ + { + "name": "output" + } + ], + "examples": [ + { + "code": ">>> input_shape = (2, 3, 4)\n>>> x1 = np.random.rand(*input_shape)\n>>> x2 = np.random.rand(*input_shape)\n>>> y = keras.layers.Maximum()([x1, x2])" + }, + { + "summary": "Usage in a Keras model:", + "code": ">>> input1 = keras.layers.Input(shape=(16,))\n>>> x1 = keras.layers.Dense(8, activation='relu')(input1)\n>>> input2 = keras.layers.Input(shape=(32,))\n>>> x2 = keras.layers.Dense(8, activation='relu')(input2)\n>>> # equivalent to `y = keras.layers.maximum([x1, x2])`\n>>> y = keras.layers.Maximum()([x1, x2])\n>>> out = keras.layers.Dense(4)(y)\n>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)" + } + ] + }, + { + "name": "MaxPooling1D", + "module": "keras.layers", + "category": "Pool", + "description": "Max pooling operation for 1D temporal data.\n\nDownsamples the input representation by taking the maximum value over a\nspatial window of size `pool_size`. The window is shifted by `strides`.\n\nThe resulting output when using the `\"valid\"` padding option has a shape of:\n`output_shape = (input_shape - pool_size + 1) / strides`.\n\nThe resulting output shape when using the `\"same\"` padding option is:\n`output_shape = input_shape / strides`", + "attributes": [ + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, steps, features)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, features, steps)`. It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.", + "name": "data_format" + }, + { + "default": "valid", + "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.", + "name": "padding" + }, + { + "default": [ + 2, + 2 + ], + "description": "int, size of the max pooling window.", + "name": "pool_size" + }, + { + "default": [ + 2, + 2 + ], + "description": "int or None. Specifies how much the pooling window moves\n for each pooling step. 
If None, it will default to `pool_size`.", + "name": "strides" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 3D tensor with shape `(batch_size, steps, features)`.\n- If `data_format=\"channels_first\"`:\n 3D tensor with shape `(batch_size, features, steps)`.", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 3D tensor with shape `(batch_size, downsampled_steps, features)`.\n- If `data_format=\"channels_first\"`:\n 3D tensor with shape `(batch_size, features, downsampled_steps)`.", + "name": "output" + } + ], + "examples": [ + { + "summary": "`strides=1` and `padding=\"valid\"`:", + "code": ">>> x = np.array([1., 2., 3., 4., 5.])\n>>> x = np.reshape(x, [1, 5, 1])\n>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,\n... strides=1, padding=\"valid\")\n>>> max_pool_1d(x)" + }, + { + "summary": "`strides=2` and `padding=\"valid\"`:", + "code": ">>> x = np.array([1., 2., 3., 4., 5.])\n>>> x = np.reshape(x, [1, 5, 1])\n>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,\n... strides=2, padding=\"valid\")\n>>> max_pool_1d(x)" + }, + { + "summary": "`strides=1` and `padding=\"same\"`:", + "code": ">>> x = np.array([1., 2., 3., 4., 5.])\n>>> x = np.reshape(x, [1, 5, 1])\n>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,\n... strides=1, padding=\"same\")\n>>> max_pool_1d(x)" + } + ] + }, + { + "name": "MaxPooling2D", + "module": "keras.layers", + "category": "Pool", + "description": "Max pooling operation for 2D spatial data.\n\nDownsamples the input along its spatial dimensions (height and width)\nby taking the maximum value over an input window\n(of size defined by `pool_size`) for each channel of the input.\nThe window is shifted by `strides` along each dimension.\n\nThe resulting output when using the `\"valid\"` padding option has a spatial\nshape (number of rows or columns) of:\n`output_shape = math.floor((input_shape - pool_size) / strides) + 1`\n(when `input_shape >= pool_size`)\n\nThe resulting output shape when using the `\"same\"` padding option is:\n`output_shape = math.floor((input_shape - 1) / strides) + 1`", + "attributes": [ + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.", + "name": "data_format" + }, + { + "default": "valid", + "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.", + "name": "padding" + }, + { + "default": [ + 2, + 2 + ], + "description": "int or tuple of 2 integers, factors by which to downscale\n (dim1, dim2). If only one integer is specified, the same\n window length will be used for all dimensions.", + "name": "pool_size" + }, + { + "default": [ + 2, + 2 + ], + "description": "int or tuple of 2 integers, or None. Strides values. If None,\n it will default to `pool_size`. 
If only one int is specified, the\n same stride size will be used for all dimensions.", + "name": "strides" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 4D tensor with shape `(batch_size, height, width, channels)`.\n- If `data_format=\"channels_first\"`:\n 4D tensor with shape `(batch_size, channels, height, width)`.", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 4D tensor with shape\n `(batch_size, pooled_height, pooled_width, channels)`.\n- If `data_format=\"channels_first\"`:\n 4D tensor with shape\n `(batch_size, channels, pooled_height, pooled_width)`.", + "name": "output" + } + ], + "examples": [ + { + "summary": "`strides=(1, 1)` and `padding=\"valid\"`:", + "code": ">>> x = np.array([[1., 2., 3.],\n... [4., 5., 6.],\n... [7., 8., 9.]])\n>>> x = np.reshape(x, [1, 3, 3, 1])\n>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),\n... strides=(1, 1), padding=\"valid\")\n>>> max_pool_2d(x)" + }, + { + "summary": "`strides=(2, 2)` and `padding=\"valid\"`:", + "code": ">>> x = np.array([[1., 2., 3., 4.],\n... [5., 6., 7., 8.],\n... [9., 10., 11., 12.]])\n>>> x = np.reshape(x, [1, 3, 4, 1])\n>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),\n... strides=(2, 2), padding=\"valid\")\n>>> max_pool_2d(x)" + }, + { + "summary": "`strides=(1, 1)` and `padding=\"same\"`:", + "code": ">>> x = np.array([[1., 2., 3.],\n... [4., 5., 6.],\n... [7., 8., 9.]])\n>>> x = np.reshape(x, [1, 3, 3, 1])\n>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),\n... strides=(1, 1), padding=\"same\")\n>>> max_pool_2d(x)" + } + ] + }, + { + "name": "MaxPooling3D", + "module": "keras.layers", + "category": "Pool", + "description": "Max pooling operation for 3D data (spatial or spatio-temporal).\n\nDownsamples the input along its spatial dimensions (depth, height, and\nwidth) by taking the maximum value over an input window (of size defined by\n`pool_size`) for each channel of the input. The window is shifted by\n`strides` along each dimension.", + "attributes": [ + { + "description": "int or tuple of 3 integers, factors by which to downscale\n (dim1, dim2, dim3). If only one integer is specified, the same\n window length will be used for all dimensions.", + "name": "pool_size" + }, + { + "description": "int or tuple of 3 integers, or None. Strides values. If None,\n it will default to `pool_size`. If only one int is specified, the\n same stride size will be used for all dimensions.", + "name": "strides" + }, + { + "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.", + "name": "padding" + }, + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while\n `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your Keras\n config file at `~/.keras/keras.json`. 
If you never set it, then it\n will be `\"channels_last\"`.", + "name": "data_format" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n- If `data_format=\"channels_first\"`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n 5D tensor with shape:\n `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`\n- If `data_format=\"channels_first\"`:\n 5D tensor with shape:\n `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`", + "name": "output" + } + ], + "examples": [ + { + "code": "depth = 30\nheight = 30\nwidth = 30\nchannels = 3\n\ninputs = keras.layers.Input(shape=(depth, height, width, channels))\nlayer = keras.layers.MaxPooling3D(pool_size=3)\noutputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)" + } + ] + }, + { + "name": "MultiHeadAttention", + "module": "keras.layers", + "description": "MultiHeadAttention layer.\n\nThis is an implementation of multi-headed attention as described in the\npaper \"Attention Is All You Need\"\n[Vaswani et al., 2017](https://arxiv.org/abs/1706.03762).\nIf `query`, `key`, `value` are the same, then\nthis is self-attention. Each timestep in `query` attends to the\ncorresponding sequence in `key`, and returns a fixed-width vector.\n\nThis layer first projects `query`, `key` and `value`. These are\n(effectively) a list of tensors of length `num_attention_heads`, where the\ncorresponding shapes are `(batch_size, <query dimensions>, key_dim)`,\n`(batch_size, <key/value dimensions>, key_dim)`,\n`(batch_size, <key/value dimensions>, value_dim)`.\n\nThen, the query and key tensors are dot-producted and scaled. These are\nsoftmaxed to obtain attention probabilities. The value tensors are then\ninterpolated by these probabilities, then concatenated back to a single\ntensor.\n\nFinally, the result tensor, whose last dimension is `value_dim`, can take\na final linear projection and be returned.", + "attributes": [ + { + "description": "Number of attention heads.", + "name": "num_heads" + }, + { + "description": "Size of each attention head for query and key.", + "name": "key_dim" + }, + { + "description": "Size of each attention head for value.", + "name": "value_dim" + }, + { + "description": "Dropout probability.", + "name": "dropout" + }, + { + "description": "Boolean, whether the dense layers use bias vectors/matrices.", + "name": "use_bias" + }, + { + "description": "The expected shape of an output tensor, besides the batch\n and sequence dims. If not specified, projects back to the query\n feature dim (the query input's last dimension).", + "name": "output_shape" + }, + { + "description": "axes over which the attention is applied. 
`None` means\n attention over all axes except batch, heads, and features.", + "name": "attention_axes" + }, + { + "description": "Initializer for dense layer kernels.", + "name": "kernel_initializer" + }, + { + "description": "Initializer for dense layer biases.", + "name": "bias_initializer" + }, + { + "description": "Regularizer for dense layer kernels.", + "name": "kernel_regularizer" + }, + { + "description": "Regularizer for dense layer biases.", + "name": "bias_regularizer" + }, + { + "description": "Regularizer for dense layer activity.", + "name": "activity_regularizer" + }, + { + "description": "Constraint for dense layer kernels.", + "name": "kernel_constraint" + }, + { + "description": "Constraint for dense layer biases.", + "name": "bias_constraint" + } + ], + "examples": [ + { + "summary": "Performs 1D cross-attention over two sequence inputs with an attention mask.\nReturns the additional attention weights over heads.", + "code": ">>> layer = MultiHeadAttention(num_heads=2, key_dim=2)\n>>> target = tf.keras.Input(shape=[8, 16])\n>>> source = tf.keras.Input(shape=[4, 16])\n>>> output_tensor, weights = layer(target, source,\n... return_attention_scores=True)\n>>> print(output_tensor.shape)\n(None, 8, 16)\n>>> print(weights.shape)\n(None, 2, 8, 4)" + }, + { + "summary": "Performs 2D self-attention over a 5D input tensor on axes 2 and 3.", + "code": ">>> layer = MultiHeadAttention(\n... num_heads=2, key_dim=2, attention_axes=(2, 3))\n>>> input_tensor = tf.keras.Input(shape=[5, 3, 4, 16])\n>>> output_tensor = layer(input_tensor, input_tensor)\n>>> print(output_tensor.shape)\n(None, 5, 3, 4, 16)" + } + ] + }, + { + "name": "Multiply", + "module": "keras.layers", + "description": "Performs elementwise multiplication.\n\nIt takes as input a list of tensors, all of the same shape,\nand returns a single tensor (also of the same shape).", + "inputs": [ + { + "name": "inputs", + "list": true + } + ], + "outputs": [ + { + "name": "output" + } + ], + "examples": [ + { + "code": ">>> input_shape = (2, 3, 4)\n>>> x1 = np.random.rand(*input_shape)\n>>> x2 = np.random.rand(*input_shape)\n>>> y = keras.layers.Multiply()([x1, x2])" + }, + { + "summary": "Usage in a Keras model:", + "code": ">>> input1 = keras.layers.Input(shape=(16,))\n>>> x1 = keras.layers.Dense(8, activation='relu')(input1)\n>>> input2 = keras.layers.Input(shape=(32,))\n>>> x2 = keras.layers.Dense(8, activation='relu')(input2)\n>>> # equivalent to `y = keras.layers.multiply([x1, x2])`\n>>> y = keras.layers.Multiply()([x1, x2])\n>>> out = keras.layers.Dense(4)(y)\n>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)" + } + ] + }, + { + "name": "Permute", + "module": "keras.layers", + "category": "Shape", + "description": "Permutes the dimensions of the input according to a given pattern.\n\nUseful e.g. for connecting RNNs and convnets.", + "attributes": [ + { + "description": "Tuple of integers. Permutation pattern does not include the\n batch dimension. 
Indexing starts at 1.\n For instance, `(2, 1)` permutes the first and second dimensions\n of the input.", + "name": "dims" + } + ], + "inputs": [ + { + "description": "Arbitrary.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Same as the input shape, but with the dimensions re-ordered according\nto the specified pattern.", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = keras.Input(shape=(10, 64))\n>>> y = keras.layers.Permute((2, 1))(x)\n>>> y.shape\n(None, 64, 10)" + } + ] + }, + { + "name": "PReLU", + "module": "keras.layers", + "category": "Activation", + "description": "Parametric Rectified Linear Unit activation layer.\n\nFormula:\n``` python\nf(x) = alpha * x for x < 0\nf(x) = x for x >= 0\n```\nwhere `alpha` is a learned array with the same shape as `x`.", + "attributes": [ + { + "description": "Initializer function for the weights.", + "name": "alpha_initializer" + }, + { + "description": "Regularizer for the weights.", + "name": "alpha_regularizer", + "visible": false + }, + { + "description": "Constraint for the weights.", + "name": "alpha_constraint" + }, + { + "description": "The axes along which to share learnable parameters for the\n activation function. For example, if the incoming feature maps are\n from a 2D convolution with output shape\n `(batch, height, width, channels)`, and you wish to share parameters\n across space so that each filter only has one set of parameters,\n set `shared_axes=[1, 2]`.", + "name": "shared_axes" + }, + { + "name": "**kwargs", + "description": "Base layer keyword arguments, such as `name` and `dtype`." + } + ], + "inputs": [ + { + "description": "Arbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.", + "name": "input" + }, + { + "name": "params" + } + ], + "outputs": [ + { + "description": "Same shape as the input.", + "name": "output" + } + ], + "references": [ + { + "description": "[Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852)" + } + ]
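, + "examples": [ + { + "summary": "A minimal usage sketch (illustrative values, not from the Keras docs): with the default zero-initialized `alpha`, the layer initially behaves like `max(x, 0)`; `alpha` is then learned during training.", + "code": "x = np.array([[-3.0, -1.0, 0.0, 2.0]])\nlayer = keras.layers.PReLU()\ny = layer(x)\n# With the freshly initialized (all-zero) alpha:\n# y == [[0., 0., 0., 2.]]" + } + ]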
 + }, + { + "name": "ReLU", + "module": "keras.layers", + "category": "Activation", + "description": "Rectified Linear Unit activation function layer.\n\nFormula:\n``` python\nf(x) = max_value if x >= max_value\nf(x) = x if threshold <= x < max_value\nf(x) = negative_slope * (x - threshold) otherwise\n```\nWith the default argument values this reduces to `f(x) = max(x, 0)`.", + "attributes": [ + { + "description": "Float >= 0. Maximum activation value. None means unlimited.\n Defaults to `None`.", + "name": "max_value" + }, + { + "description": "Float >= 0. Negative slope coefficient.\n Defaults to `0.0`.", + "name": "negative_slope" + }, + { + "description": "Float >= 0. Threshold value for thresholded activation.\n Defaults to `0.0`.", + "name": "threshold" + }, + { + "name": "**kwargs", + "description": "Base layer keyword arguments, such as `name` and `dtype`." + } + ], + "inputs": [ + { + "description": "Arbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the batch axis)\nwhen using this layer as the first layer in a model.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Same shape as the input.", + "name": "output" + } + ], + "examples": [ + { + "code": "relu_layer = keras.layers.ReLU(\n max_value=10,\n negative_slope=0.5,\n threshold=0,\n)\ninput = np.array([-10, -5, 0.0, 5, 10])\nresult = relu_layer(input)\n# result = [-5., -2.5, 0., 5., 10.]" + } + ] + }, + { + "name": "RepeatVector", + "module": "keras.layers", + "category": "Shape", + "description": "Repeats the input n times.", + "attributes": [ + { + "description": "Integer, repetition factor.", + "name": "n" + } + ], + "inputs": [ + { + "description": "2D tensor with shape `(batch_size, features)`.", + "name": "input" + } + ], + "outputs": [ + { + "description": "3D tensor with shape `(batch_size, n, features)`.", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = keras.Input(shape=(32,))\n>>> y = keras.layers.RepeatVector(3)(x)\n>>> y.shape\n(None, 3, 32)" + } + ] + }, + { + "name": "Reshape", + "module": "keras.layers", + "category": "Shape", + "description": "Layer that reshapes inputs into the given shape.", + "attributes": [ + { + "description": "Target shape. Tuple of integers, does not include the\n samples dimension (batch size).", + "name": "target_shape" + } + ], + "inputs": [ + { + "description": "Arbitrary, although all dimensions in the input shape must be\nknown/fixed. Use the keyword argument `input_shape` (tuple of integers,\ndoes not include the samples/batch size axis) when using this layer as\nthe first layer in a model.", + "name": "input" + } + ], + "outputs": [ + { + "description": "`(batch_size, *target_shape)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = keras.Input(shape=(12,))\n>>> y = keras.layers.Reshape((3, 4))(x)\n>>> y.shape\n(None, 3, 4)" + }, + { + "code": ">>> # also supports shape inference using `-1` as dimension\n>>> y = keras.layers.Reshape((-1, 2, 2))(x)\n>>> y.shape\n(None, 3, 2, 2)" + } + ] + }, + { + "name": "RNN", + "module": "keras.layers", + "category": "Layer", + "description": "Base class for recurrent layers.", + "attributes": [ + { + "default": false, + "description": "Boolean (default `False`). Whether to return the last\n output in the output sequence, or the full sequence.", + "name": "return_sequences" + }, + { + "default": false, + "description": "Boolean (default `False`).\n Whether to return the last state in addition to the output.", + "name": "return_state" + }, + { + "default": false, + "description": "Boolean (default `False`).\n If `True`, process the input sequence backwards and return the\n reversed sequence.", + "name": "go_backwards" + }, + { + "default": false, + "description": "Boolean (default `False`). If True, the last state\n for each sample at index `i` in a batch will be used as initial\n state for the sample of index `i` in the following batch.", + "name": "stateful" + }, + { + "default": false, + "description": "Boolean (default `False`).\n If True, the network will be unrolled, else a symbolic loop will be\n used. Unrolling can speed up an RNN, although it tends to be more\n memory-intensive. Unrolling is only suitable for short sequences.", + "name": "unroll" + }, + { + "description": "An RNN cell instance or a list of RNN cell instances.\n An RNN cell is a class that has:\n - A `call(input_at_t, states_at_t)` method, returning\n `(output_at_t, states_at_t_plus_1)`. The call method of the\n cell can also take the optional argument `constants`, see\n section \"Note on passing external constants\" below.\n - A `state_size` attribute. This can be a single integer\n (single state) in which case it is the size of the recurrent\n state. 
This can also be a list/tuple of integers\n (one size per state).\n - An `output_size` attribute, a single integer.\n - A `get_initial_state(batch_size=None)`\n method that creates a tensor meant to be fed to `call()` as the\n initial state, if the user didn't specify any initial state\n via other means. The returned initial state should have\n shape `(batch_size, cell.state_size)`.\n The cell might choose to create a tensor full of zeros,\n or other values based on the cell's implementation.\n `inputs` is the input tensor to the RNN layer, with shape\n `(batch_size, timesteps, features)`.\n If this method is not implemented\n by the cell, the RNN layer will create a zero-filled tensor\n with shape `(batch_size, cell.state_size)`.\n In the case that `cell` is a list of RNN cell instances, the cells\n will be stacked on top of each other in the RNN, resulting in an\n efficient stacked RNN.", + "name": "cell" + }, + { + "description": "dimensionality of the input (integer).\n This argument (or alternatively,\n the keyword argument `input_shape`)\n is required when using this layer as the first layer in a model.", + "name": "input_dim" + }, + { + "description": "Length of input sequences, to be specified\n when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n Note that if the recurrent layer is not the first layer\n in your model, you would need to specify the input length\n at the level of the first layer\n (e.g. via the `input_shape` argument)\n", + "name": "input_length" + }, + { + "description": "The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `(timesteps, batch, ...)`, whereas in the False case, it will be\n `(batch, timesteps, ...)`. Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.", + "name": "time_major" + }, + { + "description": "Boolean (default `False`).\n Whether the output should use zeros for the masked timesteps.\n Note that this field is only used when `return_sequences`\n is `True` and `mask` is provided.\n It can be useful if you want to reuse the raw output sequence of\n the RNN without interference from the masked timesteps, e.g.,\n merging bidirectional RNNs.", + "name": "zero_output_for_mask" + } + ], + "inputs": [ + { + "description": "3-D tensor with shape `(batch_size, timesteps, features)`.", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `return_state`: a list of tensors. The first tensor is\nthe output. The remaining tensors are the last states,\neach with shape `(batch_size, state_size)`, where `state_size` could\nbe a high dimension tensor shape.\n- If `return_sequences`: 3D tensor with shape\n`(batch_size, timesteps, output_size)`.\n\nMasking:\n\nThis layer supports masking for input data with a variable number\nof timesteps. To introduce masks to your data,\nuse a `keras.layers.Embedding` layer with the `mask_zero` parameter\nset to `True`.\n\nNote on using statefulness in RNNs:\n\nYou can set RNN layers to be 'stateful', which means that the states\ncomputed for the samples in one batch will be reused as initial states\nfor the samples in the next batch. 
This assumes a one-to-one mapping\nbetween samples in different successive batches.\n\nTo enable statefulness:\n\n- Specify `stateful=True` in the layer constructor.\n- Specify a fixed batch size for your model: for a sequential model, pass\n `batch_input_shape=(...)` to the first layer in your model;\n for a functional model with 1 or more Input layers, pass\n `batch_shape=(...)` to all the first layers in your model.\nThis is the expected shape of your inputs\n*including the batch size*.\nIt should be a tuple of integers, e.g. `(32, 10, 100)`.\n- Specify `shuffle=False` when calling `fit()`.\n\nTo reset the states of your model, call `.reset_states()` on either\na specific layer, or on your entire model.\n\nNote on specifying the initial state of RNNs:\n\nYou can specify the initial state of RNN layers symbolically by\ncalling them with the keyword argument `initial_state`. The value of\n`initial_state` should be a tensor or list of tensors representing\nthe initial state of the RNN layer.\n\nYou can specify the initial state of RNN layers numerically by\ncalling `reset_states` with the keyword argument `states`. The value of\n`states` should be a numpy array or list of numpy arrays representing\nthe initial state of the RNN layer.", + "name": "output" + } + ], + "examples": [ + { + "code": "import keras\nfrom keras.layers import RNN\nfrom keras import ops\n\n# First, let's define an RNN cell, as a layer subclass.\nclass MinimalRNNCell(keras.layers.Layer):\n\n def __init__(self, units, **kwargs):\n super().__init__(**kwargs)\n self.units = units\n self.state_size = units\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='uniform',\n name='kernel')\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n initializer='uniform',\n name='recurrent_kernel')\n self.built = True\n\n def call(self, inputs, states):\n prev_output = states[0]\n h = ops.matmul(inputs, self.kernel)\n output = h + ops.matmul(prev_output, self.recurrent_kernel)\n return output, [output]\n\n# Let's use this cell in an RNN layer:\n\ncell = MinimalRNNCell(32)\nx = keras.Input((None, 5))\nlayer = RNN(cell)\ny = layer(x)\n\n# Here's how to use the cell to build a stacked RNN:\n\ncells = [MinimalRNNCell(32), MinimalRNNCell(64)]\nx = keras.Input((None, 5))\nlayer = RNN(cells)\ny = layer(x)" + } + ] + }, + { + "name": "SeparableConv1D", + "module": "keras.layers", + "category": "Layer", + "description": "1D separable convolution layer.\n\nThis layer performs a depthwise convolution that acts separately on\nchannels, followed by a pointwise convolution that mixes channels.\nIf `use_bias` is True and a bias initializer is provided,\nit adds a bias vector to the output. It then optionally applies an\nactivation function to produce the final output.", + "attributes": [ + { + "description": "int, the dimensionality of the output space (i.e. the number\n of filters in the pointwise convolution).", + "name": "filters" + }, + { + "description": "int or tuple/list of 1 integers, specifying the size of the\n depthwise convolution window.", + "name": "kernel_size" + }, + { + "description": "int or tuple/list of 1 integers, specifying the stride length\n of the depthwise convolution. If only one int is specified, the same\n stride size will be used for all dimensions. `strides > 1` is\n incompatible with `dilation_rate > 1`.", + "name": "strides" + }, + { + "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding evenly to\n the left/right or up/down of the input. When `padding=\"same\"` and\n `strides=1`, the output has the same size as the input.", + "name": "padding" + }, + { + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, steps, features)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, features, steps)`. It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.", + "name": "data_format" + }, + { + "description": "int or tuple/list of 1 integers, specifying the dilation\n rate to use for dilated convolution. If only one int is specified,\n the same dilation rate will be used for all dimensions.", + "name": "dilation_rate" + }, + { + "description": "The number of depthwise convolution output channels\n for each input channel. The total number of depthwise convolution\n output channels will be equal to `input_channel * depth_multiplier`.", + "name": "depth_multiplier" + }, + { + "description": "Activation function. If `None`, no activation is applied.", + "name": "activation" + }, + { + "description": "bool, if `True`, bias will be added to the output.", + "name": "use_bias" + }, + { + "description": "An initializer for the depthwise convolution\n kernel. If None, then the default initializer (`\"glorot_uniform\"`)\n will be used.", + "name": "depthwise_initializer" + }, + { + "description": "An initializer for the pointwise convolution\n kernel. If None, then the default initializer (`\"glorot_uniform\"`)\n will be used.", + "name": "pointwise_initializer" + }, + { + "description": "An initializer for the bias vector. If None, the\n default initializer ('\"zeros\"') will be used.", + "name": "bias_initializer" + }, + { + "description": "Optional regularizer for the depthwise\n convolution kernel.", + "name": "depthwise_regularizer" + }, + { + "description": "Optional regularizer for the pointwise\n convolution kernel.", + "name": "pointwise_regularizer" + }, + { + "description": "Optional regularizer for the bias vector.", + "name": "bias_regularizer" + }, + { + "description": "Optional regularizer function for the output.", + "name": "activity_regularizer" + }, + { + "description": "Optional projection function to be applied to the\n depthwise kernel after being updated by an `Optimizer` (e.g. used\n for norm constraints or value constraints for layer weights). 
The\n function must take as input the unprojected variable and must return\n the projected variable (which must have the same shape).", + "name": "depthwise_constraint" + }, + { + "description": "Optional projection function to be applied to the\n pointwise kernel after being updated by an `Optimizer`.", + "name": "pointwise_constraint" + }, + { + "description": "Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.", + "name": "bias_constraint" + }, + { + "description": "Boolean, if `True` the weights of this layer will be marked as\n trainable (and listed in `layer.trainable_weights`).", + "name": "trainable" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 3D tensor with shape: `(batch_shape, steps, channels)`\n- If `data_format=\"channels_first\"`:\n A 3D tensor with shape: `(batch_shape, channels, steps)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 3D tensor with shape: `(batch_shape, new_steps, filters)`\n- If `data_format=\"channels_first\"`:\n A 3D tensor with shape: `(batch_shape, filters, new_steps)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = np.random.rand(4, 10, 12)\n>>> y = keras.layers.SeparableConv1D(4, 3, 2, activation='relu')(x)\n>>> print(y.shape)\n(4, 4, 4)" + } + ] + }, + { + "name": "SeparableConv2D", + "module": "keras.layers", + "category": "Layer", + "description": "2D separable convolution layer.\n\nThis layer performs a depthwise convolution that acts separately on\nchannels, followed by a pointwise convolution that mixes channels.\nIf `use_bias` is True and a bias initializer is provided,\nit adds a bias vector to the output. It then optionally applies an\nactivation function to produce the final output.", + "attributes": [ + { + "default": "linear", + "description": "Activation function. If `None`, no activation is applied.", + "name": "activation" + }, + { + "default": "valid", + "description": "string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input. When `padding=\"same\"` and\n `strides=1`, the output has the same size as the input.", + "name": "padding" + }, + { + "default": true, + "description": "bool, if `True`, bias will be added to the output.", + "name": "use_bias", + "visible": false + }, + { + "default": "channels_last", + "description": "string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file\n at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.", + "name": "data_format" + }, + { + "default": [ + 1, + 1 + ], + "description": "int or tuple/list of 2 integers, specifying the stride length\n of the depthwise convolution. If only one int is specified, the same\n stride size will be used for all dimensions. `strides > 1` is\n incompatible with `dilation_rate > 1`.", + "name": "strides" + }, + { + "default": [ + 1, + 1 + ], + "description": "int or tuple/list of 2 integers, specifying the dilation\n rate to use for dilated convolution. 
If only one int is specified,\n the same dilation rate will be used for all dimensions.", + "name": "dilation_rate" + }, + { + "default": 1, + "description": "The number of depthwise convolution output channels\n for each input channel. The total number of depthwise convolution\n output channels will be equal to `input_channel * depth_multiplier`.", + "name": "depth_multiplier" + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "An initializer for the pointwise convolution\n kernel. If None, then the default initializer (`\"glorot_uniform\"`)\n will be used.", + "name": "pointwise_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "An initializer for the depthwise convolution\n kernel. If None, then the default initializer (`\"glorot_uniform\"`)\n will be used.", + "name": "depthwise_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "An initializer for the bias vector. If None, the\n default initializer ('\"zeros\"') will be used.", + "name": "bias_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "name": "kernel_initializer", + "visible": false + }, + { + "description": "int, the dimensionality of the output space (i.e. the number\n of filters in the pointwise convolution).", + "name": "filters" + }, + { + "description": "int or tuple/list of 2 integers, specifying the size of the\n depthwise convolution window.", + "name": "kernel_size" + }, + { + "description": "Optional regularizer for the depthwise\n convolution kernel.", + "name": "depthwise_regularizer", + "visible": false + }, + { + "description": "Optional regularizer for the pointwise\n convolution kernel.", + "name": "pointwise_regularizer", + "visible": false + }, + { + "description": "Optional regularizer for the bias vector.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Optional regularizer function for the output.", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Optional projection function to be applied to the\n depthwise kernel after being updated by an `Optimizer` (e.g. used\n for norm constraints or value constraints for layer weights). 
The\n function must take as input the unprojected variable and must return\n the projected variable (which must have the same shape).", + "name": "depthwise_constraint", + "visible": false + }, + { + "description": "Optional projection function to be applied to the\n pointwise kernel after being updated by an `Optimizer`.", + "name": "pointwise_constraint" + }, + { + "description": "Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.", + "name": "bias_constraint" + } + ], + "inputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, height, width, channels)`\n- If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, channels, height, width)`", + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "description": "- If `data_format=\"channels_last\"`:\n A 4D tensor with shape: `(batch_size, new_height, new_width, filters)`\n- If `data_format=\"channels_first\"`:\n A 4D tensor with shape: `(batch_size, filters, new_height, new_width)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> x = np.random.rand(4, 10, 10, 12)\n>>> y = keras.layers.SeparableConv2D(4, 3, 2, activation='relu')(x)\n>>> print(y.shape)\n(4, 4, 4, 4)" + } + ] + }, + { + "name": "Sigmoid", + "category": "Activation" + }, + { + "name": "SimpleRNN", + "module": "keras.layers", + "category": "Layer", + "description": "Fully-connected RNN where the output is to be fed back as the new input.", + "attributes": [ + { + "default": false, + "description": "Boolean. Whether to return the last output\n in the output sequence, or the full sequence. Default: `False`.", + "name": "return_sequences" + }, + { + "default": false, + "description": "Boolean. Whether to return the last state\n in addition to the output. Default: `False`.", + "name": "return_state" + }, + { + "default": false, + "description": "Boolean (default: `False`).\n If `True`, process the input sequence backwards and return the\n reversed sequence.", + "name": "go_backwards" + }, + { + "default": false, + "description": "Boolean (default: `False`). If `True`, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.", + "name": "stateful" + }, + { + "default": false, + "description": "Boolean (default: `False`).\n If `True`, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed up an RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.", + "name": "unroll" + }, + { + "default": "tanh", + "description": "Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass None, no activation is applied\n (i.e. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": true, + "description": "Boolean, (default `True`), whether the layer uses\n a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. 
Default:\n `\"glorot_uniform\"`.", + "name": "kernel_initializer", + "visible": false + }, + { + "default": { + "class_name": "Orthogonal", + "config": { + "gain": 1, + "seed": null + } + }, + "description": "Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent\n state. Default: `\"orthogonal\"`.", + "name": "recurrent_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector. Default: `\"zeros\"`.", + "name": "bias_initializer", + "visible": false + }, + { + "default": 0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for the linear transformation\n of the inputs. Default: 0.", + "name": "dropout" + }, + { + "default": 0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for the linear transformation of the\n recurrent state. Default: 0.", + "name": "recurrent_dropout" + }, + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector.\n Default: `None`.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the output of the\n layer (its \"activation\"). Default: `None`.", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector.\n Default: `None`.", + "name": "bias_constraint" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "recurrent_kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "examples": [ + { + "code": "inputs = np.random.random((32, 10, 8))\nsimple_rnn = keras.layers.SimpleRNN(4)\noutput = simple_rnn(inputs) # The output has shape `(32, 4)`.\nsimple_rnn = keras.layers.SimpleRNN(\n 4, return_sequences=True, return_state=True\n)\n# whole_sequence_output has shape `(32, 10, 4)`.\n# final_state has shape `(32, 4)`.\nwhole_sequence_output, final_state = simple_rnn(inputs)" + } + ] + }, + { + "name": "SimpleRNNCell", + "module": "keras.layers", + "description": "Cell class for SimpleRNN.\n\nThis class processes one step within the whole time sequence input, whereas\n`keras.layers.SimpleRNN` processes the whole sequence.", + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (i.e. 
\"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "description": "Boolean, (default `True`), whether the layer\n should use a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `\"glorot_uniform\"`.", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation\n of the recurrent state. Default: `\"orthogonal\"`.", + "name": "recurrent_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector. Default: `\"zeros\"`.", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector.\n Default: `None`.", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the `kernel` weights\n matrix. Default: `None`.", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector.\n Default: `None`.", + "name": "bias_constraint" + }, + { + "default": 0, + "description": "Float between 0 and 1. Fraction of the units to drop for the\n linear transformation of the inputs. Default: 0.", + "name": "dropout" + }, + { + "default": 0, + "description": "Float between 0 and 1. Fraction of the units to drop\n for the linear transformation of the recurrent state. Default: 0.", + "name": "recurrent_dropout" + }, + { + "description": "`None`.", + "name": "Default" + }, + { + "name": "seed", + "description": "Random seed for dropout." + } + ], + "examples": [ + { + "code": "inputs = np.random.random([32, 10, 8]).astype(np.float32)\nrnn = keras.layers.RNN(keras.layers.SimpleRNNCell(4))\noutput = rnn(inputs) # The output has shape `(32, 4)`.\nrnn = keras.layers.RNN(\n keras.layers.SimpleRNNCell(4),\n return_sequences=True,\n return_state=True\n)\n# whole_sequence_output has shape `(32, 10, 4)`.\n# final_state has shape `(32, 4)`.\nwhole_sequence_output, final_state = rnn(inputs)" + } + ] + }, + { + "name": "Softmax", + "module": "keras.layers", + "category": "Activation", + "description": "Softmax activation layer.\n\nFormula:\n``` python\nexp_x = exp(x - max(x))\nf(x) = exp_x / sum(exp_x)\n```", + "inputs": [ + { + "name": "input", + "description": "Arbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model." + } + ], + "outputs": [ + { + "name": "output", + "description": "Same shape as the input." + } + ], + "attributes": [ + { + "name": "axis", + "description": "Integer, or list of Integers, axis along which the softmax\n normalization is applied." + }, + { + "name": "**kwargs", + "description": "Base layer keyword arguments, such as `name` and `dtype`." 
 + ] + }, + { + "name": "SoftPlus", + "category": "Activation" + }, + { + "name": "SoftSign", + "category": "Activation" + }, + { + "name": "SpatialDropout1D", + "module": "keras.layers", + "category": "Dropout", + "description": "Spatial 1D version of Dropout.\n\nThis layer performs the same function as Dropout, however, it drops\nentire 1D feature maps instead of individual elements. If adjacent frames\nwithin feature maps are strongly correlated (as is normally the case in\nearly convolution layers) then regular dropout will not regularize the\nactivations and will otherwise just result in an effective learning rate\ndecrease. In this case, `SpatialDropout1D` will help promote independence\nbetween feature maps and should be used instead.", + "attributes": [ + { + "description": "Float between 0 and 1. Fraction of the input units to drop.", + "name": "rate" + } + ], + "inputs": [ + { + "description": "3D tensor with shape: `(samples, timesteps, channels)`\n\nOutput shape: Same as input.\n\nReference:\n\n- [Tompson et al., 2014](https://arxiv.org/abs/1411.4280)", + "name": "input" + } + ], + "outputs": [ + { + "description": "Same as input.", + "name": "output" + } + ], + "references": [ + { + "description": "[Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)" + } + ] + }, + { + "name": "SpatialDropout2D", + "module": "keras.layers", + "category": "Dropout", + "description": "Spatial 2D version of Dropout.\n\nThis version performs the same function as Dropout, however, it drops\nentire 2D feature maps instead of individual elements. If adjacent pixels\nwithin feature maps are strongly correlated (as is normally the case in\nearly convolution layers) then regular dropout will not regularize the\nactivations and will otherwise just result in an effective learning rate\ndecrease. In this case, `SpatialDropout2D` will help promote independence\nbetween feature maps and should be used instead.", + "attributes": [ + { + "description": "Float between 0 and 1. Fraction of the input units to drop.", + "name": "rate" + }, + { + "description": "`\"channels_first\"` or `\"channels_last\"`.\n In `\"channels_first\"` mode, the channels dimension (the depth)\n is at index 1, in `\"channels_last\"` mode it is at index 3.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.", + "name": "data_format" + } + ], + "inputs": [ + { + "description": "4D tensor with shape: `(samples, channels, rows, cols)` if\n data_format='channels_first'\n or 4D tensor with shape: `(samples, rows, cols, channels)` if\n data_format='channels_last'.\n\nOutput shape: Same as input.\n\nReference:\n\n- [Tompson et al., 2014](https://arxiv.org/abs/1411.4280)", + "name": "input" + } + ], + "outputs": [ + { + "description": "Same as input.", + "name": "output" + } + ], + "references": [ + { + "description": "[Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)" + } + ]
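, + "examples": [ + { + "summary": "A minimal sketch (illustrative shapes, not from the Keras docs): with `training=True`, entire feature maps are zeroed rather than individual activations.", + "code": "x = np.ones((2, 4, 4, 3))\nlayer = keras.layers.SpatialDropout2D(rate=0.5)\ny = layer(x, training=True)\n# y keeps shape (2, 4, 4, 3); on average half of the channels\n# are zeroed across all spatial positions of a sample." + } + ]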
 + }, + { + "name": "SpatialDropout3D", + "module": "keras.layers", + "category": "Dropout", + "description": "Spatial 3D version of Dropout.\n\nThis version performs the same function as Dropout, however, it drops\nentire 3D feature maps instead of individual elements. If adjacent voxels\nwithin feature maps are strongly correlated (as is normally the case in\nearly convolution layers) then regular dropout will not regularize the\nactivations and will otherwise just result in an effective learning rate\ndecrease. In this case, SpatialDropout3D will help promote independence\nbetween feature maps and should be used instead.", + "attributes": [ + { + "description": "Float between 0 and 1. Fraction of the input units to drop.", + "name": "rate" + }, + { + "description": "`\"channels_first\"` or `\"channels_last\"`.\n In `\"channels_first\"` mode, the channels dimension (the depth)\n is at index 1, in `\"channels_last\"` mode it is at index 4.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.", + "name": "data_format" + } + ], + "inputs": [ + { + "description": "5D tensor with shape: `(samples, channels, dim1, dim2, dim3)` if\n data_format='channels_first'\n or 5D tensor with shape: `(samples, dim1, dim2, dim3, channels)` if\n data_format='channels_last'.\n\nOutput shape: Same as input.\n\nReference:\n\n- [Tompson et al., 2014](https://arxiv.org/abs/1411.4280)", + "name": "input" + } + ], + "outputs": [ + { + "description": "Same as input.", + "name": "output" + } + ], + "references": [ + { + "description": "[Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)" + } + ] + }, + { + "name": "StackedRNNCells", + "module": "keras.layers", + "description": "Wrapper allowing a stack of RNN cells to behave as a single cell.\n\nUsed to implement efficient stacked RNNs.", + "attributes": [ + { + "description": "List of RNN cell instances.", + "name": "cells" + } + ], + "examples": [ + { + "code": "batch_size = 3\nsentence_length = 5\nnum_features = 2\nnew_shape = (batch_size, sentence_length, num_features)\nx = np.reshape(np.arange(30), new_shape)\n\nrnn_cells = [keras.layers.LSTMCell(128) for _ in range(2)]\nstacked_lstm = keras.layers.StackedRNNCells(rnn_cells)\nlstm_layer = keras.layers.RNN(stacked_lstm)\n\nresult = lstm_layer(x)" + } + ] + }, + { + "name": "Subtract", + "module": "keras.layers", + "description": "Performs elementwise subtraction.\n\nIt takes as input a list of exactly two tensors of the\nsame shape, and returns a single tensor (inputs[0] - inputs[1]),\nalso of the same shape.", + "inputs": [ + { + "name": "x" + }, + { + "name": "y" + } + ], + "outputs": [ + { + "name": "z" + } + ], + "examples": [ + { + "code": ">>> input_shape = (2, 3, 4)\n>>> x1 = np.random.rand(*input_shape)\n>>> x2 = np.random.rand(*input_shape)\n>>> y = keras.layers.Subtract()([x1, x2])" + }, + { + "summary": "Usage in a Keras model:", + "code": ">>> input1 = keras.layers.Input(shape=(16,))\n>>> x1 = keras.layers.Dense(8, activation='relu')(input1)\n>>> input2 = keras.layers.Input(shape=(32,))\n>>> x2 = keras.layers.Dense(8, activation='relu')(input2)\n>>> # equivalent to `subtracted = keras.layers.subtract([x1, x2])`\n>>> subtracted = keras.layers.Subtract()([x1, x2])\n>>> out = keras.layers.Dense(4)(subtracted)\n>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)" + } + ] + }, + { + "name": "TanH", + "category": "Activation" + }, + { + "name": "ThresholdedReLU", + "module": "keras.layers", + "category": "Activation", + "description": "Thresholded Rectified Linear Unit.\n\nIt follows:\n\n```\n f(x) = x for x > theta\n f(x) = 0 otherwise\n```", + "attributes": [ + { + "description": "Float >= 0. Threshold location of activation.", + "name": "theta" + } + ], + "inputs": [ + { + "description": "Arbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Same shape as the input.", + "name": "output" + } + ], + "references": [ + { + "description": "[Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](https://arxiv.org/abs/1402.3337)" + } + ] + }, + { + "name": "TimeDistributed", + "module": "keras.layers", + "category": "Wrapper", + "description": "This wrapper allows you to apply a layer to every temporal slice of an input.\n\nEvery input should be at least 3D, and the dimension of index one of the\nfirst input will be considered to be the temporal dimension.\n\nConsider a batch of 32 video samples, where each sample is a 128x128 RGB\nimage with `channels_last` data format, across 10 timesteps.\nThe batch input shape is `(32, 10, 128, 128, 3)`.\n\nYou can then use `TimeDistributed` to apply the same `Conv2D` layer to each\nof the 10 timesteps, independently:\n\n```\n>>> inputs = layers.Input(shape=(10, 128, 128, 3), batch_size=32)\n>>> conv_2d_layer = layers.Conv2D(64, (3, 3))\n>>> outputs = layers.TimeDistributed(conv_2d_layer)(inputs)\n>>> outputs.shape\n(32, 10, 126, 126, 64)\n```\n\nBecause `TimeDistributed` applies the same instance of `Conv2D` to each of\nthe timesteps, the same set of weights is used at each timestep.", + "attributes": [ + { + "description": "a `keras.layers.Layer` instance.", + "name": "layer" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ]
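, + "examples": [ + { + "summary": "A minimal functional-API sketch (illustrative shapes): the same `Dense` instance, and therefore the same weights, is applied at every timestep.", + "code": ">>> inputs = keras.layers.Input(shape=(10, 16), batch_size=32)\n>>> outputs = keras.layers.TimeDistributed(keras.layers.Dense(8))(inputs)\n>>> outputs.shape\n(32, 10, 8)" + } + ]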
 + }, + { + "name": "UpSampling1D", + "module": "keras.layers", + "category": "Layer", + "description": "Upsampling layer for 1D inputs.\n\nRepeats each temporal step `size` times along the time axis.", + "attributes": [ + { + "default": "channels_last", + "name": "data_format" + }, + { + "description": "Integer. Upsampling factor.", + "name": "size" + } + ], + "inputs": [ + { + "description": "3D tensor with shape: `(batch_size, steps, features)`.", + "name": "input" + } + ], + "outputs": [ + { + "description": "3D tensor with shape: `(batch_size, upsampled_steps, features)`.", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> input_shape = (2, 2, 3)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> x\n[[[ 0 1 2]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 9 10 11]]]\n>>> y = keras.layers.UpSampling1D(size=2)(x)\n>>> y\n[[[ 0. 1. 2.]\n [ 0. 1. 2.]\n [ 3. 4. 5.]\n [ 3. 4. 5.]]\n [[ 6. 7. 8.]\n [ 6. 7. 8.]\n [ 9. 10. 11.]\n [ 9. 10. 11.]]]" + } + ] + }, + { + "name": "UpSampling2D", + "module": "keras.layers", + "category": "Layer", + "description": "Upsampling layer for 2D inputs.\n\nThe implementation uses interpolative resizing, given the resize method\n(specified by the `interpolation` argument). 
Use `interpolation=nearest`\nto repeat the rows and columns of the data.", + "attributes": [ + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n When unspecified, uses\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json` (if exists) else `\"channels_last\"`.\n Defaults to `\"channels_last\"`.", + "name": "data_format" + }, + { + "description": "Int, or tuple of 2 integers.\n The upsampling factors for rows and columns.", + "name": "size" + }, + { + "description": "A string, one of `\"bicubic\"`, `\"bilinear\"`, `\"lanczos3\"`,\n `\"lanczos5\"`, `\"nearest\"`.", + "name": "interpolation" + } + ], + "inputs": [ + { + "description": "4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_rows, upsampled_cols, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, upsampled_rows, upsampled_cols)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> input_shape = (2, 2, 1, 3)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> print(x)\n[[[[ 0 1 2]]\n [[ 3 4 5]]]\n [[[ 6 7 8]]\n [[ 9 10 11]]]]\n>>> y = keras.layers.UpSampling2D(size=(1, 2))(x)\n>>> print(y)\n[[[[ 0 1 2]\n [ 0 1 2]]\n [[ 3 4 5]\n [ 3 4 5]]]\n [[[ 6 7 8]\n [ 6 7 8]]\n [[ 9 10 11]\n [ 9 10 11]]]]" + } + ] + }, + { + "name": "UpSampling3D", + "module": "keras.layers", + "category": "Layer", + "description": "Upsampling layer for 3D inputs.\n\nRepeats the 1st, 2nd and 3rd dimensions\nof the data by `size[0]`, `size[1]` and `size[2]` respectively.", + "attributes": [ + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n When unspecified, uses\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json` (if exists) else `\"channels_last\"`.\n Defaults to `\"channels_last\"`.", + "name": "data_format" + }, + { + "description": "Int, or tuple of 3 integers.\n The upsampling factors for dim1, dim2 and dim3.", + "name": "size" + } + ], + "inputs": [ + { + "description": "5D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, dim1, dim2, dim3, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, dim1, dim2, dim3)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "5D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3,\n channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, upsampled_dim1, upsampled_dim2,\n upsampled_dim3)`", + "name": "output" + } + ], + "examples": [ + { + 
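"summary": "A shape-only sketch: upsampling with `size=(2, 2, 2)` doubles each spatial dimension of the 5D input.", + 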
"code": ">>> input_shape = (2, 1, 2, 1, 3)\n>>> x = np.ones(input_shape)\n>>> y = keras.layers.UpSampling3D(size=(2, 2, 2))(x)\n>>> y.shape\n(2, 2, 4, 2, 3)" + } + ] + }, + { + "name": "ZeroPadding1D", + "module": "keras.layers", + "category": "Tensor", + "description": "Zero-padding layer for 1D input (e.g. temporal sequence).", + "attributes": [ + { + "description": "Int, or tuple of int (length 2), or dictionary.\n - If int: how many zeros to add at the beginning and end of\n the padding dimension (axis 1).\n - If tuple of 2 ints: how many zeros to add at the beginning and the\n end of the padding dimension (`(left_pad, right_pad)`).", + "name": "padding" + } + ], + "inputs": [ + { + "description": "3D tensor with shape `(batch_size, axis_to_pad, features)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "3D tensor with shape `(batch_size, padded_axis, features)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> input_shape = (2, 2, 3)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> x\n[[[ 0 1 2]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 9 10 11]]]\n>>> y = keras.layers.ZeroPadding1D(padding=2)(x)\n>>> y\n[[[ 0 0 0]\n [ 0 0 0]\n [ 0 1 2]\n [ 3 4 5]\n [ 0 0 0]\n [ 0 0 0]]\n [[ 0 0 0]\n [ 0 0 0]\n [ 6 7 8]\n [ 9 10 11]\n [ 0 0 0]\n [ 0 0 0]]]" + } + ] + }, + { + "name": "ZeroPadding2D", + "module": "keras.layers", + "category": "Tensor", + "description": "Zero-padding layer for 2D input (e.g. picture).\n\nThis layer can add rows and columns of zeros at the top, bottom, left and\nright side of an image tensor.", + "attributes": [ + { + "description": "Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\n - If int: the same symmetric padding is applied to height and width.\n - If tuple of 2 ints: interpreted as two different symmetric padding\n values for height and width:\n `(symmetric_height_pad, symmetric_width_pad)`.\n - If tuple of 2 tuples of 2 ints: interpreted as\n `((top_pad, bottom_pad), (left_pad, right_pad))`.", + "name": "padding" + }, + { + "description": "A string, one of `\"channels_last\"` (default) or\n `\"channels_first\"`. The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n When unspecified, uses `image_data_format` value found in your Keras\n config file at `~/.keras/keras.json` (if exists). 
Defaults to\n `\"channels_last\"`.", + "name": "data_format" + } + ], + "inputs": [ + { + "description": "4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, height, width, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, height, width)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, padded_height, padded_width, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, padded_height, padded_width)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> input_shape = (1, 1, 2, 2)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> x\n[[[[0 1]\n [2 3]]]]\n>>> y = keras.layers.ZeroPadding2D(padding=1)(x)\n>>> y\n[[[[0 0]\n [0 0]\n [0 0]\n [0 0]]\n [[0 0]\n [0 1]\n [2 3]\n [0 0]]\n [[0 0]\n [0 0]\n [0 0]\n [0 0]]]]" + } + ] + }, + { + "name": "ZeroPadding3D", + "module": "keras.layers", + "category": "Tensor", + "description": "Zero-padding layer for 3D data (spatial or spatio-temporal).", + "attributes": [ + { + "description": "Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\n - If int: the same symmetric padding is applied to depth, height,\n and width.\n - If tuple of 3 ints: interpreted as three different symmetric\n padding values for depth, height, and width:\n `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.\n - If tuple of 3 tuples of 2 ints: interpreted as\n `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,\n right_dim2_pad), (left_dim3_pad, right_dim3_pad))`.", + "name": "padding" + }, + { + "description": "A string, one of `\"channels_last\"` (default) or\n `\"channels_first\"`. The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n When unspecified, uses `image_data_format` value found in your Keras\n config file at `~/.keras/keras.json` (if exists). Defaults to\n `\"channels_last\"`.", + "name": "data_format" + } + ], + "inputs": [ + { + "description": "5D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, first_axis_to_pad, second_axis_to_pad,\n third_axis_to_pad, depth)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_axis_to_pad, second_axis_to_pad,\n third_axis_to_pad)`", + "name": "input" + } + ], + "outputs": [ + { + "description": "5D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch_size, first_padded_axis, second_padded_axis,\n third_padded_axis, depth)`\n- If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_padded_axis, second_padded_axis,\n third_padded_axis)`", + "name": "output" + } + ], + "examples": [ + { + "code": ">>> input_shape = (1, 1, 2, 2, 3)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> y = keras.layers.ZeroPadding3D(padding=2)(x)\n>>> y.shape\n(1, 5, 6, 6, 3)" + } + ] + }, + { + "name": "Attention", + "module": "keras.layers", + "inputs": [ + { + "name": "query", + "type": "Tensor[]" + }, + { + "name": "value", + "type": "Tensor[]" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "description": "Dot-product attention layer, a.k.a. Luong-style attention.\n\nInputs are a list with 2 or 3 elements:\n1. A `query` tensor of shape `(batch_size, Tq, dim)`.\n2.
A `value` tensor of shape `(batch_size, Tv, dim)`.\n3. An optional `key` tensor of shape `(batch_size, Tv, dim)`. If none is\n supplied, `value` will be used as a `key`.\n\nThe calculation follows the steps:\n1. Calculate attention scores using `query` and `key` with shape\n `(batch_size, Tq, Tv)`.\n2. Use scores to calculate a softmax distribution with shape\n `(batch_size, Tq, Tv)`.\n3. Use the softmax distribution to create a linear combination of `value`\n with shape `(batch_size, Tq, dim)`.", + "attributes": [ + { + "name": "use_scale", + "description": "If `True`, will create a scalar variable to scale the\n attention scores." + }, + { + "name": "causal", + "description": "Boolean. Set to `True` for decoder self-attention. Adds a mask\n such that position `i` cannot attend to positions `j > i`. This prevents\n the flow of information from the future towards the past. Defaults to\n `False`." + }, + { + "name": "dropout", + "description": "Float between 0 and 1. Fraction of the units to drop for the\n attention scores. Defaults to `0.0`." + }, + { + "name": "inputs", + "description": "List of the following tensors:\n - `query`: Query tensor of shape `(batch_size, Tq, dim)`.\n - `value`: Value tensor of shape `(batch_size, Tv, dim)`.\n - `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If\n not given, will use `value` for both `key` and `value`, which is\n the most common case." + }, + { + "name": "mask", + "description": "List of the following tensors:\n - `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.\n If given, the output will be zero at the positions where\n `mask==False`.\n - `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.\n If given, will apply the mask such that values at positions\n where `mask==False` do not contribute to the result." + }, + { + "name": "return_attention_scores", + "description": "bool, if `True`, returns the attention scores\n (after masking and softmax) as an additional output argument." + }, + { + "name": "training", + "description": "Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout)." + }, + { + "name": "score_mode", + "description": "Function to use to compute attention scores, one of\n `{\"dot\", \"concat\"}`. `\"dot\"` refers to the dot product between the\n query and key vectors. `\"concat\"` refers to the hyperbolic tangent\n of the concatenation of the `query` and `key` vectors." + }, + { + "name": "use_causal_mask", + "description": "Boolean. Set to `True` for decoder self-attention. Adds\n a mask such that position `i` cannot attend to positions `j > i`.\n This prevents the flow of information from the future towards the\n past. Defaults to `False`.\n\nOutput:\n Attention outputs of shape `(batch_size, Tq, dim)`.\n (Optional) Attention scores after masking and softmax with shape\n `(batch_size, Tq, Tv)`." + }, + { + "name": "seed", + "description": "A Python integer to use as random seed in case of `dropout`."
+ } + ] + }, + { + "name": "nn.relu", + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + }, + { + "name": "math.add", + "inputs": [ + { + "name": "x", + "type": "Tensor" + }, + { + "name": "y", + "type": "Tensor" + } + ], + "outputs": [ + { + "name": "z" + } + ] + }, + { + "name": "__operators__.add", + "inputs": [ + { + "name": "x", + "type": "Tensor" + }, + { + "name": "y", + "type": "Tensor" + } + ], + "outputs": [ + { + "name": "z" + } + ] + }, + { + "name": "linalg.matmul", + "attributes": [ + { + "name": "transpose_a", + "type": "boolean" + }, + { + "name": "transpose_b", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "a", + "type": "Tensor" + }, + { + "name": "b", + "type": "Tensor" + } + ], + "outputs": [ + { + "name": "c", + "type": "Tensor" + } + ] + }, + { + "name": "nn.abs", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + }, + { + "name": "math.sigmoid", + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + }, + { + "name": "reshape", + "category": "Shape", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + }, + { + "name": "concat", + "category": "Tensor", + "inputs": [ + { + "name": "inputs", + "list": true + } + ], + "outputs": [ + { + "name": "output" + } + ] + }, + { + "name": "compat.v1.transpose", + "category": "Shape", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } +] diff --git a/keras-proto.js b/keras-proto.js new file mode 100644 index 00000000000..16455c67f61 --- /dev/null +++ b/keras-proto.js @@ -0,0 +1,181 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('tf'); + +$root.third_party = {}; + +$root.third_party.tensorflow = {}; + +$root.third_party.tensorflow.python = {}; + +$root.third_party.tensorflow.python.keras = {}; + +$root.third_party.tensorflow.python.keras.protobuf = {}; + +$root.third_party.tensorflow.python.keras.protobuf.SavedMetadata = class SavedMetadata { + + constructor() { + this.nodes = []; + } + + static decode(reader, length) { + const message = new $root.third_party.tensorflow.python.keras.protobuf.SavedMetadata(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.nodes.push($root.third_party.tensorflow.python.keras.protobuf.SavedObject.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.third_party.tensorflow.python.keras.protobuf.SavedMetadata(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "nodes": + message.nodes.push($root.third_party.tensorflow.python.keras.protobuf.SavedObject.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.third_party.tensorflow.python.keras.protobuf.SavedObject = class SavedObject { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.third_party.tensorflow.python.keras.protobuf.SavedObject(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.node_id = reader.int32(); + break; + case 3: + message.node_path = reader.string(); + break; + case 4: + message.identifier = reader.string(); + break; + case 5: + message.metadata = reader.string(); + break; + case 6: + message.version = $root.third_party.tensorflow.python.keras.protobuf.VersionDef.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.third_party.tensorflow.python.keras.protobuf.SavedObject(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "node_id": + message.node_id = reader.int32(); + break; + case "node_path": + message.node_path = reader.string(); + break; + case "identifier": + message.identifier = reader.string(); + break; + case "metadata": + message.metadata = reader.string(); + break; + case "version": + message.version = $root.third_party.tensorflow.python.keras.protobuf.VersionDef.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.third_party.tensorflow.python.keras.protobuf.SavedObject.prototype.node_id = 0; +$root.third_party.tensorflow.python.keras.protobuf.SavedObject.prototype.node_path = ""; +$root.third_party.tensorflow.python.keras.protobuf.SavedObject.prototype.identifier = ""; +$root.third_party.tensorflow.python.keras.protobuf.SavedObject.prototype.metadata = ""; +$root.third_party.tensorflow.python.keras.protobuf.SavedObject.prototype.version = null; + +$root.third_party.tensorflow.python.keras.protobuf.VersionDef = class VersionDef { + + constructor() { + this.bad_consumers = []; + } + + static decode(reader, length) { + const message = new $root.third_party.tensorflow.python.keras.protobuf.VersionDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.producer = reader.int32(); + break; + case 2: + message.min_consumer = reader.int32(); + break; + case 3: + message.bad_consumers = reader.array(message.bad_consumers, () => reader.int32(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.third_party.tensorflow.python.keras.protobuf.VersionDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "producer": + message.producer = reader.int32(); + break; + case "min_consumer": + message.min_consumer = reader.int32(); + break; + case "bad_consumers": + reader.array(message.bad_consumers, () => reader.int32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.third_party.tensorflow.python.keras.protobuf.VersionDef.prototype.producer = 0; +$root.third_party.tensorflow.python.keras.protobuf.VersionDef.prototype.min_consumer = 0; diff --git a/keras.js b/keras.js new file mode 100644 index 00000000000..3f76c423289 --- /dev/null +++ b/keras.js @@ -0,0 +1,1545 @@ + +import * as json from './json.js'; +import * as python from './python.js'; +import * as protobuf from './protobuf.js'; + +const keras = {}; +const tfjs = {}; + +keras.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + const group = context.peek('hdf5'); + if (group && group.attributes && group.attributes.get('CLASS') !== 'hickle') { + if (identifier === 'model.weights.h5') { + return { name: 'keras.model.weights.h5', value: group }; + } + return { name: 'keras.h5', value: group }; + } + const json = context.peek('json'); + if (json) { + if (json.mxnet_version || (json.nodes && json.arg_nodes && json.heads)) { + return null; + } + if (json.model_config || (json.class_name && json.config)) { + return { name: 'keras.config.json', value: json }; + } + if (identifier === 'metadata.json' && json.keras_version) { + return { name: 'keras.metadata.json', value: json }; + } + } + const container = tfjs.Container.open(context); + if (container) { + return { name: 'tfjs.json', value: container }; + } + const pickle = context.peek('pkl'); + if (pickle && pickle.__class__ && + pickle.__class__.__module__ === 'keras.engine.sequential' && + pickle.__class__.__name__ === 'Sequential') { + return { name: 'keras.pickle', value: pickle }; + } + // model.weights.npz + const entries = context.peek('npz'); + const regex = /^(__root__|layers\/.+|_layer_checkpoint_dependencies\/.+)\.npy$/; + if (entries instanceof Map && entries.size > 0 && Array.from(entries).every(([name]) => regex.test(name))) { + return { name: 'keras.model.weights.npz', value: entries }; + } + // keras_metadata.pb + if (extension === 'pb' && context.stream && context.stream.length > 16) { + const tags = context.tags('pb'); + if (tags.size === 1 && tags.get(1) === 2) { + const stream = context.stream; + const buffer = stream.peek(Math.min(stream.length, 1024)); + const content = String.fromCharCode.apply(null, buffer); + if (/root"/.test(content) && /\{\s*"class_name"\s*:/.test(content)) { + return { name: 'keras.pb.SavedMetadata' }; + } + } + } + return null; + } + + async open(context, target) { + const request_json = async (context, name) => { + try { + context = await context.fetch(name); + } 
catch (error) { + return null; + } + return context.read('json'); + }; + const _create_config = (weights_store) => { + const config = {}; + config.class_name = 'Model'; + config.config = {}; + config.config.layers = []; + const snake_to_pascal_case = (name) => { + return name.replace(/(^|_|\d)([a-z])/g, (match, p1, p2) => p1 === '_' ? p2.toUpperCase() : p1 + p2.toUpperCase()); + }; + for (const [name, value] of weights_store) { + const layer = {}; + layer.name = name; + layer.class_name = name.split('/').pop().replace(/_[0-9]+$/, ''); + layer.class_name = snake_to_pascal_case(layer.class_name); + layer.config = {}; + layer.config.name = name; + layer._trainable_variables = value; + config.config.layers.push(layer); + } + return config; + }; + const _load_state = (trackable, weights_store, assets_store, inner_path) => { + inner_path = inner_path || ''; + if (trackable && trackable.config && Array.isArray(trackable.config.layers)) { + /* eslint-disable no-use-before-define */ + _load_container_state(trackable, weights_store, assets_store, inner_path ? `${inner_path}/layers` : 'layers'); + /* eslint-enable no-use-before-define */ + } else { + const weights = weights_store.get(inner_path); + if (weights) { + trackable._trainable_variables = weights; + } + } + }; + const _load_container_state = (container, weights_store, assets_store, inner_path) => { + const used_names = new Map(); + for (const trackable of container.config.layers) { + const pascal_to_snake_case = (name) => { + name = name.replace(/\W+/g, ""); + name = name.replace(/(.)([A-Z][a-z]+)/g, (match, p1, p2) => `${p1}_${p2}`); + name = name.replace(/([a-z])([A-Z])/g, (match, p1, p2) => `${p1}_${p2}`); + return name.toLowerCase(); + }; + let name = pascal_to_snake_case(trackable.class_name); + if (!used_names.has(name)) { + used_names.set(name, 0); + } else { + const next = used_names.get(name) + 1; + used_names.set(name, next); + name = `${name}_${next}`; + } + _load_state(trackable, weights_store, assets_store, `${inner_path}/${name}`); + } + }; + const read_weights_hdf5 = (group) => { + const walk = (group, path, weights_store) => { + const checkpoint = group.groups.get('layers') || group.groups.get('_layer_checkpoint_dependencies'); + if (checkpoint) { + for (const [key, layer] of checkpoint.groups) { + const name = `${path ? `${path}/` : ''}layers/${key}`; + walk(layer, name, weights_store); + const values = []; + for (const vars of layer.groups) { + for (const [name, group] of vars[1].groups) { + const variable = group.value; + if (variable) { + const layout = variable.littleEndian ? '<' : '>'; + const tensor = new keras.Tensor(name, variable.shape, variable.type, null, null, layout, variable.data); + values.push(tensor); + } + } + } + if (values.length > 0) { + weights_store.set(name, values); + } + } + } + }; + const weights_store = new Map(); + walk(group, '', weights_store); + return weights_store; + }; + const read_weights_numpy = (entries) => { + const weights_store = new Map(); + for (const [path, array] of entries) { + const file = path.split('/').map((name) => name === '_layer_checkpoint_dependencies' ?
'layers' : name).join('/'); + if (file.endsWith('.npy') && file.startsWith('layers/')) { + if (array.dtype.name === 'object' && array.shape.length === 0 && + Array.isArray(array.data) && array.data.length === 1) { + const values = Object.values(array.data[0]).map((array) => { + const stride = array.strides.map((stride) => stride / array.itemsize); + const dataType = array.dtype.__name__; + const values = dataType == 'string' || dataType == 'object' ? array.flatten().tolist() : array.tobytes(); + const encoding = dataType == 'string' || dataType == 'object' ? '|' : array.dtype.byteorder; + return new keras.Tensor('', array.shape, dataType, stride, null, encoding, values); + }); + if (values.length > 0) { + const name = file.replace(/\.npy$/, ''); + weights_store.set(name, values); + } + } + } + } + return weights_store; + }; + const request_weights = async (context) => { + const formats = [ + [ 'model.weights.h5', 'hdf5', read_weights_hdf5 ], + [ 'model.weights.npz', 'npz', read_weights_numpy ], + ]; + for (const [name, type, callback] of formats) { + let content = null; + try { + /* eslint-disable no-await-in-loop */ + content = await context.fetch(name); + /* eslint-enable no-await-in-loop */ + } catch (error) { + // continue regardless of error + } + if (content) { + const obj = content.peek(type); + if (obj) { + return callback(obj); + } + } + } + return new Map(); + }; + const open_model = async (format, producer, backend, config, weights) => { + const metadata = await context.metadata('keras-metadata.json'); + return new keras.Model(metadata, format, producer, backend, config, weights); + }; + switch (target.name) { + case 'keras.config.json': { + const obj = target.value; + const config = obj.model_config ? obj.model_config : obj; + const backend = obj.backend || ''; + let version = obj.keras_version ? obj.keras_version : null; + if (!version) { + const metadata = await request_json(context, 'metadata.json'); + if (metadata && metadata.keras_version) { + version = metadata.keras_version; + } + } + const format = `Keras${version ? ` v${version}` : ''}`; + const weights_store = await request_weights(context); + _load_state(config, weights_store); + return open_model(format, '', backend, config, null); + } + case 'keras.model.weights.h5': { + const group = target.value; + const weights_store = read_weights_hdf5(group); + const metadata = await request_json(context, 'metadata.json'); + let config = await request_json(context, 'config.json'); + const name = config ? 'Keras' : 'Keras Weights'; + const format = name + (metadata && metadata.keras_version ? ` v${metadata.keras_version}` : ''); + if (config) { + _load_state(config, weights_store); + } else { + config = _create_config(weights_store); + } + return open_model(format, '', '', config, null); + } + case 'keras.model.weights.npz': { + const entries = target.value; + const weights_store = read_weights_numpy(entries); + const metadata = await request_json(context, 'metadata.json'); + let config = await request_json(context, 'config.json'); + const name = config ? 'Keras' : 'Keras Weights'; + const format = name + (metadata && metadata.keras_version ? ` v${metadata.keras_version}` : ''); + if (config) { + _load_state(config, weights_store); + } else { + config = _create_config(weights_store); + } + return open_model(format, '', '', config, null); + } + case 'keras.metadata.json': { + const metadata = target.value; + let config = await request_json(context, 'config.json'); + const name = config ? 
'Keras' : 'Keras Weights'; + const format = name + (metadata.keras_version ? ` v${metadata.keras_version}` : ''); + const weights_store = await request_weights(context); + if (!config && (!weights_store || weights_store.size === 0)) { + throw new keras.Error("'config.json' or 'model.weights.*' not present."); + } + if (config) { + _load_state(config, weights_store); + } else { + config = _create_config(weights_store); + } + return open_model(format, '', '', config, null); + } + case 'keras.h5': { + const find_root_group = (root_group) => { + const kerasmodel = root_group.group('model/kerasmodel'); + if (kerasmodel && kerasmodel.attributes.has('model_config')) { + return kerasmodel; + } + return root_group; + }; + const read_model_config = (group) => { + if (group.attributes.has('model_config')) { + const buffer = group.attributes.get('model_config'); + const reader = json.TextReader.open(buffer); + if (reader) { + return reader.read(); + } + } + return null; + }; + const load_attributes_from_hdf5_group = (group, name) => { + if (group.attributes.has(name)) { + return group.attributes.get(name); + } + if (group.attributes.has(`${name}0`)) { + let index = 0; + let value = []; + while (group.attributes.has(name + index.toString())) { + const chunk = group.attributes.get(name + index.toString()); + value = value.concat(chunk); + index++; + } + return value; + } + return null; + }; + const weights = new keras.Weights(); + const group = target.value; + const root_group = find_root_group(group); + const model_config = read_model_config(root_group); + if (model_config) { + const backend = root_group.attributes.get('backend') || ''; + const version = root_group.attributes.get('keras_version') || ''; + const format = `Keras${version ? ` v${version}` : ''}`; + const model_weights_group = root_group.group('model_weights'); + if (model_weights_group) { + const layer_names = load_attributes_from_hdf5_group(model_weights_group, 'layer_names'); + for (const layer_name of layer_names) { + const layer_weights = model_weights_group.group(layer_name); + if (layer_weights) { + const weight_names = load_attributes_from_hdf5_group(layer_weights, 'weight_names'); + if (Array.isArray(weight_names) && weight_names.length > 0) { + for (const weight_name of weight_names) { + const weight = layer_weights.group(weight_name); + if (weight && weight.value) { + const variable = weight.value; + const tensor = new keras.Tensor(weight_name, variable.shape, variable.type, null, null, variable.littleEndian ? '<' : '>', variable.data); + weights.add(layer_name, tensor); + } + } + } + } + } + } + if (!model_config) { + throw new keras.Error("'model_config' is not present."); + } + if (!model_config.class_name) { + throw new keras.Error("'class_name' is not present."); + } + return open_model(format, '', backend, model_config, weights); + } + const layer_names = load_attributes_from_hdf5_group(root_group, 'layer_names'); + if (layer_names && Array.isArray(layer_names)) { + const version = root_group.attributes.get('keras_version') || ''; + const format = `Keras Weights${version ?
` v${version}` : ''}`; + const backend = root_group.attributes.get('backend') || ''; + for (const layer_name of layer_names) { + const layer_weights = root_group.group(layer_name); + if (layer_weights) { + const weight_names = load_attributes_from_hdf5_group(layer_weights, 'weight_names'); + if (Array.isArray(weight_names) && weight_names.length > 0) { + for (const weight_name of weight_names) { + const weight = layer_weights.group(weight_name); + if (weight && weight.value) { + const variable = weight.value; + const components = weight_name.split('/'); + components.pop(); + const name = (components.length == 0 || components[0] !== layer_name) ? [ layer_name ].concat(components).join('/') : components.join('/'); + const encoding = variable.littleEndian ? '<' : '>'; + const tensor = new keras.Tensor(weight_name, variable.shape, variable.type, null, null, encoding, variable.data); + weights.add(name, tensor); + } + } + } + } + } + return open_model(format, '', backend, null, weights); + } + const rootKeys = new Set(root_group.attributes.keys()); + rootKeys.delete('nb_layers'); + if (rootKeys.size > 0 || root_group.value !== null) { + throw new keras.Error('File format is not HDF5 Weights.'); + } + const format = 'HDF5 Weights'; + let weights_group = root_group; + if (root_group.attributes.size === 0 && root_group.value === null && root_group.groups.size == 1) { + const group = root_group.groups.values().next().value; + if (group.attributes.size === 0 && group.value === null) { + weights_group = group; + } + } + const tensorKeys = new Set([ 'name', 'shape', 'quantization' ]); + const groups = Array.from(weights_group.groups.values()); + if (groups.every((group) => group.attributes.size === 0 && group.groups.size === 0 && group.value !== null)) { + for (const group of groups) { + const variable = group.value; + const layout = variable.littleEndian ? '<' : '>'; + const tensor = new keras.Tensor(group.name, variable.shape, variable.type, null, null, layout, variable.type === 'string' ? variable.value : variable.data); + weights.add('', tensor); + } + return open_model(format, '', '', null, weights); + } + if (groups.every((group) => group.value === null && Array.from(group.attributes.keys()).filter((key) => !tensorKeys.has(key)).length === 0 && Array.from(group.groups.values()).every((variable) => variable.attributes.size === 0 && variable.value !== null))) { + for (const group of groups) { + const moduleName = group.attributes.has('name') ? group.attributes.get('name') : group.name; + for (const variableGroup of group.groups.values()) { + if (variableGroup.attributes.size !== 0 || variableGroup.groups.size !== 0) { + throw new keras.Error('Variable format is not HDF5 Weights.'); + } + const variable = variableGroup.value; + if (!variable) { + throw new keras.Error('Variable value is not HDF5 Weights.'); + } + const name = moduleName ? [ moduleName, variableGroup.name ].join('/') : variableGroup.name; + const layout = variable.littleEndian ? '<' : '>'; + const tensor = new keras.Tensor(name, variable.shape, variable.type, null, null, layout, variable.type === 'string' ?
variable.value : variable.data); + weights.add(moduleName, tensor); + } + } + return open_model(format, '', '', null, weights); + } + const walk = function(group) { + if (group.attributes.size === 0 && group.value === null && group.groups.size > 0) { + for (const subGroup of group.groups.values()) { + walk(subGroup); + } + return; + } + const subKeys = new Set([ 'index', 'need_grad' ]); + const attributes = Array.from(group.attributes.keys()); + const match = attributes.filter((key) => !subKeys.has(key)).length === 0; + if (match && group.value !== null && group.groups.size === 0) { + const variable = group.value; + const variableName = group.path; + let moduleName = variableName; + const parts = variableName.split('/'); + if (parts.length > 1) { + parts.pop(); + moduleName = parts.join('/'); + } + const layout = variable.littleEndian ? '<' : '>'; + const tensor = new keras.Tensor(variableName, variable.shape, variable.type, null, null, layout, variable.type === 'string' ? variable.value : variable.data); + weights.add(moduleName, tensor); + return; + } + throw new keras.Error('Module group format is not HDF5 Weights.'); + }; + walk(weights_group); + return open_model(format, '', '', null, weights); + } + case 'tfjs.json': { + const container = tfjs.Container.open(context); + await container.open(); + return open_model(container.format, container.producer, container.backend, container.config, container.weights); + } + case 'keras.pickle': { + const obj = target.value; + const execution = new python.Execution(); + const decoder = new TextDecoder('utf-8'); + const format = `Keras Pickle${obj.keras_version ? ` v${decoder.decode(obj.keras_version)}` : ''}`; + const backend = obj.backend ? decoder.decode(obj.backend) : ''; + const reader = json.TextReader.open(obj.model_config); + const model_config = reader.read(); + const weights = new keras.Weights(); + const model_weights_group = obj.model_weights; + if (model_weights_group) { + const layer_names = model_weights_group.layer_names.map((buffer) => decoder.decode(buffer)); + for (const layer_name of layer_names) { + const layer_weights = model_weights_group[layer_name]; + if (layer_weights) { + const weight_names = layer_weights.weight_names.map((buffer) => decoder.decode(buffer)); + if (Array.isArray(weight_names) && weight_names.length > 0) { + for (const weight_name of weight_names) { + const buffer = layer_weights[weight_name]; + const unpickler = execution.invoke('pickle.Unpickler', [ buffer ]); + const variable = unpickler.load(); + const tensor = new keras.Tensor(weight_name, variable.shape, variable.dtype.__name__, null, null, '<', variable.data); + weights.add(layer_name, tensor); + } + } + } + } + } + return open_model(format, '', backend, model_config, weights); + } + case 'keras.pb.SavedMetadata': { + await context.require('./keras-proto'); + keras.proto = protobuf.get('tf'); + const format = 'Keras Saved Metadata'; + const stream = context.stream; + const reader = protobuf.BinaryReader.open(stream); + const saved_metadata = keras.proto.third_party.tensorflow.python.keras.protobuf.SavedMetadata.decode(reader); + if (!saved_metadata || !Array.isArray(saved_metadata.nodes) || + !saved_metadata.nodes.every((node) => node && typeof node.metadata === 'string' && node.metadata.length > 0)) { + throw new keras.Error('Invalid keras.protobuf.SavedMetadata.'); + } + const objects = new Map(); + for (const node of saved_metadata.nodes) { + const reader = json.TextReader.open(node.metadata); + node.metadata = reader.read(); +
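// node.metadata is a JSON string describing a saved Keras object; it was decoded in place above, and indexing the nodes by 'node_path' lets the 'root' model config be retrieved after the loop. +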
objects.set(node.node_path, node); + } + const model_config = objects.get('root').metadata; + return open_model(format, '', '', model_config, null); + } + default: { + throw new keras.Error(`Unsupported Keras format '${target.name}'.`); + } + } + } +}; + +keras.Model = class { + + constructor(metadata, format, producer, backend, config, weights) { + this._format = format; + this._backend = backend; + this._producer = producer; + metadata = new keras.GraphMetadata(metadata); + this._graphs = [ new keras.Graph(metadata, config, weights) ]; + } + + get format() { + return this._format; + } + + get producer() { + return this._producer; + } + + get runtime() { + return this._backend; + } + + get graphs() { + return this._graphs; + } +}; + +keras.Graph = class { + + constructor(metadata, config, weights, group) { + this._metadata = metadata; + this._inputs = []; + this._outputs = []; + this._nodes = []; + group = group || ''; + const values = new Map(); + values.map = (name, type, tensor) => { + if (tensor) { + return new keras.Value(name, type || null, tensor); + } + if (!values.has(name)) { + values.set(name, new keras.Value(name, type || null, tensor || null)); + } else if (type || tensor) { + throw new keras.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + if (config) { + const getInputType = (layer) => { + if (layer && layer.config) { + let dataType = '?'; + let shape = []; + const config = layer.config; + if (config.dtype) { + dataType = config.dtype; + delete config.dtype; + } + if (Array.isArray(config.batch_input_shape)) { + shape = config.batch_input_shape.map((s) => s == null ? '?' : s); + delete config.batch_input_shape; + } else if (config.batch_input_shape && + config.batch_input_shape.class_name === '__tuple__' && + Array.isArray(config.batch_input_shape.items)) { + shape = config.batch_input_shape.items.map((s) => s == null ? '?' : s); + delete config.batch_input_shape; + } + return new keras.TensorType(dataType, new keras.TensorShape(shape)); + } + return null; + }; + this._name = config.name || (config.config && config.config.name ? config.config.name : ''); + switch (config.class_name) { + case 'AllCNN': + case 'Sequential': { + config = config.config; + const outputs = null; + let name = 'input'; + let index = -1; + const layers = Array.from(config.layers ?
config.layers : config); + while (layers.length > 0) { + const layer = layers.shift(); + let current = index.toString(); + index++; + if (index == 0) { + const type = getInputType(layer); + let remove = false; + if (layer.class_name === 'InputLayer' && layer.config && layer.config.name) { + name = layer.config.name; + remove = true; + } + const value = values.map(name, type); + const argument = new keras.Argument(name, true, [ value ]); + this._inputs.push(argument); + if (remove) { + continue; + } + } + const nodeInputs = [ { name: name } ]; + if (layer.config && layer.config.name) { + current = layer.config.name; + } + name = current; + let nodeOutputs = [ name ]; + if (index == layers.length) { + if (outputs && outputs.length > 0) { + nodeOutputs = [ outputs[0] ]; + name = null; + } + } + layer.inputs = nodeInputs; + layer.outputs = nodeOutputs; + const node = new keras.Node(this._metadata, layer, group, weights, values); + this.nodes.push(node); + } + if (name) { + const value = values.map(name); + const argument = new keras.Argument(name, true, [ value ]); + this._outputs.push(argument); + } + break; + } + case '__Function__': + case 'Functional': + case 'Model': { + config = config.config; + const nodes = new Map(); + if (config.layers) { + const is_constant = (item) => { + return Array.isArray(item) && (item.length === 3 || item.length === 4) && item[0] === '_CONSTANT_VALUE' && item[1] === -1; + }; + const is_connection = (item) => { + return Array.isArray(item) && (item.length === 3 || item.length === 4) && typeof item[0] === 'string' && typeof item[1] === 'number' && typeof item[2] === 'number'; + }; + const read_value = (input_data) => { + if (!Array.isArray(input_data)) { + return input_data; + } + const transform = (value) => { + if (value.every((item) => is_constant(item))) { + for (let i = 0; i < value.length; i++) { + /* eslint-disable prefer-destructuring */ + value[i] = value[i][2]; + /* eslint-enable prefer-destructuring */ + } + } else if (value.every((item) => Array.isArray(item))) { + const dims = value.map((item) => transform(item)); + const [dim] = dims; + for (let i = 1; i < dims.length; i++) { + if (dim.length === dims[i].length) { + if (!dims[i].every((value, i) => value === dim[i])) { + throw new python.Error('Invalid array shape.'); + } + } + } + return [ value.length ].concat(dim); + } + return [ value.length ]; + }; + const shape = transform(input_data); + const flatten = (input) => input.reduce((a, b) => a.concat(Array.isArray(b) ?
flatten(b) : b), []); + const value = flatten(input_data); + return { shape: shape, value: value }; + }; + const functional = config.layers.every((layer) => Array.isArray(layer.inbound_nodes)); + const layers = new Map(); + if (functional) { + const read_connection = (input_data) => { + const [node_name, node_index, tensor_index] = input_data; + const inbound_node_key = `${node_name}[${node_index}]`; + const inbound_node = nodes.get(inbound_node_key); + const tensor_key = `${node_name}[${node_index}][${tensor_index}]`; + if (inbound_node) { + while (tensor_index >= inbound_node.outputs.length) { + inbound_node.outputs.push(undefined); + } + inbound_node.outputs[tensor_index] = tensor_key; + } + return tensor_key; + }; + const process_node = (node, inbound_node) => { + if (Array.isArray(inbound_node) && inbound_node.length === 4 && typeof inbound_node[0] === 'string') { + const key = read_connection(inbound_node); + node.inputs.push({ name: key }); + for (const [name, value] of Object.entries(inbound_node[3])) { + if (is_connection(value)) { + const key = read_connection(value); + node.inputs.push({ name: key }); + } else if (Array.isArray(value)) { + const array = read_value(value); + node.args[name] = array; + } else { + node.args[name] = value; + } + } + } else if (Array.isArray(inbound_node)) { + for (const input_data of inbound_node) { + // [ 'conv2d', 0, 0 ] or [ 'conv2d', 0, 0, {} ] + if (Array.isArray(input_data) && is_connection(input_data)) { + const key = read_connection(input_data); + node.inputs.push({ name: key }); + } else if (Array.isArray(input_data) && input_data.every((item) => is_connection(item))) { + for (const input of input_data) { + const key = read_connection(input); + node.inputs.push({ name: key }); + } + } else if (Array.isArray(input_data)) { + const value = read_value(input_data); + node.inputs.push(value); + } else { + throw new keras.Error(`Invalid inbound connection '${JSON.stringify(input_data)}'.`); + } + } + } else if (inbound_node && inbound_node.args) { + for (const arg of inbound_node.args) { + if (arg && arg.class_name === '__keras_tensor__' && arg.config && is_connection(arg.config.keras_history)) { + const key = read_connection(arg.config.keras_history); + node.inputs.push({ name: key }); + } else if (Array.isArray(arg) && arg.every((arg) => arg && arg.class_name === '__keras_tensor__' && arg.config && is_connection(arg.config.keras_history))) { + for (const input of arg) { + const key = read_connection(input.config.keras_history); + node.inputs.push({ name: key }); + } + } + } + } + }; + let legacy_format = true; + for (const layer of config.layers) { + if (Array.isArray(layer.inbound_nodes)) { + for (const inbound_node of layer.inbound_nodes) { + if (Array.isArray(inbound_node.args)) { + legacy_format = false; + } + } + } + } + for (const layer of config.layers) { + const class_name = layer.class_name; + let first_index = 0; + if (legacy_format) { + const keys = new Set(Object.keys(layer.config)); + const is_functional_config = keys.has('name') && keys.has('layers') && keys.has('input_layers') && keys.has('output_layers'); + if (class_name == 'Sequential' || + (is_functional_config && Array.isArray(layer.config.layers) && layer.config.layers.length > 0 && layer.config.layers[0].class_name === 'InputLayer')) { + first_index++; + } + } + layers.set(layer.name, layer); + if (Array.isArray(layer.inbound_nodes) && layer.inbound_nodes.length === 0) { + layer.inputs = []; + layer.outputs = []; + layer.args = {}; +
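// A layer with no inbound nodes (e.g. a standalone InputLayer) is registered once under its 'name[index]' key; layers with connections are wired up later by process_node. +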
nodes.set(`${layer.name}[${first_index}]`, layer); + } else if (Array.isArray(layer.inbound_nodes) && layer.inbound_nodes.length === 1) { + layer.inputs = []; + layer.outputs = []; + layer.args = {}; + /* eslint-disable prefer-destructuring */ + layer.inbound_node = layer.inbound_nodes[0]; + /* eslint-enable prefer-destructuring */ + nodes.set(`${layer.name}[${first_index}]`, layer); + } else { + let config = {}; + switch (class_name) { + case 'Functional': + case 'Sequential': + case 'Model': { + config = layer; + break; + } + default: { + config.class_name = '__Function__'; + config.name = layer.name; + config.config = {}; + config.config.layers = [ Object.assign({}, layer) ]; + delete config.config.layers[0].inbound_nodes; + delete config.config.layers[0].input_layers; + delete config.config.layers[0].output_layers; + break; + } + } + const type = new keras.Graph(this._metadata, config, weights, ''); + for (let i = 0; i < layer.inbound_nodes.length; i++) { + const index = i + first_index; + const key = `${layer.name}[${index}]`; + const node = {}; + node.name = key; + node.class_name = '__Function__'; + node.config = {}; + node.config.name = key; + node.inputs = []; + node.outputs = []; + node.args = {}; + node.__type__ = type; + node.inbound_node = layer.inbound_nodes[i]; + nodes.set(key, node); + } + } + } + for (const entry of nodes) { + if (entry[1].inbound_node) { + process_node(entry[1], entry[1].inbound_node); + } + } + if (Array.isArray(config.input_layers)) { + for (let i = 0; i < config.input_layers.length; i++) { + const input_data = config.input_layers[i]; + const name = read_connection(input_data); + const [node_name, node_index] = input_data; + const inbound_node_key = `${node_name}[${node_index}]`; + const node = nodes.get(inbound_node_key); + let type = null; + if (node && node.class_name === 'InputLayer') { + type = getInputType(node); + nodes.delete(name); + nodes.delete(inbound_node_key); + } + const value = values.map(name, type); + const argument = new keras.Argument(node_name, true, [ value ]); + this._inputs.push(argument); + } + } + if (Array.isArray(config.output_layers)) { + for (let i = 0; i < config.output_layers.length; i++) { + const output_data = config.output_layers[i]; + const [name] = output_data; + const key = read_connection(output_data); + const value = values.map(key); + const argument = new keras.Argument(name, true, [ value ]); + this._outputs.push(argument); + } + } + } else { + for (const layer of config.layers) { + layer.inputs = []; + layer.outputs = []; + layer.args = {}; + nodes.set(`${layer.name}[0]`, layer); + } + } + } + for (const entry of nodes) { + const node = new keras.Node(this._metadata, entry[1], group, weights, values); + this._nodes.push(node); + } + break; + } + default: { + throw new keras.Error(`'${config.class_name}' is not supported.`); + } + } + } else if (weights) { + for (const name of weights.keys()) { + if (weights.get('', name).length <= 6) { + const layer = { class_name: 'Weights', config: { name: name } }; + const node = new keras.Node(metadata, layer, '', weights, values); + this._nodes.push(node); + } + } + } + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +keras.Argument = class { + + constructor(name, visible, value) { + this._name = name; + this._visible = visible; + this._value = value; + } + + get name() { + return this._name; + } + + get visible() { + return 
this._visible; + } + + get value() { + return this._value; + } +}; + +keras.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new keras.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type ? type : initializer && initializer.type ? initializer.type : null; + this.quantization = initializer && initializer.quantization ? initializer.quantization : null; + this.initializer = initializer || null; + } +}; + +keras.Node = class { + + constructor(metadata, layer, group, weights, values) { + const config = layer.config || {}; + const args = layer.args || {}; + let inputs = layer.inputs || []; + let outputs = layer.outputs || []; + const name = config && config.name ? config.name : ''; + this._group = group || ''; + this._name = (this._group ? `${this._group}/` : '') + name; + this._inputs = []; + this._outputs = []; + this._attributes = []; + this._chain = []; + let names = [ name ]; + let class_name = layer.class_name; + let model = false; + switch (class_name) { + case '__Function__': { + this._type = layer.__type__; + model = true; + break; + } + case 'Model': + case 'Functional': + case 'Sequential': { + const name = layer.name || (layer.config ? layer.config.name : ''); + this._type = new keras.Graph(metadata, layer, weights, (group ? `${group}/` : '') + name); + model = true; + if (config) { + delete config.layers; + delete config.input_layers; + delete config.output_layers; + } + this._inputs = [ new keras.Argument('inputs', true, inputs.map((input) => values.map(input.name))) ]; + this._outputs = [ new keras.Argument('outputs', true, outputs.map((name) => values.map(name))) ]; + inputs = []; + outputs = []; + break; + } + case 'Wrapper': + case 'Bidirectional': + case 'TimeDistributed': { + if (config && config.layer) { + const inner = config.layer; + delete config.layer; + this._inner = new keras.Node(metadata, inner, null, null, values); + if (class_name == 'Bidirectional' && inner.config.name) { + names = [ `${name}/forward_${inner.config.name}`, `${name}/backward_${inner.config.name}` ]; + if (!group) { + group = name; + } + } + } + this._type = metadata.type(class_name) || { name: class_name }; + break; + } + case 'TFOpLambda': { + if (config && config.function) { + class_name = config.function; + delete config.function; + } + this._type = metadata.type(class_name) || { name: class_name }; + break; + } + default: { + this._type = metadata.type(class_name) || { name: class_name }; + break; + } + } + + if (layer._trainable_variables) { + if (inputs.length === 0 && Array.isArray(this._type.inputs) && this._type.inputs.length > 0) { + // weights-only, remove 'input' from type metadata + this._type = Object.assign({}, this._type); + this._type.inputs = this._type.inputs.slice(1); + } + for (const variable of layer._trainable_variables) { + inputs.push({ name: '', initializer: variable }); + } + } else if (weights && !model) { + for (const name of names) { + let tensors = weights.get(group, name); + if (tensors.length > 0) { + for (const initializer of tensors) { + inputs.push({ name: initializer.name, initializer: initializer }); + } + } else { + tensors = weights.get('', name); + for (const initializer of tensors) { + inputs.push({ name: initializer.name, initializer: initializer }); + } + } + } + } + + if (config && !Array.isArray(config)) { + for (const [name, value] of Object.entries(config)) { + if (class_name !== 'Activation' && name === 'activation' && value !== 'linear') { + if
(typeof value === 'string') { + const config = { activation: value }; + const node = new keras.Node(metadata, { class_name: 'Activation', config: config }, null, null, values); + this.chain.push(node); + } else if (value && typeof value.class_name === 'string' && value.config) { + const type = value.class_name; + if (!metadata.type(type)) { + metadata.add(type, { name: type, category: 'Activation' }); + } + const node = new keras.Node(metadata, value, null, null, values); + this.chain.push(node); + } + } + if (name !== 'name' && name !== 'batch_input_shape') { + const attribute = new keras.Attribute(metadata.attribute(class_name, name), name, value); + this._attributes.push(attribute); + } + } + } + + const innerType = this.inner ? this.inner.type : null; + const innerMetadata = innerType ? metadata.type(innerType) : null; + let inputIndex = 0; + while (inputs.length > 0) { + let list = false; + let name = null; + let visible = true; + if (!innerMetadata || inputIndex == 0) { + if (this._type && this._type.inputs && inputIndex < this._type.inputs.length) { + const input = this._type.inputs[inputIndex]; + name = input.name; + if (class_name === 'BatchNormalization' && name === 'gamma' && config.scale === false) { + inputIndex++; + continue; + } + visible = input.visible == false ? false : true; + if (this._type.inputs[inputIndex].list) { + list = true; + } + } + } else { + switch (class_name) { + case 'Bidirectional': { + let innerIndex = inputIndex; + if (innerMetadata && innerMetadata.inputs) { + if (innerIndex < innerMetadata.inputs.length) { + name = `forward_${innerMetadata.inputs[innerIndex].name}`; + } else { + innerIndex = innerIndex - innerMetadata.inputs.length + 1; + if (innerIndex < innerMetadata.inputs.length) { + name = `backward_${innerMetadata.inputs[innerIndex].name}`; + } + } + } + visible = false; + break; + } + case 'TimeDistributed': + if (innerMetadata && innerMetadata.inputs && inputIndex < innerMetadata.inputs.length) { + name = innerMetadata.inputs[inputIndex].name; + } + break; + default: + break; + } + } + const input = !list ? [ inputs.shift() ] : inputs.splice(0, inputs.length); + const inputArguments = input.map((input) => { + if (input.name) { + return values.map(input.name, null, input.initializer); + } + if (input.initializer) { + return values.map(input.name, null, input.initializer); + } + if (input.value !== undefined) { + const tensor = new keras.Tensor('', input.shape, config.dtype || '?', null, null, '|', input.value); + return values.map('', null, tensor); + } + throw new keras.Error(`Invalid argument '${JSON.stringify(input.name)}'.`); + }); + if (!name && inputArguments.length == 1 && inputArguments[0].initializer && inputArguments[0].initializer.name) { + if (names.length === 1 && names[0] === '') { + name = inputArguments[0].initializer.name; + } else { + const parts = inputArguments[0].initializer.name.split('/').pop().split(':').shift().split('_'); + const inputName1 = parts.pop(); + const inputName2 = parts.length > 0 ? [ parts.pop(), inputName1 ].join('_') : ''; + const inputNames = new Set([ 'recurrent_kernel', 'running_mean', 'running_std', 'moving_mean', 'moving_variance', 'depthwise_filter', 'pointwise_filter' ]); + name = inputNames.has(inputName2) ?
inputName2 : inputName1; + } + } + const argument = new keras.Argument(name || inputIndex.toString(), visible, inputArguments); + this._inputs.push(argument); + inputIndex++; + } + + for (let i = 0; i < outputs.length; i++) { + const output = outputs[i]; + const name = this._type && this._type.outputs && i < this._type.outputs.length && this._type.outputs[i] && this._type.outputs[i].name ? this._type.outputs[i].name : i.toString(); + const argument = new keras.Argument(name, true, output === undefined || output.length === 0 ? [] : [ values.map(output) ]); + this._outputs.push(argument); + } + + const inputTypes = new Map((this._type.inputs || []).map((input) => [ input.name, input.type ])); + for (const [name, arg] of Object.entries(args)) { + if (name !== 'name') { + if ((arg && arg.name) || (inputTypes.has(name) && inputTypes.get(name) === 'Tensor' && arg)) { + if (arg.name) { + const value = values.map(arg.name); + const argument = new keras.Argument(name, true, [ value ]); + this._inputs.push(argument); + } else { + const tensor = new keras.Tensor('', arg.shape, config.dtype || '?', null, null, '|', arg.value); + const value = values.map('', null, tensor); + const argument = new keras.Argument(name, true, [ value ]); + this._inputs.push(argument); + } + } else { + const attribute = new keras.Attribute(metadata.attribute(class_name, name), name, arg); + this._attributes.push(attribute); + } + } + } + + if (typeof this.type.name !== 'string' || !this.type.name.split) { // #416 + throw new keras.Error(`Unsupported node type '${JSON.stringify(this.type.name)}'.`); + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } + + get chain() { + return this._chain; + } + + get inner() { + return this._inner; + } +}; + +keras.Attribute = class { + + constructor(metadata, name, value) { + this._name = name; + this._value = value; + if (value && typeof value == 'object' && value.class_name && value.config) { + this._value = keras.Attribute._convert(value); + } + switch (name) { + case 'trainable': + this._type = 'boolean'; + this._visible = false; + break; + case 'dtype': + this._visible = false; + break; + default: { + if (metadata) { + if (metadata.type) { + this._type = metadata.type; + } + if (metadata.visible === false) { + this._visible = false; + } else if (metadata.default !== undefined) { + if (Array.isArray(value)) { + if (Array.isArray(metadata.default)) { + this._visible = value.length !== metadata.default.length || !this.value.every((item, index) => item == metadata.default[index]); + } else { + this._visible = !this.value.every((item) => item == metadata.default); + } + } else { + this._visible = this.value !== metadata.default; + } + } + } + break; + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ?
false : true; + } + + static _convert(value) { + if (Array.isArray(value) || value !== Object(value)) { + return value; + } + const obj = {}; + if (value.class_name) { + obj.__type__ = value.class_name; + } + if (value.config) { + const config = value.config; + for (const [key, value] of Object.entries(config)) { + obj[key] = keras.Attribute._convert(value); + } + } + return obj; + } +}; + +keras.Tensor = class { + + constructor(name, shape, type, stride, quantization, encoding, data) { + this.name = name; + this.type = new keras.TensorType(type, new keras.TensorShape(shape)); + this.stride = stride; + this.encoding = encoding; + this._data = data; + if (quantization && (quantization.scale !== 0 || quantization.min !== 0)) { + this.quantization = { + type: 'linear', + scale: [ quantization.scale ], + min: [ quantization.min ] + }; + } + } + + get values() { + if (this.encoding === '|') { + return this._data; + } + if (this._data === null) { + return null; + } + return this._data instanceof Uint8Array ? this._data : this._data.peek(); + } +}; + +keras.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this._dataType + this._shape.toString(); + } +}; + +keras.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + return this._dimensions && this._dimensions.length > 0 ? (`[${this._dimensions.map((dimension) => dimension.toString()).join(',')}]`) : ''; + } +}; + +keras.GraphMetadata = class { + + constructor(metadata) { + this._metadata = metadata; + this._types = new Map(); + } + + type(name) { + if (this._types.has(name)) { + return this._types.get(name); + } + return this._metadata.type(name); + } + + attribute(type, name) { + return this._metadata.attribute(type, name); + } + + add(type, metadata) { + this._types.set(type, metadata); + } +}; + +keras.Weights = class { + + constructor() { + this._map = new Map(); + } + + get empty() { + return this._map.size === 0; + } + + add(layer_name, tensor) { + if (!this._map.has(layer_name)) { + this._map.set(layer_name, []); + } + this._map.get(layer_name).push(tensor); + } + + get(group, name) { + if (group) { + const list = this._map.get(group.split('/').shift()); + if (list) { + const match1 = list.filter((tensor) => tensor.name.startsWith(`${name}/`)); + if (match1.length > 0) { + return match1; + } + const match2 = list.filter((tensor) => tensor.name.startsWith(`${group}/${name}/`)); + if (match2.length > 0) { + return match2; + } + } + } else { + const match1 = this._map.get(name); + if (match1 && match1.length > 0) { + return match1; + } + const match2 = this._map.get(''); + if (match2 && match2.length > 0) { + const match3 = match2.filter((tensor) => tensor.name.startsWith(`${(group ? 
`${group}/` : '') + name}/`)); + if (match3.length > 0) { + return match3; + } + } + } + return []; + } + + keys() { + return this._map.keys(); + } +}; + +keras.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Keras model.'; + } +}; + +tfjs.Container = class { + + static open(context) { + const json = context.peek('json'); + if (json) { + if (json.modelTopology && (json.format === 'layers-model' || json.modelTopology.class_name || json.modelTopology.model_config)) { + return new tfjs.Container(context, ''); + } + if (Array.isArray(json) && json.every((item) => item.weights && item.paths)) { + return new tfjs.Container(context, 'weights'); + } + if (json.tfjsVersion) { + return new tfjs.Container(context, 'metadata'); + } + } + return null; + } + + constructor(context, type) { + this._context = context; + this._type = type; + } + + get format() { + return this._format; + } + + get producer() { + return this._producer || ''; + } + + get backend() { + return this._backend || ''; + } + + get config() { + return this._config; + } + + get weights() { + return this._weights; + } + + async open() { + switch (this._type) { + case '': { + const obj = this._context.peek('json'); + return this._openModelJson(obj); + } + case 'weights': { + this._format = 'TensorFlow.js Weights'; + this._config = null; + const obj = this._context.peek('json'); + const manifests = Array.from(obj); + for (const manifest of manifests) { + for (const weight of manifest.weights) { + const name = weight.name; + const index = name.lastIndexOf('/'); + weight.identifier = index === -1 ? name : name.substring(0, index); + } + } + return this._openManifests(manifests); + } + case 'metadata': { + const content = await this._context.fetch('model.json'); + const obj = content.read('json'); + return this._openModelJson(obj); + } + default: { + throw new tfjs.Error(`Unsupported TensorFlow.js format '${this._type}'.`); + } + } + } + + _openShards(manifests, shards) { + this._weights = new keras.Weights(); + const dtype_size_map = new Map([ + [ 'float16', 2 ], [ 'float32', 4 ], [ 'float64', 8 ], + [ 'int8', 1 ], [ 'int16', 2 ], [ 'int32', 4 ], [ 'int64', 8 ], + [ 'uint8', 1 ], [ 'uint16', 2 ], [ 'uint32', 4 ], [ 'uint64', 8 ] + ]); + for (const manifest of manifests) { + let buffer = null; + if (Array.isArray(manifest.paths) && manifest.paths.length > 0 && manifest.paths.every((path) => shards.has(path))) { + const list = manifest.paths.map((path) => shards.get(path)); + const size = list.reduce((a, b) => a + b.length, 0); + buffer = new Uint8Array(size); + let offset = 0; + for (const item of list) { + buffer.set(item, offset); + offset += item.length; + } + } + let offset = 0; + for (const weight of manifest.weights) { + const dtype = weight.quantization && weight.quantization.dtype ? weight.quantization.dtype : weight.dtype; + if (!dtype_size_map.has(dtype)) { + throw new keras.Error(`Unsupported weight data type size '${dtype}'.`); + } + const itemsize = dtype_size_map.get(dtype); + const size = weight.shape.reduce((a, b) => a * b, 1); + const length = itemsize * size; + const data = buffer ? 
buffer.slice(offset, offset + length) : null; + this._weights.add(weight.identifier, new keras.Tensor(weight.name, weight.shape, dtype, null, weight.quantization, '<', data)); + offset += length; + } + } + } + + async _openManifests(manifests) { + const shards = new Map(); + for (const manifest of manifests) { + for (const path of manifest.paths) { + if (!shards.has(path)) { + const promise = this._context.fetch(path); + shards.set(path, promise); + } + } + } + const promises = shards.values(); + try { + const contexts = await Promise.all(promises); + for (const key of shards.keys()) { + const context = contexts.shift(); + const buffer = context.stream.peek(); + shards.set(key, buffer); + } + this._openShards(manifests, shards); + return; + } catch (error) { + shards.clear(); + this._openShards(manifests, shards); + return; + } + } + + _openModelJson(obj) { + const modelTopology = obj.modelTopology; + this._format = `TensorFlow.js ${obj.format ? obj.format : `Keras${modelTopology.keras_version ? (` v${modelTopology.keras_version}`) : ''}`}`; + this._producer = obj.convertedBy || obj.generatedBy || ''; + this._backend = modelTopology.backend || ''; + const manifests = obj.weightsManifest; + for (const manifest of manifests) { + for (const weight of manifest.weights) { + weight.identifier = ''; + } + } + this._config = modelTopology.model_config ? modelTopology.model_config : modelTopology; + return this._openManifests(manifests); + } +}; + +tfjs.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading TensorFlow.js model.'; + } +}; + +export const ModelFactory = keras.ModelFactory; diff --git a/kmodel.js b/kmodel.js new file mode 100644 index 00000000000..ef48cabb511 --- /dev/null +++ b/kmodel.js @@ -0,0 +1,1355 @@ + +import * as base from './base.js'; + +const kmodel = {}; + +kmodel.ModelFactory = class { + + match(context) { + return kmodel.Reader.open(context.stream); + } + + async open(context, target) { + target.read(); + return new kmodel.Model(target); + } +}; + +kmodel.Model = class { + + constructor(model) { + this.format = `kmodel v${model.version}`; + this.graphs = model.modules.map((module) => new kmodel.Graph(module)); + } +}; + +kmodel.Graph = class { + + constructor(module) { + this.name = module.name || ''; + this.type = module.type || ''; + this.inputs = []; + this.outputs = []; + this.nodes = []; + const scopes = new Map(); + let index = 0; + const values = new Map(); + const value = (arg) => { + const name = arg.name; + const type = arg.shape ? new kmodel.TensorType(arg.datatype || '?', arg.shape) : null; + if (arg.data) { + const tensor = arg.data ? new kmodel.Tensor(type, arg.data) : null; + return new kmodel.Value(name, type || null, tensor); + } + if (!values.has(name)) { + values.set(name, new kmodel.Value(name, type || null, null)); + } if ((type && !type.equals(values.get(name).type))) { + return new kmodel.Value(name, type); + } + return values.get(name); + }; + for (const layer of module.layers) { + for (const input of layer.inputs || []) { + for (const arg of input.value) { + arg.name = scopes.has(arg.name) ? scopes.get(arg.name) : arg.name; + } + } + for (const output of layer.outputs || []) { + for (const arg of output.value) { + const name = scopes.has(arg.name) ? 
`${arg.name}#${index}` : arg.name; + scopes.set(arg.name, name); // custom argument id + arg.name = name; + if (arg.name && arg.shape && !arg.data) { + value(arg); + } + } + } + index++; + } + for (const layer of module.layers) { + for (const output of layer.outputs || []) { + for (const arg of output.value) { + if (arg.name && arg.shape && !arg.data) { + value(arg); + } + } + } + } + for (const layer of module.layers) { + for (const input of layer.inputs || []) { + for (const arg of input.value) { + if (arg.name && arg.shape && !arg.data) { + value(arg); + } + } + } + } + for (const layer of module.layers) { + switch (layer.type.name) { + case 'INPUT': + case 'input': { + for (const input of layer.outputs) { + const values = input.value.map((arg) => value(arg)); + const argument = new kmodel.Argument('input', values); + this.inputs.push(argument); + } + break; + } + case 'OUTPUT': + case 'output': { + for (const output of layer.inputs) { + const values = output.value.map((arg) => value(arg)); + const argument = new kmodel.Argument(output.name, values); + this.outputs.push(argument); + } + break; + } + default: { + const node = new kmodel.Node(layer, value); + this.nodes.push(node); + break; + } + } + } + } +}; + +kmodel.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +kmodel.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new kmodel.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type ? type : initializer ? initializer.type : null; + this.initializer = initializer; + } +}; + +kmodel.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType; + this.shape = new kmodel.TensorShape(shape); + } + + equals(obj) { + return obj && this.dataType === obj.dataType && this.shape && this.shape.equals(obj.shape); + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +kmodel.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + equals(obj) { + if (obj && Array.isArray(obj.dimensions) && Array.isArray(this.dimensions)) { + if (this.dimensions.length === obj.dimensions.length) { + return obj.dimensions.every((value, index) => this.dimensions[index] === value); + } + if (obj.dimensions.every((dim) => Number.isInteger(dim)) && this.dimensions.every((dim) => Number.isInteger(dim))) { + const a = obj.dimensions.reduce((a, b) => a * b, 1); + const b = this.dimensions.reduce((a, b) => a * b, 1); + return a === b; + } + } + return false; + } + + toString() { + if (this.dimensions && Array.isArray(this.dimensions) && this.dimensions.length > 0) { + return `[${this.dimensions.map((dim) => dim ? dim.toString() : '?').join(',')}]`; + } + return ''; + } +}; + +kmodel.Tensor = class { + + constructor(type, data) { + this.type = type; + this.values = data; + } +}; + +kmodel.Node = class { + + constructor(layer, value) { + this.location = layer.location !== undefined ? 
layer.location.toString() : layer.location; + this.name = ''; + this.type = layer.type; + this.inputs = []; + this.outputs = []; + this.attributes = []; + this.chain = []; + for (const [name, obj] of Object.entries(layer)) { + if (name === 'type' || name === 'location' || name === 'inputs' || name === 'outputs' || name === 'chain') { + continue; + } + const attribute = new kmodel.Attribute(name, obj); + this.attributes.push(attribute); + } + for (const input of layer.inputs || []) { + const values = input.value.map((arg) => value(arg)); + const argument = new kmodel.Argument(input.name, values); + this.inputs.push(argument); + } + for (const output of layer.outputs || []) { + const values = output.value.map((arg) => value(arg)); + const argument = new kmodel.Argument(output.name, values); + this.outputs.push(argument); + } + for (const chain of layer.chain || []) { + const node = new kmodel.Node(chain, value); + this.chain.push(node); + } + } +}; + +kmodel.Attribute = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +kmodel.Reader = class { + + static open(stream) { + if (stream && stream.length >= 4) { + const length = Math.min(8, stream.length); + const buffer = stream.peek(length); + if ([ 0x03, 0x00, 0x00, 0x00 ].every((value, index) => value === buffer[index])) { + return new kmodel.Reader(stream, 3); + } + if ([ 0x4C, 0x44, 0x4D, 0x4B ].every((value, index) => value === buffer[index]) && buffer.length >= 8) { + const reader = new base.BinaryReader(buffer); + reader.skip(4); + const version = reader.uint32(); + return new kmodel.Reader(stream, version); + } + } + return null; + } + + constructor(stream, version) { + this._stream = stream; + this.version = version; + this.modules = []; + } + + read() { + if (this._stream) { + if (this.version < 3 || this.version > 5) { + throw new kmodel.Error(`Unsupported model version '${this.version}'.`); + } + const types = new Map(); + const register = (type, name, category, callback) => { + types.set(type, { type: { name: name, category: category || '' }, callback: callback }); + }; + switch (this.version) { + case 3: { + const reader = new kmodel.BinaryReader.v3(this._stream); + const model_header = reader.kpu_model_header_t(); + const layers = new Array(model_header.layers_length); + const outputs = new Array(model_header.output_count); + for (let i = 0; i < model_header.output_count; i++) { + outputs[i] = reader.kpu_model_output_t(`output${i > 0 ? 
i.toString() : ''}`); + } + for (let i = 0; i < layers.length; i++) { + layers[i] = reader.kpu_model_layer_header_t(); + layers[i].location = i; + } + let offset = reader.position; + for (const layer of layers) { + layer.offset = offset; + offset += layer.body_size; + } + /* eslint-disable space-in-parens */ + register( -1, 'DUMMY'); + register( 0, 'INVALID'); + register( 1, 'ADD'); + register( 2, 'QUANTIZED_ADD'); + register( 3, 'GLOBAL_MAX_POOL2D', 'Pool'); + register( 4, 'QUANTIZED_GLOBAL_MAX_POOL2D', 'Pool'); + register( 5, 'GLOBAL_AVERAGE_POOL2D', 'Pool', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.kernel_size = reader.uint32(); + layer.channels = reader.uint32(); + }); + register( 6, 'QUANTIZED_GLOBAL_AVERAGE_POOL2D', 'Pool'); + register( 7, 'MAX_POOL2D', 'Pool'); + register( 8, 'QUANTIZED_MAX_POOL2D', 'Pool', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.inputs[0].value[0].shape = [ reader.uint32(), reader.uint32(), reader.uint32() ]; + layer.outputs[0].value[0].shape = [ reader.uint32(), reader.uint32(), reader.uint32() ]; + layer.kernel = [ reader.uint32(), reader.uint32() ]; + layer.stride = [ reader.uint32(), reader.uint32() ]; + layer.padding = [ reader.uint32(), reader.uint32() ]; + }); + register( 9, 'AVERAGE_POOL2D', 'Pool'); + register( 10, 'QUANTIZED_AVERAGE_POOL2D', 'Pool'); + register( 11, 'QUANTIZE', '', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.count = reader.uint32(); + layer.scale = reader.float32(); + layer.bias = reader.float32(); + }); + register( 12, 'DEQUANTIZE', '', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.count = reader.uint32(); + layer.scale = reader.float32(); + layer.bias = reader.float32(); + }); + register( 13, 'REQUANTIZE', '', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.count = reader.uint32(); + layer.table = reader.read(256); + }); + register( 14, 'L2_NORMALIZATION', 'Normalization'); + register( 15, 'SOFTMAX', 'Activation', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.channels = reader.uint32(); + }); + register( 16, 'CONCAT', 'Tensor', (layer, reader) => { + layer.flags = reader.uint32(); + layer.outputs = [ reader.parameter('output') ]; + layer.inputs_mem = new Array(reader.uint32()); + for (let i = 0; i < layer.inputs_mem.length; i++) { + layer.inputs_mem[i] = { + start: reader.uint32(), + end: reader.uint32() + }; + } + }); + register( 17, 'QUANTIZED_CONCAT', 'Tensor', (layer, reader) => { + layer.flags = reader.uint32(); + layer.outputs = [ reader.parameter('output') ]; + layer.inputs_mem = new Array(reader.uint32()); + for (let i = 0; i < layer.inputs_mem.length; i++) { + layer.inputs_mem[i] = { + start: reader.uint32(), + end: reader.uint32() + }; + } + }); + register( 18, 'FULLY_CONNECTED', 'Layer', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + 
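// body layout (inferred from the reads below, not documented in the file): channel counts, an activation selector, then packed float32 weights [in_channels, out_channels] and bias [out_channels] +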
layer.in_channels = reader.uint32(); + layer.out_channels = reader.uint32(); + const act = reader.uint32(); + const activations = [ + { name: 'LINEAR', category: 'Activation' }, + { name: 'RELU', category: 'Activation' }, + { name: 'RELU6', category: 'Activation' }, + ]; + if (act !== 0) { + if (act > activations.length) { + throw new kmodel.Error(`Unsupported FULLY_CONNECTED activation '${act}'.`); + } + layer.chain = [ { type: activations[act] } ]; + } + layer.inputs.push({ name: 'weights', value: [ { name: '', datatype: 'float32', shape: [ layer.in_channels, layer.out_channels ], data: reader.read(4 * layer.in_channels * layer.out_channels) } ] }); + layer.inputs.push({ name: 'bias', value: [ { name: '', datatype: 'float32', shape: [ layer.out_channels ], data: reader.read(4 * layer.out_channels) } ] }); + }); + register( 19, 'QUANTIZED_FULLY_CONNECTED', 'Layer'); + register( 20, 'TENSORFLOW_FLATTEN', 'Shape', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + const shape = [ reader.uint32(), reader.uint32(), reader.uint32() ]; + layer.inputs[0].value[0].shape = shape; + layer.outputs[0].value[0].shape = shape; + }); + register( 21, 'QUANTIZED_TENSORFLOW_FLATTEN', 'Shape', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + const shape = [ reader.uint32(), reader.uint32(), reader.uint32() ]; + layer.inputs[0].value[0].shape = shape; + layer.outputs[0].value[0].shape = shape; + }); + register( 22, 'RESIZE_NEAREST_NEIGHBOR', '', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.inputs[0].value[0].shape = [ reader.uint32(), reader.uint32(), reader.uint32() ]; + layer.out_width = reader.uint32(); + layer.out_height = reader.uint32(); + layer.align_corners = reader.uint32(); + }); + register( 23, 'QUANTIZED_RESIZE_NEAREST_NEIGHBOR', '', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.inputs[0].value[0].shape = [ reader.uint32(), reader.uint32(), reader.uint32() ]; + layer.out_width = reader.uint32(); + layer.out_height = reader.uint32(); + layer.align_corners = reader.uint32(); + }); + register( 1000, 'CONV', 'Layer'); + register( 1001, 'DWCONV', 'Layer'); + register( 1002, 'QUANTIZED_RESHAPE', 'Shape'); + register( 1003, 'RESHAPE', 'Shape'); + register(10240, 'K210_CONV', 'Layer', (layer, reader) => { + layer.flags = reader.uint32(); + layer.outputs = [ reader.parameter('output') ]; + const layer_offset = reader.uint32(); + const weights_offset = reader.uint32(); + const bn_offset = reader.uint32(); + const act_offset = reader.uint32(); + reader.seek(layer_offset); + layer.interrupt_enabe = reader.uint64_bits({ int_en: 0, ram_flag: 1, full_add: 2, depth_wise_layer: 3 }); + layer.inputs = [ reader.parameter('input', 'kpu') ]; + const outputs = [ reader.parameter('output', 'kpu') ]; + layer.outputs[0].value.push(outputs[0].value[0]); + // layer.outputs = layer.flags & 1 ? 
layer.outputs : outputs; + layer.image_channel_num = reader.uint64_bits({ i_ch_num: 0, o_ch_num: 32, o_ch_num_coef: 48 }); + layer.image_size = reader.uint64_bits({ i_row_wid: 0, i_col_high: 10, o_row_wid: 32, o_col_high : 42 }); + layer.kernel_pool_type_cfg = reader.uint64_bits({ kernel_type: 0, pad_type: 3, pool_type: 4, first_stride: 8, bypass_conv: 9, load_para: 10, dma_burst_size: 16, pad_value: 24, bwsx_base_addr: 32 }); + layer.kernel_load_cfg = reader.uint64_bits({ load_coor: 0, load_time: 1, para_size: 15, para_start_addr: 32 }); + layer.kernel_offset = reader.uint64_bits({ coef_column_offset: 0, coef_row_offset: 4 }); + layer.kernel_calc_type_cfg = reader.uint64_bits({ channel_switch_addr: 0, row_switch_addr: 16, coef_size: 20, coef_group: 28, load_act: 31, active_addr: 32 }); + layer.write_back_cfg = reader.uint64_bits({ wb_channel_switch_addr: 0, wb_row_switch_addr: 16, wb_group: 20 }); + layer.conv_value = reader.uint64_bits({ shr_w: 0, shr_x: 4, arg_w: 8, arg_x: 32 }); + layer.conv_value2 = reader.uint64_bits({ arg_add: 0 }); + layer.dma_parameter = reader.uint64_bits({ send_data_out: 0, channel_byte_num: 16, dma_total_byte: 32 }); + layer.chain = []; + const ic = layer.image_channel_num.i_ch_num + 1; + const oc = layer.image_channel_num.o_ch_num + 1; + layer.outputs[0].value[0].shape = [ layer.image_size.o_row_wid + 1, layer.image_size.o_col_high + 1, oc ]; + const filter = [ 1, 3 ][layer.kernel_pool_type_cfg.kernel_type]; + const weights_shape = layer.interrupt_enabe.depth_wise_layer ? [ oc, filter, filter ] : [ ic, oc, filter, filter ]; + const weights_size = weights_shape.reduce((a, b) => a * b); + reader.seek(bn_offset); + const batch_norm = { + type: { name: 'BATCH_NORM', category: 'Normalization' }, + weights: [] + }; + batch_norm.weights = new Array(oc); + for (let i = 0; i < oc; i++) { + batch_norm.weights[i] = reader.uint64_bits({ norm_mul: 0, norm_add: 24, norm_shift: 56, reserved: 60 }); + delete batch_norm.weights[i].reserved; + } + layer.chain.push(batch_norm); + reader.seek(act_offset); + const activation = {}; + activation.type = { name: 'ACTIVATION', category: 'Activation' }; + activation.activate_para = new Array(16); + for (let i = 0; i < 16; i++) { + activation.activate_para[i] = reader.uint64_bits({ shift_number: 0, y_mul: 8, x_start: 24, reserved: 60 }); + delete activation.activate_para[i].reserved; + } + for (let i = 0; i < 16; i++) { + activation.activate_para[i].bias = reader.int8(); + } + layer.chain.push(activation); + reader.seek(weights_offset); + layer.inputs.push({ + name: 'weights', + value: [ { + name: '', + datatype: 'uint8', + shape: weights_shape, + data: reader.read(weights_size) + } ] + }); + delete layer.kernel_pool_type_cfg.bwsx_base_addr; + delete layer.kernel_calc_type_cfg.active_addr; + delete layer.kernel_load_cfg.para_start_addr; + }); + register(10241, 'K210_ADD_PADDING', '', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output', 'kpu') ]; + layer.channels = reader.uint32(); + }); + register(10242, 'K210_REMOVE_PADDING', '', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.channels = reader.uint32(); + }); + register(10243, 'K210_UPLOAD', '', (layer, reader) => { + layer.flags = reader.uint32(); + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output', 'kpu') ]; + const shape = [ 
reader.uint32(), reader.uint32(), reader.uint32() ]; + layer.inputs[0].value[0].shape = shape; + layer.outputs[0].value[0].shape = shape; + }); + /* eslint-enable space-in-parens */ + for (const layer of layers) { + const type = types.get(layer.type); + if (!type) { + throw new kmodel.Error(`Unsupported version '${this.version}' layer type '${layer.type}'.`); + } + if (!type.callback) { + throw new kmodel.Error(`Unsupported version '${this.version}' layer '${type.type.name}'.`); + } + layer.type = type.type; + reader.seek(layer.offset); + type.callback(layer, reader); + delete layer.offset; + delete layer.body_size; + } + if (layers.length > 0) { + layers.unshift({ + type: { name: 'input' }, + outputs: [ layers[0].inputs[0] ] + }); + } + for (const output of outputs) { + layers.push({ + type: { name: 'output' }, + inputs: output.address + }); + } + this.modules.push({ + name: '', + layers: layers + }); + break; + } + case 4: { + const reader = new kmodel.BinaryReader.v4(this._stream); + const model_header = { + flags: reader.uint32(), + target: reader.uint32(), // 0=CPU, 1=K210 + constants: reader.uint32(), + main_mem: reader.uint32(), + nodes: reader.uint32(), + inputs: reader.uint32(), + outputs: reader.uint32(), + reserved0: reader.uint32(), + }; + const inputs = new Array(model_header.inputs); + for (let i = 0; i < inputs.length; i++) { + inputs[i] = reader.parameter(`input${i == 0 ? '' : (i + 1)}`); + } + for (let i = 0; i < inputs.length; i++) { + inputs[i].value[0].shape = reader.runtime_shape_t(); + } + const outputs = new Array(model_header.outputs); + for (let i = 0; i < outputs.length; i++) { + outputs[i] = reader.parameter(`output${i == 0 ? '' : (i + 1)}`); + } + reader.constants(model_header.constants); + const layers = new Array(model_header.nodes); + for (let i = 0; i < layers.length; i++) { + layers[i] = { + location: i, + opcode: reader.uint32(), + body_size: reader.uint32() + }; + } + let offset = reader.position; + for (const layer of layers) { + layer.offset = offset; + offset += layer.body_size; + } + /* eslint-disable space-in-parens */ + register( 0x00, 'binary', '', (layer, reader) => { + layer.inputs = [ + reader.parameter('a'), + reader.parameter('b') + ]; + layer.outputs = [ reader.parameter('outputs') ]; + layer.binary_op = reader.binary_op_t(); + layer.inputs[0].value[0].shape = reader.runtime_shape_t(); + layer.inputs[1].value[0].shape = reader.runtime_shape_t(); + layer.outputs[0].value[0].shape = reader.runtime_shape_t(); + layer.fused_activation = [ reader.float32(), reader.float32() ]; + }); + register( 0x01, 'concat', 'Tensor', (layer, reader) => { + layer.outputs = [ reader.parameter('output') ]; + layer.inner_size = reader.uint32(); + layer.outer_size = reader.uint32(); + const inputs_count = reader.uint32(); + layer.inputs = [ { name: 'inputs', value: [] } ]; + for (let i = 0; i < inputs_count; i++) { + layer.inputs[0].value[i] = reader.argument(); + } + layer.dims = new Array(inputs_count); + for (let i = 0; i < inputs_count; i++) { + layer.dims[i] = reader.int32(); + } + }); + register( 0x02, 'conv2d', 'Layer', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.inputs[0].value[0].shape = reader.runtime_shape_t(); + layer.groups = reader.int32(); + layer.out_channels = reader.int32(); + layer.padding_h = reader.padding(); + layer.padding_w = reader.padding(); + layer.filter_h = reader.int32(); + layer.filter_w = reader.int32(); + layer.stride_h = reader.int32(); + 
layer.stride_w = reader.int32(); + layer.dilation_h = reader.int32(); + layer.dilation_w = reader.int32(); + layer.fused_activation = [ reader.float32(), reader.float32() ]; + const weights_shape = [ layer.out_channels, layer.inputs[0].value[0].shape[1] / layer.groups, layer.filter_h, layer.filter_w ]; + const weights_size = 4 * weights_shape.reduce((a, b) => a * b); + layer.inputs.push({ + name: 'weights', + value: [ { + name: '', + datatype: 'float32', + shape: weights_shape, + data: reader.read(weights_size) + } ] + }); + const bias_shape = [ layer.out_channels ]; + const bias_size = 4 * layer.out_channels; + layer.inputs.push({ + name: 'bias', + value: [ { + name: '', + datatype: 'float32', + shape: bias_shape, + data: reader.read(bias_size) + } ] + }); + }); + register( 0x03, 'dequantize', '', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.zero_point = reader.int32(); + layer.scale = reader.float32(); + }); + register( 0x04, 'matmul', '', (layer, reader) => { + layer.inputs = [ + reader.parameter('a'), + reader.parameter('b'), + ]; + layer.outputs = [ reader.parameter('output') ]; + layer.a_rows = reader.int32(); + layer.a_cols = reader.int32(); + layer.b_cols = reader.int32(); + layer.inputs[1].value[0].shape = [ layer.a_cols, layer.b_cols ]; + layer.fused_activation = [ reader.float32(), reader.float32() ]; + const bias = reader.read(4 * layer.b_cols); + if (!bias.every((value) => value === 0)) { + layer.inputs.push({ + name: 'bias', + value: [ { name: '', datatype: 'float32', shape: [ layer.b_cols ], data: bias } ] + }); + } + }); + register( 0x05, 'pad', 'Shape', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.inputs[0].value[0].shape = reader.runtime_shape_t(); + layer.paddings = reader.runtime_paddings_t(); + layer.pad_value = reader.scalar(); + }); + register( 0x06, 'quantize', '', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.zero_point = reader.int32(); + layer.scale = reader.float32(); + }); + register( 0x07, 'reduce', '', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.reduce_op = reader.reduce_op_t(); + layer.inputs[0].value[0].shape = reader.runtime_shape_t(); + layer.outputs[0].value[0].shape = reader.runtime_shape_t(); + layer.init_value = reader.float32(); + }); + register( 0x08, 'reduce_window2d', '', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.reduce_op = reader.reduce_op_t(); + layer.inputs[0].value[0].shape = reader.runtime_shape_t(); + layer.padding_h = reader.padding(); + layer.padding_w = reader.padding(); + layer.filter_h = reader.int32(); + layer.filter_w = reader.int32(); + layer.stride_h = reader.int32(); + layer.stride_w = reader.int32(); + layer.dilation_h = reader.int32(); + layer.dilation_w = reader.int32(); + layer.init_value = reader.float32(); + layer.fused_activation = [ reader.float32(), reader.float32() ]; + }); + register( 0x09, 'memory_copy', '', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + }); + register( 0x0A, 'resize_image', '', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.reduce_op = 
reader.reduce_op_t(); + layer.inputs[0].value[0].shape = reader.runtime_shape_t(); + layer.out_h = reader.int32(); + layer.out_w = reader.int32(); + layer.mode = reader.image_resize_mode_t(); + layer.align_corners = reader.boolean(); + }); + register( 0x0B, 'softmax', 'Activation'); + register( 0x0C, 'transpose', 'Transform', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.inputs[0].value[0].shape = reader.runtime_shape_t(); + layer.perm = reader.runtime_shape_t(); + }); + register( 0x0D, 'strided_slice', 'Tensor'); + register( 0x0E, 'unary', '', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.unary_op = reader.unary_op_t(); + }); + register( 0x0F, 'quantized_conv2d', 'Layer', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.inputs[0].value[0].shape = reader.runtime_shape_t(); + layer.groups = reader.int32(); + layer.out_channels = reader.int32(); + layer.padding_h = reader.padding(); + layer.padding_w = reader.padding(); + layer.filter_h = reader.int32(); + layer.filter_w = reader.int32(); + layer.stride_h = reader.int32(); + layer.stride_w = reader.int32(); + layer.dilation_h = reader.int32(); + layer.dilation_w = reader.int32(); + layer.input_offset = reader.int32(); + layer.filter_offset = reader.int32(); + layer.output_mul = reader.int32(); + layer.output_shift = reader.int32(); + layer.output_offset = reader.int32(); + const bias = reader.span('int32', [ layer.out_channels ]); + if (bias) { + layer.inputs.push({ name: 'bias', value: [ bias ] }); + } + const weights = reader.span('uint8', [ layer.out_channels, layer.inputs[0].value[0].shape[1] / layer.groups, layer.filter_h, layer.filter_w]); + if (weights) { + layer.inputs.push({ name: 'weights', value: [ weights ] }); + } + }); + register( 0x10, 'quantized_matmul', '', (layer, reader) => { + layer.inputs = [ + reader.parameter('a'), + reader.parameter('b'), + ]; + layer.outputs = [ reader.parameter('output') ]; + layer.a_rows = reader.int32(); + layer.a_cols = reader.int32(); + layer.b_cols = reader.int32(); + layer.inputs[1].value[0].shape = [ layer.a_cols, layer.b_cols ]; + layer.input_a_offset = reader.int32(); + layer.input_b_offset = reader.int32(); + layer.output_mul = reader.int32(); + layer.output_shift = reader.int32(); + layer.output_offset = reader.int32(); + const bias = reader.span('int32', [ layer.b_cols ]); + if (bias) { + layer.inputs.push({ name: 'bias', value: [ bias ] }); + } + }); + register( 0x11, 'quantized_binary', '', (layer, reader) => { + layer.inputs = [ + reader.parameter('a'), + reader.parameter('b') + ]; + layer.outputs = [ reader.parameter('outputs') ]; + layer.binary_op = reader.binary_op_t(); + layer.inputs[0].value[0].shape = reader.runtime_shape_t(); + layer.inputs[1].value[0].shape = reader.runtime_shape_t(); + layer.outputs[0].value[0].shape = reader.runtime_shape_t(); + layer.input_a_offset = reader.int32(); + layer.input_a_mul = reader.int32(); + layer.input_a_shift = reader.int32(); + layer.input_b_offset = reader.int32(); + layer.input_b_mul = reader.int32(); + layer.input_b_shift = reader.int32(); + layer.output_offset = reader.int32(); + layer.output_mul = reader.int32(); + layer.output_shift = reader.int32(); + }); + register( 0x12, 'table_lookup1d', '', (layer, reader) => { + layer.inputs = [ reader.parameter('input'), reader.parameter('table') ]; + 
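// lookup semantics (assumed, not encoded in the file): each element of 'input' indexes into 'table' to produce the corresponding output element +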
layer.outputs = [ reader.parameter('output') ]; + }); + register( 0x13, 'conv2d_transpose', 'Layer'); + register( 0x14, 'nnil_unary_method', '', (layer, reader, size) => { + const position = reader.position; + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.body = reader.read(size - (reader.position - position)); + }); + register(0x1001, 'cpu_conv2d', 'Layer'); + register(0x1002, 'cpu_depthwise_conv2d', 'Layer'); + register(0x1003, 'cpu_reduce_window2d'); + register(0x1004, 'cpu_quantized_conv2d', 'Layer'); + register(0x1005, 'cpu_quantized_depthwise_conv2d', 'Layer'); + register(0x2001, 'kpu_upload', '', (layer, reader) => { + layer.inputs = [ reader.parameter('input') ]; + layer.outputs = [ reader.parameter('output') ]; + layer.inputs[0].value[0].shape = reader.runtime_shape_t(); + }); + register(0x2002, 'kpu_conv2d', 'Layer', (layer, reader) => { + layer.outputs = [ reader.parameter('output') ]; + layer.batches = reader.int32(); + layer.reserved0 = reader.int32(); + layer.interrupt_enabe = reader.uint64_bits({ int_en: 0, ram_flag: 1, full_add: 2, depth_wise_layer: 3 }); + const image_src_addr = reader.uint32(); + const image_dst_addr = reader.uint32(); + layer.inputs = [ { name: 'input', value: [ { name: `kpu:${image_src_addr}` } ] } ]; + const outputs = [ { name: 'output', value: [ { name: `kpu:${image_dst_addr}` } ] } ]; + layer.outputs[0].value.push(outputs[0].value[0]); + // layer.outputs = layer.flags & 1 ? layer.outputs : outputs; + layer.image_channel_num = reader.uint64_bits({ i_ch_num: 0, o_ch_num: 32, o_ch_num_coef: 48 }); + layer.image_size = reader.uint64_bits({ i_row_wid: 0, i_col_high: 10, o_row_wid: 32, o_col_high : 42 }); + layer.kernel_pool_type_cfg = reader.uint64_bits({ kernel_type: 0, pad_type: 3, pool_type: 4, first_stride: 8, bypass_conv: 9, load_para: 10, dma_burst_size: 16, pad_value: 24, bwsx_base_addr: 32 }); + layer.kernel_load_cfg = reader.uint64_bits({ load_coor: 0, load_time: 1, para_size: 15, para_start_addr: 32 }); + layer.kernel_offset = reader.uint64_bits({ coef_column_offset: 0, coef_row_offset: 4 }); + layer.kernel_calc_type_cfg = reader.uint64_bits({ channel_switch_addr: 0, row_switch_addr: 16, coef_size: 20, coef_group: 28, load_act: 31, active_addr: 32 }); + layer.write_back_cfg = reader.uint64_bits({ wb_channel_switch_addr: 0, wb_row_switch_addr: 16, wb_group: 20 }); + layer.conv_value = reader.uint64_bits({ shr_w: 0, shr_x: 4, arg_w: 8, arg_x: 32 }); + layer.conv_value2 = reader.uint64_bits({ arg_add: 0 }); + layer.dma_parameter = reader.uint64_bits({ send_data_out: 0, reserved: 1, channel_byte_num: 16, dma_total_byte: 32 }); + layer.chain = []; + const ic = layer.image_channel_num.i_ch_num + 1; + const oc = layer.image_channel_num.o_ch_num + 1; + layer.outputs[0].value[0].shape = [ layer.image_size.o_row_wid + 1, layer.image_size.o_col_high + 1, oc ]; + const filter = [ 1, 3 ][layer.kernel_pool_type_cfg.kernel_type]; + const weights_shape = layer.interrupt_enabe.depth_wise_layer ? 
[ oc, filter, filter ] : [ ic, oc, filter, filter ]; + reader.skip(layer.kernel_pool_type_cfg.bwsx_base_addr); + delete layer.kernel_pool_type_cfg.bwsx_base_addr; + const batch_norm = { + type: { name: 'batch_norm', category: 'Normalization' }, + weights: [] + }; + batch_norm.weights = new Array(oc); + for (let i = 0; i < oc; i++) { + batch_norm.weights[i] = reader.uint64_bits({ norm_mul: 0, norm_add: 24, norm_shift: 56, reserved: 60 }); + delete batch_norm.weights[i].reserved; + } + layer.chain.push(batch_norm); + reader.skip(layer.kernel_calc_type_cfg.active_addr); + delete layer.kernel_calc_type_cfg.active_addr; + const activation = reader.kpu_activate_table_t(); + activation.type = { name: 'activation', category: 'Activation' }; + layer.chain.push(activation); + reader.skip(layer.kernel_load_cfg.para_start_addr); + delete layer.kernel_load_cfg.para_start_addr; + const weights = reader.span('uint8', weights_shape); + if (weights) { + layer.inputs.push({ name: 'weights', value: [ weights ] }); + } + }); + /* eslint-enable space-in-parens */ + for (const layer of layers) { + const type = types.get(layer.opcode); + if (!type) { + throw new kmodel.Error(`Unsupported version '${this.version}' layer type '${layer.type}'.`); + } + if (!type.callback) { + throw new kmodel.Error(`Unsupported version '${this.version}' layer '${type.type.name}'.`); + } + layer.type = type.type; + reader.seek(layer.offset); + if (type.callback) { + type.callback(layer, reader, layer.body_size); + } + delete layer.offset; + delete layer.body_size; + delete layer.opcode; + } + for (const input of inputs) { + layers.unshift({ + type: { name: 'INPUT' }, + outputs: [ input ] + }); + } + for (const output of outputs) { + layers.push({ + type: { name: 'OUTPUT' }, + inputs: [ output ] + }); + } + this.modules.push({ + name: '', + layers: layers + }); + break; + } + case 5: { + const reader = new kmodel.BinaryReader.v5(this._stream); + const model_header = reader.model_header(); + if (model_header.header_size < 32) { + throw new kmodel.Error(`Invalid header size '${model_header.header_size}'.`); + } + if (model_header.header_size > reader.position) { + reader.skip(model_header.header_size - reader.position); + } + delete model_header.header_size; + this.modules = new Array(model_header.modules); + for (let i = 0; i < this.modules.length; i++) { + const start = reader.position; + const module_header = reader.module_header(); + if (module_header.header_size > (reader.position - start)) { + reader.skip(module_header.header_size - (reader.position - start)); + } + const mempools = new Array(module_header.mempools); + for (let i = 0; i < mempools.length; i++) { + mempools[i] = reader.mempool_desc(); + } + const shared_mempools = new Array(module_header.shared_mempools); + for (let i = 0; i < shared_mempools.length; i++) { + shared_mempools[i] = reader.mempool_desc(); + } + const function_headers = new Array(module_header.functions); + const functions = new Array(module_header.functions); + for (let i = 0; i < functions.length; i++) { + const position = reader.position; + const function_header = reader.function_header(); + const header_size = reader.position - position; + if (function_header.header_size > header_size) { + reader.skip(function_header.header_size - header_size); + } + const inputs = new Array(function_header.inputs); + for (let i = 0; i < inputs.length; i++) { + inputs[i] = reader.parameter(`input${i == 0 ? 
'' : (i + 1)}`); + } + for (let i = 0; i < inputs.length; i++) { + inputs[i].value[0].shape = reader.shape(); + } + const outputs = new Array(function_header.outputs); + for (let i = 0; i < outputs.length; i++) { + outputs[i] = reader.parameter(`output${i == 0 ? '' : (i + 1)}`); + } + for (let i = 0; i < outputs.length; i++) { + outputs[i].value[0].shape = reader.shape(); + } + reader.align_position(8); + const size = reader.position - position; + if (function_header.size > size) { + reader.skip(function_header.size - size); + } + function_headers[i] = function_header; + functions[i] = { + type: { name: 'Unknown' }, + inputs: inputs, + outputs: outputs + }; + } + const sections = new Map(); + for (let i = 0; i < module_header.sections; i++) { + const section_header = reader.section_header(); + reader.skip(section_header.body_start); + const body = reader.read(section_header.body_size); + const section = { + reader: new base.BinaryReader(body), + flags: section_header.flags + }; + reader.align_position(8); + sections.set(section_header.name, section); + } + for (let i = 0; i < function_headers.length; i++) { + const function_header = function_headers[i]; + const reader = sections.get('.text').reader; + reader.seek(function_header.entrypoint); + function_header.text = reader.read(function_header.text_size); + const layer = functions[i]; + switch (module_header.type) { + case 'stackvm': + layer.type = { name: 'stackvm' }; + break; + case 'k210': + break; + case 'k510': + break; + default: + throw new kmodel.Error(`Unsupported module type '${module_header.type}'.`); + } + } + const name = this.modules.length > 1 ? i.toString() : ''; + this.modules[i] = { + name: name, + type: module_header.type, + layers: functions + }; + } + break; + } + default: { + throw new kmodel.Error(`Unsupported model version '${this.version}'.`); + } + } + delete this._stream; + } + } +}; + +kmodel.BinaryReader = class extends base.BinaryReader { + + uint64_bits(fields) { + const buffer = this.read(8); + fields = Object.entries(fields); + fields.push([ null, Math.min(64, fields[fields.length - 1][1] + 56)]); + const obj = {}; + for (let i = 0; i < fields.length - 1; i++) { + const current = fields[i]; + const next = fields[i + 1]; + const [key, start] = current; + const [, end] = next; + let value = 0; + let position = start; + while (position < end) { + const offset = (position / 8) >> 0; + const start = (position & 7); + const count = Math.min((offset + 1) * 8, end) - position; + value = value | ((buffer[offset] >>> start) & ((1 << count) - 1)) << (position - fields[i][1]); + position += count; + } + obj[key] = value; + } + return obj; + } +}; + +kmodel.BinaryReader.v3 = class extends kmodel.BinaryReader { + + constructor(buffer) { + super(buffer); + this.skip(4); + } + + kpu_model_header_t() { + return { + flags: this.uint32(), + arch: this.uint32(), + layers_length: this.uint32(), + max_start_address: this.uint32(), + main_mem_usage: this.uint32(), + output_count: this.uint32() + }; + } + + kpu_model_output_t(name) { + return { + address: [ this.parameter(name) ], + size: this.uint32() + }; + } + + kpu_model_layer_header_t() { + return { + type: this.uint32(), + body_size: this.uint32() + }; + } + + argument(memory_type) { + memory_type = memory_type || 'main'; + const address = this.uint32(); + return { name: `${memory_type}:${address}` }; + } + + parameter(name, memory_type) { + return { name: name, value: [ this.argument(memory_type) ] }; + } +}; + +kmodel.BinaryReader.v4 = class extends kmodel.BinaryReader { + + 
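// Reads kmodel v4 records: each argument is a memory_range (memory type, datatype, start, size); + // ranges in 'const' memory are materialized by slicing the constant pool captured via constants(). + // Usage sketch (names from this file, shown for orientation only): + //   const reader = new kmodel.BinaryReader.v4(stream); + //   reader.constants(model_header.constants); // capture the constant pool + //   const input = reader.parameter('input'); // { name: 'input', value: [ argument ] } +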
constructor(buffer) { + super(buffer); + this.skip(8); + this._memory_types = [ 'const', 'main', 'kpu' ]; + this._datatypes = [ 'float32', 'uint8' ]; + } + + memory_type_t() { + const value = this.uint32(); + return this._memory_types[value]; + } + + datatype_t() { + const value = this.uint32(); + return this._datatypes[value]; + } + + memory_range() { + return { + memory_type: this.memory_type_t(), + datatype: this.datatype_t(), + start: this.uint32(), + size: this.uint32() + }; + } + + argument() { + const memory = this.memory_range(); + const value = { + name: `${memory.memory_type}:${memory.start}`, + datatype: memory.datatype + }; + if (memory.memory_type === 'const') { + value.data = this._constants.slice(memory.start, memory.start + memory.size); + switch (value.datatype) { + case 'uint8': value.shape = [ value.data.length ]; break; + case 'float32': value.shape = [ value.data.length >> 2 ]; break; + default: break; + } + } + return value; + } + + parameter(name) { + return { name: name, value: [ this.argument() ] }; + } + + runtime_shape_t() { + return [ this.uint32(), this.uint32(), this.uint32(), this.uint32() ]; + } + + padding() { + return { before: this.int32(), after: this.int32() }; + } + + runtime_paddings_t() { + return [ this.padding(), this.padding(), this.padding(), this.padding() ]; + } + + scalar() { + return { + datatype_t: this.uint32(), + storage: this.read(4) + }; + } + + kpu_activate_table_t() { + const value = {}; + value.activate_para = new Array(16); + for (let i = 0; i < 16; i++) { + value.activate_para[i] = this.uint64_bits({ shift_number: 0, y_mul: 8, x_start: 24, reserved: 60 }); + delete value.activate_para[i].reserved; + } + for (let i = 0; i < 16; i++) { + value.activate_para[i].bias = this.int8(); + } + return value; + } + + unary_op_t() { + const value = this.uint32(); + return [ 'abs', 'ceil', 'cos', 'exp', 'floor', 'log', 'neg', 'rsqrt', 'sin', 'square' ][value]; + } + + binary_op_t() { + const value = this.uint32(); + return [ 'add', 'sub', 'mul', 'div', 'min', 'max' ][value]; + } + + reduce_op_t() { + const value = this.uint32(); + return [ 'mean', 'min', 'max', 'sum' ][value]; + } + + image_resize_mode_t() { + const value = this.uint32(); + return [ 'bilinear', 'nearest_neighbor' ][value]; + } + + constants(size) { + this._constants = this.read(size); + } + + span(datatype, shape) { + const size = shape.reduce((a, b) => a * b, 1); + const itemsize = { 'int32': 4, 'uint8': 1 }; + const buffer = this.read(itemsize[datatype] * size); + if (!buffer.every((value) => value === 0)) { + const array = {}; + array.name = ''; + array.datatype = datatype; + array.shape = shape; + array.data = buffer; + return array; + } + return null; + } +}; + +kmodel.BinaryReader.v5 = class extends kmodel.BinaryReader { + + constructor(buffer) { + super(buffer); + this.skip(8); + this._datatypes = [ 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', 'float16', 'float32', 'float64', 'bfloat16' ]; + this._memory_locations = new Map([ [ 0, 'input' ], [ 1, 'output' ], [ 2, 'rdata' ], [ 3, 'data' ], [ 4, 'shared_data' ], [ 64, 'kpu' ] ]); + } + + model_header() { + return { + header_size: this.uint32(), + flags: this.uint32(), + alignment: this.uint32(), + modules: this.uint32(), + entry_module: this.uint32(), + entry_function: this.uint32() + }; + } + + module_type_t() { + const buffer = this.read(16); + const decoder = new TextDecoder('ascii'); + const text = decoder.decode(buffer); + return text.replace(/\0.*$/, ''); + } + + module_header() { + return 
{ + type: this.module_type_t(), + version: this.uint32(), + header_size: this.uint32(), + size: this.uint32(), + mempools: this.uint32(), + shared_mempools: this.uint32(), + sections: this.uint32(), + functions: this.uint32(), + reserved0: this.uint32() + }; + } + + mempool_desc() { + return { + location: this.byte(), + reserved0: this.read(3), + size: this.uint32() + }; + } + + section_header() { + const buffer = this.read(16); + const decoder = new TextDecoder('ascii'); + const name = decoder.decode(buffer); + return { + name: name.replace(/\0.*$/, ''), + flags: this.uint32(), + body_start: this.uint32(), + body_size: this.uint32(), + reserved0: this.uint32() + }; + } + + function_header() { + return { + header_size: this.uint32(), + size: this.uint32(), + input_pool_size: this.uint32(), + output_pool_size: this.uint32(), + inputs: this.uint32(), + outputs: this.uint32(), + entrypoint: this.uint32(), + text_size: this.uint32() + }; + } + + memory_location_t() { + const value = this.byte(); + if (!this._memory_locations.has(value)) { + throw new kmodel.Error(`Unsupported memory location '${value}'.`); + } + return this._memory_locations.get(value); + } + + datatype_t() { + const value = this.byte(); + return this._datatypes[value]; + } + + memory_range() { + return { + memory_location: this.memory_location_t(), + datatype: this.datatype_t(), + shared_module: this.uint16(), + start: this.uint32(), + size: this.uint32() + }; + } + + argument() { + const memory = this.memory_range(); + const value = { + name: `${memory.memory_location}:${memory.start}`, + datatype: memory.datatype + }; + /* + if (memory.memory_type === 'const') { + value.data = constants.slice(memory.start, memory.start + memory.size); + switch (value.datatype) { + case 'uint8': value.shape = [ value.data.length ]; break; + case 'float32': value.shape = [ value.data.length >> 2 ]; break; + default: break; + } + } + */ + return value; + } + + parameter(name) { + return { name: name, value: [ this.argument() ] }; + } + + shape() { + const array = new Array(this.uint32()); + for (let i = 0; i < array.length; i++) { + array[i] = this.uint32(); + } + return array; + } + + align_position(alignment) { + const remainder = this._position % alignment; + if (remainder !== 0) { + this.skip(alignment - remainder); + } + } +}; + +kmodel.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading kmodel.'; + } +}; + +export const ModelFactory = kmodel.ModelFactory; diff --git a/lasagne-metadata.json b/lasagne-metadata.json new file mode 100644 index 00000000000..b3f40a4cea6 --- /dev/null +++ b/lasagne-metadata.json @@ -0,0 +1,14 @@ +[ + { + "name": "lasagne.layers.conv.Conv2DLayer", + "category": "Layer" + }, + { + "name": "lasagne.layers.pool.MaxPool2DLayer", + "category": "Pool" + }, + { + "name": "lasagne.layers.dense.DenseLayer", + "category": "Layer" + } +] \ No newline at end of file diff --git a/lasagne.js b/lasagne.js new file mode 100644 index 00000000000..94d1ab99c87 --- /dev/null +++ b/lasagne.js @@ -0,0 +1,197 @@ + +// Experimental + +const lasagne = {}; + +lasagne.ModelFactory = class { + + match(context) { + const obj = context.peek('pkl'); + if (obj && obj.__class__ && obj.__class__.__module__ === 'nolearn.lasagne.base' && obj.__class__.__name__ == 'NeuralNet') { + return obj; + } + return null; + } + + async open(context, target) { + const metadata = await context.metadata('lasagne-metadata.json'); + return new lasagne.Model(metadata, target); + } +}; + +lasagne.Model = class { + 
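+ // Thin wrapper: one pickled nolearn.lasagne NeuralNet is presented as a single graph.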
+ constructor(metadata, model) { + this.format = 'Lasagne'; + this.graphs = [ new lasagne.Graph(metadata, model) ]; + } +}; + +lasagne.Graph = class { + + constructor(metadata, model) { + this.nodes = []; + this.inputs = []; + this.outputs = []; + const values = new Map(); + values.map = (name, type, tensor) => { + if (!values.has(name)) { + values.set(name, new lasagne.Value(name, type, tensor)); + } else if (tensor) { + throw new lasagne.Error(`Duplicate value '${name}'.`); + } else if (type && !type.equals(values.get(name).type)) { + throw new lasagne.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + for (const [name] of model.layers) { + const layer = model.layers_[name]; + if (layer && layer.input_layer && layer.input_layer.name) { + const input_layer = layer.input_layer; + const dataType = input_layer.input_var ? input_layer.input_var.type.dtype : '?'; + const shape = layer.input_shape ? new lasagne.TensorShape(layer.input_shape) : null; + const type = shape ? new lasagne.TensorType(dataType, shape) : null; + values.map(input_layer.name, type); + } + } + for (const [name] of model.layers) { + const layer = model.layers_[name]; + if (layer && layer.__class__ && layer.__class__.__module__ === 'lasagne.layers.input' && layer.__class__.__name__ === 'InputLayer') { + const shape = new lasagne.TensorShape(layer.shape); + const type = new lasagne.TensorType(layer.input_var.type.dtype, shape); + const argument = new lasagne.Argument(layer.name, [ values.map(layer.name, type) ]); + this.inputs.push(argument); + continue; + } + this.nodes.push(new lasagne.Node(metadata, layer, values)); + } + if (model._output_layer) { + const output_layer = model._output_layer; + this.outputs.push(new lasagne.Argument(output_layer.name, [ values.map(output_layer.name) ])); + } + } +}; + +lasagne.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +lasagne.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new lasagne.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type ? type : initializer ? initializer.type : null; + this.initializer = initializer; + } +}; + +lasagne.Node = class { + + constructor(metadata, layer, values) { + this.name = layer.name || ''; + const type = layer.__class__ ? 
`${layer.__class__.__module__}.${layer.__class__.__name__}` : ''; + this.type = metadata.type(type) || { name: type }; + this.inputs = []; + this.outputs = []; + this.attributes = []; + const params = new Map(); + for (const [key, value] of Object.entries(layer)) { + if (key === 'name' || key === 'params' || key === 'input_layer' || key === 'input_shape') { + continue; + } + if (value && value.__class__ && value.__class__.__module__ === 'theano.tensor.sharedvar' && value.__class__.__name__ === 'TensorSharedVariable') { + params.set(value.name, key); + continue; + } + const attribute = new lasagne.Attribute(null, key, value); + this.attributes.push(attribute); + } + if (layer.input_layer && layer.input_layer.name) { + const value = values.map(layer.input_layer.name); + const argument = new lasagne.Argument('input', [ value ]); + this.inputs.push(argument); + } + if (layer.params) { + for (const [param] of layer.params) { + const param_key = params.get(param.name); + if (param_key) { + const initializer = new lasagne.Tensor(param.container.storage[0]); + const argument = new lasagne.Argument(param_key, [ values.map(param.name, null, initializer) ]); + this.inputs.push(argument); + } + } + } + this.outputs.push(new lasagne.Argument('output', [ values.map(this.name) ])); + } +}; + +lasagne.Attribute = class { + + constructor(metadata, name, value) { + this.name = name; + this.value = value; + if (value && value.__class__) { + this.type = `${value.__class__.__module__}.${value.__class__.__name__}`; + } + } +}; + +lasagne.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType; + this.shape = shape; + } + + equals(obj) { + return obj && this.dataType === obj.dataType && this.shape && this.shape.equals(obj.shape); + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +lasagne.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + equals(obj) { + return obj && Array.isArray(obj.dimensions) && Array.isArray(this.dimensions) && + this.dimensions.length === obj.dimensions.length && + obj.dimensions.every((value, index) => this.dimensions[index] === value); + } + + toString() { + if (this.dimensions && this.dimensions.length > 0) { + return `[${this.dimensions.map((dimension) => dimension ? 
dimension.toString() : '?').join(',')}]`; + } + return ''; + } +}; + +lasagne.Tensor = class { + + constructor(storage) { + this.type = new lasagne.TensorType(storage.dtype.__name__, new lasagne.TensorShape(storage.shape)); + } +}; + +lasagne.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Lasagne Error'; + } +}; + +export const ModelFactory = lasagne.ModelFactory; diff --git a/lightgbm.js b/lightgbm.js new file mode 100644 index 00000000000..4468c652332 --- /dev/null +++ b/lightgbm.js @@ -0,0 +1,159 @@ + +import * as python from './python.js'; + +const lightgbm = {}; + +lightgbm.ModelFactory = class { + + match(context) { + const stream = context.stream; + const signature = [ 0x74, 0x72, 0x65, 0x65, 0x0A ]; + if (stream && stream.length >= signature.length && stream.peek(signature.length).every((value, index) => value === signature[index])) { + return { name: 'lightgbm.text', value: stream }; + } + const obj = context.peek('pkl'); + if (obj && obj.__class__ && obj.__class__.__module__ && obj.__class__.__module__.startsWith('lightgbm.')) { + return { name: 'lightgbm.pickle', value: obj }; + } + return null; + } + + async open(context, target) { + switch (target.name) { + case 'lightgbm.pickle': { + const obj = target.value; + return new lightgbm.Model(obj, 'LightGBM Pickle'); + } + case 'lightgbm.text': { + const stream = target.value; + const buffer = stream.peek(); + const decoder = new TextDecoder('utf-8'); + const model_str = decoder.decode(buffer); + const execution = new python.Execution(); + const obj = execution.invoke('lightgbm.basic.Booster', []); + obj.LoadModelFromString(model_str); + return new lightgbm.Model(obj, 'LightGBM'); + } + default: { + throw new lightgbm.Error(`Unsupported LightGBM format '${target}'.`); + } + } + } +}; + +lightgbm.Model = class { + + constructor(obj, format) { + this.format = format + (obj && obj.version ? ` ${obj.version}` : ''); + this.graphs = [ new lightgbm.Graph(obj) ]; + } +}; + +lightgbm.Graph = class { + + constructor(model) { + this.inputs = []; + this.outputs = []; + this.nodes = []; + const values = []; + const feature_names = model.feature_names || []; + for (let i = 0; i < feature_names.length; i++) { + const name = feature_names[i]; + // const info = model.feature_infos && i < model.feature_infos.length ? model.feature_infos[i] : null; + const value = new lightgbm.Value(name); + values.push(value); + if (feature_names.length < 1000) { + const argument = new lightgbm.Argument(name, [ value ]); + this.inputs.push(argument); + } + } + const node = new lightgbm.Node(model, values); + this.nodes.push(node); + } +}; + +lightgbm.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +lightgbm.Value = class { + + constructor(name) { + if (typeof name !== 'string') { + throw new lightgbm.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + } +}; + +lightgbm.Node = class { + + constructor(obj, values, stack) { + const type = obj && obj.__class__ ? 
+
+lightgbm.Node = class {
+
+    constructor(obj, values, stack) {
+        const type = obj && obj.__class__ ? `${obj.__class__.__module__}.${obj.__class__.__name__}` : 'builtins.object';
+        this.name = '';
+        this.type = { name: type };
+        this.inputs = [];
+        this.outputs = [];
+        this.attributes = [];
+        if (values) {
+            const argument = new lightgbm.Argument('features', values);
+            this.inputs.push(argument);
+        }
+        const isObject = (obj) => {
+            if (obj && typeof obj === 'object') {
+                const proto = Object.getPrototypeOf(obj);
+                return proto === Object.prototype || proto === null;
+            }
+            return false;
+        };
+        stack = stack || new Set();
+        const entries = Object.entries(obj).filter(([key, value]) => value !== undefined && key !== 'feature_names' && key !== 'feature_infos');
+        for (const [key, value] of entries) {
+            if (Array.isArray(value) && value.every((obj) => isObject(obj))) {
+                const values = value.filter((obj) => !stack.has(obj));
+                const nodes = values.map((obj) => {
+                    stack.add(obj);
+                    const node = new lightgbm.Node(obj, null, stack);
+                    stack.delete(obj);
+                    return node;
+                });
+                const attribute = new lightgbm.Attribute('object[]', key, nodes);
+                this.attributes.push(attribute);
+                continue;
+            } else if (isObject(value) && !stack.has(value)) {
+                stack.add(value);
+                const node = new lightgbm.Node(value, null, stack);
+                stack.delete(value);
+                const attribute = new lightgbm.Attribute('object', key, node);
+                this.attributes.push(attribute);
+            } else {
+                const attribute = new lightgbm.Attribute(null, key, value);
+                this.attributes.push(attribute);
+            }
+        }
+    }
+};
+
+lightgbm.Attribute = class {
+
+    constructor(type, name, value) {
+        this.type = type;
+        this.name = name;
+        this.value = value;
+    }
+};
+
+lightgbm.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'Error loading LightGBM model.';
+    }
+};
+
+export const ModelFactory = lightgbm.ModelFactory;
+
diff --git a/mediapipe.js b/mediapipe.js
new file mode 100644
index 00000000000..d1c71325432
--- /dev/null
+++ b/mediapipe.js
@@ -0,0 +1,280 @@
+
+import * as protobuf from './protobuf.js';
+
+const mediapipe = {};
+
+mediapipe.ModelFactory = class {
+
+    match(context) {
+        const tags = context.tags('pbtxt');
+        if (tags.has('node') && ['input_stream', 'output_stream', 'input_side_packet', 'output_side_packet'].some((key) => tags.has(key) || tags.has(`node.${key}`))) {
+            return 'mediapipe.pbtxt';
+        }
+        return null;
+    }
+
+    async open(context) {
+        // await context.require('./mediapipe-proto');
+        mediapipe.proto = protobuf.get('mediapipe');
+        let config = null;
+        try {
+            const stream = context.stream;
+            const reader = protobuf.TextReader.open(stream);
+            // const config = mediapipe.proto.mediapipe.CalculatorGraphConfig.decodeText(reader);
+            config = new mediapipe.Object(reader);
+        } catch (error) {
+            const message = error && error.message ? error.message : error.toString();
+            throw new mediapipe.Error(`File text format is not mediapipe.CalculatorGraphConfig (${message.replace(/\.$/, '')}).`);
+        }
+        return new mediapipe.Model(config);
+    }
+};
+
+mediapipe.Model = class {
+
+    constructor(config) {
+        this.format = 'MediaPipe';
+        this.graphs = [ new mediapipe.Graph(config) ];
+    }
+};
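+
+// Editorial note: MediaPipe graph configs name streams with a 'TAG:name'
+// convention (optionally 'TAG:INDEX:name'). The type() helper in the Graph
+// constructor below splits from the right, so the last segment is the value
+// name and everything before it is retained as the type, e.g. (illustrative):
+//
+//   'input_video'          -> name 'input_video', no type
+//   'IMAGE:input_video'    -> name 'input_video', type 'IMAGE'
+//   'IMAGE:0:input_video'  -> name 'input_video', type 'IMAGE:0'
+//
+// Accumulating the tags per name in a Map means one shared mediapipe.Value is
+// created for each stream, typed with the union of the tags seen for it.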
+
+mediapipe.Graph = class {
+
+    constructor(config) {
+        config = config || {};
+        this.inputs = [];
+        this.outputs = [];
+        this.nodes = [];
+        const types = new Map();
+        const type = (list) => {
+            list = list ? Array.isArray(list) ? list : [ list ] : [];
+            return list.map((item) => {
+                const parts = item.split(':');
+                const name = parts.pop();
+                const type = parts.join(':');
+                if (!types.has(name)) {
+                    const value = new Set();
+                    if (type) {
+                        value.add(type);
+                    }
+                    types.set(name, value);
+                } else if (type && !types.get(name).has(type)) {
+                    types.get(name).add(type);
+                }
+                return name;
+            });
+        };
+        config.input_stream = type(config.input_stream);
+        config.output_stream = type(config.output_stream);
+        config.input_side_packet = type(config.input_side_packet);
+        config.output_side_packet = type(config.output_side_packet);
+        config.node = config.node ? Array.isArray(config.node) ? config.node : [ config.node ] : [];
+        for (const node of config.node) {
+            node.input_stream = type(node.input_stream);
+            node.output_stream = type(node.output_stream);
+            node.input_side_packet = type(node.input_side_packet);
+            node.output_side_packet = type(node.output_side_packet);
+        }
+        const values = new Map();
+        for (const [name, value] of types) {
+            const type = Array.from(value).join(',');
+            values.set(name, new mediapipe.Value(name, type || null));
+        }
+        const value = (name) => {
+            return values.get(name);
+        };
+        for (const name of config.input_stream) {
+            const argument = new mediapipe.Argument(name, [ value(name) ]);
+            this.inputs.push(argument);
+        }
+        for (const name of config.output_stream) {
+            const argument = new mediapipe.Argument(name, [ value(name) ]);
+            this.outputs.push(argument);
+        }
+        for (const name of config.input_side_packet) {
+            const argument = new mediapipe.Argument(name, [ value(name) ]);
+            this.inputs.push(argument);
+        }
+        for (const name of config.output_side_packet) {
+            const argument = new mediapipe.Argument(name, [ value(name) ]);
+            this.outputs.push(argument);
+        }
+        for (const node of config.node) {
+            this.nodes.push(new mediapipe.Node(node, value));
+        }
+    }
+};
+
+mediapipe.Node = class {
+
+    constructor(node, value) {
+        const type = node.calculator || '?';
+        this.name = '';
+        this.type = { name: type.replace(/Calculator$/, '') };
+        this.inputs = [];
+        this.outputs = [];
+        this.attributes = [];
+        if (node.input_stream) {
+            const values = node.input_stream.map((name) => value(name));
+            const argument = new mediapipe.Argument('input_stream', values);
+            this.inputs.push(argument);
+        }
+        if (node.output_stream) {
+            const values = node.output_stream.map((name) => value(name));
+            this.outputs.push(new mediapipe.Argument('output_stream', values));
+        }
+        if (node.input_side_packet) {
+            const values = node.input_side_packet.map((name) => value(name));
+            this.inputs.push(new mediapipe.Argument('input_side_packet', values));
+        }
+        if (node.output_side_packet) {
+            const values = node.output_side_packet.map((name) => value(name));
+            this.outputs.push(new mediapipe.Argument('output_side_packet', values));
+        }
+        const options = new Map();
+        if (node.options) {
+            for (const key of Object.keys(node.options)) {
+                options.set(key, node.options[key]);
+            }
+        }
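+        // Editorial note: node_options arrives either as google.protobuf.Any
+        // messages (when the mediapipe schema is available) or as plain objects
+        // from the fallback pbtxt parser. An Any entry carries its payload as
+        // text, roughly (illustrative, not from this diff):
+        //   { type_url: 'type.googleapis.com/mediapipe.FlowLimiterCalculatorOptions',
+        //     value: '{ max_in_flight: 1 }' }
+        // so the branch below strips the outer braces, re-tokenizes the payload,
+        // and merges the decoded fields into the options map.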
+        const node_options = node.node_options ? Array.isArray(node.node_options) ? node.node_options : [ node.node_options ] : [];
+        if (mediapipe.proto.google && node_options.every((options) => options instanceof mediapipe.proto.google.protobuf.Any)) {
+            for (const entry of node_options) {
+                const value = /^\{(.*)\}\s*$/s.exec(entry.value);
+                const buffer = new TextEncoder().encode(value[1]);
+                const reader = protobuf.TextReader.open(buffer);
+                if (entry.type_url.startsWith('type.googleapis.com/mediapipe.')) {
+                    const type = entry.type_url.split('.').pop();
+                    if (mediapipe.proto && mediapipe.proto.mediapipe && mediapipe.proto.mediapipe[type]) {
+                        const message = mediapipe.proto.mediapipe[type].decodeText(reader);
+                        for (const key of Object.keys(message)) {
+                            options.set(key, message[key]);
+                        }
+                        continue;
+                    }
+                }
+                const message = new mediapipe.Object(reader);
+                for (const [name, value] of Object.entries(message)) {
+                    options.set(name, value);
+                }
+            }
+        } else {
+            for (const option of node_options) {
+                for (const [name, value] of Object.entries(option)) {
+                    if (name !== '__type__') {
+                        options.set(name, value);
+                    }
+                }
+            }
+        }
+        for (const [name, value] of options) {
+            const attribute = new mediapipe.Argument(name, value);
+            this.attributes.push(attribute);
+        }
+    }
+};
+
+mediapipe.Argument = class {
+
+    constructor(name, value) {
+        this.name = name;
+        this.value = value;
+    }
+};
+
+mediapipe.Value = class {
+
+    constructor(name, type) {
+        if (typeof name !== 'string') {
+            throw new mediapipe.Error(`Invalid value identifier '${JSON.stringify(name)}'.`);
+        }
+        this.name = name;
+        this.type = type || null;
+    }
+};
+
+mediapipe.Object = class {
+
+    constructor(reader, block) {
+        if (!block) {
+            reader.start();
+        }
+        const type = reader.token();
+        if (type.startsWith('[') && type.endsWith(']')) {
+            this.__type__ = type.substring(1, type.length - 1);
+            reader.next();
+            reader.match(':');
+            reader.start();
+        }
+        const arrayTags = new Set();
+        while (!reader.end()) {
+            const tag = reader.tag();
+            const next = reader.token();
+            let obj = null;
+            if (next === '{') {
+                reader.start();
+                obj = new mediapipe.Object(reader, true);
+                if (obj.__type__) {
+                    while (!reader.end()) {
+                        if (!Array.isArray(obj)) {
+                            obj = [ obj ];
+                        }
+                        const token = reader.token();
+                        if (token.startsWith('[') && token.endsWith(']')) {
+                            obj.push(new mediapipe.Object(reader, true));
+                            continue;
+                        }
+                        break;
+                    }
+                }
+            } else if (next.startsWith('"') && next.endsWith('"')) {
+                obj = next.substring(1, next.length - 1);
+                reader.next();
+            } else if (next === 'true' || next === 'false') {
+                obj = next;
+                reader.next();
+            } else if (reader.first()) {
+                obj = [];
+                while (!reader.last()) {
+                    const data = reader.token();
+                    reader.next();
+                    if (!isNaN(data)) {
+                        obj.push(parseFloat(data));
+                    }
+                }
+            } else if (!isNaN(next)) {
+                obj = parseFloat(next);
+                reader.next();
+            } else {
+                obj = next;
+                reader.next();
+            }
+            if (this[tag] && (!Array.isArray(this[tag]) || arrayTags.has(tag))) {
+                this[tag] = [ this[tag] ];
+                arrayTags.delete(tag);
+            }
+            if (this[tag]) {
+                this[tag].push(obj);
+            } else {
+                if (Array.isArray(obj)) {
+                    arrayTags.add(tag);
+                }
+                this[tag] = obj;
+            }
+            reader.match(',');
+        }
+    }
+};
+
+mediapipe.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'Error loading MediaPipe model.';
+    }
+};
+
+export const ModelFactory = mediapipe.ModelFactory;
diff --git a/megengine-metadata.json b/megengine-metadata.json
new file mode 100644
index 00000000000..3b05ddee89b
--- /dev/null
+++ b/megengine-metadata.json
@@ -0,0 +1,3534 @@
+[ + { + "name":
"megengine.distributed.functional.all_gather", + "attributes": [ + { "name": "inp" }, + { "name": "group" }, + { "name": "device" }, + { "name": "axis" } + ], + "category": "Layer" + }, + { + "name": "megengine.distributed.functional.all_reduce_max", + "attributes": [ + { "name": "inp" }, + { "name": "group" }, + { "name": "device" } + ], + "category": "Layer" + }, + { + "name": "megengine.distributed.functional.all_reduce_min", + "attributes": [ + { "name": "inp" }, + { "name": "group" }, + { "name": "device" } + ], + "category": "Layer" + }, + { + "name": "megengine.distributed.functional.all_reduce_sum", + "attributes": [ + { "name": "inp" }, + { "name": "group" }, + { "name": "device" } + ], + "category": "Layer" + }, + { + "name": "megengine.distributed.functional.all_to_all", + "attributes": [ + { "name": "inp" }, + { "name": "group" }, + { "name": "device" }, + { "name": "split_axis" }, + { "name": "concat_axis" } + ], + "category": "Layer" + }, + { + "name": "megengine.distributed.functional.broadcast", + "attributes": [ + { "name": "inp" }, + { "name": "group" }, + { "name": "device" } + ], + "category": "Layer" + }, + { + "name": "megengine.distributed.functional.collective_comm", + "attributes": [ + { "name": "inp" }, + { "name": "mode" }, + { "name": "group" }, + { "name": "device" } + ], + "category": "Layer" + }, + { + "name": "megengine.distributed.functional.gather", + "attributes": [ + { "name": "inp" }, + { "name": "group" }, + { "name": "device" }, + { "name": "axis" } + ], + "category": "Transform" + }, + { + "name": "megengine.distributed.functional.reduce_scatter_sum", + "attributes": [ + { "name": "inp" }, + { "name": "group" }, + { "name": "device" }, + { "name": "axis" } + ], + "category": "Layer" + }, + { + "name": "megengine.distributed.functional.reduce_sum", + "attributes": [ + { "name": "inp" }, + { "name": "group" }, + { "name": "device" } + ], + "category": "Layer" + }, + { + "name": "megengine.distributed.functional.remote_recv", + "attributes": [ + { "name": "src_rank" }, + { "name": "device" }, + { "name": "inp" } + ], + "category": "Layer" + }, + { + "name": "megengine.distributed.functional.remote_send", + "attributes": [ + { "name": "inp" }, + { "name": "dest_rank" } + ], + "category": "Layer" + }, + { + "name": "megengine.distributed.functional.scatter", + "attributes": [ + { "name": "inp" }, + { "name": "group" }, + { "name": "device" }, + { "name": "axis" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.elemwise.abs", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.add", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.acos", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.asin", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.atan", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.atan2", + "attributes": [ + { "name": "y" }, + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.asinh", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.acosh", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.atanh", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.ceil", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.clip", + "attributes": [ + 
{ "name": "x" }, + { "name": "lower" }, + { "name": "upper" } + ] + }, + { + "name": "megengine.functional.elemwise.cos", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.cosh", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.div", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.equal", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.exp", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.expm1", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.floor", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.floor_div", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.greater", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.greater_equal", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.left_shift", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.less", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.less_equal", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.log", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.log1p", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.logical_and", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.logical_not", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.logical_or", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.logical_xor", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.logaddexp", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.maximum", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.minimum", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.mod", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.mul", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.neg", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.not_equal", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.pow", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.right_shift", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.round", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.sin", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.sinh", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": 
"megengine.functional.elemwise.sqrt", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.square", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.sub", + "attributes": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "megengine.functional.elemwise.tan", + "attributes": [ + { "name": "x" } + ] + }, + { + "name": "megengine.functional.elemwise.tanh", + "attributes": [ + { "name": "x" } + ], + "category": "Activation" + }, + { + "name": "megengine.functional.loss.l1_loss", + "attributes": [], + "category": "Layer", + "varargs": "args" + }, + { + "name": "megengine.functional.loss.square_loss", + "attributes": [], + "category": "Layer", + "varargs": "args" + }, + { + "name": "megengine.functional.loss.cross_entropy", + "attributes": [], + "category": "Layer", + "varargs": "args" + }, + { + "name": "megengine.functional.loss.binary_cross_entropy", + "attributes": [], + "category": "Layer", + "varargs": "args" + }, + { + "name": "megengine.functional.loss.hinge_loss", + "attributes": [], + "category": "Layer", + "varargs": "args" + }, + { + "name": "megengine.functional.loss.ctc_loss", + "attributes": [ + { "name": "pred" }, + { "name": "pred_lengths" }, + { "name": "label" }, + { "name": "label_lengths" }, + { "name": "blank" }, + { "name": "reduction" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.argmax", + "attributes": [ + { "name": "inp" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.argmin", + "attributes": [ + { "name": "inp" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.argsort", + "attributes": [ + { "name": "inp" }, + { "name": "descending" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.dot", + "attributes": [ + { "name": "inp1" }, + { "name": "inp2" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.isinf", + "attributes": [ + { "name": "inp" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.isnan", + "attributes": [ + { "name": "inp" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.matinv", + "attributes": [ + { "name": "inp" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.matmul", + "attributes": [ + { "name": "inp1" }, + { "name": "inp2" }, + { "name": "transpose_a" }, + { "name": "transpose_b" }, + { "name": "compute_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.max", + "attributes": [ + { "name": "inp" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.mean", + "attributes": [ + { "name": "inp" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.min", + "attributes": [ + { "name": "inp" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.norm", + "attributes": [ + { "name": "inp" }, + { "name": "ord" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.normalize", + "attributes": [ + { "name": "inp" }, + { "name": "ord" }, + { "name": "axis" }, + { "name": "eps" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.prod", + "attributes": [ + { 
"name": "inp" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.sign", + "attributes": [ + { "name": "inp" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.sort", + "attributes": [ + { "name": "inp" }, + { "name": "descending" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.std", + "attributes": [ + { "name": "inp" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.sum", + "attributes": [ + { "name": "inp" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.svd", + "attributes": [ + { "name": "inp" }, + { "name": "full_matrices" }, + { "name": "compute_uv" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.math.topk", + "attributes": [], + "category": "Layer", + "varargs": "args" + }, + { + "name": "megengine.functional.math.var", + "attributes": [ + { "name": "inp" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.metric.topk_accuracy", + "attributes": [ + { "name": "logits" }, + { "name": "target" }, + { "name": "topk" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.adaptive_avg_pool2d", + "attributes": [ + { "name": "inp" }, + { "name": "oshp" } + ], + "category": "Pool" + }, + { + "name": "megengine.functional.nn.adaptive_max_pool2d", + "attributes": [ + { "name": "inp" }, + { "name": "oshp" } + ], + "category": "Pool" + }, + { + "name": "megengine.functional.nn.avg_pool2d", + "attributes": [ + { "name": "inp" }, + { "name": "kernel_size" }, + { "name": "stride" }, + { "name": "padding" }, + { "name": "mode" } + ], + "category": "Pool" + }, + { + "name": "megengine.functional.nn.batch_norm", + "attributes": [ + { "name": "inp" }, + { "name": "running_mean" }, + { "name": "running_var" }, + { "name": "weight" }, + { "name": "bias" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.conv1d", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "stride" }, + { "name": "padding" }, + { "name": "dilation" }, + { "name": "groups" }, + { "name": "conv_mode" }, + { "name": "compute_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.conv2d", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "stride" }, + { "name": "padding" }, + { "name": "dilation" }, + { "name": "groups" }, + { "name": "conv_mode" }, + { "name": "compute_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.conv3d", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "stride" }, + { "name": "padding" }, + { "name": "dilation" }, + { "name": "groups" }, + { "name": "conv_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.conv_transpose2d", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "stride" }, + { "name": "padding" }, + { "name": "dilation" }, + { "name": "groups" }, + { "name": "conv_mode" }, + { "name": "compute_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.conv_transpose3d", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "stride" }, + { "name": "padding" }, + { "name": "dilation" }, 
+ { "name": "groups" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.deformable_conv2d", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "offset" }, + { "name": "mask" }, + { "name": "bias" }, + { "name": "stride" }, + { "name": "padding" }, + { "name": "dilation" }, + { "name": "groups" }, + { "name": "conv_mode" }, + { "name": "compute_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.deformable_psroi_pooling", + "attributes": [ + { "name": "inp" }, + { "name": "rois" }, + { "name": "trans" }, + { "name": "no_trans" }, + { "name": "part_size" }, + { "name": "pooled_h" }, + { "name": "pooled_w" }, + { "name": "sample_per_part" }, + { "name": "spatial_scale" }, + { "name": "trans_std" } + ], + "category": "Pool" + }, + { + "name": "megengine.functional.nn.dropout", + "attributes": [ + { "name": "inp" }, + { "name": "drop_prob" }, + { "name": "training" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.embedding", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "padding_idx" }, + { "name": "max_norm" }, + { "name": "norm_type" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.gelu", + "attributes": [ + { "name": "x" } + ], + "category": "Activation" + }, + { + "name": "megengine.functional.nn.hsigmoid", + "attributes": [ + { "name": "x" } + ], + "category": "Activation" + }, + { + "name": "megengine.functional.nn.hswish", + "attributes": [ + { "name": "x" } + ], + "category": "Activation" + }, + { + "name": "megengine.functional.nn.indexing_one_hot", + "attributes": [ + { "name": "src" }, + { "name": "index" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.layer_norm", + "attributes": [ + { "name": "inp" }, + { "name": "normalized_shape" }, + { "name": "affine" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "eps" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.leaky_relu", + "attributes": [ + { "name": "inp" }, + { "name": "negative_slope" } + ], + "category": "Activation" + }, + { + "name": "megengine.functional.nn.linear", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "compute_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.local_conv2d", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "stride" }, + { "name": "padding" }, + { "name": "dilation" }, + { "name": "conv_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.local_response_norm", + "attributes": [ + { "name": "inp" }, + { "name": "kernel_size" }, + { "name": "k" }, + { "name": "alpha" }, + { "name": "beta" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.logsigmoid", + "attributes": [ + { "name": "inp" } + ], + "category": "Activation" + }, + { + "name": "megengine.functional.nn.logsumexp", + "attributes": [ + { "name": "inp" }, + { "name": "axis" }, + { "name": "keepdims" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.logsoftmax", + "attributes": [ + { "name": "inp" }, + { "name": "axis" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.max_pool2d", + "attributes": [ + { "name": "inp" }, + { "name": "kernel_size" }, + { "name": "stride" }, + { "name": "padding" } + ], + "category": "Pool" + }, + { + "name": "megengine.functional.nn.one_hot", + 
"attributes": [ + { "name": "inp" }, + { "name": "num_classes" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.prelu", + "attributes": [ + { "name": "inp" }, + { "name": "weight" } + ], + "category": "Activation" + }, + { + "name": "megengine.functional.nn.pad", + "attributes": [ + { "name": "src" }, + { "name": "pad_width" }, + { "name": "mode" }, + { "name": "constant_value" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.relu", + "attributes": [ + { "name": "x" } + ], + "category": "Activation" + }, + { + "name": "megengine.functional.nn.relu6", + "attributes": [ + { "name": "x" } + ], + "category": "Activation" + }, + { + "name": "megengine.functional.vision.remap", + "attributes": [ + { "name": "inp" }, + { "name": "map_xy" }, + { "name": "border_mode" }, + { "name": "scalar" }, + { "name": "interp_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.sigmoid", + "attributes": [ + { "name": "x" } + ], + "category": "Activation" + }, + { + "name": "megengine.functional.nn.sliding_window", + "attributes": [ + { "name": "inp" }, + { "name": "kernel_size" }, + { "name": "padding" }, + { "name": "stride" }, + { "name": "dilation" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.sliding_window_transpose", + "attributes": [ + { "name": "inp" }, + { "name": "output_size" }, + { "name": "kernel_size" }, + { "name": "padding" }, + { "name": "stride" }, + { "name": "dilation" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.silu", + "attributes": [ + { "name": "x" } + ], + "category": "Activation" + }, + { + "name": "megengine.functional.nn.softmax", + "attributes": [ + { "name": "inp" }, + { "name": "axis" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.softplus", + "attributes": [ + { "name": "inp" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.sync_batch_norm", + "attributes": [ + { "name": "inp" }, + { "name": "running_mean" }, + { "name": "running_var" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "training" }, + { "name": "momentum" }, + { "name": "eps" }, + { "name": "eps_mode" }, + { "name": "group" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.vision.warp_affine", + "attributes": [ + { "name": "inp" }, + { "name": "mat" }, + { "name": "out_shape" }, + { "name": "border_mode" }, + { "name": "border_val" }, + { "name": "format" }, + { "name": "interp_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.vision.warp_perspective", + "attributes": [ + { "name": "inp" }, + { "name": "mat" }, + { "name": "out_shape" }, + { "name": "mat_idx" }, + { "name": "border_mode" }, + { "name": "border_val" }, + { "name": "format" }, + { "name": "interp_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.nn.pixel_shuffle", + "attributes": [ + { "name": "inp" }, + { "name": "upscale_factor" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.debug_param.get_execution_strategy", + "attributes": [], + "category": "Layer" + }, + { + "name": "megengine.functional.quantized.conv_bias_activation", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "dtype" }, + { "name": "stride" }, + { "name": "padding" }, + { "name": "dilation" }, + { "name": "groups" }, + { "name": "nonlinear_mode" }, + { "name": "conv_mode" }, + { "name": "compute_mode" } + ], + "category": "Layer" + }, + { + "name": 
"megengine.functional.quantized.batch_conv_bias_activation", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "dtype" }, + { "name": "stride" }, + { "name": "padding" }, + { "name": "dilation" }, + { "name": "groups" }, + { "name": "nonlinear_mode" }, + { "name": "conv_mode" }, + { "name": "compute_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.quantized.conv_transpose2d", + "attributes": [ + { "name": "inp" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "dtype" }, + { "name": "stride" }, + { "name": "padding" }, + { "name": "dilation" }, + { "name": "groups" }, + { "name": "conv_mode" }, + { "name": "compute_mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.tensor.arange", + "attributes": [ + { "name": "start" }, + { "name": "stop" }, + { "name": "step" }, + { "name": "dtype" }, + { "name": "device" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.broadcast_to", + "attributes": [ + { "name": "inp" }, + { "name": "shape" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.concat", + "attributes": [ + { "name": "inps" }, + { "name": "axis" }, + { "name": "device" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.cond_take", + "attributes": [ + { "name": "mask" }, + { "name": "x" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.cumsum", + "attributes": [ + { "name": "inp" }, + { "name": "axis" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.diag", + "attributes": [ + { "name": "inp" }, + { "name": "k" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.expand_dims", + "attributes": [ + { "name": "inp" }, + { "name": "axis" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.eye", + "attributes": [ + { "name": "N" }, + { "name": "M" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.flatten", + "attributes": [ + { "name": "inp" }, + { "name": "start_axis" }, + { "name": "end_axis" } + ], + "category": "Shape" + }, + { + "name": "megengine.functional.tensor.full", + "attributes": [ + { "name": "shape" }, + { "name": "value" }, + { "name": "dtype" }, + { "name": "device" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.full_like", + "attributes": [ + { "name": "inp" }, + { "name": "value" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.gather", + "attributes": [ + { "name": "inp" }, + { "name": "axis" }, + { "name": "index" } + ], + "category": "Transform" + }, + { + "name": "megengine.functional.tensor.linspace", + "attributes": [ + { "name": "start" }, + { "name": "stop" }, + { "name": "num" }, + { "name": "dtype" }, + { "name": "device" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.ones", + "attributes": [ + { "name": "shape" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.ones_like", + "attributes": [ + { "name": "inp" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.repeat", + "attributes": [ + { "name": "inp" }, + { "name": "repeats" }, + { "name": "axis" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.reshape", + "attributes": [ + { "name": "inp" }, + { "name": "target_shape" } + ], + "category": "Shape" + }, + { + "name": "megengine.functional.tensor.roll", + "attributes": [ + { 
"name": "inp" }, + { "name": "shift" }, + { "name": "axis" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.split", + "attributes": [ + { "name": "inp" }, + { "name": "nsplits_or_sections" }, + { "name": "axis" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.squeeze", + "attributes": [ + { "name": "inp" }, + { "name": "axis" } + ], + "category": "Transform" + }, + { + "name": "megengine.functional.tensor.stack", + "attributes": [ + { "name": "inps" }, + { "name": "axis" }, + { "name": "device" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.scatter", + "attributes": [ + { "name": "inp" }, + { "name": "axis" }, + { "name": "index" }, + { "name": "source" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.tile", + "attributes": [ + { "name": "inp" }, + { "name": "reps" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.copy", + "attributes": [ + { "name": "inp" }, + { "name": "device" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.transpose", + "attributes": [ + { "name": "inp" }, + { "name": "pattern" } + ], + "category": "Shape" + }, + { + "name": "megengine.functional.tensor.swapaxes", + "attributes": [ + { "name": "inp" }, + { "name": "axis1" }, + { "name": "axis2" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.where", + "attributes": [ + { "name": "mask" }, + { "name": "x" }, + { "name": "y" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.zeros", + "attributes": [ + { "name": "shape" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.tensor.zeros_like", + "attributes": [ + { "name": "inp" } + ], + "category": "Tensor" + }, + { + "name": "megengine.functional.vision.correlation", + "attributes": [ + { "name": "data1" }, + { "name": "data2" }, + { "name": "kernel_size" }, + { "name": "max_displacement" }, + { "name": "stride1" }, + { "name": "stride2" }, + { "name": "pad_size" }, + { "name": "is_multiply" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.vision.cvt_color", + "attributes": [ + { "name": "inp" }, + { "name": "mode" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.vision.interpolate", + "attributes": [ + { "name": "inp" }, + { "name": "size" }, + { "name": "scale_factor" }, + { "name": "mode" }, + { "name": "align_corners" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.vision.nms", + "attributes": [ + { "name": "boxes" }, + { "name": "scores" }, + { "name": "iou_thresh" }, + { "name": "max_output" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.vision.nvof", + "attributes": [ + { "name": "src" }, + { "name": "precision" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.vision.roi_align", + "attributes": [ + { "name": "inp" }, + { "name": "rois" }, + { "name": "output_shape" }, + { "name": "mode" }, + { "name": "spatial_scale" }, + { "name": "sample_points" }, + { "name": "aligned" } + ], + "category": "Layer" + }, + { + "name": "megengine.functional.vision.roi_pooling", + "attributes": [ + { "name": "inp" }, + { "name": "rois" }, + { "name": "output_shape" }, + { "name": "mode" }, + { "name": "scale" } + ], + "category": "Pool" + }, + { + "name": "__lt__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__le__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + 
{ + "name": "__gt__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__ge__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__eq__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__ne__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__neg__", + "attributes": [ + { "name": "self" } + ] + }, + { + "name": "__pos__", + "attributes": [ + { "name": "self" } + ] + }, + { + "name": "__abs__", + "attributes": [ + { "name": "self" } + ] + }, + { + "name": "__invert__", + "attributes": [ + { "name": "self" } + ] + }, + { + "name": "__round__", + "attributes": [ + { "name": "self" } + ] + }, + { + "name": "__floor__", + "attributes": [ + { "name": "self" } + ] + }, + { + "name": "__ceil__", + "attributes": [ + { "name": "self" } + ] + }, + { + "name": "__add__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__sub__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__mul__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__matmul__", + "attributes": [ + { "name": "self" }, + { "name": "other" } + ] + }, + { + "name": "__truediv__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__floordiv__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__mod__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__pow__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__lshift__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__rshift__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__and__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__or__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__xor__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__radd__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__rsub__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__rmul__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__rmatmul__", + "attributes": [ + { "name": "self" }, + { "name": "other" } + ] + }, + { + "name": "__rtruediv__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__rfloordiv__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__rmod__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__rpow__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__rlshift__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__rrshift__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__rand__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__ror__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__rxor__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__iadd__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": 
"__isub__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__imul__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__imatmul__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__itruediv__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__ifloordiv__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__imod__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__ipow__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__ilshift__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__irshift__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__iand__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__ior__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "__ixor__", + "attributes": [ + { "name": "self" }, + { "name": "value" } + ] + }, + { + "name": "transpose", + "attributes": [ + { "name": "self" } + ], + "category": "Shape", + "varargs": "args" + }, + { + "name": "astype", + "attributes": [ + { "name": "self" }, + { "name": "dtype" } + ], + "category": "Layer" + }, + { + "name": "reshape", + "attributes": [ + { "name": "self" } + ], + "category": "Shape", + "varargs": "args" + }, + { + "name": "_broadcast", + "attributes": [ + { "name": "self" } + ], + "category": "Shape", + "varargs": "args" + }, + { + "name": "flatten", + "attributes": [ + { "name": "self" } + ], + "category": "Shape" + }, + { + "name": "sum", + "attributes": [ + { "name": "self" }, + { "name": "axis" }, + { "name": "keepdims" } + ] + }, + { + "name": "prod", + "attributes": [ + { "name": "self" }, + { "name": "axis" }, + { "name": "keepdims" } + ] + }, + { + "name": "min", + "attributes": [ + { "name": "self" }, + { "name": "axis" }, + { "name": "keepdims" } + ] + }, + { + "name": "max", + "attributes": [ + { "name": "self" }, + { "name": "axis" }, + { "name": "keepdims" } + ] + }, + { + "name": "mean", + "attributes": [ + { "name": "self" }, + { "name": "axis" }, + { "name": "keepdims" } + ] + }, + { + "name": "__getitem__", + "attributes": [ + { "name": "self" }, + { "name": "index" } + ], + "category": "Tensor" + }, + { + "name": "__setitem__", + "attributes": [ + { "name": "self" }, + { "name": "index" }, + { "name": "value" } + ], + "category": "Tensor" + }, + { + "name": "megengine.module.adaptive_pooling.AdaptiveAvgPool2d", + "category": "Pool" + }, + { + "name": "megengine.module.adaptive_pooling.AdaptiveMaxPool2d", + "category": "Pool" + }, + { + "name": "megengine.module.pooling.AvgPool2d", + "category": "Pool" + }, + { + "name": "megengine.module.batch_matmul_activation.BatchMatMulActivation", + "category": "Layer" + }, + { + "name": "megengine.module.batchnorm.BatchNorm1d", + "category": "Normalization" + }, + { + "name": "megengine.module.batchnorm.BatchNorm2d", + "category": "Normalization" + }, + { + "name": "megengine.module.concat.Concat", + "category": "Tensor" + }, + { + "name": "megengine.module.conv.Conv1d", + "category": "Layer" + }, + { + "name": "megengine.module.conv.Conv2d", + "category": "Layer" + }, + { + "name": "megengine.module.conv.Conv3d", + "category": "Layer" + }, + { + "name": "megengine.module.conv_bn.ConvBn2d", + "category": "Layer" + }, + { + "name": 
"megengine.module.conv_bn.ConvBnRelu2d", + "category": "Layer" + }, + { + "name": "megengine.module.conv.ConvRelu2d", + "category": "Layer" + }, + { + "name": "megengine.module.conv.ConvTranspose2d", + "category": "Layer" + }, + { + "name": "megengine.module.conv.ConvTranspose3d", + "category": "Layer" + }, + { + "name": "megengine.module.conv.DeformableConv2d", + "category": "Layer" + }, + { + "name": "megengine.module.deformable_psroi_pooling.DeformablePSROIPooling", + "category": "Pool" + }, + { + "name": "megengine.module.quant_dequant.DequantStub", + "category": "Layer" + }, + { + "name": "megengine.module.dropout.Dropout", + "category": "Layer" + }, + { + "name": "megengine.module.elemwise.Elemwise" + }, + { + "name": "megengine.module.embedding.Embedding", + "category": "Transform" + }, + { + "name": "megengine.module.activation.GELU", + "category": "Activation" + }, + { + "name": "megengine.module.normalization.GroupNorm", + "category": "Normalization" + }, + { + "name": "megengine.module.identity.Identity", + "category": "Layer" + }, + { + "name": "megengine.module.normalization.InstanceNorm", + "category": "Normalization" + }, + { + "name": "megengine.module.rnn.LSTM", + "category": "Layer" + }, + { + "name": "megengine.module.rnn.LSTMCell", + "category": "Layer" + }, + { + "name": "megengine.module.normalization.LayerNorm", + "category": "Normalization" + }, + { + "name": "megengine.module.activation.LeakyReLU", + "category": "Activation" + }, + { + "name": "megengine.module.linear.Linear", + "category": "Layer" + }, + { + "name": "megengine.module.conv.LocalConv2d", + "category": "Layer" + }, + { + "name": "megengine.module.lrn.LocalResponseNorm", + "category": "Layer" + }, + { + "name": "megengine.module.pooling.MaxPool2d", + "category": "Pool" + }, + { + "name": "megengine.module.module.Module", + "category": "Layer" + }, + { + "name": "megengine.module.activation.PReLU", + "category": "Activation" + }, + { + "name": "megengine.module.padding.Pad", + "category": "Layer" + }, + { + "name": "megengine.module.pixel_shuffle.PixelShuffle", + "category": "Layer" + }, + { + "name": "megengine.module.quant_dequant.QuantStub", + "category": "Layer" + }, + { + "name": "megengine.module.rnn.RNN", + "category": "Layer" + }, + { + "name": "megengine.module.rnn.RNNCell", + "category": "Layer" + }, + { + "name": "megengine.module.activation.ReLU", + "category": "Activation" + }, + { + "name": "megengine.module.activation.SiLU", + "category": "Activation" + }, + { + "name": "megengine.module.activation.Sigmoid", + "category": "Activation" + }, + { + "name": "megengine.module.sliding_window.SlidingWindow", + "category": "Layer" + }, + { + "name": "megengine.module.sliding_window.SlidingWindowTranspose", + "category": "Layer" + }, + { + "name": "megengine.module.activation.Softmax", + "category": "Activation" + }, + { + "name": "megengine.module.batchnorm.SyncBatchNorm", + "category": "Normalization" + }, + { + "name": "megengine.module.qat.batch_matmul_activation.BatchMatMulActivation", + "category": "Activation" + }, + { + "name": "megengine.module.qat.concat.Concat", + "category": "Tensor" + }, + { + "name": "megengine.module.qat.conv.Conv2d", + "category": "Layer" + }, + { + "name": "megengine.module.qat.conv_bn.ConvBn2d", + "category": "Layer" + }, + { + "name": "megengine.module.qat.conv_bn.ConvBnRelu2d", + "category": "Layer" + }, + { + "name": "megengine.module.qat.conv.ConvRelu2d", + "category": "Layer" + }, + { + "name": "megengine.module.qat.conv.ConvTranspose2d", + "category": 
"Layer" + }, + { + "name": "megengine.module.qat.quant_dequant.DequantStub", + "category": "Layer" + }, + { + "name": "megengine.module.qat.linear.Linear", + "category": "Layer" + }, + { + "name": "megengine.module.qat.module.QATModule", + "category": "Layer" + }, + { + "name": "megengine.module.qat.quant_dequant.QuantStub", + "category": "Layer" + }, + { + "name": "megengine.module.quantized.batch_matmul_activation.BatchMatMulActivation", + "category": "Activation" + }, + { + "name": "megengine.module.quantized.concat.Concat", + "category": "Tensor" + }, + { + "name": "megengine.module.quantized.conv.Conv2d", + "category": "Layer" + }, + { + "name": "megengine.module.quantized.conv_bn.ConvBn2d", + "category": "Layer" + }, + { + "name": "megengine.module.quantized.conv_bn.ConvBnRelu2d", + "category": "Layer" + }, + { + "name": "megengine.module.quantized.conv.ConvRelu2d", + "category": "Layer" + }, + { + "name": "megengine.module.quantized.conv.ConvTranspose2d", + "category": "Layer" + }, + { + "name": "megengine.module.quantized.quant_dequant.DequantStub", + "category": "Layer" + }, + { + "name": "megengine.module.quantized.linear.Linear", + "category": "Layer" + }, + { + "name": "megengine.module.quantized.quant_dequant.QuantStub", + "category": "Layer" + }, + { + "name": "megengine.module.quantized.module.QuantizedModule", + "category": "Layer" + }, + { + "name": "Axis", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "ConvolutionV0", + "category": "Layer", + "attributes": [ + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "data_type", "type": "ConvolutionV0DataType", "default": "FLOAT" }, + { "name": "sparse", "type": "ConvolutionV0Sparse", "default": "DENSE" }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" } + ] + }, + { + "name": "ConvolutionV1", + "category": "Layer", + "attributes": [ + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "sparse", "type": "ConvolutionV0Sparse", "default": "DENSE" }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" }, + { "name": "compute_mode", "type": "ConvolutionV1ComputeMode", "default": "DEFAULT" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" } + ] + }, + { + "name": "Convolution", + "category": "Layer", + "attributes": [ + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": 
"uint32", "default": 1 }, + { "name": "sparse", "type": "ConvolutionV0Sparse", "default": "DENSE" }, + { "name": "format", "type": "ConvolutionFormat", "default": "NCHW" }, + { "name": "compute_mode", "type": "ConvolutionV1ComputeMode", "default": "DEFAULT" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" } + ] + }, + { + "name": "MaskPropagate", + "attributes": [ + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "kernel_h", "type": "uint32", "default": 1 }, + { "name": "kernel_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 } + ] + }, + { + "name": "ConvPooling", + "category": "Pool", + "attributes": [ + { "name": "method", "type": "ConvPoolingMethod", "default": "WITH_TEXTURE_OBJ" }, + { "name": "convMode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "poolMode", "type": "ConvPoolingPoolMode", "default": "AVERAGE" }, + { "name": "nonlineMode", "type": "ConvPoolingNonlineMode", "default": "IDENTITY" }, + { "name": "pool_shape_h", "type": "uint32", "default": 1 }, + { "name": "pool_shape_w", "type": "uint32", "default": 1 }, + { "name": "pool_stride_h", "type": "uint32", "default": 1 }, + { "name": "pool_stride_w", "type": "uint32", "default": 1 }, + { "name": "pool_pad_h", "type": "uint32", "default": 0 }, + { "name": "pool_pad_w", "type": "uint32", "default": 0 }, + { "name": "conv_stride_h", "type": "uint32", "default": 1 }, + { "name": "conv_stride_w", "type": "uint32", "default": 1 }, + { "name": "conv_pad_h", "type": "uint32", "default": 0 }, + { "name": "conv_pad_w", "type": "uint32", "default": 0 } + ] + }, + { + "name": "ConvBiasV0", + "category": "Layer", + "attributes": [ + { "name": "nonlineMode", "type": "ConvBiasV0NonlineMode", "default": "IDENTITY" }, + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "ConvBiasV1", + "category": "Layer", + "attributes": [ + { "name": "nonlineMode", "type": "ConvBiasV0NonlineMode", "default": "IDENTITY" }, + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "data_type", "type": "ConvolutionV0DataType", "default": "FLOAT" }, + { "name": "sparse", "type": "ConvolutionV0Sparse", "default": "DENSE" }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "ConvBiasV2", + "category": "Layer", + "attributes": [ + { "name": "nonlineMode", "type": "ConvBiasV0NonlineMode", "default": "IDENTITY" }, + { "name": "mode", "type": "ConvolutionV0Mode", 
"default": "CROSS_CORRELATION" }, + { "name": "sparse", "type": "ConvolutionV0Sparse", "default": "DENSE" }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "compute_mode", "type": "ConvolutionV1ComputeMode", "default": "DEFAULT" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "ConvBiasV3", + "category": "Layer", + "attributes": [ + { "name": "nonlineMode", "type": "ConvBiasV0NonlineMode", "default": "IDENTITY" }, + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "sparse", "type": "ConvolutionV0Sparse", "default": "DENSE" }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "output_block_size", "type": "uint32", "default": 0 }, + { "name": "compute_mode", "type": "ConvolutionV1ComputeMode", "default": "DEFAULT" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "ConvBias", + "category": "Layer", + "attributes": [ + { "name": "nonlineMode", "type": "ConvBiasV0NonlineMode", "default": "IDENTITY" }, + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "sparse", "type": "ConvolutionV0Sparse", "default": "DENSE" }, + { "name": "format", "type": "ConvolutionFormat", "default": "NCHW" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "compute_mode", "type": "ConvolutionV1ComputeMode", "default": "DEFAULT" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "SeparableConv", + "category": "Layer", + "attributes": [ + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "borderMode", "type": "SeparableConvBorderMode", "default": "BORDER_REPLICATE" }, + { "name": "is_symm_kernel", "type": "bool", "default": true }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "ksize_h", "type": "uint32", "default": 3 }, + { "name": "ksize_w", "type": "uint32", "default": 3 }, + { "name": "anchor_h", "type": "uint32", "default": 1 }, + { "name": "anchor_w", "type": "uint32", "default": 1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "Images2Neibs", + "category": "Shape", + "attributes": [ + { "name": 
"pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "window_h", "type": "uint32", "default": 3 }, + { "name": "window_w", "type": "uint32", "default": 3 } + ] + }, + { + "name": "SlidingWindowTranspose", + "category": "Transform", + "attributes": [ + { "name": "out_h", "type": "uint32", "default": 0 }, + { "name": "out_w", "type": "uint32", "default": 0 }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "window_h", "type": "uint32", "default": 3 }, + { "name": "window_w", "type": "uint32", "default": 3 } + ] + }, + { + "name": "PoolingV0", + "category": "Pool", + "attributes": [ + { "name": "mode", "type": "PoolingV0Mode", "default": "MAX_" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 2 }, + { "name": "stride_w", "type": "uint32", "default": 2 }, + { "name": "window_h", "type": "uint32", "default": 2 }, + { "name": "window_w", "type": "uint32", "default": 2 }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" } + ] + }, + { + "name": "Pooling", + "category": "Pool", + "attributes": [ + { "name": "mode", "type": "PoolingV0Mode", "default": "MAX_" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 2 }, + { "name": "stride_w", "type": "uint32", "default": 2 }, + { "name": "window_h", "type": "uint32", "default": 2 }, + { "name": "window_w", "type": "uint32", "default": 2 }, + { "name": "format", "type": "ConvolutionFormat", "default": "NCHW" } + ] + }, + { + "name": "Softmax", + "category": "Activation", + "attributes": [ + { "name": "axis", "type": "int32", "default": -1 } + ] + }, + { + "name": "AdaptivePoolingV0", + "category": "Activation", + "attributes": [ + { "name": "mode", "type": "PoolingV0Mode", "default": "MAX_" }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" } + ] + }, + { + "name": "AdaptivePooling", + "category": "Activation", + "attributes": [ + { "name": "mode", "type": "PoolingV0Mode", "default": "MAX_" }, + { "name": "format", "type": "ConvolutionFormat", "default": "NCHW" } + ] + }, + { + "name": "LRN", + "category": "Normalization", + "attributes": [ + { "name": "n", "type": "uint32", "default": 5 }, + { "name": "k", "type": "float32", "default": 2 }, + { "name": "alpha", "type": "float32", "default": 0.0001 }, + { "name": "beta", "type": "float32", "default": 0.75 } + ] + }, + { + "name": "BN", + "category": "Normalization", + "attributes": [ + { "name": "param_dim", "type": "BNParamDim", "default": "DIM_11HW" }, + { "name": "fwd_mode", "type": "BNFwdMode", "default": "TRAINING" }, + { "name": "epsilon", "type": "float64", "default": 0.0001 }, + { "name": "avg_factor", "type": "float64", "default": 1 }, + { "name": "scale", "type": "float32", "default": 1 }, + { "name": "bias", "type": "float32", "default": 0 
} + ] + }, + { + "name": "ROIPooling", + "attributes": [ + { "name": "mode", "type": "ROIPoolingMode", "default": "MAX_" }, + { "name": "scale", "type": "float32", "default": 1 } + ] + }, + { + "name": "WarpPerspectiveV1", + "attributes": [ + { "name": "imode", "type": "WarpPerspectiveV1InterpolationMode", "default": "LINEAR" }, + { "name": "bmode", "type": "WarpPerspectiveV1BorderMode", "default": "REPLICATE" }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" }, + { "name": "border_val", "type": "float32", "default": 0 } + ] + }, + { + "name": "WarpPerspective", + "attributes": [ + { "name": "imode", "type": "WarpPerspectiveV1InterpolationMode", "default": "LINEAR" }, + { "name": "bmode", "type": "WarpPerspectiveV1BorderMode", "default": "REPLICATE" }, + { "name": "format", "type": "ConvolutionFormat", "default": "NCHW" }, + { "name": "border_val", "type": "float32", "default": 0 } + ] + }, + { + "name": "SpatialTfGridGenerator", + "attributes": [ + { "name": "mode", "type": "SpatialTfGridGeneratorMode", "default": "AFFINE" } + ] + }, + { + "name": "SpatialTfSampler", + "attributes": [ + { "name": "mode", "type": "SpatialTfSamplerMode", "default": "BILINEAR" } + ] + }, + { + "name": "AddUpdate", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 1 }, + { "name": "beta", "type": "float32", "default": 1 }, + { "name": "bias", "type": "float32", "default": 0 } + ] + }, + { + "name": "Elemwise", + "attributes": [ + { "name": "mode", "type": "ElemwiseMode", "default": "RELU" } + ], + "inputs": [ + { "name": "A" }, + { "name": "B" } + ] + }, + { + "name": "ElemwiseMultiType", + "attributes": [ + { "name": "mode", "type": "ElemwiseMultiTypeMode", "default": "FUSE_MUL_ADD3_INT16x32x32x32" } + ] + }, + { + "name": "PowC", + "attributes": [ + { "name": "exp", "type": "float32", "default": 0 } + ] + }, + { + "name": "DctChannelSelectV0", + "attributes": [ + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" }, + { "name": "fastImpl", "type": "DctChannelSelectV0FastImpl", "default": "NONE" }, + { "name": "dct_block_size", "type": "int32", "default": 8 } + ] + }, + { + "name": "DctChannelSelect", + "attributes": [ + { "name": "format", "type": "ConvolutionFormat", "default": "NCHW" }, + { "name": "fastImpl", "type": "DctChannelSelectV0FastImpl", "default": "NONE" }, + { "name": "dct_block_size", "type": "int32", "default": 8 } + ] + }, + { + "name": "MatrixMulV0", + "attributes": [ + { "name": "transposeA", "type": "bool", "default": false }, + { "name": "transposeB", "type": "bool", "default": false }, + { "name": "data_type", "type": "MatrixMulV0DataType", "default": "FLOAT" } + ], + "inputs": [ + { "name": "A" }, + { "name": "B" } + ] + }, + { + "name": "MatrixMulV1", + "attributes": [ + { "name": "transposeA", "type": "bool", "default": false }, + { "name": "transposeB", "type": "bool", "default": false }, + { "name": "compute_mode", "type": "MatrixMulV1ComputeMode", "default": "DEFAULT" } + ], + "inputs": [ + { "name": "A" }, + { "name": "B" } + ] + }, + { + "name": "MatrixMulV2", + "attributes": [ + { "name": "transposeA", "type": "bool", "default": false }, + { "name": "transposeB", "type": "bool", "default": false }, + { "name": "compute_mode", "type": "MatrixMulV1ComputeMode", "default": "DEFAULT" }, + { "name": "format", "type": "MatrixMulFormat", "default": "DEFAULT" } + ], + "inputs": [ + { "name": "A" }, + { "name": "B" } + ] + }, + { + "name": "MatrixMul", + "attributes": [ + { "name": "transposeA", "type": "bool", "default": 
false }, + { "name": "transposeB", "type": "bool", "default": false }, + { "name": "compute_mode", "type": "MatrixMulV1ComputeMode", "default": "DEFAULT" }, + { "name": "format", "type": "MatrixMulFormat", "default": "DEFAULT" } + ], + "inputs": [ + { "name": "A" }, + { "name": "B" } + ] + }, + { + "name": "SVD", + "attributes": [ + { "name": "full_matrices", "type": "bool", "default": false }, + { "name": "compute_uv", "type": "bool", "default": true } + ] + }, + { + "name": "ReduceV0", + "attributes": [ + { "name": "mode", "type": "ReduceV0Mode", "default": "SUM" }, + { "name": "axis", "type": "int32", "default": -1 } + ] + }, + { + "name": "ReduceV1", + "attributes": [ + { "name": "mode", "type": "ReduceV1Mode", "default": "SUM" }, + { "name": "axis", "type": "int32", "default": -1 }, + { "name": "data_type", "type": "ReduceV1DataType", "default": "DEFAULT" } + ] + }, + { + "name": "Reduce", + "attributes": [ + { "name": "mode", "type": "ReduceMode", "default": "SUM" }, + { "name": "axis", "type": "int32", "default": 2147483647 }, + { "name": "data_type", "type": "ReduceDataType", "default": "DEFAULT" } + ] + }, + { + "name": "CumsumV0", + "attributes": [ + { "name": "axis", "type": "int32", "default": -1 }, + { "name": "exclusive", "type": "bool", "default": true }, + { "name": "reverse", "type": "bool", "default": false } + ] + }, + { + "name": "Cumsum", + "attributes": [ + { "name": "axis", "type": "int32", "default": 2147483647 }, + { "name": "exclusive", "type": "bool", "default": true }, + { "name": "reverse", "type": "bool", "default": false } + ] + }, + { + "name": "CondTake", + "attributes": [ + { "name": "mode", "type": "CondTakeMode", "default": "EQ" }, + { "name": "val", "type": "float32", "default": 0 }, + { "name": "eps", "type": "float32", "default": 0.000001 } + ] + }, + { + "name": "Argsort", + "attributes": [ + { "name": "order", "type": "ArgsortOrder", "default": "ASCENDING" } + ] + }, + { + "name": "IndexingRemap", + "attributes": [ + { "name": "is_non_overlapping", "type": "bool", "default": false } + ] + }, + { + "name": "Sleep", + "attributes": [ + { "name": "time", "type": "float32", "default": 0 } + ] + }, + { + "name": "Linspace", + "attributes": [ + { "name": "endpoint", "type": "bool", "default": true } + ] + }, + { + "name": "LinspaceFull", + "attributes": [ + { "name": "start", "type": "float64", "default": 0 }, + { "name": "stop", "type": "float64", "default": 1 }, + { "name": "endpoint", "type": "bool", "default": true } + ] + }, + { + "name": "Eye", + "attributes": [ + { "name": "k", "type": "int32", "default": 0 }, + { "name": "dtype", "type": "DTypeEnum", "default": "Float32" } + ] + }, + { + "name": "Diag", + "attributes": [ + { "name": "k", "type": "int32", "default": 0 } + ] + }, + { + "name": "UniformRNGV0", + "attributes": [ + { "name": "seed", "type": "uint64", "default": 0 } + ] + }, + { + "name": "UniformRNG", + "attributes": [ + { "name": "seed", "type": "uint64", "default": 0 }, + { "name": "dtype", "type": "DTypeEnum", "default": "Float32" } + ] + }, + { + "name": "GaussianRNGV0", + "attributes": [ + { "name": "seed", "type": "uint64", "default": 0 }, + { "name": "mean", "type": "float32", "default": 0 }, + { "name": "std", "type": "float32", "default": 1 } + ] + }, + { + "name": "GaussianRNG", + "attributes": [ + { "name": "seed", "type": "uint64", "default": 0 }, + { "name": "mean", "type": "float32", "default": 0 }, + { "name": "std", "type": "float32", "default": 1 }, + { "name": "dtype", "type": "DTypeEnum", "default": "Float32" } + ] + 
}, + { + "name": "GammaRNG", + "attributes": [ + { "name": "seed", "type": "uint64", "default": 0 } + ] + }, + { + "name": "BetaRNG", + "attributes": [ + { "name": "seed", "type": "uint64", "default": 0 } + ] + }, + { + "name": "PoissonRNG", + "attributes": [ + { "name": "seed", "type": "uint64", "default": 0 } + ] + }, + { + "name": "PermutationRNG", + "attributes": [ + { "name": "seed", "type": "uint64", "default": 0 }, + { "name": "dtype", "type": "DTypeEnum", "default": "Int32" } + ] + }, + { + "name": "ShuffleRNG", + "attributes": [ + { "name": "seed", "type": "uint64", "default": 0 } + ] + }, + { + "name": "Flip", + "category": "Shape", + "attributes": [ + { "name": "vertical", "type": "bool", "default": false }, + { "name": "horizontal", "type": "bool", "default": false } + ] + }, + { + "name": "Rotate", + "attributes": [ + { "name": "clockwise", "type": "bool", "default": true } + ] + }, + { + "name": "ROICopy", + "attributes": [ + { "name": "row_from", "type": "uint32", "default": 0 }, + { "name": "row_to", "type": "uint32", "default": 0 }, + { "name": "col_from", "type": "uint32", "default": 0 }, + { "name": "col_to", "type": "uint32", "default": 0 } + ] + }, + { + "name": "CvtColor", + "attributes": [ + { "name": "mode", "type": "CvtColorMode", "default": "RGB2GRAY" } + ] + }, + { + "name": "WarpAffineV0", + "attributes": [ + { "name": "imode", "type": "WarpPerspectiveV1InterpolationMode", "default": "LINEAR" }, + { "name": "border_mode", "type": "WarpPerspectiveV1BorderMode", "default": "REPLICATE" }, + { "name": "border_val", "type": "float32", "default": 0 } + ] + }, + { + "name": "WarpAffineV1", + "attributes": [ + { "name": "imode", "type": "WarpPerspectiveV1InterpolationMode", "default": "LINEAR" }, + { "name": "border_mode", "type": "WarpPerspectiveV1BorderMode", "default": "REPLICATE" }, + { "name": "border_val", "type": "float32", "default": 0 }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NHWC" } + ] + }, + { + "name": "WarpAffine", + "attributes": [ + { "name": "imode", "type": "WarpPerspectiveV1InterpolationMode", "default": "LINEAR" }, + { "name": "border_mode", "type": "WarpPerspectiveV1BorderMode", "default": "REPLICATE" }, + { "name": "border_val", "type": "float32", "default": 0 }, + { "name": "format", "type": "ConvolutionFormat", "default": "NHWC" } + ] + }, + { + "name": "GaussianBlur", + "attributes": [ + { "name": "border_mode", "type": "WarpPerspectiveV1BorderMode", "default": "REPLICATE" }, + { "name": "kernel_height", "type": "uint32", "default": 0 }, + { "name": "kernel_width", "type": "uint32", "default": 0 }, + { "name": "sigma_x", "type": "float32", "default": 0 }, + { "name": "sigma_y", "type": "float32", "default": 0 } + ] + }, + { + "name": "ResizeV0", + "attributes": [ + { "name": "imode", "type": "WarpPerspectiveV1InterpolationMode", "default": "LINEAR" } + ] + }, + { + "name": "ResizeV1", + "attributes": [ + { "name": "imode", "type": "WarpPerspectiveV1InterpolationMode", "default": "LINEAR" }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NHWC" } + ] + }, + { + "name": "Resize", + "attributes": [ + { "name": "imode", "type": "WarpPerspectiveV1InterpolationMode", "default": "LINEAR" }, + { "name": "format", "type": "ConvolutionFormat", "default": "NHWC" } + ] + }, + { + "name": "RemapV0", + "attributes": [ + { "name": "imode", "type": "WarpPerspectiveV1InterpolationMode", "default": "LINEAR" }, + { "name": "border_type", "type": "WarpPerspectiveV1BorderMode", "default": "REPLICATE" }, + { "name": 
"format", "type": "ConvolutionV0Format", "default": "NHWC" }, + { "name": "scalar", "type": "float32", "default": 0 } + ] + }, + { + "name": "Remap", + "attributes": [ + { "name": "imode", "type": "WarpPerspectiveV1InterpolationMode", "default": "LINEAR" }, + { "name": "border_type", "type": "WarpPerspectiveV1BorderMode", "default": "REPLICATE" }, + { "name": "format", "type": "ConvolutionFormat", "default": "NHWC" }, + { "name": "scalar", "type": "float32", "default": 0 } + ] + }, + { + "name": "Convolution3D", + "category": "Layer", + "attributes": [ + { "name": "mode", "type": "Convolution3DMode", "default": "CROSS_CORRELATION" }, + { "name": "pad_d", "type": "uint32", "default": 0 }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_d", "type": "uint32", "default": 1 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_d", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "sparse", "type": "Convolution3DSparse", "default": "DENSE" }, + { "name": "data_type", "type": "Convolution3DDataType", "default": "FLOAT" }, + { "name": "format", "type": "Convolution3DFormat", "default": "NCDHW" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" } + ] + }, + { + "name": "Conv3DBias", + "category": "Layer", + "attributes": [ + { "name": "nonlineMode", "type": "Conv3DBiasNonlineMode", "default": "IDENTITY" }, + { "name": "mode", "type": "Convolution3DMode", "default": "CROSS_CORRELATION" }, + { "name": "pad_d", "type": "uint32", "default": 0 }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_d", "type": "uint32", "default": 1 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "SeparableConv3D", + "category": "Layer", + "attributes": [ + { "name": "mode", "type": "Convolution3DMode", "default": "CROSS_CORRELATION" }, + { "name": "borderMode", "type": "SeparableConv3DBorderMode", "default": "BORDER_REPLICATE" }, + { "name": "is_symm_kernel", "type": "bool", "default": true }, + { "name": "pad_d", "type": "uint32", "default": 0 }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_d", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "ksize_d", "type": "uint32", "default": 0 }, + { "name": "ksize_h", "type": "uint32", "default": 3 }, + { "name": "ksize_w", "type": "uint32", "default": 3 }, + { "name": "anchor_d", "type": "uint32", "default": 0 }, + { "name": "anchor_h", "type": "uint32", "default": 1 }, + { "name": "anchor_w", "type": "uint32", "default": 1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "TopK", + "attributes": [ + { "name": "mode", "type": "TopKMode", "default": "KTH_ONLY" } + ] + }, + { + "name": "RelayoutFormatV0", + "attributes": [ + { "name": "mode", "type": "RelayoutFormatV0Mode", "default": "NHWC_NHWCD4" } + ] + }, + { + "name": "RelayoutFormat", + "attributes": [ + { "name": 
"mode", "type": "RelayoutFormatV0Mode", "default": "NHWC_NHWCD4" }, + { "name": "oc", "type": "uint32", "default": 0 }, + { "name": "group", "type": "uint32", "default": 1 } + ] + }, + { + "name": "SeparableFilterV0", + "attributes": [ + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" }, + { "name": "borderMode", "type": "WarpPerspectiveV1BorderMode", "default": "REPLICATE" }, + { "name": "is_symm_kernel", "type": "bool", "default": true }, + { "name": "ksize_h", "type": "uint32", "default": 3 }, + { "name": "ksize_w", "type": "uint32", "default": 3 }, + { "name": "anchor_h", "type": "uint32", "default": 1 }, + { "name": "anchor_w", "type": "uint32", "default": 1 } + ] + }, + { + "name": "SeparableFilter", + "attributes": [ + { "name": "format", "type": "ConvolutionFormat", "default": "NCHW" }, + { "name": "borderMode", "type": "WarpPerspectiveV1BorderMode", "default": "REPLICATE" }, + { "name": "is_symm_kernel", "type": "bool", "default": true }, + { "name": "ksize_h", "type": "uint32", "default": 3 }, + { "name": "ksize_w", "type": "uint32", "default": 3 }, + { "name": "anchor_h", "type": "uint32", "default": 1 }, + { "name": "anchor_w", "type": "uint32", "default": 1 } + ] + }, + { + "name": "LocalShareV0", + "attributes": [ + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "spatial_groups_h", "type": "uint32", "default": 1 }, + { "name": "spatial_groups_w", "type": "uint32", "default": 1 }, + { "name": "sparse", "type": "ConvolutionV0Sparse", "default": "DENSE" }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" }, + { "name": "computeMode", "type": "ConvolutionV1ComputeMode", "default": "DEFAULT" } + ] + }, + { + "name": "LocalShare", + "attributes": [ + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "spatial_groups_h", "type": "uint32", "default": 1 }, + { "name": "spatial_groups_w", "type": "uint32", "default": 1 }, + { "name": "sparse", "type": "ConvolutionV0Sparse", "default": "DENSE" }, + { "name": "format", "type": "ConvolutionFormat", "default": "NCHW" }, + { "name": "computeMode", "type": "ConvolutionV1ComputeMode", "default": "DEFAULT" } + ] + }, + { + "name": "ROIAlignV0", + "attributes": [ + { "name": "mode", "type": "ROIAlignV0Mode", "default": "MAX_" }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" }, + { "name": "spatial_scale", "type": "float32", "default": 1 }, + { "name": "offset", "type": "float32", "default": 0 }, + { "name": "pooled_height", "type": "uint32", "default": 1 }, + { "name": "pooled_width", "type": "uint32", "default": 1 }, + { "name": "sample_height", "type": "uint32", "default": 2 }, + { "name": "sample_width", "type": "uint32", "default": 2 } + ] + }, + { + "name": "ROIAlign", + "attributes": [ + { "name": "mode", "type": 
"ROIAlignV0Mode", "default": "MAX_" }, + { "name": "format", "type": "ConvolutionFormat", "default": "NCHW" }, + { "name": "spatial_scale", "type": "float32", "default": 1 }, + { "name": "offset", "type": "float32", "default": 0 }, + { "name": "pooled_height", "type": "uint32", "default": 1 }, + { "name": "pooled_width", "type": "uint32", "default": 1 }, + { "name": "sample_height", "type": "uint32", "default": 2 }, + { "name": "sample_width", "type": "uint32", "default": 2 } + ] + }, + { + "name": "Correlation", + "attributes": [ + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" }, + { "name": "kernel_size", "type": "uint32", "default": 1 }, + { "name": "max_displacement", "type": "uint32", "default": 1 }, + { "name": "stride1", "type": "uint32", "default": 1 }, + { "name": "stride2", "type": "uint32", "default": 1 }, + { "name": "pad_size", "type": "uint32", "default": 0 }, + { "name": "is_multiply", "type": "bool", "default": true } + ] + }, + { + "name": "DeformablePSROIPooling", + "attributes": [ + { "name": "no_trans", "type": "bool", "default": true }, + { "name": "spatial_scale", "type": "float32", "default": 1 }, + { "name": "trans_std", "type": "float32", "default": 1 }, + { "name": "pooled_h", "type": "uint32", "default": 1 }, + { "name": "pooled_w", "type": "uint32", "default": 1 }, + { "name": "part_size", "type": "uint32", "default": 1 }, + { "name": "sample_per_part", "type": "uint32", "default": 1 } + ] + }, + { + "name": "BatchConvBiasV0", + "attributes": [ + { "name": "nonlineMode", "type": "ConvBiasV0NonlineMode", "default": "IDENTITY" }, + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "sparse", "type": "ConvolutionV0Sparse", "default": "DENSE" }, + { "name": "format", "type": "ConvolutionV0Format", "default": "NCHW" }, + { "name": "compute_mode", "type": "ConvolutionV1ComputeMode", "default": "DEFAULT" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "BatchConvBias", + "attributes": [ + { "name": "nonlineMode", "type": "ConvBiasV0NonlineMode", "default": "IDENTITY" }, + { "name": "mode", "type": "ConvolutionV0Mode", "default": "CROSS_CORRELATION" }, + { "name": "pad_h", "type": "uint32", "default": 0 }, + { "name": "pad_w", "type": "uint32", "default": 0 }, + { "name": "stride_h", "type": "uint32", "default": 1 }, + { "name": "stride_w", "type": "uint32", "default": 1 }, + { "name": "dilate_h", "type": "uint32", "default": 1 }, + { "name": "dilate_w", "type": "uint32", "default": 1 }, + { "name": "sparse", "type": "ConvolutionV0Sparse", "default": "DENSE" }, + { "name": "format", "type": "ConvolutionFormat", "default": "NCHW" }, + { "name": "compute_mode", "type": "ConvolutionV1ComputeMode", "default": "DEFAULT" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "FakeQuant", + "attributes": [ + { "name": "qmin", "type": "int32", "default": -2147483648 }, + { "name": "qmax", "type": "int32", "default": 2147483647 } + ] + }, + { + "name": "TQT", + "category": "Quantization", + "attributes": [ + { "name": "qmin", "type": "int32", "default": 
-2147483648 }, + { "name": "qmax", "type": "int32", "default": 2147483647 } + ] + }, + { + "name": "LSQ", + "category": "Quantization", + "attributes": [ + { "name": "qmin", "type": "int32", "default": -2147483648 }, + { "name": "qmax", "type": "int32", "default": 2147483647 } + ] + }, + { + "name": "Fill", + "attributes": [ + { "name": "value", "type": "float32", "default": 0 } + ] + }, + { + "name": "CheckNonFinite", + "attributes": [ + { "name": "scale", "type": "float32", "default": 1 } + ] + }, + { + "name": "Padding", + "category": "Layer", + "attributes": [ + { "name": "front_offset_dim0", "type": "uint32", "default": 0 }, + { "name": "front_offset_dim1", "type": "uint32", "default": 0 }, + { "name": "front_offset_dim2", "type": "uint32", "default": 0 }, + { "name": "front_offset_dim3", "type": "uint32", "default": 0 }, + { "name": "front_offset_dim4", "type": "uint32", "default": 0 }, + { "name": "front_offset_dim5", "type": "uint32", "default": 0 }, + { "name": "front_offset_dim6", "type": "uint32", "default": 0 }, + { "name": "back_offset_dim0", "type": "uint32", "default": 0 }, + { "name": "back_offset_dim1", "type": "uint32", "default": 0 }, + { "name": "back_offset_dim2", "type": "uint32", "default": 0 }, + { "name": "back_offset_dim3", "type": "uint32", "default": 0 }, + { "name": "back_offset_dim4", "type": "uint32", "default": 0 }, + { "name": "back_offset_dim5", "type": "uint32", "default": 0 }, + { "name": "back_offset_dim6", "type": "uint32", "default": 0 }, + { "name": "padding_val", "type": "float32", "default": 0 }, + { "name": "padding_mode", "type": "PaddingPaddingMode", "default": "CONSTANT" } + ] + }, + { + "name": "LayerNorm", + "category": "Normalization", + "attributes": [ + { "name": "affine", "type": "bool", "default": true }, + { "name": "eps", "type": "float32", "default": 0.00001 }, + { "name": "normalized_dim", "type": "uint64", "default": 1 }, + { "name": "normalized_size", "type": "uint64", "default": 1 } + ] + }, + { + "name": "Dropout", + "category": "Dropout", + "attributes": [ + { "name": "drop_prob", "type": "float32", "default": 0 }, + { "name": "seed", "type": "uint64", "default": 0 } + ] + }, + { + "name": "RNNCell", + "category": "Layer", + "attributes": [ + { "name": "nonlineMode", "type": "RNNCellNonlineMode", "default": "IDENTITY" } + ] + }, + { + "name": "RNN", + "category": "Layer", + "attributes": [ + { "name": "num_layers", "type": "uint32", "default": 1 }, + { "name": "bidirectional", "type": "bool", "default": false }, + { "name": "bias", "type": "bool", "default": true }, + { "name": "hidden_size", "type": "uint32", "default": 128 }, + { "name": "dropout", "type": "float32", "default": 0 }, + { "name": "nonlineMode", "type": "RNNCellNonlineMode", "default": "IDENTITY" }, + { "name": "fwd_mode", "type": "BNFwdMode", "default": "TRAINING" } + ] + }, + { + "name": "LSTM", + "category": "Layer", + "attributes": [ + { "name": "num_layers", "type": "uint32", "default": 1 }, + { "name": "bidirectional", "type": "bool", "default": false }, + { "name": "bias", "type": "bool", "default": true }, + { "name": "hidden_size", "type": "uint32", "default": 128 }, + { "name": "proj_size", "type": "uint32", "default": 0 }, + { "name": "dropout", "type": "float32", "default": 0 }, + { "name": "fwd_mode", "type": "BNFwdMode", "default": "TRAINING" } + ] + }, + { + "name": "DType", + "attributes": [ + { "name": "dtype", "type": "DTypeEnum", "default": "Byte" } + ] + }, + { + "name": "PersistentOutputStorage", + "attributes": [ + { "name": "share_key", 
"type": "int32", "default": -1 } + ] + }, + { + "name": "OptionalAxis", + "attributes": [ + { "name": "axis", "type": "int32", "default": -1 } + ] + }, + { + "name": "OptionalAxisV1", + "attributes": [ + { "name": "axis", "type": "int32", "default": 7 } + ] + }, + { + "name": "ExecutionPolicyV0", + "attributes": [ + { "name": "strategy", "type": "ExecutionPolicyV0Strategy", "default": "HEURISTIC" }, + { "name": "workspace_limit", "type": "uint64", "default": 18446744073709552000 } + ] + }, + { + "name": "ExecutionPolicy", + "attributes": [ + { "name": "strategy", "type": "ExecutionPolicyStrategy", "default": "PROFILE" }, + { "name": "workspace_limit", "type": "uint64", "default": 18446744073709552000 } + ] + }, + { + "name": "AssertEqual", + "attributes": [ + { "name": "maxerr", "type": "float32", "default": 0.0001 }, + { "name": "verbose", "type": "bool", "default": false } + ] + }, + { + "name": "FpgaConv", + "attributes": [ + { "name": "need_output_quantize", "type": "bool", "default": false }, + { "name": "need_output_threshold", "type": "bool", "default": false }, + { "name": "stride", "type": "int32", "default": 1 }, + { "name": "input_bit_width", "type": "int32", "default": 2 }, + { "name": "output_bit_width", "type": "int32", "default": 2 }, + { "name": "weight_bit_width", "type": "int32", "default": 2 }, + { "name": "thres0", "type": "int32", "default": 0 }, + { "name": "thres1", "type": "int32", "default": 1 }, + { "name": "unpool_size", "type": "uint32", "default": 4 }, + { "name": "direct_size", "type": "uint32", "default": 4 } + ] + }, + { + "name": "CollectiveComm", + "attributes": [ + { "name": "mode", "type": "CollectiveCommMode", "default": "REDUCE_SUM" } + ] + }, + { + "name": "CondExecPred", + "attributes": [ + { "name": "mode", "type": "CondExecPredMode", "default": "CASE" }, + { "name": "eps", "type": "float32", "default": 0.0001 } + ] + }, + { + "name": "CondExecPredLogical", + "attributes": [ + { "name": "mode", "type": "CondExecPredLogicalMode", "default": "OR" } + ] + }, + { + "name": "CondExecMark", + "attributes": [ + { "name": "grad_mode", "type": "CondExecMarkGradMode", "default": "SUM" }, + { "name": "static_infer", "type": "CondExecMarkStaticInfer", "default": "SHAPE_VALUE" } + ] + }, + { + "name": "CondExecMerge", + "attributes": [ + { "name": "nr_output", "type": "uint32", "default": 1 }, + { "name": "mode", "type": "CondExecMergeMode", "default": "EXACT_ONE" } + ] + }, + { + "name": "NvOf", + "attributes": [ + { "name": "precision", "type": "uint32", "default": 1 } + ] + }, + { + "name": "PersistentDTypeScalar", + "attributes": [ + { "name": "dtype", "type": "DTypeEnum", "default": "Float32" }, + { "name": "storage", "type": "uint8[]", "default": 0 } + ] + }, + { + "name": "MGBAddUpdate", + "attributes": [ + { "name": "alpha", "type": "PersistentDTypeScalar" }, + { "name": "beta", "type": "PersistentDTypeScalar" }, + { "name": "bias", "type": "PersistentDTypeScalar" } + ] + }, + { + "name": "Host2DeviceCopy", + "category": "Data", + "attributes": [ + { "name": "enable_value_infer", "type": "bool", "default": true }, + { "name": "dump_default_value", "type": "bool", "default": false }, + { "name": "allow_cpu_mem_fwd", "type": "bool", "default": true } + ] + }, + { + "name": "Dimshuffle", + "category": "Shape", + "attributes": [ + { "name": "pattern", "type": "int32[]", "default": 0 }, + { "name": "ndim", "type": "uint32", "default": 0 } + ] + }, + { + "name": "AxisDesc", + "attributes": [ + { "name": "method", "type": "AxisDescMethod", "default": "ADD_1" }, 
+ { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "AxisAddRemove", + "attributes": [ + { "name": "desc", "type": "AxisDesc[]" } + ] + }, + { + "name": "MGBSleep", + "attributes": [ + { "name": "device", "type": "bool", "default": true }, + { "name": "host", "type": "bool", "default": false }, + { "name": "seconds", "type": "float64", "default": 0 } + ] + }, + { + "name": "IndexDescMaskItem", + "attributes": [ + { "name": "axis", "type": "int8", "default": 0 }, + { "name": "begin", "type": "bool", "default": false }, + { "name": "end", "type": "bool", "default": false }, + { "name": "step", "type": "bool", "default": false }, + { "name": "idx", "type": "bool", "default": false } + ] + }, + { + "name": "IndexDescMaskDump", + "attributes": [ + { "name": "items", "type": "IndexDescMaskItem[]" } + ] + }, + { + "name": "NMSKeep", + "attributes": [ + { "name": "iou_thresh", "type": "float32", "default": 0 }, + { "name": "max_output", "type": "uint32", "default": 0 } + ] + }, + { + "name": "BatchNormForward", + "category": "Normalization" + }, + { + "name": "ConvolutionForward", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" } + ] + }, + { + "name": "ConvBiasForward", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "biases" } + ] + }, + { + "name": "PoolingForward", + "category": "Pool" + }, + { + "name": "AdaptivePoolingForward", + "category": "Pool" + }, + { + "name": "Subtensor", + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "axis" } + ] + }, + { + "name": "GetVarShape", + "category": "Shape" + }, + { + "name": "ReshapeV1", + "category": "Shape", + "inputs": [ + { "name": "tensor" }, + { "name": "target_shape" } + ], + "outputs": [ + { "name": "tensor" } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "inputs": [ + { "name": "tensor" }, + { "name": "target_shape" } + ], + "outputs": [ + { "name": "tensor" } + ] + }, + { + "name": "Concat", + "category": "Tensor" + }, + { + "name": "GroupNorm", + "attributes": [ + { "name": "affine", "type": "bool", "default": true }, + { "name": "eps", "type": "float32", "default": 0.00001 }, + { "name": "group", "type": "uint32", "default": 1 }, + { "name": "format", "type": "ConvolutionFormat", "default": "NCHW" } + ] + }, + { + "name": "MultinomialRNG", + "attributes": [ + { "name": "seed", "type": "uint64", "default": 0 }, + { "name": "num_samples", "type": "uint64", "default": 1 }, + { "name": "replacement", "type": "bool", "default": false } + ] + } +] \ No newline at end of file diff --git a/megengine-schema.js b/megengine-schema.js new file mode 100644 index 00000000000..c167123f433 --- /dev/null +++ b/megengine-schema.js @@ -0,0 +1,2437 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const $root = flatbuffers.get('megengine'); + +$root.mgb = $root.mgb || {}; + +$root.mgb.serialization = $root.mgb.serialization || {}; + +$root.mgb.serialization.fbs = $root.mgb.serialization.fbs || {}; + +$root.mgb.serialization.fbs.DTypeEnum = { + Float32: 0, + Uint8: 1, + Int8: 2, + Int16: 3, + Int32: 4, + IntB1: 5, + IntB2: 6, + IntB4: 7, + Byte: 8, + Float16: 9, + UintB4: 10, + Quantized8Asymm: 11, + QuantizedS32: 12, + QuantizedS8: 13, + Quantized4Asymm: 14, + QuantizedS4: 15, + QuantizedS16: 16, + BFloat16: 17, + Bool: 18, + Uint16: 19, + QuantizedS1: 20 +}; + +$root.mgb.serialization.fbs.LinearQuantizationParam = class LinearQuantizationParam { + + static decode(reader, position) { + const $ = new 
$root.mgb.serialization.fbs.LinearQuantizationParam(); + $.scale = reader.float32_(position, 4, 0); + $.zero_point = reader.uint8_(position, 6, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.DTypeParam = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.mgb.serialization.fbs.LinearQuantizationParam.decode(reader, position); + default: return undefined; + } + } +}; + +$root.mgb.serialization.fbs.DType = class DType { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.DType(); + $.type = reader.int8_(position, 4, 0); + $.param = reader.union(position, 6, $root.mgb.serialization.fbs.DTypeParam.decode); + return $; + } +}; + +$root.mgb = $root.mgb || {}; + +$root.mgb.serialization = $root.mgb.serialization || {}; + +$root.mgb.serialization.fbs = $root.mgb.serialization.fbs || {}; + +$root.mgb.serialization.fbs.param = $root.mgb.serialization.fbs.param || {}; + +$root.mgb.serialization.fbs.param.ArgsortOrder = { + ASCENDING: 0, + DESCENDING: 1 +}; + +$root.mgb.serialization.fbs.param.BNFwdMode = { + TRAINING: 0, + INFERENCE: 1 +}; + +$root.mgb.serialization.fbs.param.BNParamDim = { + DIM_11HW: 0, + DIM_1CHW: 1, + DIM_1C11: 2, + DIM_111C: 3 +}; + +$root.mgb.serialization.fbs.param.CondTakeMode = { + EQ: 0, + NEQ: 1, + LT: 2, + LEQ: 3, + GT: 4, + GEQ: 5 +}; + +$root.mgb.serialization.fbs.param.Conv3DBiasNonlineMode = { + IDENTITY: 0, + RELU: 1, + SIGMOID: 2 +}; + +$root.mgb.serialization.fbs.param.ConvBiasV0NonlineMode = { + IDENTITY: 0, + RELU: 1, + SIGMOID: 2, + H_SWISH: 3 +}; + +$root.mgb.serialization.fbs.param.ConvPoolingMethod = { + WITH_TEXTURE_OBJ: 0, + WITH_SHARED_MEM: 1 +}; + +$root.mgb.serialization.fbs.param.ConvPoolingNonlineMode = { + IDENTITY: 0, + RELU: 1, + SIGMOID: 2 +}; + +$root.mgb.serialization.fbs.param.ConvPoolingPoolMode = { + AVERAGE: 0, + MAX_: 1 +}; + +$root.mgb.serialization.fbs.param.ConvolutionFormat = { + NCHW: 0, + NHWC: 1, + NHWCD4: 2, + NCHW4: 3, + NCHW8: 4, + NCHW32: 5, + NCHW88: 6, + NCHW44: 7, + NCHW44_DOT: 8, + NCHW4_NCHW32: 9, + NCHW32_NCHW4: 10, + NCHW4_NCHW: 11, + NHWC_NCHW: 12, + NHWC_NCHW4_IC_SMALL: 13, + NCHW_NCHW4_IC_SMALL: 14, + CHWN4: 15, + NCHW64: 16, + NCHW4_NHWC: 17 +}; + +$root.mgb.serialization.fbs.param.Convolution3DDataType = { + FLOAT: 0, + FLOAT_IO16xC32: 1 +}; + +$root.mgb.serialization.fbs.param.Convolution3DFormat = { + NCDHW: 0, + NDHWC: 1 +}; + +$root.mgb.serialization.fbs.param.Convolution3DMode = { + CROSS_CORRELATION: 0, + CONVOLUTION: 1 +}; + +$root.mgb.serialization.fbs.param.Convolution3DSparse = { + DENSE: 0, + GROUP: 1 +}; + +$root.mgb.serialization.fbs.param.ConvolutionV0DataType = { + FLOAT: 0, + INT8x8x16: 1, + INT8x8x32: 2, + FLOAT_IO16xC32: 3, + QUINT8x8x32: 4, + INT8x8xX: 5, + QUINT4x4x32: 6 +}; + +$root.mgb.serialization.fbs.param.ConvolutionV0Format = { + NCHW: 0, + NHWC: 1, + NHWCD4: 2, + NCHW4: 3, + NCHW8: 4, + NCHW32: 5, + NCHW88: 6, + NCHW44: 7, + NCHW44_DOT: 8, + NCHW_WINOGRAD: 9, + NCHW88_WINOGRAD: 10, + NCHW44_WINOGRAD: 11, + NCHW4_NCHW32: 12, + NCHW32_NCHW4: 13, + NCHW4_NCHW: 14, + NHWC_NCHW: 15, + NHWC_NCHW4_IC_SMALL: 16, + NCHW_NCHW4_IC_SMALL: 17, + CHWN4: 18, + NCHW4_NHWC: 19 +}; + +$root.mgb.serialization.fbs.param.ConvolutionV0Mode = { + CROSS_CORRELATION: 0, + CONVOLUTION: 1 +}; + +$root.mgb.serialization.fbs.param.ConvolutionV0Sparse = { + DENSE: 0, + GROUP: 1 +}; + +$root.mgb.serialization.fbs.param.ConvolutionV1ComputeMode = { + DEFAULT: 0, + FLOAT32: 1 +}; + +$root.mgb.serialization.fbs.param.CvtColorMode = { + RGB2GRAY: 
0, + RGB2YUV: 1, + YUV2RGB: 2, + GRAY2RGB: 3, + RGBA2RGB: 4, + RGBA2BGR: 5, + RGBA2GRAY: 6, + RGB2BGR: 7, + BGR2GRAY: 8, + BGR2RGB: 9, + YUV2GRAY_NV21: 10, + YUV2RGB_NV21: 11, + YUV2BGR_NV21: 12, + YUV2GRAY_NV12: 13, + YUV2RGB_NV12: 14, + YUV2BGR_NV12: 15, + YUV2GRAY_YV12: 16, + YUV2RGB_YV12: 17, + YUV2BGR_YV12: 18, + YUV2GRAY_YU12: 19, + YUV2RGB_YU12: 20, + YUV2BGR_YU12: 21, + YCrCb2RGB: 22, + YCrCb2BGR: 23, + BT601_YUV2RGB_NV21: 24, + BT601_YUV2BGR_NV21: 25, + BT601_YUV2RGB_NV12: 26, + BT601_YUV2BGR_NV12: 27, + BT601_YUV2RGB_YV12: 28, + BT601_YUV2BGR_YV12: 29, + BT601_YUV2RGB_YU12: 30, + BT601_YUV2BGR_YU12: 31 +}; + +$root.mgb.serialization.fbs.param.DctChannelSelectV0FastImpl = { + NONE: 0, + FIX_32_MASK: 1 +}; + +$root.mgb.serialization.fbs.param.ElemwiseMode = { + RELU: 0, + ABS: 1, + ACOS: 2, + ASIN: 3, + CEIL: 4, + COS: 5, + EXP: 6, + EXPM1: 7, + FLOOR: 8, + LOG: 9, + LOG1P: 10, + NEGATE: 11, + SIGMOID: 12, + SIN: 13, + TANH: 14, + ABS_GRAD: 15, + ADD: 16, + FLOOR_DIV: 17, + MAX_: 18, + MIN_: 19, + MOD: 20, + MUL: 21, + POW: 22, + SIGMOID_GRAD: 23, + SUB: 24, + SWITCH_GT0: 25, + TANH_GRAD: 26, + TRUE_DIV: 27, + LOG_SUM_EXP: 28, + LT: 29, + LEQ: 30, + EQ: 31, + SHL: 32, + SHR: 33, + COND_LEQ_MOV: 34, + FUSE_MUL_ADD3: 35, + FUSE_MUL_ADD4: 36, + FUSE_ADD_RELU: 37, + FUSE_ADD_SIGMOID: 38, + FUSE_ADD_TANH: 39, + FAST_TANH: 40, + FAST_TANH_GRAD: 41, + ROUND: 42, + RMULH: 43, + ATAN2: 44, + ERF: 45, + ERFINV: 46, + ERFC: 47, + ERFCINV: 48, + H_SWISH: 49, + H_SWISH_GRAD: 50, + FUSE_ADD_H_SWISH: 51, + NOT: 52, + AND: 53, + OR: 54, + XOR: 55, + SILU: 56, + SILU_GRAD: 57, + GELU: 58, + GELU_GRAD: 59 +}; + +$root.mgb.serialization.fbs.param.ElemwiseMultiTypeMode = { + FUSE_MUL_ADD3_INT16x32x32x32: 0, + FUSE_MUL_ADD3_IXxF32xF32xI8: 1, + ROUND_SHR_SATURATE_IXxI8xI8: 2, + FUSE_ADD_RMULH_ROUND_SHR_SATURATE_INT16x16x16x8: 3, + FUSE_ADD_RMULH_ROUND_SHR_SATURATE_INT32x32x32x8: 4, + ROUND_SHR_SATURATE_IXxI8xI16: 5, + QADD: 6, + QFUSE_ADD_RELU: 7, + QMUL: 8, + QMIN: 9, + QMAX: 10, + QSUB: 11, + QTRUE_DIV: 12, + QFUSE_ADD_SIGMOID: 13, + QFUSE_ADD_TANH: 14, + QRELU: 15, + QABS: 16, + QSIGMOID: 17, + QEXP: 18, + QTANH: 19, + QFUSE_MUL_ADD3: 20, + QFAST_TANH: 21, + QNEGATE: 22, + QACOS: 23, + QASIN: 24, + QCEIL: 25, + QCOS: 26, + QEXPM1: 27, + QFLOOR: 28, + QLOG: 29, + QLOG1P: 30, + QSIN: 31, + QROUND: 32, + QERF: 33, + QERFINV: 34, + QERFC: 35, + QERFCINV: 36, + QABS_GRAD: 37, + QFLOOR_DIV: 38, + QMOD: 39, + QSIGMOID_GRAD: 40, + QSWITCH_GT0: 41, + QTANH_GRAD: 42, + QLT: 43, + QLEQ: 44, + QEQ: 45, + QPOW: 46, + QLOG_SUM_EXP: 47, + QFAST_TANH_GRAD: 48, + QATAN2: 49, + QCOND_LEQ_MOV: 50, + QH_SWISH: 51, + QFUSE_ADD_H_SWISH: 52, + QH_SWISH_GRAD: 53, + FUSE_MUL_ADD3_INT16xF32xF32xF32: 54, + MUL_INT16xF32xF32: 55, + FUSE_MUL_ADD3_UINT8xF32xF32xF32: 56 +}; + +$root.mgb.serialization.fbs.param.MatrixMulFormat = { + DEFAULT: 0, + MK4: 1, + MK8: 2, + MK4_DOT: 3 +}; + +$root.mgb.serialization.fbs.param.MatrixMulV0DataType = { + FLOAT: 0, + INT8x8x16: 1, + INT8x8x32: 2, + FLOAT_IO16xC32: 3, + QUINT8x8x32: 4, + QUINT4x4x32: 5 +}; + +$root.mgb.serialization.fbs.param.MatrixMulV1ComputeMode = { + DEFAULT: 0, + FLOAT32: 1 +}; + +$root.mgb.serialization.fbs.param.PaddingPaddingMode = { + REPLICATE: 0, + REFLECT: 1, + CONSTANT: 2 +}; + +$root.mgb.serialization.fbs.param.PoolingV0Mode = { + MAX_: 0, + AVERAGE: 1, + AVERAGE_COUNT_EXCLUDE_PADDING: 2 +}; + +$root.mgb.serialization.fbs.param.RNNCellNonlineMode = { + IDENTITY: 0, + RELU: 1, + TANH: 2 +}; + +$root.mgb.serialization.fbs.param.ROIAlignV0Mode = { + MAX_: 0, + 
AVERAGE: 1 +}; + +$root.mgb.serialization.fbs.param.ROIPoolingMode = { + MAX_: 0, + AVERAGE: 1 +}; + +$root.mgb.serialization.fbs.param.ReduceDataType = { + DEFAULT: 0, + FLOAT_IO16xC32: 1, + FLOAT_O32xC32: 2, + FLOAT_O16xC32: 3, + QUINT_I8xO32: 4, + QINT_I8xO32: 5 +}; + +$root.mgb.serialization.fbs.param.ReduceMode = { + SUM: 0, + SUM_SQR: 1, + PRODUCT: 2, + MIN_: 3, + MAX_: 4, + MEAN: 5 +}; + +$root.mgb.serialization.fbs.param.ReduceV0Mode = { + SUM: 0, + SUM_SQR: 1, + PRODUCT: 2, + MIN_: 3, + MAX_: 4 +}; + +$root.mgb.serialization.fbs.param.ReduceV1DataType = { + DEFAULT: 0, + FLOAT_IO16xC32: 1, + FLOAT_O32xC32: 2, + FLOAT_O16xC32: 3, + QUINT_I8xO32: 4, + QINT_I8xO32: 5 +}; + +$root.mgb.serialization.fbs.param.ReduceV1Mode = { + SUM: 0, + SUM_SQR: 1, + PRODUCT: 2, + MIN_: 3, + MAX_: 4, + MEAN: 5 +}; + +$root.mgb.serialization.fbs.param.RelayoutFormatV0Mode = { + NHWC_NHWCD4: 0, + NHWCD4_NHWC: 1, + NHWC_NHWCD4I: 2, + NCHW_NHWCD4: 3, + NCHW_NHWCD4I: 4, + NHWCD4I_NCHW: 5, + NHWCD4_NCHW: 6, + INTER_WEIGHT_DENSE: 7, + INTER_WEIGHT_DENSEI: 8, + INTER_WEIGHT_GROUP: 9, + INTER_WEIGHT_GROUPI: 10, + INTER_WEIGHT_CHAN: 11, + INTER_WEIGHT_CHANI: 12, + INTER_WEIGHT_DENSEI_DOT: 13, + INTER_WEIGHT_GROUPI_DOT: 14, + NCHW4_CHWN4: 15, + CHWN4_NCHW4: 16, + NCHW_NCHW88_CONV_DENSE_WEIGHT: 17, + NCHW_NCHW88_CONV_CHAN_WEIGHT: 18, + NCHW_NCHW88_CONV_GROUP_WEIGHT: 19, + NCHW_NCHW88: 20, + NCHW88_NCHW: 21, + NCHW_NCHW4_IC_SMALL: 22, + NCHW_NCHW4_IC_SMALL_CONV_DENSE_WEIGHT: 23, + NCHW_NCHW4: 24, + NCHW4_NCHW: 25, + NCHW_NCHW4_WEIGHT: 26, + NCHW_NCHW64: 27, + NCHW64_NCHW: 28, + NCHW_NHWC: 29, + NHWC_NCHW: 30, + NHWCD4I_NHWC: 31 +}; + +$root.mgb.serialization.fbs.param.SeparableConvBorderMode = { + BORDER_REPLICATE: 0, + BORDER_REFLECT: 1, + BORDER_REFLECT_101: 2, + BORDER_WRAP: 3, + BORDER_CONSTANT: 4, + BORDER_TRANSPARENT: 5, + BORDER_ISOLATED: 6 +}; + +$root.mgb.serialization.fbs.param.SeparableConv3DBorderMode = { + BORDER_REPLICATE: 0, + BORDER_REFLECT: 1, + BORDER_REFLECT_101: 2, + BORDER_WRAP: 3, + BORDER_CONSTANT: 4, + BORDER_TRANSPARENT: 5, + BORDER_ISOLATED: 6 +}; + +$root.mgb.serialization.fbs.param.SpatialTfGridGeneratorMode = { + AFFINE: 0 +}; + +$root.mgb.serialization.fbs.param.SpatialTfSamplerMode = { + BILINEAR: 0 +}; + +$root.mgb.serialization.fbs.param.TopKMode = { + KTH_ONLY: 0, + VALUE_IDX_NOSORT: 1, + VALUE_IDX_SORTED: 2 +}; + +$root.mgb.serialization.fbs.param.WarpPerspectiveV1BorderMode = { + REPLICATE: 0, + REFLECT: 1, + REFLECT_101: 2, + WRAP: 3, + CONSTANT: 4, + TRANSPARENT: 5, + ISOLATED: 6 +}; + +$root.mgb.serialization.fbs.param.WarpPerspectiveV1InterpolationMode = { + NEAREST: 0, + LINEAR: 1, + AREA: 2, + CUBIC: 3, + LANCZOS4: 4 +}; + +$root.mgb.serialization.fbs.param.Empty = class Empty { + + static decode(/* reader, position */) { + const $ = new $root.mgb.serialization.fbs.param.Empty(); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Axis = class Axis { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Axis(); + $.axis = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ConvolutionV0 = class ConvolutionV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ConvolutionV0(); + $.mode = reader.uint32_(position, 4, 0); + $.pad_h = reader.uint32_(position, 6, 0); + $.pad_w = reader.uint32_(position, 8, 0); + $.stride_h = reader.uint32_(position, 10, 1); + $.stride_w = reader.uint32_(position, 12, 1); + $.dilate_h = reader.uint32_(position, 14, 1); + $.dilate_w = 
reader.uint32_(position, 16, 1); + $.data_type = reader.uint32_(position, 18, 0); + $.sparse = reader.uint32_(position, 20, 0); + $.format = reader.uint32_(position, 22, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ConvolutionV1 = class ConvolutionV1 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ConvolutionV1(); + $.mode = reader.uint32_(position, 4, 0); + $.pad_h = reader.uint32_(position, 6, 0); + $.pad_w = reader.uint32_(position, 8, 0); + $.stride_h = reader.uint32_(position, 10, 1); + $.stride_w = reader.uint32_(position, 12, 1); + $.dilate_h = reader.uint32_(position, 14, 1); + $.dilate_w = reader.uint32_(position, 16, 1); + $.sparse = reader.uint32_(position, 18, 0); + $.format = reader.uint32_(position, 20, 0); + $.compute_mode = reader.uint32_(position, 22, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Convolution = class Convolution { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Convolution(); + $.mode = reader.uint32_(position, 4, 0); + $.pad_h = reader.uint32_(position, 6, 0); + $.pad_w = reader.uint32_(position, 8, 0); + $.stride_h = reader.uint32_(position, 10, 1); + $.stride_w = reader.uint32_(position, 12, 1); + $.dilate_h = reader.uint32_(position, 14, 1); + $.dilate_w = reader.uint32_(position, 16, 1); + $.sparse = reader.uint32_(position, 18, 0); + $.format = reader.uint32_(position, 20, 0); + $.compute_mode = reader.uint32_(position, 22, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.MaskPropagate = class MaskPropagate { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.MaskPropagate(); + $.pad_h = reader.uint32_(position, 4, 0); + $.pad_w = reader.uint32_(position, 6, 0); + $.stride_h = reader.uint32_(position, 8, 1); + $.stride_w = reader.uint32_(position, 10, 1); + $.kernel_h = reader.uint32_(position, 12, 1); + $.kernel_w = reader.uint32_(position, 14, 1); + $.dilate_h = reader.uint32_(position, 16, 1); + $.dilate_w = reader.uint32_(position, 18, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ConvPooling = class ConvPooling { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ConvPooling(); + $.method = reader.uint32_(position, 4, 0); + $.convMode = reader.uint32_(position, 6, 0); + $.poolMode = reader.uint32_(position, 8, 0); + $.nonlineMode = reader.uint32_(position, 10, 0); + $.pool_shape_h = reader.uint32_(position, 12, 1); + $.pool_shape_w = reader.uint32_(position, 14, 1); + $.pool_stride_h = reader.uint32_(position, 16, 1); + $.pool_stride_w = reader.uint32_(position, 18, 1); + $.pool_pad_h = reader.uint32_(position, 20, 0); + $.pool_pad_w = reader.uint32_(position, 22, 0); + $.conv_stride_h = reader.uint32_(position, 24, 1); + $.conv_stride_w = reader.uint32_(position, 26, 1); + $.conv_pad_h = reader.uint32_(position, 28, 0); + $.conv_pad_w = reader.uint32_(position, 30, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ConvBiasV0 = class ConvBiasV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ConvBiasV0(); + $.nonlineMode = reader.uint32_(position, 4, 0); + $.mode = reader.uint32_(position, 6, 0); + $.pad_h = reader.uint32_(position, 8, 0); + $.pad_w = reader.uint32_(position, 10, 0); + $.stride_h = reader.uint32_(position, 12, 1); + $.stride_w = reader.uint32_(position, 14, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ConvBiasV1 = class ConvBiasV1 { + + static 
decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ConvBiasV1(); + $.nonlineMode = reader.uint32_(position, 4, 0); + $.mode = reader.uint32_(position, 6, 0); + $.data_type = reader.uint32_(position, 8, 0); + $.sparse = reader.uint32_(position, 10, 0); + $.format = reader.uint32_(position, 12, 0); + $.pad_h = reader.uint32_(position, 14, 0); + $.pad_w = reader.uint32_(position, 16, 0); + $.stride_h = reader.uint32_(position, 18, 1); + $.stride_w = reader.uint32_(position, 20, 1); + $.dilate_h = reader.uint32_(position, 22, 1); + $.dilate_w = reader.uint32_(position, 24, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ConvBiasV2 = class ConvBiasV2 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ConvBiasV2(); + $.nonlineMode = reader.uint32_(position, 4, 0); + $.mode = reader.uint32_(position, 6, 0); + $.sparse = reader.uint32_(position, 8, 0); + $.format = reader.uint32_(position, 10, 0); + $.pad_h = reader.uint32_(position, 12, 0); + $.pad_w = reader.uint32_(position, 14, 0); + $.stride_h = reader.uint32_(position, 16, 1); + $.stride_w = reader.uint32_(position, 18, 1); + $.dilate_h = reader.uint32_(position, 20, 1); + $.dilate_w = reader.uint32_(position, 22, 1); + $.compute_mode = reader.uint32_(position, 24, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ConvBiasV3 = class ConvBiasV3 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ConvBiasV3(); + $.nonlineMode = reader.uint32_(position, 4, 0); + $.mode = reader.uint32_(position, 6, 0); + $.sparse = reader.uint32_(position, 8, 0); + $.format = reader.uint32_(position, 10, 0); + $.pad_h = reader.uint32_(position, 12, 0); + $.pad_w = reader.uint32_(position, 14, 0); + $.stride_h = reader.uint32_(position, 16, 1); + $.stride_w = reader.uint32_(position, 18, 1); + $.dilate_h = reader.uint32_(position, 20, 1); + $.dilate_w = reader.uint32_(position, 22, 1); + $.output_block_size = reader.uint32_(position, 24, 0); + $.compute_mode = reader.uint32_(position, 26, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ConvBias = class ConvBias { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ConvBias(); + $.nonlineMode = reader.uint32_(position, 4, 0); + $.mode = reader.uint32_(position, 6, 0); + $.sparse = reader.uint32_(position, 8, 0); + $.format = reader.uint32_(position, 10, 0); + $.pad_h = reader.uint32_(position, 12, 0); + $.pad_w = reader.uint32_(position, 14, 0); + $.stride_h = reader.uint32_(position, 16, 1); + $.stride_w = reader.uint32_(position, 18, 1); + $.dilate_h = reader.uint32_(position, 20, 1); + $.dilate_w = reader.uint32_(position, 22, 1); + $.compute_mode = reader.uint32_(position, 24, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.SeparableConv = class SeparableConv { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.SeparableConv(); + $.mode = reader.uint32_(position, 4, 0); + $.borderMode = reader.uint32_(position, 6, 0); + $.is_symm_kernel = reader.bool_(position, 8, true); + $.pad_h = reader.uint32_(position, 10, 0); + $.pad_w = reader.uint32_(position, 12, 0); + $.stride_h = reader.uint32_(position, 14, 1); + $.stride_w = reader.uint32_(position, 16, 1); + $.ksize_h = reader.uint32_(position, 18, 3); + $.ksize_w = reader.uint32_(position, 20, 3); + $.anchor_h = reader.uint32_(position, 22, 1); + $.anchor_w = reader.uint32_(position, 24, 1); + return $; + } +}; + 
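+// Note: all param-table decoders in this file follow the same generated
+// FlatBuffers layout: successive fields are read at vtable offsets 4, 6,
+// 8, ..., and the final argument of each reader accessor (uint32_, bool_,
+// float32_, ...) is the schema default returned when a field is absent
+// from the serialized table.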
+$root.mgb.serialization.fbs.param.Images2Neibs = class Images2Neibs { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Images2Neibs(); + $.pad_h = reader.uint32_(position, 4, 0); + $.pad_w = reader.uint32_(position, 6, 0); + $.stride_h = reader.uint32_(position, 8, 1); + $.stride_w = reader.uint32_(position, 10, 1); + $.dilate_h = reader.uint32_(position, 12, 1); + $.dilate_w = reader.uint32_(position, 14, 1); + $.window_h = reader.uint32_(position, 16, 3); + $.window_w = reader.uint32_(position, 18, 3); + return $; + } +}; + +$root.mgb.serialization.fbs.param.SlidingWindowTranspose = class SlidingWindowTranspose { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.SlidingWindowTranspose(); + $.out_h = reader.uint32_(position, 4, 0); + $.out_w = reader.uint32_(position, 6, 0); + $.pad_h = reader.uint32_(position, 8, 0); + $.pad_w = reader.uint32_(position, 10, 0); + $.stride_h = reader.uint32_(position, 12, 1); + $.stride_w = reader.uint32_(position, 14, 1); + $.dilate_h = reader.uint32_(position, 16, 1); + $.dilate_w = reader.uint32_(position, 18, 1); + $.window_h = reader.uint32_(position, 20, 3); + $.window_w = reader.uint32_(position, 22, 3); + return $; + } +}; + +$root.mgb.serialization.fbs.param.PoolingV0 = class PoolingV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.PoolingV0(); + $.mode = reader.uint32_(position, 4, 0); + $.pad_h = reader.uint32_(position, 6, 0); + $.pad_w = reader.uint32_(position, 8, 0); + $.stride_h = reader.uint32_(position, 10, 2); + $.stride_w = reader.uint32_(position, 12, 2); + $.window_h = reader.uint32_(position, 14, 2); + $.window_w = reader.uint32_(position, 16, 2); + $.format = reader.uint32_(position, 18, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Pooling = class Pooling { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Pooling(); + $.mode = reader.uint32_(position, 4, 0); + $.pad_h = reader.uint32_(position, 6, 0); + $.pad_w = reader.uint32_(position, 8, 0); + $.stride_h = reader.uint32_(position, 10, 2); + $.stride_w = reader.uint32_(position, 12, 2); + $.window_h = reader.uint32_(position, 14, 2); + $.window_w = reader.uint32_(position, 16, 2); + $.format = reader.uint32_(position, 18, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Softmax = class Softmax { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Softmax(); + $.axis = reader.int32_(position, 4, -1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.AdaptivePoolingV0 = class AdaptivePoolingV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.AdaptivePoolingV0(); + $.mode = reader.uint32_(position, 4, 0); + $.format = reader.uint32_(position, 6, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.AdaptivePooling = class AdaptivePooling { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.AdaptivePooling(); + $.mode = reader.uint32_(position, 4, 0); + $.format = reader.uint32_(position, 6, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.LRN = class LRN { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.LRN(); + $.n = reader.uint32_(position, 4, 5); + $.k = reader.float32_(position, 6, 2); + $.alpha = reader.float32_(position, 8, 0.0001); + $.beta = reader.float32_(position, 10, 0.75); + return $; + } +}; + 
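+// BN mixes field widths: epsilon and avg_factor are read as float64 while
+// scale and bias stay float32, matching the types declared for the "BN"
+// entry in the operator metadata earlier in this diff.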
+$root.mgb.serialization.fbs.param.BN = class BN { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.BN(); + $.param_dim = reader.uint32_(position, 4, 0); + $.fwd_mode = reader.uint32_(position, 6, 0); + $.epsilon = reader.float64_(position, 8, 0.0001); + $.avg_factor = reader.float64_(position, 10, 1); + $.scale = reader.float32_(position, 12, 1); + $.bias = reader.float32_(position, 14, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ROIPooling = class ROIPooling { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ROIPooling(); + $.mode = reader.uint32_(position, 4, 0); + $.scale = reader.float32_(position, 6, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.WarpPerspectiveV1 = class WarpPerspectiveV1 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.WarpPerspectiveV1(); + $.imode = reader.uint32_(position, 4, 1); + $.bmode = reader.uint32_(position, 6, 0); + $.format = reader.uint32_(position, 8, 0); + $.border_val = reader.float32_(position, 10, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.WarpPerspective = class WarpPerspective { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.WarpPerspective(); + $.imode = reader.uint32_(position, 4, 1); + $.bmode = reader.uint32_(position, 6, 0); + $.format = reader.uint32_(position, 8, 0); + $.border_val = reader.float32_(position, 10, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.SpatialTfGridGenerator = class SpatialTfGridGenerator { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.SpatialTfGridGenerator(); + $.mode = reader.uint32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.SpatialTfSampler = class SpatialTfSampler { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.SpatialTfSampler(); + $.mode = reader.uint32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.AddUpdate = class AddUpdate { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.AddUpdate(); + $.alpha = reader.float32_(position, 4, 1); + $.beta = reader.float32_(position, 6, 1); + $.bias = reader.float32_(position, 8, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Elemwise = class Elemwise { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Elemwise(); + $.mode = reader.uint32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ElemwiseMultiType = class ElemwiseMultiType { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ElemwiseMultiType(); + $.mode = reader.uint32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.PowC = class PowC { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.PowC(); + $.exp = reader.float32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.DctChannelSelectV0 = class DctChannelSelectV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.DctChannelSelectV0(); + $.format = reader.uint32_(position, 4, 0); + $.fastImpl = reader.uint32_(position, 6, 0); + $.dct_block_size = reader.int32_(position, 8, 8); + return $; + } +}; + +$root.mgb.serialization.fbs.param.DctChannelSelect = class DctChannelSelect { + + static decode(reader, position) { + const $ = new 
$root.mgb.serialization.fbs.param.DctChannelSelect(); + $.format = reader.uint32_(position, 4, 0); + $.fastImpl = reader.uint32_(position, 6, 0); + $.dct_block_size = reader.int32_(position, 8, 8); + return $; + } +}; + +$root.mgb.serialization.fbs.param.MatrixMulV0 = class MatrixMulV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.MatrixMulV0(); + $.transposeA = reader.bool_(position, 4, false); + $.transposeB = reader.bool_(position, 6, false); + $.data_type = reader.uint32_(position, 8, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.MatrixMulV1 = class MatrixMulV1 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.MatrixMulV1(); + $.transposeA = reader.bool_(position, 4, false); + $.transposeB = reader.bool_(position, 6, false); + $.compute_mode = reader.uint32_(position, 8, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.MatrixMul = class MatrixMul { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.MatrixMul(); + $.transposeA = reader.bool_(position, 4, false); + $.transposeB = reader.bool_(position, 6, false); + $.compute_mode = reader.uint32_(position, 8, 0); + $.format = reader.uint32_(position, 10, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.SVD = class SVD { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.SVD(); + $.full_matrices = reader.bool_(position, 4, false); + $.compute_uv = reader.bool_(position, 6, true); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ReduceV0 = class ReduceV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ReduceV0(); + $.mode = reader.uint32_(position, 4, 0); + $.axis = reader.int32_(position, 6, -1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ReduceV1 = class ReduceV1 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ReduceV1(); + $.mode = reader.uint32_(position, 4, 0); + $.axis = reader.int32_(position, 6, -1); + $.data_type = reader.uint32_(position, 8, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Reduce = class Reduce { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Reduce(); + $.mode = reader.uint32_(position, 4, 0); + $.axis = reader.int32_(position, 6, 2147483647); + $.data_type = reader.uint32_(position, 8, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.CumsumV0 = class CumsumV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.CumsumV0(); + $.axis = reader.int32_(position, 4, -1); + $.exclusive = reader.bool_(position, 6, true); + $.reverse = reader.bool_(position, 8, false); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Cumsum = class Cumsum { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Cumsum(); + $.axis = reader.int32_(position, 4, 2147483647); + $.exclusive = reader.bool_(position, 6, true); + $.reverse = reader.bool_(position, 8, false); + return $; + } +}; + +$root.mgb.serialization.fbs.param.CondTake = class CondTake { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.CondTake(); + $.mode = reader.uint32_(position, 4, 0); + $.val = reader.float32_(position, 6, 0); + $.eps = reader.float32_(position, 8, 0.000001); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Argsort = class Argsort { + + static decode(reader, position) { 
+ const $ = new $root.mgb.serialization.fbs.param.Argsort(); + $.order = reader.uint32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.IndexingRemap = class IndexingRemap { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.IndexingRemap(); + $.is_non_overlapping = reader.bool_(position, 4, false); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Sleep = class Sleep { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Sleep(); + $.time = reader.float32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Linspace = class Linspace { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Linspace(); + $.endpoint = reader.bool_(position, 4, true); + return $; + } +}; + +$root.mgb.serialization.fbs.param.LinspaceFull = class LinspaceFull { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.LinspaceFull(); + $.start = reader.float64_(position, 4, 0); + $.stop = reader.float64_(position, 6, 1); + $.endpoint = reader.bool_(position, 8, true); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Eye = class Eye { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Eye(); + $.k = reader.int32_(position, 4, 0); + $.dtype = reader.int8_(position, 6, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Diag = class Diag { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Diag(); + $.k = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.UniformRNGV0 = class UniformRNGV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.UniformRNGV0(); + $.seed = reader.uint64_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.UniformRNG = class UniformRNG { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.UniformRNG(); + $.seed = reader.uint64_(position, 4, 0); + $.dtype = reader.int8_(position, 6, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.GaussianRNGV0 = class GaussianRNGV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.GaussianRNGV0(); + $.seed = reader.uint64_(position, 4, 0); + $.mean = reader.float32_(position, 6, 0); + $.std = reader.float32_(position, 8, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.GaussianRNG = class GaussianRNG { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.GaussianRNG(); + $.seed = reader.uint64_(position, 4, 0); + $.mean = reader.float32_(position, 6, 0); + $.std = reader.float32_(position, 8, 1); + $.dtype = reader.int8_(position, 10, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.GammaRNG = class GammaRNG { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.GammaRNG(); + $.seed = reader.uint64_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.BetaRNG = class BetaRNG { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.BetaRNG(); + $.seed = reader.uint64_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.PoissonRNG = class PoissonRNG { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.PoissonRNG(); + $.seed = reader.uint64_(position, 4, 0); + return $; + } +}; + 
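+// Note: every table decoder in this file follows the same FlatBuffers
+// convention: the second argument to reader.uint64_/float32_/bool_ is the
+// field's vtable offset (4, 6, 8, ... in declaration order) and the third is
+// the default returned when the writer omitted the field. A minimal usage
+// sketch (illustrative only, assuming `reader` is a flatbuffers.BinaryReader
+// opened on a buffer whose root table is a PoissonRNG param):
+//
+//   const param = $root.mgb.serialization.fbs.param.PoissonRNG.decode(reader, reader.root);
+//   console.log(param.seed); // 0, the default, when the field is absent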
+$root.mgb.serialization.fbs.param.MultinomialRNG = class MultinomialRNG { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.MultinomialRNG(); + $.seed = reader.uint64_(position, 4, 0); + $.num_samples = reader.uint64_(position, 6, 1); + $.replacement = reader.bool_(position, 8, false); + return $; + } +}; + +$root.mgb.serialization.fbs.param.PermutationRNG = class PermutationRNG { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.PermutationRNG(); + $.seed = reader.uint64_(position, 4, 0); + $.dtype = reader.int8_(position, 6, 4); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ShuffleRNG = class ShuffleRNG { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ShuffleRNG(); + $.seed = reader.uint64_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Flip = class Flip { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Flip(); + $.vertical = reader.bool_(position, 4, false); + $.horizontal = reader.bool_(position, 6, false); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Rotate = class Rotate { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Rotate(); + $.clockwise = reader.bool_(position, 4, true); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ROICopy = class ROICopy { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ROICopy(); + $.row_from = reader.uint32_(position, 4, 0); + $.row_to = reader.uint32_(position, 6, 0); + $.col_from = reader.uint32_(position, 8, 0); + $.col_to = reader.uint32_(position, 10, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.CvtColor = class CvtColor { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.CvtColor(); + $.mode = reader.uint32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.WarpAffineV0 = class WarpAffineV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.WarpAffineV0(); + $.imode = reader.uint32_(position, 4, 1); + $.border_mode = reader.uint32_(position, 6, 0); + $.border_val = reader.float32_(position, 8, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.WarpAffineV1 = class WarpAffineV1 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.WarpAffineV1(); + $.imode = reader.uint32_(position, 4, 1); + $.border_mode = reader.uint32_(position, 6, 0); + $.border_val = reader.float32_(position, 8, 0); + $.format = reader.uint32_(position, 10, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.WarpAffine = class WarpAffine { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.WarpAffine(); + $.imode = reader.uint32_(position, 4, 1); + $.border_mode = reader.uint32_(position, 6, 0); + $.border_val = reader.float32_(position, 8, 0); + $.format = reader.uint32_(position, 10, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.GaussianBlur = class GaussianBlur { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.GaussianBlur(); + $.border_mode = reader.uint32_(position, 4, 0); + $.kernel_height = reader.uint32_(position, 6, 0); + $.kernel_width = reader.uint32_(position, 8, 0); + $.sigma_x = reader.float32_(position, 10, 0); + $.sigma_y = reader.float32_(position, 12, 0); + return $; + } +}; + 
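+// The versioned tables (WarpAffineV0 -> WarpAffineV1 -> WarpAffine above,
+// ResizeV0 -> ResizeV1 -> Resize below) reflect FlatBuffers schema evolution:
+// each revision appends fields at fresh vtable offsets without reordering the
+// old ones, e.g. WarpAffineV1 adds `format` at offset 10 while keeping
+// imode/border_mode/border_val at 4/6/8. The superseded decoders stay in the
+// file so models serialized by older MegEngine releases can still be decoded.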
+$root.mgb.serialization.fbs.param.ResizeV0 = class ResizeV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ResizeV0(); + $.imode = reader.uint32_(position, 4, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ResizeV1 = class ResizeV1 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ResizeV1(); + $.imode = reader.uint32_(position, 4, 1); + $.format = reader.uint32_(position, 6, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Resize = class Resize { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Resize(); + $.imode = reader.uint32_(position, 4, 1); + $.format = reader.uint32_(position, 6, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.RemapV0 = class RemapV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.RemapV0(); + $.imode = reader.uint32_(position, 4, 1); + $.border_type = reader.uint32_(position, 6, 0); + $.format = reader.uint32_(position, 8, 1); + $.scalar = reader.float32_(position, 10, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Remap = class Remap { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Remap(); + $.imode = reader.uint32_(position, 4, 1); + $.border_type = reader.uint32_(position, 6, 0); + $.format = reader.uint32_(position, 8, 1); + $.scalar = reader.float32_(position, 10, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Convolution3D = class Convolution3D { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Convolution3D(); + $.mode = reader.uint32_(position, 4, 0); + $.pad_d = reader.uint32_(position, 6, 0); + $.pad_h = reader.uint32_(position, 8, 0); + $.pad_w = reader.uint32_(position, 10, 0); + $.stride_d = reader.uint32_(position, 12, 1); + $.stride_h = reader.uint32_(position, 14, 1); + $.stride_w = reader.uint32_(position, 16, 1); + $.dilate_d = reader.uint32_(position, 18, 1); + $.dilate_h = reader.uint32_(position, 20, 1); + $.dilate_w = reader.uint32_(position, 22, 1); + $.sparse = reader.uint32_(position, 24, 0); + $.data_type = reader.uint32_(position, 26, 0); + $.format = reader.uint32_(position, 28, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Conv3DBias = class Conv3DBias { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Conv3DBias(); + $.nonlineMode = reader.uint32_(position, 4, 0); + $.mode = reader.uint32_(position, 6, 0); + $.pad_d = reader.uint32_(position, 8, 0); + $.pad_h = reader.uint32_(position, 10, 0); + $.pad_w = reader.uint32_(position, 12, 0); + $.stride_d = reader.uint32_(position, 14, 1); + $.stride_h = reader.uint32_(position, 16, 1); + $.stride_w = reader.uint32_(position, 18, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.SeparableConv3D = class SeparableConv3D { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.SeparableConv3D(); + $.mode = reader.uint32_(position, 4, 0); + $.borderMode = reader.uint32_(position, 6, 0); + $.is_symm_kernel = reader.bool_(position, 8, true); + $.pad_d = reader.uint32_(position, 10, 0); + $.pad_h = reader.uint32_(position, 12, 0); + $.pad_w = reader.uint32_(position, 14, 0); + $.stride_d = reader.uint32_(position, 16, 0); + $.stride_h = reader.uint32_(position, 18, 1); + $.stride_w = reader.uint32_(position, 20, 1); + $.ksize_d = reader.uint32_(position, 22, 0); + $.ksize_h = 
reader.uint32_(position, 24, 3); + $.ksize_w = reader.uint32_(position, 26, 3); + $.anchor_d = reader.uint32_(position, 28, 0); + $.anchor_h = reader.uint32_(position, 30, 1); + $.anchor_w = reader.uint32_(position, 32, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.TopK = class TopK { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.TopK(); + $.mode = reader.uint32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.RelayoutFormatV0 = class RelayoutFormatV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.RelayoutFormatV0(); + $.mode = reader.uint32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.RelayoutFormat = class RelayoutFormat { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.RelayoutFormat(); + $.mode = reader.uint32_(position, 4, 0); + $.oc = reader.uint32_(position, 6, 0); + $.group = reader.uint32_(position, 8, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.SeparableFilterV0 = class SeparableFilterV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.SeparableFilterV0(); + $.format = reader.uint32_(position, 4, 0); + $.borderMode = reader.uint32_(position, 6, 0); + $.is_symm_kernel = reader.bool_(position, 8, true); + $.ksize_h = reader.uint32_(position, 10, 3); + $.ksize_w = reader.uint32_(position, 12, 3); + $.anchor_h = reader.uint32_(position, 14, 1); + $.anchor_w = reader.uint32_(position, 16, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.SeparableFilter = class SeparableFilter { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.SeparableFilter(); + $.format = reader.uint32_(position, 4, 0); + $.borderMode = reader.uint32_(position, 6, 0); + $.is_symm_kernel = reader.bool_(position, 8, true); + $.ksize_h = reader.uint32_(position, 10, 3); + $.ksize_w = reader.uint32_(position, 12, 3); + $.anchor_h = reader.uint32_(position, 14, 1); + $.anchor_w = reader.uint32_(position, 16, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.LocalShareV0 = class LocalShareV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.LocalShareV0(); + $.mode = reader.uint32_(position, 4, 0); + $.pad_h = reader.uint32_(position, 6, 0); + $.pad_w = reader.uint32_(position, 8, 0); + $.stride_h = reader.uint32_(position, 10, 1); + $.stride_w = reader.uint32_(position, 12, 1); + $.dilate_h = reader.uint32_(position, 14, 1); + $.dilate_w = reader.uint32_(position, 16, 1); + $.spatial_groups_h = reader.uint32_(position, 18, 1); + $.spatial_groups_w = reader.uint32_(position, 20, 1); + $.sparse = reader.uint32_(position, 22, 0); + $.format = reader.uint32_(position, 24, 0); + $.computeMode = reader.uint32_(position, 26, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.LocalShare = class LocalShare { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.LocalShare(); + $.mode = reader.uint32_(position, 4, 0); + $.pad_h = reader.uint32_(position, 6, 0); + $.pad_w = reader.uint32_(position, 8, 0); + $.stride_h = reader.uint32_(position, 10, 1); + $.stride_w = reader.uint32_(position, 12, 1); + $.dilate_h = reader.uint32_(position, 14, 1); + $.dilate_w = reader.uint32_(position, 16, 1); + $.spatial_groups_h = reader.uint32_(position, 18, 1); + $.spatial_groups_w = reader.uint32_(position, 20, 1); + $.sparse = 
reader.uint32_(position, 22, 0); + $.format = reader.uint32_(position, 24, 0); + $.computeMode = reader.uint32_(position, 26, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ROIAlignV0 = class ROIAlignV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ROIAlignV0(); + $.mode = reader.uint32_(position, 4, 0); + $.format = reader.uint32_(position, 6, 0); + $.spatial_scale = reader.float32_(position, 8, 1); + $.offset = reader.float32_(position, 10, 0); + $.pooled_height = reader.uint32_(position, 12, 1); + $.pooled_width = reader.uint32_(position, 14, 1); + $.sample_height = reader.uint32_(position, 16, 2); + $.sample_width = reader.uint32_(position, 18, 2); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ROIAlign = class ROIAlign { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ROIAlign(); + $.mode = reader.uint32_(position, 4, 0); + $.format = reader.uint32_(position, 6, 0); + $.spatial_scale = reader.float32_(position, 8, 1); + $.offset = reader.float32_(position, 10, 0); + $.pooled_height = reader.uint32_(position, 12, 1); + $.pooled_width = reader.uint32_(position, 14, 1); + $.sample_height = reader.uint32_(position, 16, 2); + $.sample_width = reader.uint32_(position, 18, 2); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Correlation = class Correlation { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Correlation(); + $.format = reader.uint32_(position, 4, 0); + $.kernel_size = reader.uint32_(position, 6, 1); + $.max_displacement = reader.uint32_(position, 8, 1); + $.stride1 = reader.uint32_(position, 10, 1); + $.stride2 = reader.uint32_(position, 12, 1); + $.pad_size = reader.uint32_(position, 14, 0); + $.is_multiply = reader.bool_(position, 16, true); + return $; + } +}; + +$root.mgb.serialization.fbs.param.DeformablePSROIPooling = class DeformablePSROIPooling { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.DeformablePSROIPooling(); + $.no_trans = reader.bool_(position, 4, true); + $.spatial_scale = reader.float32_(position, 6, 1); + $.trans_std = reader.float32_(position, 8, 1); + $.pooled_h = reader.uint32_(position, 10, 1); + $.pooled_w = reader.uint32_(position, 12, 1); + $.part_size = reader.uint32_(position, 14, 1); + $.sample_per_part = reader.uint32_(position, 16, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.BatchConvBiasV0 = class BatchConvBiasV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.BatchConvBiasV0(); + $.nonlineMode = reader.uint32_(position, 4, 0); + $.mode = reader.uint32_(position, 6, 0); + $.pad_h = reader.uint32_(position, 8, 0); + $.pad_w = reader.uint32_(position, 10, 0); + $.stride_h = reader.uint32_(position, 12, 1); + $.stride_w = reader.uint32_(position, 14, 1); + $.dilate_h = reader.uint32_(position, 16, 1); + $.dilate_w = reader.uint32_(position, 18, 1); + $.sparse = reader.uint32_(position, 20, 0); + $.format = reader.uint32_(position, 22, 0); + $.compute_mode = reader.uint32_(position, 24, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.BatchConvBias = class BatchConvBias { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.BatchConvBias(); + $.nonlineMode = reader.uint32_(position, 4, 0); + $.mode = reader.uint32_(position, 6, 0); + $.pad_h = reader.uint32_(position, 8, 0); + $.pad_w = reader.uint32_(position, 10, 0); + $.stride_h = 
reader.uint32_(position, 12, 1); + $.stride_w = reader.uint32_(position, 14, 1); + $.dilate_h = reader.uint32_(position, 16, 1); + $.dilate_w = reader.uint32_(position, 18, 1); + $.sparse = reader.uint32_(position, 20, 0); + $.format = reader.uint32_(position, 22, 0); + $.compute_mode = reader.uint32_(position, 24, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.FakeQuant = class FakeQuant { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.FakeQuant(); + $.qmin = reader.int32_(position, 4, -2147483648); + $.qmax = reader.int32_(position, 6, 2147483647); + return $; + } +}; + +$root.mgb.serialization.fbs.param.TQT = class TQT { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.TQT(); + $.qmin = reader.int32_(position, 4, -2147483648); + $.qmax = reader.int32_(position, 6, 2147483647); + return $; + } +}; + +$root.mgb.serialization.fbs.param.LSQ = class LSQ { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.LSQ(); + $.qmin = reader.int32_(position, 4, -2147483648); + $.qmax = reader.int32_(position, 6, 2147483647); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Fill = class Fill { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Fill(); + $.value = reader.float32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.CheckNonFinite = class CheckNonFinite { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.CheckNonFinite(); + $.scale = reader.float32_(position, 4, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Padding = class Padding { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Padding(); + $.front_offset_dim0 = reader.uint32_(position, 4, 0); + $.front_offset_dim1 = reader.uint32_(position, 6, 0); + $.front_offset_dim2 = reader.uint32_(position, 8, 0); + $.front_offset_dim3 = reader.uint32_(position, 10, 0); + $.front_offset_dim4 = reader.uint32_(position, 12, 0); + $.front_offset_dim5 = reader.uint32_(position, 14, 0); + $.front_offset_dim6 = reader.uint32_(position, 16, 0); + $.back_offset_dim0 = reader.uint32_(position, 18, 0); + $.back_offset_dim1 = reader.uint32_(position, 20, 0); + $.back_offset_dim2 = reader.uint32_(position, 22, 0); + $.back_offset_dim3 = reader.uint32_(position, 24, 0); + $.back_offset_dim4 = reader.uint32_(position, 26, 0); + $.back_offset_dim5 = reader.uint32_(position, 28, 0); + $.back_offset_dim6 = reader.uint32_(position, 30, 0); + $.padding_val = reader.float32_(position, 32, 0); + $.padding_mode = reader.uint32_(position, 34, 2); + return $; + } +}; + +$root.mgb.serialization.fbs.param.LayerNorm = class LayerNorm { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.LayerNorm(); + $.affine = reader.bool_(position, 4, true); + $.eps = reader.float32_(position, 6, 0.00001); + $.normalized_dim = reader.uint64_(position, 8, 1); + $.normalized_size = reader.uint64_(position, 10, 1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.GroupNorm = class GroupNorm { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.GroupNorm(); + $.affine = reader.bool_(position, 4, true); + $.eps = reader.float32_(position, 6, 0.00001); + $.group = reader.uint32_(position, 8, 1); + $.format = reader.uint32_(position, 10, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Dropout = class Dropout { + + 
static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Dropout(); + $.drop_prob = reader.float32_(position, 4, 0); + $.seed = reader.uint64_(position, 6, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.RNNCell = class RNNCell { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.RNNCell(); + $.nonlineMode = reader.uint32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.RNN = class RNN { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.RNN(); + $.num_layers = reader.uint32_(position, 4, 1); + $.bidirectional = reader.bool_(position, 6, false); + $.bias = reader.bool_(position, 8, true); + $.hidden_size = reader.uint32_(position, 10, 128); + $.dropout = reader.float32_(position, 12, 0); + $.nonlineMode = reader.uint32_(position, 14, 0); + $.fwd_mode = reader.uint32_(position, 16, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.LSTM = class LSTM { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.LSTM(); + $.num_layers = reader.uint32_(position, 4, 1); + $.bidirectional = reader.bool_(position, 6, false); + $.bias = reader.bool_(position, 8, true); + $.hidden_size = reader.uint32_(position, 10, 128); + $.proj_size = reader.uint32_(position, 12, 0); + $.dropout = reader.float32_(position, 14, 0); + $.fwd_mode = reader.uint32_(position, 16, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.CollectiveCommMode = { + REDUCE_SUM: 0, + BROADCAST: 1, + ALL_GATHER: 2, + REDUCE_SCATTER_SUM: 3, + ALL_REDUCE_SUM: 4, + ALL_REDUCE_MAX: 5, + ALL_REDUCE_MIN: 6, + ALL_REDUCE_PROD: 7, + GATHER: 8, + SCATTER: 9, + ALL_TO_ALL: 10 +}; + +$root.mgb.serialization.fbs.param.CondExecMarkGradMode = { + SUM: 0, + SUM_COND_OUT: 1 +}; + +$root.mgb.serialization.fbs.param.CondExecMarkStaticInfer = { + SHAPE_VALUE: 0, + SHAPE_ONLY: 1, + NONE: 2 +}; + +$root.mgb.serialization.fbs.param.CondExecMergeMode = { + EXACT_ONE: 0, + EXACT_ONE_SAME_SHAPE: 1, + SUM: 2, + SUM_COND_OUT: 3 +}; + +$root.mgb.serialization.fbs.param.CondExecPredMode = { + CASE: 0, + CASE_FALLBACK: 1, + PIECEWISE: 2 +}; + +$root.mgb.serialization.fbs.param.CondExecPredLogicalMode = { + OR: 0, + AND: 1, + XOR: 2, + NOR: 3, + NAND: 4, + XNOR: 5 +}; + +$root.mgb.serialization.fbs.param.ExecutionPolicyStrategy = { + HEURISTIC: 0, + PROFILE: 1, + REPRODUCIBLE: 2, + OPTIMIZED: 3 +}; + +$root.mgb.serialization.fbs.param.ExecutionPolicyV0Strategy = { + HEURISTIC: 0, + HEURISTIC_REPRODUCIBLE: 1, + PROFILE: 2, + PROFILE_REPRODUCIBLE: 3, + PROFILE_HEURISTIC: 4 +}; + +$root.mgb.serialization.fbs.param.DType = class DType { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.DType(); + $.dtype = reader.int8_(position, 4, 8); + return $; + } +}; + +$root.mgb.serialization.fbs.param.PersistentOutputStorage = class PersistentOutputStorage { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.PersistentOutputStorage(); + $.share_key = reader.int32_(position, 4, -1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.OptionalAxis = class OptionalAxis { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.OptionalAxis(); + $.axis = reader.int32_(position, 4, -1); + return $; + } +}; + +$root.mgb.serialization.fbs.param.OptionalAxisV1 = class OptionalAxisV1 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.OptionalAxisV1(); + 
$.axis = reader.int32_(position, 4, 7); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ExecutionPolicyV0 = class ExecutionPolicyV0 { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ExecutionPolicyV0(); + $.strategy = reader.uint32_(position, 4, 0); + $.workspace_limit = reader.uint64_(position, 6, 18446744073709552000); + return $; + } +}; + +$root.mgb.serialization.fbs.param.ExecutionPolicy = class ExecutionPolicy { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.ExecutionPolicy(); + $.strategy = reader.uint32_(position, 4, 1); + $.workspace_limit = reader.uint64_(position, 6, 18446744073709552000); + return $; + } +}; + +$root.mgb.serialization.fbs.param.AssertEqual = class AssertEqual { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.AssertEqual(); + $.maxerr = reader.float32_(position, 4, 0.0001); + $.verbose = reader.bool_(position, 6, false); + return $; + } +}; + +$root.mgb.serialization.fbs.param.FpgaConv = class FpgaConv { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.FpgaConv(); + $.need_output_quantize = reader.bool_(position, 4, false); + $.need_output_threshold = reader.bool_(position, 6, false); + $.stride = reader.int32_(position, 8, 1); + $.input_bit_width = reader.int32_(position, 10, 2); + $.output_bit_width = reader.int32_(position, 12, 2); + $.weight_bit_width = reader.int32_(position, 14, 2); + $.thres0 = reader.int32_(position, 16, 0); + $.thres1 = reader.int32_(position, 18, 1); + $.unpool_size = reader.uint32_(position, 20, 4); + $.direct_size = reader.uint32_(position, 22, 4); + return $; + } +}; + +$root.mgb.serialization.fbs.param.CollectiveComm = class CollectiveComm { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.CollectiveComm(); + $.mode = reader.uint32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.FakeSerializedDType = class FakeSerializedDType { + + static decode(/* reader, position */) { + const $ = new $root.mgb.serialization.fbs.param.FakeSerializedDType(); + return $; + } +}; + +$root.mgb.serialization.fbs.param.CondExecPred = class CondExecPred { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.CondExecPred(); + $.mode = reader.uint32_(position, 4, 0); + $.eps = reader.float32_(position, 6, 0.0001); + return $; + } +}; + +$root.mgb.serialization.fbs.param.CondExecPredLogical = class CondExecPredLogical { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.CondExecPredLogical(); + $.mode = reader.uint32_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.CondExecMark = class CondExecMark { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.CondExecMark(); + $.grad_mode = reader.uint32_(position, 4, 0); + $.static_infer = reader.uint32_(position, 6, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.CondExecMerge = class CondExecMerge { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.CondExecMerge(); + $.nr_output = reader.uint32_(position, 4, 1); + $.mode = reader.uint32_(position, 6, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.NvOf = class NvOf { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.NvOf(); + $.precision = reader.uint32_(position, 4, 1); + return $; + } +}; + 
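+// The decoders below read FlatBuffers structs rather than tables: a struct has
+// a fixed inline layout, so fields are fetched at absolute byte offsets
+// (reader.int8(position + 0)) instead of through the vtable with a default
+// (reader.int8_(position, offset, default)). AxisDesc, for example, packs an
+// int8 `method` at +0 and an int32 `axis` at +4, the gap being alignment
+// padding; reader.structArray (used by AxisAddRemove and IndexDescMaskDump
+// below) iterates such fixed-size records in place.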
+$root.mgb.serialization.fbs.param.PersistentDTypeScalar = class PersistentDTypeScalar { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.PersistentDTypeScalar(); + $.dtype = reader.int8(position + 0); + $.storage = undefined; // not implemented + return $; + } +}; + +$root.mgb.serialization.fbs.param.MGBAddUpdate = class MGBAddUpdate { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.MGBAddUpdate(); + $.alpha = reader.struct(position, 4, $root.mgb.serialization.fbs.param.PersistentDTypeScalar.decode); + $.beta = reader.struct(position, 6, $root.mgb.serialization.fbs.param.PersistentDTypeScalar.decode); + $.bias = reader.struct(position, 8, $root.mgb.serialization.fbs.param.PersistentDTypeScalar.decode); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Host2DeviceCopy = class Host2DeviceCopy { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Host2DeviceCopy(); + $.enable_value_infer = reader.bool_(position, 4, true); + $.dump_default_value = reader.bool_(position, 6, false); + $.allow_cpu_mem_fwd = reader.bool_(position, 8, true); + return $; + } +}; + +$root.mgb.serialization.fbs.param.Dimshuffle = class Dimshuffle { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.Dimshuffle(); + $.pattern = reader.typedArray(position, 4, Int32Array); + $.ndim = reader.uint32_(position, 6, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.AxisDescMethod = { + ADD_1: 0, + REMOVE: 1 +}; + +$root.mgb.serialization.fbs.param.AxisDesc = class AxisDesc { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.AxisDesc(); + $.method = reader.int8(position + 0); + $.axis = reader.int32(position + 4); + return $; + } +}; + +$root.mgb.serialization.fbs.param.AxisAddRemove = class AxisAddRemove { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.AxisAddRemove(); + $.desc = reader.structArray(position, 4, $root.mgb.serialization.fbs.param.AxisDesc.decode); + return $; + } +}; + +$root.mgb.serialization.fbs.param.MGBSleep = class MGBSleep { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.MGBSleep(); + $.device = reader.bool_(position, 4, true); + $.host = reader.bool_(position, 6, false); + $.seconds = reader.float64_(position, 8, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.param.IndexDescMaskItem = class IndexDescMaskItem { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.IndexDescMaskItem(); + $.axis = reader.int8(position + 0); + $.begin = reader.bool(position + 1); + $.end = reader.bool(position + 2); + $.step = reader.bool(position + 3); + $.idx = reader.bool(position + 4); + return $; + } +}; + +$root.mgb.serialization.fbs.param.IndexDescMaskDump = class IndexDescMaskDump { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.IndexDescMaskDump(); + $.items = reader.structArray(position, 4, $root.mgb.serialization.fbs.param.IndexDescMaskItem.decode); + return $; + } +}; + +$root.mgb.serialization.fbs.param.NMSKeep = class NMSKeep { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.param.NMSKeep(); + $.iou_thresh = reader.float32_(position, 4, 0); + $.max_output = reader.uint32_(position, 6, 0); + return $; + } +}; + +$root.mgb = $root.mgb || {}; + +$root.mgb.serialization = $root.mgb.serialization || {}; + 
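+// From here on the file declares MegEngine's second-generation ("v2") model
+// schema. The `|| {}` guards re-create the namespace objects idempotently, so
+// this section can follow the fbs.param definitions above without clobbering
+// anything already attached to them.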
+$root.mgb.serialization.fbs = $root.mgb.serialization.fbs || {}; + +$root.mgb.serialization.fbs.v2 = $root.mgb.serialization.fbs.v2 || {}; + +$root.mgb.serialization.fbs.v2.CompNode = class CompNode { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.v2.CompNode(); + $.logical_locator = reader.string_(position, 4, null); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.DefaultTensorFormat = class DefaultTensorFormat { + + static decode(/* reader, position */) { + const $ = new $root.mgb.serialization.fbs.v2.DefaultTensorFormat(); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.Image2DPackedTensorFormat = class Image2DPackedTensorFormat { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.v2.Image2DPackedTensorFormat(); + $.align_axis = reader.uint8_(position, 4, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.LowbitsAlignedTensorFormat = class LowbitsAlignedTensorFormat { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.v2.LowbitsAlignedTensorFormat(); + $.size_nbits = reader.uint8_(position, 4, 0); + $.align_size_in_bits = reader.uint8_(position, 6, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.TensorFormat = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.mgb.serialization.fbs.v2.DefaultTensorFormat.decode(reader, position); + case 2: return $root.mgb.serialization.fbs.v2.Image2DPackedTensorFormat.decode(reader, position); + case 3: return $root.mgb.serialization.fbs.v2.LowbitsAlignedTensorFormat.decode(reader, position); + default: return undefined; + } + } +}; + +$root.mgb.serialization.fbs.v2.Blob = class Blob { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.v2.Blob(); + $.data = reader.typedArray(position, 4, Uint8Array); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.Tensor = class Tensor { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.v2.Tensor(); + $.name = reader.string_(position, 4, null); + $.shape = reader.typedArray(position, 6, Uint32Array); + $.comp_node = reader.table(position, 8, $root.mgb.serialization.fbs.v2.CompNode.decode); + $.dtype = reader.table(position, 10, $root.mgb.serialization.fbs.DType.decode); + $.format = reader.union(position, 12, $root.mgb.serialization.fbs.v2.TensorFormat.decode); + $.data = reader.typedArray(position, 16, Uint8Array); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.Reserved0 = class Reserved0 { + + static decode(/* reader, position */) { + const $ = new $root.mgb.serialization.fbs.v2.Reserved0(); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.DeprecatedParam = class DeprecatedParam { + + static decode(/* reader, position */) { + const $ = new $root.mgb.serialization.fbs.v2.DeprecatedParam(); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.OperatorParam = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.mgb.serialization.fbs.param.Empty.decode(reader, position); + case 2: return $root.mgb.serialization.fbs.param.Axis.decode(reader, position); + case 3: return $root.mgb.serialization.fbs.param.Convolution.decode(reader, position); + case 4: return $root.mgb.serialization.fbs.param.MaskPropagate.decode(reader, position); + case 5: return $root.mgb.serialization.fbs.param.ConvPooling.decode(reader, position); + case 6: return $root.mgb.serialization.fbs.param.ConvBias.decode(reader, position); + case 7: return 
$root.mgb.serialization.fbs.param.SeparableConv.decode(reader, position); + case 8: return $root.mgb.serialization.fbs.param.Images2Neibs.decode(reader, position); + case 9: return $root.mgb.serialization.fbs.param.Pooling.decode(reader, position); + case 10: return $root.mgb.serialization.fbs.param.LRN.decode(reader, position); + case 11: return $root.mgb.serialization.fbs.param.BN.decode(reader, position); + case 12: return $root.mgb.serialization.fbs.param.ROIPooling.decode(reader, position); + case 13: return $root.mgb.serialization.fbs.param.WarpPerspective.decode(reader, position); + case 14: return $root.mgb.serialization.fbs.param.SpatialTfGridGenerator.decode(reader, position); + case 15: return $root.mgb.serialization.fbs.param.SpatialTfSampler.decode(reader, position); + case 16: return $root.mgb.serialization.fbs.param.MGBAddUpdate.decode(reader, position); + case 17: return $root.mgb.serialization.fbs.param.Elemwise.decode(reader, position); + case 18: return $root.mgb.serialization.fbs.param.ElemwiseMultiType.decode(reader, position); + case 19: return $root.mgb.serialization.fbs.param.PowC.decode(reader, position); + case 20: return $root.mgb.serialization.fbs.param.MatrixMul.decode(reader, position); + case 21: return $root.mgb.serialization.fbs.v2.DeprecatedParam.decode(reader, position); + case 22: return $root.mgb.serialization.fbs.param.SVD.decode(reader, position); + case 23: return $root.mgb.serialization.fbs.param.Reduce.decode(reader, position); + case 24: return $root.mgb.serialization.fbs.param.Cumsum.decode(reader, position); + case 25: return $root.mgb.serialization.fbs.param.CondTake.decode(reader, position); + case 26: return $root.mgb.serialization.fbs.param.Argsort.decode(reader, position); + case 27: return $root.mgb.serialization.fbs.param.IndexingRemap.decode(reader, position); + case 28: return $root.mgb.serialization.fbs.param.MGBSleep.decode(reader, position); + case 29: return $root.mgb.serialization.fbs.param.Linspace.decode(reader, position); + case 30: return $root.mgb.serialization.fbs.param.LinspaceFull.decode(reader, position); + case 31: return $root.mgb.serialization.fbs.param.Eye.decode(reader, position); + case 32: return $root.mgb.serialization.fbs.param.UniformRNG.decode(reader, position); + case 33: return $root.mgb.serialization.fbs.param.GaussianRNG.decode(reader, position); + case 34: return $root.mgb.serialization.fbs.param.Flip.decode(reader, position); + case 35: return $root.mgb.serialization.fbs.param.Rotate.decode(reader, position); + case 36: return $root.mgb.serialization.fbs.param.ROICopy.decode(reader, position); + case 37: return $root.mgb.serialization.fbs.param.CvtColor.decode(reader, position); + case 38: return $root.mgb.serialization.fbs.param.WarpAffine.decode(reader, position); + case 39: return $root.mgb.serialization.fbs.param.GaussianBlur.decode(reader, position); + case 40: return $root.mgb.serialization.fbs.param.Resize.decode(reader, position); + case 41: return $root.mgb.serialization.fbs.param.Convolution3D.decode(reader, position); + case 42: return $root.mgb.serialization.fbs.param.Conv3DBias.decode(reader, position); + case 43: return $root.mgb.serialization.fbs.param.SeparableConv3D.decode(reader, position); + case 44: return $root.mgb.serialization.fbs.param.TopK.decode(reader, position); + case 45: return $root.mgb.serialization.fbs.param.RelayoutFormat.decode(reader, position); + case 46: return $root.mgb.serialization.fbs.param.SeparableFilter.decode(reader, position); + case 47: return 
$root.mgb.serialization.fbs.param.LocalShare.decode(reader, position); + case 48: return $root.mgb.serialization.fbs.param.ROIAlign.decode(reader, position); + case 49: return $root.mgb.serialization.fbs.param.DeformablePSROIPooling.decode(reader, position); + case 50: return $root.mgb.serialization.fbs.param.BatchConvBias.decode(reader, position); + case 51: return $root.mgb.serialization.fbs.param.DType.decode(reader, position); + case 52: return $root.mgb.serialization.fbs.param.PersistentOutputStorage.decode(reader, position); + case 53: return $root.mgb.serialization.fbs.param.OptionalAxis.decode(reader, position); + case 54: return $root.mgb.serialization.fbs.param.OptionalAxisV1.decode(reader, position); + case 55: return $root.mgb.serialization.fbs.param.ExecutionPolicy.decode(reader, position); + case 56: return $root.mgb.serialization.fbs.param.AssertEqual.decode(reader, position); + case 57: return $root.mgb.serialization.fbs.param.FpgaConv.decode(reader, position); + case 58: return $root.mgb.serialization.fbs.param.CollectiveComm.decode(reader, position); + case 59: return $root.mgb.serialization.fbs.param.CondExecPred.decode(reader, position); + case 60: return $root.mgb.serialization.fbs.param.CondExecPredLogical.decode(reader, position); + case 61: return $root.mgb.serialization.fbs.param.CondExecMark.decode(reader, position); + case 62: return $root.mgb.serialization.fbs.param.CondExecMerge.decode(reader, position); + case 63: return $root.mgb.serialization.fbs.param.Host2DeviceCopy.decode(reader, position); + case 64: return $root.mgb.serialization.fbs.param.Dimshuffle.decode(reader, position); + case 65: return $root.mgb.serialization.fbs.param.AxisAddRemove.decode(reader, position); + case 66: return $root.mgb.serialization.fbs.param.IndexDescMaskDump.decode(reader, position); + case 67: return $root.mgb.serialization.fbs.DType.decode(reader, position); + case 68: return $root.mgb.serialization.fbs.param.Remap.decode(reader, position); + case 69: return $root.mgb.serialization.fbs.param.NMSKeep.decode(reader, position); + case 70: return $root.mgb.serialization.fbs.param.AdaptivePooling.decode(reader, position); + case 71: return $root.mgb.serialization.fbs.param.NvOf.decode(reader, position); + case 72: return $root.mgb.serialization.fbs.param.DctChannelSelect.decode(reader, position); + case 73: return $root.mgb.serialization.fbs.param.FakeQuant.decode(reader, position); + case 74: return $root.mgb.serialization.fbs.param.TQT.decode(reader, position); + case 75: return $root.mgb.serialization.fbs.param.Correlation.decode(reader, position); + case 76: return $root.mgb.serialization.fbs.param.LSQ.decode(reader, position); + case 77: return $root.mgb.serialization.fbs.param.GammaRNG.decode(reader, position); + case 78: return $root.mgb.serialization.fbs.param.PoissonRNG.decode(reader, position); + case 79: return $root.mgb.serialization.fbs.param.PermutationRNG.decode(reader, position); + case 80: return $root.mgb.serialization.fbs.param.BetaRNG.decode(reader, position); + case 81: return $root.mgb.serialization.fbs.param.SlidingWindowTranspose.decode(reader, position); + case 82: return $root.mgb.serialization.fbs.param.Padding.decode(reader, position); + case 83: return $root.mgb.serialization.fbs.param.ShuffleRNG.decode(reader, position); + case 84: return $root.mgb.serialization.fbs.param.CheckNonFinite.decode(reader, position); + case 85: return $root.mgb.serialization.fbs.param.LayerNorm.decode(reader, position); + case 86: return 
$root.mgb.serialization.fbs.param.Dropout.decode(reader, position); + case 87: return $root.mgb.serialization.fbs.param.RNNCell.decode(reader, position); + case 88: return $root.mgb.serialization.fbs.param.RNN.decode(reader, position); + case 89: return $root.mgb.serialization.fbs.param.LSTM.decode(reader, position); + case 90: return $root.mgb.serialization.fbs.param.Softmax.decode(reader, position); + case 91: return $root.mgb.serialization.fbs.param.Diag.decode(reader, position); + case 92: return $root.mgb.serialization.fbs.param.GroupNorm.decode(reader, position); + case 93: return $root.mgb.serialization.fbs.param.Fill.decode(reader, position); + default: return undefined; + } + } +}; + +$root.mgb.serialization.fbs.v2.Operator = class Operator { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.v2.Operator(); + $.type = reader.string_(position, 4, null); + $.type_id = reader.uint64_(position, 6, 0); + $.name = reader.string_(position, 8, null); + $.param = reader.union(position, 10, $root.mgb.serialization.fbs.v2.OperatorParam.decode); + $.additional_params = reader.unionArray(position, 14, $root.mgb.serialization.fbs.v2.OperatorParam.decode); + $.inputs = reader.typedArray(position, 18, Uint32Array); + $.outputs = reader.typedArray(position, 20, Uint32Array); + $.comp_node = reader.tableArray(position, 22, $root.mgb.serialization.fbs.v2.CompNode.decode); + $.output_dtype = reader.table(position, 24, $root.mgb.serialization.fbs.DType.decode); + $.tensors = reader.tableArray(position, 26, $root.mgb.serialization.fbs.v2.Tensor.decode); + $.opr_version = reader.uint32_(position, 28, 0); + $.priority = reader.int32_(position, 30, 0); + $.custom_data = reader.tableArray(position, 32, $root.mgb.serialization.fbs.v2.Blob.decode); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.Metadata = class Metadata { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.v2.Metadata(); + $.is_valid = reader.bool_(position, 4, false); + $.graph_modified = reader.bool_(position, 6, false); + $.optimize_options = reader.uint64_(position, 8, 0); + $.user_info = reader.string_(position, 10, null); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.MiddleTensor = class MiddleTensor { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.v2.MiddleTensor(); + $.name = reader.string_(position, 4, null); + $.shape = reader.typedArray(position, 6, Uint32Array); + $.comp_node = reader.table(position, 8, $root.mgb.serialization.fbs.v2.CompNode.decode); + $.dtype = reader.table(position, 10, $root.mgb.serialization.fbs.DType.decode); + $.format = reader.union(position, 12, $root.mgb.serialization.fbs.v2.TensorFormat.decode); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.OutputVar = class OutputVar { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.v2.OutputVar(); + $.compact_id = reader.uint32_(position, 4, 0); + $.original_id = reader.uint32_(position, 6, 0); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.OutputAlias = class OutputAlias { + + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.v2.OutputAlias(); + $.id = reader.uint32_(position, 4, 0); + $.name = reader.string_(position, 6, null); + return $; + } +}; + +$root.mgb.serialization.fbs.v2.Model = class Model { + + static identifier(reader) { + return reader.identifier === 'mge2'; + } + + static create(reader) { + return $root.mgb.serialization.fbs.v2.Model.decode(reader, reader.root); + } 
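+    // `identifier` and `create` are the schema's entry points: a caller can
+    // verify the 4-character file identifier ('mge2') before decoding the root
+    // Model table (megengine.js below calls `create` after its own header
+    // checks). A minimal sketch, with names taken from this file:
+    //
+    //   const reader = flatbuffers.BinaryReader.open(stream);
+    //   if ($root.mgb.serialization.fbs.v2.Model.identifier(reader)) {
+    //       const model = $root.mgb.serialization.fbs.v2.Model.create(reader);
+    //   }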
+ + static decode(reader, position) { + const $ = new $root.mgb.serialization.fbs.v2.Model(); + $.mge_version = reader.uint32_(position, 4, 0); + $.model_version = reader.uint32_(position, 6, 0); + $.oprs = reader.tableArray(position, 8, $root.mgb.serialization.fbs.v2.Operator.decode); + $.middle_tensors = reader.tableArray(position, 10, $root.mgb.serialization.fbs.v2.MiddleTensor.decode); + $.output_vars_idx = reader.tableArray(position, 12, $root.mgb.serialization.fbs.v2.OutputVar.decode); + $.output_alias = reader.tableArray(position, 14, $root.mgb.serialization.fbs.v2.OutputAlias.decode); + $.nr_shared_tensor = reader.uint32_(position, 16, 0); + $.metadata = reader.table(position, 18, $root.mgb.serialization.fbs.v2.Metadata.decode); + return $; + } +}; diff --git a/megengine.js b/megengine.js new file mode 100644 index 00000000000..f3f53b5c46a --- /dev/null +++ b/megengine.js @@ -0,0 +1,633 @@ + +// Experimental + +import * as flatbuffers from './flatbuffers.js'; + +const megengine = {}; + +megengine.ModelFactory = class { + + match(context) { + const stream = context.stream; + if (stream && stream.length >= 12) { + let buffer = stream.peek(12); + const tag = String.fromCharCode.apply(null, buffer); + const position = tag.startsWith('mgbtest0') ? 12 : 0; + if (stream.length > (position + 12)) { + buffer = stream.peek(24).slice(position, position + 12); + const size = buffer[0] + (buffer[1] << 8) + (buffer[2] << 16) + (buffer[3] << 24); + if (position > 0 || size === (stream.length - position - 4)) { + const reader = flatbuffers.BinaryReader.open(buffer.slice(4, 12)); + if (reader.identifier === 'mgv2') { + return 'megengine.mge'; + } + } + } + for (const value of [ 'mgb0001', 'mgb0000a', 'MGBS', 'MGBC' ]) { + if (tag.startsWith(value)) { + return `megengine.${value}`; + } + } + } + const obj = context.peek('pkl'); + if (obj && obj.__class__ && obj.__class__.__module__ === 'megengine.traced_module.traced_module' && obj.__class__.__name__ === 'TracedModule') { + return 'megengine.tm'; + } + return ''; + } + + async open(context, target) { + const metadata = await context.metadata('megengine-metadata.json'); + switch (target) { + case 'megengine.tm': { + const obj = context.peek('pkl'); + return new megengine.Model(metadata, obj, target); + } + case 'megengine.mge': { + await context.require('./megengine-schema'); + megengine.schema = flatbuffers.get('megengine').mgb.serialization.fbs; + let model = null; + const stream = context.stream; + try { + const buffer = stream.peek(12); + const tag = String.fromCharCode.apply(null, buffer); + stream.skip(tag.startsWith('mgbtest0') ? 12 : 0); + stream.skip(4); + const reader = flatbuffers.BinaryReader.open(stream); + model = megengine.schema.v2.Model.create(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new megengine.Error(`File format is not megengine.Model (${message.replace(/\.$/, '')}).`); + } + return new megengine.Model(metadata, model, target); + } + default: { + throw new megengine.Error(`Unsupported MegEngine format '${target.replace(/^megengine\./, '')}'.`); + } + } + } +}; + +megengine.Model = class { + + constructor(metadata, obj, type) { + this.format = 'MegEngine'; + if (type === 'megengine.tm') { + this.format += (obj.dump_info && obj.dump_info.version ? ` v${obj.dump_info.version}` : ''); + } else if (type === 'megengine.mge') { + this.format += ` Mge${obj.model_version ? 
` v${obj.model_version}` : ''}`; + } + this.graphs = [ new megengine.Graph(metadata, obj) ]; + } +}; + +megengine.Graph = class { + + constructor(metadata, obj) { + this.name = ''; + this.nodes = []; + this.inputs = []; + this.outputs = []; + const values = new Map(); + const value = (name, type, tensor) => { + if (tensor && name.length === 0) { + return new megengine.Value(name, type || null, tensor); + } + if (!values.has(name)) { + values.set(name, new megengine.Value(name, type || null, tensor || null)); + } else if ((type && !type.equals(values.get(name).type)) || tensor) { + throw new megengine.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + const loadGraph = (tmodule, igraph, context, namePrefix, metadata, isRoot) => { + const expressions = igraph._exprs; + const getTensorType = (dtype, shape) => { + dtype = dtype ? dtype.__name__ : null; + return new megengine.TensorType(dtype, new megengine.TensorShape(shape)); + }; + const getOpNode = (metadata, item, expr, state) => { + const node = new megengine.Node(metadata, item); + let inpIdx = 0; + for (const i of expr.inputs) { + if (i.__class__.__name__ !== 'ModuleNode') { + const initializer = i.initializer !== undefined ? i.initializer : null; + const name = `inp${inpIdx}`; + const type = getTensorType(i._dtype, i._shape); + const argument = new megengine.Argument(name, [ value(i._fullname, type, initializer) ]); + node.inputs.push(argument); + inpIdx += 1; + } + } + let outIdx = 0; + let qparams = null; + for (const o of expr.outputs) { + if (o._qparams !== null) { + /* eslint-disable prefer-destructuring */ + qparams = o._qparams[1]; + /* eslint-enable prefer-destructuring */ + } + const type = getTensorType(o._dtype, o._shape); + const argument = new megengine.Argument(`out${outIdx}`, [ value(o._fullname, type, null) ]); + node.outputs.push(argument); + outIdx += 1; // keep output names out0, out1, ... aligned with the inp0, inp1, ... inputs above + } + if (qparams !== null) { + state = state === null ? {} : state; + state.scale = qparams.scale; + state.zero_point = qparams.zero_point; + state.quant_dtype_meta = qparams.dtype_meta; + } + if (state !== null) { + for (const key in state) { + const isModule = (obj) => { + return obj && (obj.state || obj._forward_pre_hooks); + }; + const isTensor = (obj) => { + return obj && obj.__class__ && obj.__class__.__module__ === 'megengine.tensor' && (obj.__class__.__name__ === 'Tensor' || obj.__class__.__name__ === 'Parameter'); + }; + if (!key.startsWith('_') && !isModule(state[key])) { + if (!isTensor(state[key])) { + const attribute = new megengine.Attribute(null, key, state[key] !== null ? 
state[key] : 'None'); + node.attributes.push(attribute); + } else { + const tensor = state[key]; + const type = getTensorType(tensor.dtype, tensor.data.shape); + const data = tensor.data.data; + const initializer = new megengine.Tensor(key, type, data); + const argument = new megengine.Argument(key, [ value('', type, initializer) ]); + node.inputs.push(argument); + } + } + } + } + return node; + }; + if (isRoot) { + for (const node of igraph._inputs) { + if (node.__class__.__name__ !== 'ModuleNode') { + const type = getTensorType(node._dtype, node._shape); + const argument = new megengine.Argument(node._name, [ value(node._name, type, null) ]); + this.inputs.push(argument); + } + } + for (const node of igraph._outputs) { + const type = getTensorType(node._dtype, node._shape); + const argument = new megengine.Argument(node._name, [ value(node._name, type, null) ]); + this.outputs.push(argument); + } + } + const parseGetAttr = (module, expression) => { + let names = expression.name.split('.'); + while (expression.inputs[0].expr.__class__.__name__ === 'GetAttr') { + expression = expression.inputs[0].expr; + names = expression.name.split('.').concat(names); + } + let obj = module; + for (const name of names) { + obj = obj[name]; + } + return obj; + }; + const parseArgs = (args, kwargs, meta) => { + const state = {}; + let argIdx = 0; + const processArgs = (inp, startIdx) => { + while (typeof inp === 'string' && inp.indexOf('Tensor') !== -1) { + inp = inp.replace('Tensor', `inp${startIdx}`); + startIdx += 1; + } + return [ inp, startIdx ]; + }; + const formatTreeDef = (obj) => { + if (obj.__class__.__name__ !== 'TreeDef' && obj.__class__.__name__ !== 'LeafDef') { + throw new megengine.Error(`Invalid argument '${obj.__class__.__name__}'.`); + } + if (obj.__class__.__name__ === 'TreeDef') { + const type = typeof obj.type !== 'string' ? 
obj.type.__name__ : obj.type.split('.').slice(-1)[0];
+                    const list = obj.children_defs.map((child) => formatTreeDef(child));
+                    switch (type) {
+                        case 'tuple': {
+                            return `(${list.join(',')})`;
+                        }
+                        case 'slice': {
+                            return list.join(':');
+                        }
+                        case 'list': {
+                            return `[${list.join(',')}]`;
+                        }
+                        case 'dict': {
+                            let content = '';
+                            for (let i = 0; i < obj.children_defs.length; i++) {
+                                content += `${i > 0 ? ',' : ''}${obj.aux_data[i]}:${list[i]}`;
+                            }
+                            return `{${content}}`;
+                        }
+                        default: {
+                            return `${type}(${list.join(',')})`;
+                        }
+                    }
+                }
+                if (obj.const_val !== null) {
+                    return obj.const_val;
+                } else if (obj.type[0].__module__ !== undefined) {
+                    return obj.type[0].__name__;
+                }
+                return 'None';
+            };
+            let inpIdx = 0;
+            for (const arg of args.children_defs) {
+                let name = '';
+                if (meta.attributes === undefined || (meta.attributes.length !== args.children_defs.length && meta.varargs === null)) {
+                    name = `arg${argIdx}`;
+                } else if (argIdx < meta.attributes.length) {
+                    name = meta.attributes[argIdx].name;
+                } else {
+                    name = meta.varargs + (argIdx - meta.attributes.length);
+                }
+                const [value, index] = processArgs(formatTreeDef(arg), inpIdx);
+                state[name] = value;
+                inpIdx = index;
+                argIdx += 1;
+            }
+            for (let i = 0; i < kwargs.children_defs.length; i++) {
+                const [value, index] = processArgs(formatTreeDef(kwargs.children_defs[i]), inpIdx);
+                state[kwargs.aux_data[i]] = value;
+                inpIdx = index;
+            }
+            return state;
+        };
+        const getName = (context, name) => {
+            let rst = name;
+            while (context.get(rst) !== undefined) {
+                if (rst === context.get(rst)) {
+                    return rst;
+                }
+                rst = context.get(rst);
+            }
+            return rst;
+        };
+        const getFullName = (prefix, name) => {
+            return prefix === '' ? name : `${prefix}_${name}`;
+        };
+        for (const expression of expressions) {
+            const type = expression.__class__.__name__;
+            for (const input of expression.inputs) {
+                input._fullname = getName(context, getFullName(namePrefix, input._name));
+            }
+            for (const output of expression.outputs) {
+                output._fullname = getName(context, getFullName(namePrefix, output._name));
+            }
+            switch (type) {
+                case 'Input': {
+                    break;
+                }
+                case 'GetAttr': {
+                    if (expression.outputs[0].__class__.__name__ === 'TensorNode') {
+                        const tensor = parseGetAttr(tmodule, expression);
+                        const type = getTensorType(tensor.dtype, tensor.data.shape);
+                        const data = tensor.data.data;
+                        expression.outputs[0].initializer = new megengine.Tensor(expression.name, type, data);
+                    }
+                    break;
+                }
+                case 'Constant': {
+                    if (expression.outputs[0].__class__.__name__ === 'TensorNode') {
+                        const tensor = expression.value;
+                        const type = getTensorType(tensor.dtype, tensor.data.shape);
+                        const data = tensor.data.data;
+                        expression.outputs[0].initializer = new megengine.Tensor('', type, data);
+                    }
+                    break;
+                }
+                case 'CallMethod': {
+                    if (expression.method === '__call__') {
+                        const module = parseGetAttr(tmodule, expression.inputs[0].expr);
+                        const getModuleType = (obj) => {
+                            if (obj.module !== undefined) {
+                                return `${obj.module[0]}.${obj.module[1]}`;
+                            }
+                            return `${obj.__class__.__module__}.${obj.__class__.__name__}`;
+                        };
+                        const moduleType = module.__class__.__name__ !== 'TracedModule' ? getModuleType(module) : 'TracedModule';
+                        if (moduleType === 'TracedModule') {
+                            const moduleName = expression.outputs[0]._name.endsWith('_out') ?
expression.outputs[0]._name.substring(0, expression.outputs[0]._name.length - 4) : expression.outputs[0]._name; + const prefix = getFullName(namePrefix, moduleName); + const internalGraph = module.argdef_graph_map[expression.arg_def.toString()]; + for (let i = 0; i < expression.inputs.length; i++) { + const actualName = getFullName(namePrefix, expression.inputs[i]._name); + const internalName = getFullName(prefix, internalGraph._inputs[i]._name); + context.set(internalName, actualName); + } + for (let i = 0; i < expression.outputs.length; i++) { + const actualName = getFullName(namePrefix, expression.outputs[i]._name); + const internalName = getFullName(prefix, internalGraph._outputs[i]._name); + if (context.get(internalName) !== undefined) { + context.set(actualName, context.get(internalName)); + } else { + context.set(internalName, actualName); + } + } + loadGraph(module, internalGraph, context, prefix, metadata, false); + continue; + } + const item = { 'name': '', 'type': moduleType }; + let state = module.__class__.__name__ !== 'TracedModule' ? module.state : module; + if (state === undefined) { + state = module; + } + const node = getOpNode(metadata, item, expression, state); + this.nodes.push(node); + } else { + const item = { 'name': '', 'type': expression.method }; + const [args, kwargs] = expression.arg_def.children_defs; + const schema = metadata.type(expression.method); + const state = parseArgs(args, kwargs, schema); + const node = getOpNode(metadata, item, expression, state); + this.nodes.push(node); + } + break; + } + case 'CallFunction': { + const getFunctionType = (obj) => { + if (obj.func.__module__ !== undefined) { + return `${obj.func.__module__}.${obj.func.__name__}`; + } + return `${obj.func[0]}.${obj.func[1]}`; + }; + const func = getFunctionType(expression); + const item = { 'name': '', 'type': func }; + const [args, kwargs] = expression.arg_def.children_defs; + const schema = metadata.type(func); + const state = parseArgs(args, kwargs, schema); + const node = getOpNode(metadata, item, expression, state); + this.nodes.push(node); + break; + } + case 'Apply': { + const opdef = expression.opdef_state ? expression.opdef_state.opdef_type : expression.opdef.type; + const item = { 'name': '', 'type': `${opdef.__module__}.${opdef.__name__}` }; + const node = getOpNode(metadata, item, expression, expression.opdef_state); + this.nodes.push(node); + break; + } + default: { + break; + } + } + } + }; + if (obj.argdef_graph_map) { + const [graph] = Object.values(obj.argdef_graph_map); + loadGraph(obj, graph, new Map(), '', metadata, true); + return; + } + const extraInfoNameset = new Set(); + const getExtraInfo = (opr) => { + let name = opr.name; + let repeatIdx = 0; + while (extraInfoNameset.has(name)) { + for (const id of opr.inputs) { + name = `${name}[${id}]`; + } + name += repeatIdx; + repeatIdx += 1; + } + extraInfoNameset.add(name); + const type = opr.type.replace(/V(\d+)$/, ''); + const args = []; + if (opr.tensors.length > 0) { + const [tensor] = opr.tensors; + const type = new megengine.TensorType(tensor.dtype.type, new megengine.TensorShape(tensor.shape)); + const data = tensor.data.byteLength !== 0 ? tensor.data.slice(0) : undefined; + const initializer = opr.type === 'Host2DeviceCopy' ? undefined : new megengine.Tensor('', type, data); + const quantization = tensor.dtype.param ? 
{ scale: tensor.dtype.param.scale, zeroPoint: tensor.dtype.param.zero_point } : null; + args.push(value(name, type, initializer, quantization)); + } else if (opr.shape) { + const type = new megengine.TensorType('?', new megengine.TensorShape(opr.shape)); + args.push(value(name, type)); + } else { + args.push(value(name)); + } + return { name: name, type: type, args: args }; + }; + const getAllOprAndTensor = (oprs) => { + const allOprAndTensor = new Map(); + for (const opr of oprs) { + if (opr.type === 'MultipleDeviceTensorWithFormatHolder' || opr.outputs.length > 1) { + if (opr.type === 'MultipleDeviceTensorWithFormatHolder' || opr.type === 'MultipleDeviceTensorHolder') { + opr.type = 'ImmutableTensor'; + } + for (let id = 0; id < opr.outputs.length; id++) { + const keyId = opr.outputs[id]; + const name = obj.middle_tensors[keyId] ? obj.middle_tensors[keyId].name : String(keyId); + const type = opr.type; + const tensors = opr.tensors.length ? [opr.tensors[id]] : []; + const onlyShape = obj.middle_tensors[keyId] ? obj.middle_tensors[keyId].shape : []; + allOprAndTensor.set(keyId, { name: name, type: type, tensors: tensors, shape: onlyShape, inputs: opr.inputs, outputs: opr.outputs }); + const _opr = allOprAndTensor.get(keyId); + _opr.extraInfo = getExtraInfo(_opr); + } + } else { + const [keyId] = opr.outputs; + opr.name = obj.middle_tensors[keyId] ? obj.middle_tensors[keyId].name : String(keyId); + if (obj.middle_tensors[keyId] && obj.middle_tensors[keyId].shape) { + opr.shape = obj.middle_tensors[keyId].shape; + } + allOprAndTensor.set(keyId, opr); + const _opr = allOprAndTensor.get(keyId); + _opr.extraInfo = getExtraInfo(_opr); + } + } + return allOprAndTensor; + }; + const allOprAndTensor = getAllOprAndTensor(obj.oprs); + for (const op of Array.from(allOprAndTensor.values())) { + if (op.type === 'Host2DeviceCopy') { + const argument = new megengine.Argument('input', op.extraInfo.args); + this.inputs.push(argument); + } else if (op.type !== 'ImmutableTensor') { + this.nodes.push(new megengine.Node(metadata, op, allOprAndTensor)); + } + } + for (let i = 0; i < obj.output_vars_idx.length; i++) { + const id = obj.output_vars_idx[i].compact_id; + const out_type = `output${i === 0 ? '' : i}`; + const argument = new megengine.Argument(out_type, allOprAndTensor.get(id).extraInfo.args); + this.outputs.push(argument); + } + } +}; + +megengine.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +megengine.Value = class { + + constructor(name, type, initializer, quantization) { + if (typeof name !== 'string') { + throw new megengine.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type ? type : initializer && initializer.type ? 
initializer.type : null; + this.initializer = initializer; + if (quantization && ((quantization.scale !== undefined && quantization.scale !== 0) || quantization.zeroPoint !== undefined && quantization.zeroPoint !== 0)) { + this.quantization = { + type: 'linear', + scale: [ quantization.scale ], + offset: [ quantization.zeroPoint ] + }; + } + } +}; + +megengine.Node = class { + + constructor(metadata, item, allOprAndTensor) { + this.name = ''; + this.type = Object.assign({}, metadata.type(item.type)); + this.type.name = this.type.name.replace(/V(\d+)$/, ''); + if (this.type.name.length > 4 && this.type.name.startsWith('__') && this.type.name.endsWith('__')) { + this.type.name = this.type.name.substring(2, this.type.name.length - 2); + } + this.type.category = this.type.category? this.type.category: metadata.type(item.type.replace(/V(\d+)$/, '')).category; + this.inputs = []; + this.outputs = []; + this.chain = []; + this.attributes = []; + if (item.inputs && item.outputs) { + const inputSchemas = this.type && this.type.inputs ? [ ...this.type.inputs ] : []; + for (let i = 0; i < item.inputs.length; i++) { + const inputOpr = allOprAndTensor.get(item.inputs[i]); + const inputSchema = inputSchemas.length > 0 ? inputSchemas.shift() : { name: (`input${i}`) }; + const argument = new megengine.Argument(inputSchema.name, inputOpr.extraInfo.args); + this.inputs.push(argument); + } + const outputSchemas = this.type && this.type.outputs ? [ ...this.type.outputs ] : []; + for (let i = 0; i < item.outputs.length; i++) { + const outputOpr = allOprAndTensor.get(item.outputs[i]); + const outputSchema = outputSchemas.length > 0 ? outputSchemas.shift() : { name: (`output${i}`) }; + const argument = new megengine.Argument(outputSchema.name, outputOpr.extraInfo.args); + this.outputs.push(argument); + } + if (item.param) { + for (const [name, value] of Object.entries(item.param)) { + if (value !== null) { + const attribute = new megengine.Attribute(metadata.attribute(item.param.constructor.name, name), name, value); + this.attributes.push(attribute); + } + } + } + } + } +}; + +megengine.Attribute = class { + + constructor(metadata, name, value) { + this.type = metadata ? metadata.type : null; + this.name = name; + this.value = ArrayBuffer.isView(value) ? 
Array.from(value) : value; + if (this.name === 'training') { + this.visible = false; + this.type = 'boolean'; + } + if (megengine.schema) { + if (megengine.schema.param[this.type]) { + this.value = megengine.Utility.enum(megengine.schema.param, this.type, this.value); + } else if (megengine.schema[this.type]) { + this.value = megengine.Utility.enum(megengine.schema, this.type, this.value); + } else if (megengine.schema.v2[this.type]) { + this.value = megengine.Utility.enum(megengine.schema.v2, this.type, this.value); + } + } + } +}; + +megengine.Tensor = class { + + constructor(name, type, data) { + this.category = 'Tensor'; + this.name = name || ''; + this.type = type; + this.values = data; + } +}; + +megengine.TensorType = class { + + constructor(dataType, shape) { + dataType = megengine.Utility.enum(megengine.schema, 'DTypeEnum', dataType).toLowerCase(); + megengine.TensorType._dataTypes = megengine.TensorType._dataTypes || new Map([ + [ 'bool', 'boolean' ], + [ 'byte', 'uint8' ], [ 'quantizeds4asymm', 'uint8' ], [ 'quantizeds8asymm', 'uint8' ], [ 'uintb4', 'uint8' ], + [ 'quantizeds1', 'int8' ], [ 'quantizeds4', 'int8' ], [ 'quantizeds8', 'int8' ], [ 'intb1', 'int8' ], [ 'intb2', 'int8' ], [ 'intb4', 'int8' ], [ 'qint8', 'int8' ], + [ 'quantizeds16', 'int16' ], + [ 'quantizeds32', 'int32' ] + ]); + this.dataType = megengine.TensorType._dataTypes.get(dataType) || dataType; + this.shape = shape; + } + + equals(obj) { + return obj && this.dataType === obj.dataType && this.shape && this.shape.equals(obj.shape); + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +megengine.TensorShape = class { + + constructor(dimensions) { + this.dimensions = Array.from(dimensions || []); + } + + equals(obj) { + return obj && Array.isArray(obj.dimensions) && + Array.isArray(this.dimensions) && this.dimensions.length === obj.dimensions.length + && obj.dimensions.every((value, index) => this.dimensions[index] === value); + } + + toString() { + if (this.dimensions && this.dimensions.length > 0) { + return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } + return ''; + } +}; + +megengine.Utility = class { + + static enum(schema, name, value) { + const type = name && schema ? 
schema[name] : undefined; + if (type) { + megengine.Utility._enums = megengine.Utility._enums || new Map(); + if (!megengine.Utility._enums.has(name)) { + const entries = new Map(Object.entries(type).map(([key, value]) => [ value, key ])); + megengine.Utility._enums.set(name, entries); + } + const map = megengine.Utility._enums.get(name); + if (map.has(value)) { + return map.get(value); + } + } + return value; + } +}; + +megengine.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading MegEngine model.'; + } +}; + +export const ModelFactory = megengine.ModelFactory; + diff --git a/mlir.js b/mlir.js new file mode 100644 index 00000000000..1c68b7b2630 --- /dev/null +++ b/mlir.js @@ -0,0 +1,1181 @@ + +// Experimental +// contributor @tucan9389 + +import * as text from './text.js'; + +const mlir = {}; + +mlir.ModelFactory = class { + + match(/* context */) { + return 'mlir'; + } + + async open(context) { + const stream = context.stream; + const decoder = text.Decoder.open(stream); + const parser = new mlir.Parser(decoder); + const obj = parser.read(); + return new mlir.Model(obj); + } +}; + +mlir.Model = class { + + constructor(obj) { + this.format = 'MLIR'; + this.graphs = obj.functions.map((func) => new mlir.Graph(func, '')); + } +}; + +mlir.Graph = class { + + constructor(func, group) { + this.inputs = []; // [mlir.Argument] + this.outputs = []; // [mlir.Argument] + this.nodes = []; // [mlir.Node] + const valueType = (type) => { + if (type === undefined) { + return null; + } + // eg. tensor + if (type.startsWith('tensor<')) { + const shapeString = type.substring(7, type.length - 1); + if (!/^[0-9xfiq?*]+$/i.test(shapeString)) { + return type; + } + const parts = shapeString.split('x'); + const dataType = parts[parts.length - 1]; + const shape = parts + .slice(0, -1) + .map((dimension) => { + const parsedDimension = parseInt(dimension.trim()); + return isNaN(parsedDimension) ? '?' : parsedDimension; + }); + return new mlir.TensorType(dataType, new mlir.TensorShape(shape)); + } + return type; + }; + // inputs of function + for (let i = 0; i < func.inputs.length; i++) { + const input = func.inputs[i]; + const inputType = func.inputTypes[i]; + const type = valueType(inputType); + const value = new mlir.Value(input, type, "input desc", null); + const argument = new mlir.Argument(input, [ value ]); + this.inputs.push(argument); + } + // outputs of function + for (let i = 0; i < func.outputTypes.length; i++) { + const output = `%return` + `/${i}`; + const outputType = func.outputTypes[i]; + const type = valueType(outputType); + const value = new mlir.Value(output, type, "output desc", null); + const argument = new mlir.Argument(output, [ value ]); + this.outputs.push(argument); + } + // operations + // args is map of edges. args will be converted to mlir.Arguemnts. + const values = new Map(); + values.map = (name) => { + if (!values.has(name)) { + values.set(name, { name: name, to: [], from: [] }); + } + return values.get(name); + }; + // operations - setup arguments + const operations = func.operations.map((op) => { + const operation = { + type: op.name, + attributes: {}, + inputs: [], + outputs: [], + delete: false, + }; + // TODO: convert attributes to proper types + operation.attributes = op.attributes; + // for (const [key, value] of Object.entries(op.attributes)) { + // operation.attributes[key] = convertValue(value); + // } + for (let j = 0; j < (op.inputs ? 
op.inputs.length : 0); j++) { + const input = op.inputs[j]; + const inputType = op.inputTypes[j]; + const value = values.map(input); + value.to.push(operation); + const args = [{ name: input, value: inputType }]; + operation.inputs.push({ + name: input, + arguments: args + }); + } + for (let j = 0; j < (op.outputs ? op.outputs.length : 0); j++) { + const output = op.outputs[j]; + const outputType = op.outputTypes[j]; + const value = values.map(output); + value.type = valueType(outputType); + value.from.push(operation); + operation.outputs.push({ + name: output, + arguments: [value] + }); + } + return operation; + }); + + // // operations - constant ops + // for (const op of operations) { + // if (op.type === 'const' && op.inputs.length === 0 && + // op.outputs.length === 1 && op.outputs[0].arguments.length === 1) { + // const argument = op.outputs[0].arguments[0]; + // if (op.attributes && op.attributes.val) { + // const type = argument.type; + // const data = op.attributes.val; + // if (data instanceof Uint8Array && data.length === 2 && + // type.dataType === 'float16' && type.shape.dimensions.length === 0) { + // const view = new DataView(data.buffer, data.byteOffset, data.byteLength); + // argument.value = view.getFloat16(0, true); + // } else { + // argument.value = data; + // } + // argument.const = true; + // op.delete = true; + // } + // } + // } + + // // + // for (const op of operations) { + // for (const input of op.inputs) { + // if (input.arguments.length > 1 && input.arguments.some((argument) => argument.const)) { + // if (input.arguments.every((argument) => argument.value instanceof mlir.Tensor)) { + // continue; + // } + // for (const argument of input.arguments) { + // for (const from of argument.from) { + // from.delete = false; + // } + // delete argument.value; + // } + // } + // } + // } + + // for (const op of operations) { + // if (op.delete) { + // continue; + // } + // op.inputs = op.inputs.filter((input) => { + // if (input.arguments.every((argument) => argument.value === undefined || argument.value instanceof coreml.Tensor)) { + // return true; + // } + // if (input.arguments.length === 1) { + // const argument = input.arguments[0]; + // op.attributes[input.name] = argument.value; + // return false; + // } + // op.attributes[input.name] = input.arguments.map((argument) => argument.value[0]); + // return false; + // }); + // } + const tensors = new Map(); + const tensor = (arg) => { + if (!tensors.has(arg.name)) { + tensors.set(arg.name, new mlir.Value(arg.name, arg.type, null, arg.value)); + } + return tensors.get(arg.name); + }; + for (const input of this.inputs) { + for (const arg of input.value) { + tensors.set(arg.name, arg); + } + } + for (const output of this.outputs) { + for (const arg of output.value) { + tensors.set(arg.name, arg); + } + } + for (const op of operations) { + if (op.delete) { + continue; + } + op.inputs = op.inputs.map((input) => new mlir.Argument(input.name, input.arguments.map((argument) => tensor(argument)))); + op.outputs = op.outputs.map((output) => new mlir.Argument(output.name, output.arguments.map((argument) => tensor(argument)))); + } + for (const op of operations.filter((op) => !op.delete)) { + const type = op.type; // 'program:' + op.type; + // const metadata = this._metadata.type(type); + // if (metadata && Array.isArray(metadata.inputs)) { + // let index = 1; + // const map = new Map(metadata.inputs.map((input) => [ input.name, index++ ])); + // op.inputs.sort((a, b) => (map.get(a.name) || map.size) - (map.get(b.name) || 
map.size)); + // } + const node = new mlir.Node(/*this._metadata, */group, type, null, null, op.attributes, op.inputs, op.outputs); + this.nodes.push(node); + } + } +}; + +mlir.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +mlir.Value = class { + + constructor(name, type, description, initializer) { + if (typeof name !== 'string') { + throw new mlir.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type ? type : initializer && initializer.type ? initializer.type : null; + this.description = description || null; + this.initializer = initializer || null; + } +}; + +mlir.Node = class { + + constructor(group, type, name, description, attributes, inputs, outputs) { + if (!type) { + throw new mlir.Error('Undefined node type.'); + } + if (group) { + this.group = group; + } + this.type = { name: type || '' }; // string (metadata.type(type) || { name: type } + this.name = name || ''; // string + this.description = description || ''; // string + this.inputs = inputs || []; // [mlir.Parameter] + this.outputs = outputs || []; // [mlir.Parameter] + this.attributes = []; // [mlir.Attribute] + if (attributes) { + for (const key of Object.keys(attributes)) { + const value = attributes[key]; + const attribute = new mlir.Attribute(key, value); + this.attributes.push(attribute); + } + } + } +}; + +mlir.Attribute = class { + + constructor(name, value) { + this.name = name; + this.type = 'string'; + this.value = value; + } +}; + +mlir.Tensor = class { + + constructor(type, data) { + this.type = type; // mlir.TensorType + this.values = data; + switch (this.type.dataType) { + case 'float32': this.encoding = '|'; break; + default: this.encoding = '<'; break; + } + } +}; + +mlir.TensorType = class { + + constructor(dataType, shape) { + this.dataType = mlir.Utility.dataType(dataType); // string + this.shape = shape || new mlir.TensorShape([]); // mlir.TensorShape + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +mlir.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + if (!this.dimensions || this.dimensions.length == 0) { + return ''; + } + return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } +}; + +mlir.TokenType = { + IDENTIFIER: 'IDENTIFIER', + BOOLEAN_LITERAL: 'BOOLEAN_LITERAL', + INTEGER_LITERAL: 'INTEGER_LITERAL', + HEXADECIMAL_LITERAL: 'HEXADECIMAL_LITERAL', + FLOAT_LITERAL: 'FLOAT_LITERAL', + STRING_LITERAL: 'STRING_LITERAL', + SYMBOL_REF_ID: 'SYMBOL_REF_ID', + TYPE: 'TYPE', + DENSE: 'DENSE', + VALUE_ID: '%', + CARET_ID: '^', + COLON: ':', + COMMA: ',', + EQUAL: '=', + LPAREN: '(', + RPAREN: ')', + ARROW: '->', + LBRACKET: '[', + RBRACKET: ']', + LBRACE: '{', + RBRACE: '}', + LESS_THAN: '<', + GREATER_THAN: '>', + KEYWORD: 'KEYWORD', + EOF: 'EOF', +}; + +mlir.Token = class { + + constructor(type, value) { + this.type = type; + this.value = value; + } +}; + +mlir.Tokenizer = class { + + constructor(decoder) { + this._decoder = decoder; + this._currentPosition = this._decoder.position; + this._current = this._decoder.decode(); + this._nextPosition = this._decoder.position; + this._next = this._decoder.decode(); + } + + read() { + this._position = this._currentPosition; + while (this._current) { + switch (this._current) { + case ' ': + case '\t': + case '\n': + case '\r': + case '\f': + this._skipWhitespace(); + this._position = this._currentPosition; + continue; + case '/': + 
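+                    // '/' introduces a comment; _skipComment() consumes both
+                    // '//' line comments and '/* ... */' block comments as whitespace.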
this._skipComment(); + this._position = this._currentPosition; + continue; + case '.': + if (/[0-9]/.test(this._peek())) { + return this._number(); + } + return new mlir.Token(mlir.TokenType.KEYWORD, '.'); + case '-': + if (/[0-9]/.test(this._peek())) { + return this._number(); + } else if (this._peek() === '>') { + this._read(); + this._read(); + return new mlir.Token(mlir.TokenType.ARROW, '->'); + } + this._read(); + return new mlir.Token(mlir.TokenType.KEYWORD, '-'); + case '+': + if (/[0-9]/.test(this._peek())) { + return this._number(); + } + this._read(); + return new mlir.Token(mlir.TokenType.KEYWORD, '+'); + case '"': + return this._stringLiteral(); + case '@': + return this._symbolRefId(); + case '%': + return this._valueId(); + case '^': + return this._caretId(); + case '=': + if (this._peek() === '=') { + this._read(); + this._read(); + return new mlir.Token(mlir.TokenType.EQUAL_EQUAL, '=='); + } + this._read(); + return new mlir.Token(mlir.TokenType.EQUAL, '='); + case ':': + if (this._peek() === ':') { + this._read(); + this._read(); + return new mlir.Token(mlir.TokenType.DOUBLE_COLON, '::'); + } + this._read(); + return new mlir.Token(mlir.TokenType.COLON, ':'); + case ',': + this._read(); + return new mlir.Token(mlir.TokenType.COMMA, ','); + case '(': + this._read(); + return new mlir.Token(mlir.TokenType.LPAREN, '('); + case ')': + this._read(); + return new mlir.Token(mlir.TokenType.RPAREN, ')'); + case '{': + this._read(); + return new mlir.Token(mlir.TokenType.LBRACE, '{'); + case '}': + this._read(); + return new mlir.Token(mlir.TokenType.RBRACE, '}'); + case '[': + this._read(); + return new mlir.Token(mlir.TokenType.LBRACKET, '['); + case ']': + this._read(); + return new mlir.Token(mlir.TokenType.RBRACKET, ']'); + case '<': + this._read(); + return new mlir.Token(mlir.TokenType.LESS_THAN, '<'); + case '>': + this._read(); + return new mlir.Token(mlir.TokenType.GREATER_THAN, '>'); + default: + if (/[a-zA-Z_$]/.test(this._current) || /[-.]/.test(this._current)) { + return this._identifier(); + } + if (/[0-9]/.test(this._current)) { + let result = ''; + const type = mlir.TokenType.INTEGER_LITERAL; + while (this._current && /[0-9]/.test(this._current)) { + result += this._read(); + } + if (this._current === 'x') { + // Read the rest of the shape + do { + result += this._read(); + } while (this._current && /[0-9x]/.test(this._current)); + return new mlir.Token(mlir.TokenType.SHAPE, result); + } + return new mlir.Token(type, parseInt(result, 10)); + } + return new mlir.Token(mlir.TokenType.KEYWORD, this._read()); + } + } + return new mlir.Token(mlir.TokenType.EOF, null); + } + + location() { + let line = 1; + let column = 1; + this._decoder.position = 0; + let c; + do { + if (this._decoder.position === this._position) { + return `at ${line}:${column}.`; + } + c = this._decoder.decode(); + if (c === '\n') { + line++; + column = 1; + } else { + column++; + } + } + while (c !== undefined); + return `at ${line}:${column}.`; + } + + _read() { + const current = this._current; + this._current = this._next; + this._currentPosition = this._nextPosition; + this._nextPosition = this._decoder.position; + this._next = this._decoder.decode(); + return current; + } + + _peek() { + return this._next; + } + + _skipWhitespace() { + while (this._current !== undefined && (this._current === ' ' || this._current === '\t' || this._current === '\n' || this._current === '\r' || this._current === '\f')) { + this._read(); + } + } + + _match(value) { + if (this._current === value) { + this._read(); + 
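+            // expected character found: it has been consumed, report the match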
return true; + } + return false; + } + + _skipComment() { + if (this._match('/')) { + if (this._current === '/') { + while (this._current && this._current !== '\n') { + this._read(); + } + this._skipWhitespace(); + this._skipComment(); + } else if (this._current === '*') { + while (this._current) { + this._read(); + if (this._match('*') && this._match('/')) { + break; + } + } + this._skipWhitespace(); + this._skipComment(); + } + } + } + + _number() { + let result = ''; + let type = mlir.TokenType.INTEGER_LITERAL; + while (this._current && /[0-9]/.test(this._current)) { + result += this._read(); + } + if (this._current === 'x') { + result += this._read(); + type = mlir.TokenType.HEXADECIMAL_LITERAL; + while (this._current && /[0-9a-fA-F]/.test(this._current)) { + result += this._read(); + } + } else if (this._current === '.') { + result += this._read(); + type = mlir.TokenType.FLOAT_LITERAL; + while (this._current && /[0-9]/.test(this._current)) { + result += this._read(); + } + if (this._current === 'e' || this._current === 'E') { + result += this._read(); + if (this._current === '+' || this._current === '-') { + result += this._read(); + } + while (this._current && /[0-9]/.test(this._current)) { + result += this._read(); + } + if (type === mlir.TokenType.INTEGER_LITERAL && /[.eE]/.test(this._current)) { + type = mlir.TokenType.FLOAT_LITERAL; + } + if (type === mlir.TokenType.FLOAT_LITERAL && !/[.eE]/.test(this._current)) { + return new mlir.Token(type, parseFloat(result)); + } + if (type === mlir.TokenType.HEXADECIMAL_LITERAL && !/[x]/.test(this._current)) { + return new mlir.Token(type, parseInt(result, 16)); + } + return new mlir.Token(type, result); + } + } + return new mlir.Token(type, parseInt(result, 10)); + } + + _stringLiteral() { + let result = ''; + this._read(); + while (this._current && this._current !== '"') { + if (this._match('\\')) { + switch (this._current) { + case 'n': + result += '\n'; + break; + case 'r': + result += '\r'; + break; + case 't': + result += '\t'; + break; + default: + result += this._current; + break; + } + } else { + result += this._current; + } + this._read(); + } + if (this._match('"')) { + return new mlir.Token(mlir.TokenType.STRING_LITERAL, result); + } + throw new mlir.Error('Unterminated string literal'); + } + + _identifier() { + let result = ''; + let opened = 0; + let wasOpened = false; + while (this._current) { + if (!opened) { + if (this._current && (/[a-zA-Z_$<>\-.*]/.test(this._current) || /[0-9]/.test(this._current))) { + if (this._current === '<') { + opened += 1; + wasOpened = true; + } + result += this._read(); + } else { + break; + } + } else if (!this._current) { + break; + } else if (this._current === '>') { + result += this._read(); + opened -= 1; + if (opened === 0) { + break; + } + } else { + if (this._current === '<') { + opened += 1; + } + result += this._read(); + } + } + if (wasOpened) { + if (result.startsWith('dense')) { + return new mlir.Token(mlir.TokenType.DENSE, result); + } + return new mlir.Token(mlir.TokenType.TYPE, result); + } + if (result.endsWith('func')) { + return new mlir.Token(mlir.TokenType.KEYWORD, result); + } + switch (result) { + case 'module': + case 'func': + case 'loc': + return new mlir.Token(mlir.TokenType.KEYWORD, result); + case 'true': + case 'false': + return new mlir.Token(mlir.TokenType.BOOLEAN_LITERAL, result === 'true'); + default: + return new mlir.Token(mlir.TokenType.IDENTIFIER, result); + } + } + + _symbolRefId() { + let result = '@'; + this._read(); + if (this._current === '"') { + 
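+            // quoted symbol names, e.g. @"name with spaces", are read as string literals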
result += this._stringLiteral().value; + } else { + while (this._current && (/[a-zA-Z_$]/.test(this._current) || /[0-9]/.test(this._current) || /[-.]/.test(this._current))) { + result += this._read(); + } + if (this._current === ':' && this._peek() === ':') { + result += this._read(); + result += this._read(); + result += this._symbolRefId().value; + } + } + return new mlir.Token(mlir.TokenType.SYMBOL_REF_ID, result); + } + + _valueId() { + let result = ''; + if (this._current === '%') { + result = '%'; + } else if (this._current === '$') { + result = '$'; + } + this._read(); + while (this._current) { + if (/[a-zA-Z_$]/.test(this._current) || /[0-9]/.test(this._current) || /[-.#]/.test(this._current)) { + result += this._read(); + } else if (/[:]/.test(this._current) && /[0-9]/.test(this._next)) { // %myid:3 case + result += this._read(); + } else { + break; + } + } + return new mlir.Token(mlir.TokenType.VALUE_ID, result); + } + + _caretId() { + let result = '^'; + this._read(); + if (this._current === ':' && this._peek() !== ':') { + result += this._read(); + return new mlir.Token(mlir.TokenType.CARET_ID, result); + } + while (this._current && (/[a-zA-Z_$]/.test(this._current) || /[0-9]/.test(this._current) || /[-.]/.test(this._current))) { + result += this._read(); + } + if (this._current === ':' && this._peek() === ':') { + result += this._read(); + result += this._read(); + result += this._caretId().value; + } + return new mlir.Token(mlir.TokenType.CARET_ID, result); + } +}; + +mlir.Parser = class { + + constructor(decoder) { + this._tokenizer = new mlir.Tokenizer(decoder); + this._current = this._tokenizer.read(); + } + + read() { + const hasModule = this._match(mlir.TokenType.KEYWORD, 'module'); + let attributes = {}; + if (hasModule) { + // Attributes + if (this._current.value === 'attributes') { + this._read(mlir.TokenType.IDENTIFIER, 'attributes'); + attributes = Object.assign(attributes, this._parseAttribute()); + } + this._read(mlir.TokenType.LBRACE); + } + const graph = { + functions: [], + operations: [], + attributes: attributes, + }; + // functions or operations + const terminal = hasModule ? 
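+        // a top-level 'module { ... }' is brace-delimited, so parsing stops at '}';
+        // a bare list of functions or operations runs to end-of-file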
mlir.TokenType.RBRACE : mlir.TokenType.EOF; + while (this._current.type !== terminal) { + if (this._current.type === mlir.TokenType.KEYWORD && this._current.value.endsWith('func')) { + // function + const func = this._parseFunction(); + graph.functions.push(func); + } else { + // operation + const op = this._parseOperation(); + graph.operations.push(op); + } + } + if (hasModule) { + this._read(mlir.TokenType.RBRACE); + } + return graph; + } + + _parseFunction() { + // func keyword + this._read(mlir.TokenType.KEYWORD); + let visibility = null; + if (this._current.type != mlir.TokenType.SYMBOL_REF_ID) { + visibility = this._current.value; + this._read(this._current.type); + } + const name = this._parseFunctionName(); + const inputs = this._parseFunctionInputs(); + let attributes = {}; + // attributes + if (this._match(mlir.TokenType.IDENTIFIER, 'attributes')) { + attributes = Object.assign(attributes, this._parseAttribute()); + } + let outputs = {}; + if (this._match(mlir.TokenType.ARROW)) { + outputs = Object.assign(outputs, this._parseFunctionOutputs()); + } + // attributes + if (this._match(mlir.TokenType.IDENTIFIER, 'attributes')) { + attributes = Object.assign(attributes, this._parseAttribute()); + } + this._read(mlir.TokenType.LBRACE); + // operations + const operations = []; + while (this._current.type !== mlir.TokenType.RBRACE) { + const operation = this._parseOperation(); + operations.push(operation); + } + this._read(mlir.TokenType.RBRACE); + return { + name: name, + inputs: inputs.map((input) => input.name), + inputTypes: inputs.map((input) => input.type), + outputTypes: outputs, + operations: operations, + attributes: attributes, + visibility: visibility, + }; + } + + _parseFunctionName() { + const name = this._current.value; + this._read(mlir.TokenType.SYMBOL_REF_ID); + return name; + } + + _parseFunctionInputs() { + this._read(mlir.TokenType.LPAREN); + const inputs = []; + while (!this._match(mlir.TokenType.RPAREN)) { + const input = { + name: this._current.value, + }; + this._read(mlir.TokenType.VALUE_ID); + this._read(mlir.TokenType.COLON); + input.type = this._current.value; + if (!this._match(mlir.TokenType.TYPE)) { + this._match(mlir.TokenType.IDENTIFIER); + } + // attribute + if (this._current.type === mlir.TokenType.LBRACE) { + input.attributes = this._parseAttribute(); + } + inputs.push(input); + this._match(mlir.TokenType.COMMA); + } + return inputs; + } + + _parseFunctionOutputs() { + const outputs = []; + if (this._match(mlir.TokenType.LPAREN)) { + while (!this._match(mlir.TokenType.RPAREN)) { + const output = { + type: this._current.value, + }; + if (!this._match(mlir.TokenType.TYPE)) { + this._match(mlir.TokenType.IDENTIFIER); + } + // attribute + if (this._current.type === mlir.TokenType.LBRACE) { + output.attributes = this._parseAttribute(); + } + outputs.push(output); + this._match(mlir.TokenType.COMMA); + } + } else { + const output = { + type: this._current.value, + }; + if (!this._match(mlir.TokenType.TYPE)) { + this._match(mlir.TokenType.IDENTIFIER); + } + outputs.push(output); + } + return outputs; + } + + _parseOperation() { + // %3 + const outputs = this._parseReturnValues(); + // = + this._match(mlir.TokenType.EQUAL); + // "add" + const operationName = this._parseOperationName(); + if (this._current.type === mlir.TokenType.RBRACE) { + // early return + return { + outputs: outputs, + name: operationName, + }; + } + const skipSymbolBetween = (openingTokenType, closingTokenType) => { + let count = 1; + while (count > 0) { + if (this._current.type === 
openingTokenType) { + count++; + } else if (this._current.type === closingTokenType) { + count--; + } + this._read(this._current.type); + } + }; + // (%a, %b) + // condition: start with `(%`, `%`, or `()` + const { inputs } = this._parseInputArguments(); + // successor-list? + // condition: start with `[`, end with `]` + if (this._match(mlir.TokenType.LBRACKET)) { + skipSymbolBetween(mlir.TokenType.LBRACKET, mlir.TokenType.RBRACKET); // TODO + } + // dictionary-properties? + // condition: start with `<`, end with `>` + if (this._match(mlir.TokenType.LESS_THAN)) { + skipSymbolBetween(mlir.TokenType.LESS_THAN, mlir.TokenType.GREATER_THAN); // TODO + } + // region-list? + // condition: start with `({^`, or (operation, end with `)` + if (this._match(mlir.TokenType.LPAREN) && this._current.type === mlir.TokenType.LBRACE) { + skipSymbolBetween(mlir.TokenType.LPAREN, mlir.TokenType.RPAREN); // TODO + } + // dictionary-attribute? + // condition: start with `{`, end with `}` + let attributes = this._parseAttribute(); + // : (f32, tensor<1xf32>) + let inputTypes = []; + if (this._match(mlir.TokenType.COLON)) { + inputTypes = this._parseInputArgumentTypes(); + } + const outputTypes = []; + if (operationName.endsWith('constant') && this._current.type !== mlir.TokenType.ARROW) { + // constant + const result = { + name: operationName, + attributes: attributes, + // data: this._parseConstantData(), + outputs: outputs, + outputTypes: outputTypes, + isConstant: true, + }; + return result; + } + // -> f32 + if (this._match(mlir.TokenType.ARROW)) { + outputTypes.push(...this._parseOutputType()); + } + let body = null; + if (this._match(mlir.TokenType.LBRACE)) { + let braceCount = 0; + braceCount++; + body = '{ '; + while (braceCount > 0) { + if (this._current.type === mlir.TokenType.LBRACE) { + braceCount++; + } else if (this._current.type === mlir.TokenType.RBRACE) { + braceCount--; + } + if (braceCount > 0) { + body += this._current.value; + if (this._current.type === mlir.TokenType.LBRACE || this._current.type === mlir.TokenType.RBRACE) { + body += '\n'; + } else if (this._current.type !== mlir.TokenType.WHITESPACE) { + body += ' '; + } + } + this._read(this._current.type); + } + body += '}'; + } + attributes = Object.assign(attributes, this._parseAttribute()); + const result = { + name: operationName, + attributes: attributes, + inputs: inputs, + inputTypes: inputTypes, + outputs: outputs, + outputTypes: outputTypes, + body: body, + }; + return result; + } + + _parseReturnValues() { + const outputs = []; + if (this._match(mlir.TokenType.LPAREN)) { + while (!this._match(mlir.TokenType.RPAREN)) { + const value = this._match(mlir.TokenType.VALUE_ID); + if (value) { + outputs.push(value.value); + } + this._match(mlir.TokenType.COMMA); + } + } else { + const value = this._match(mlir.TokenType.VALUE_ID); + if (value) { + outputs.push(value.value); + } + if (this._match(mlir.TokenType.COMMA)) { + while (this._current.type === mlir.TokenType.VALUE_ID) { + const value = this._read(mlir.TokenType.VALUE_ID); + outputs.push(value.value); + this._match(mlir.TokenType.COMMA); + } + } + } + const result = []; + for (const output of outputs) { + if (output.split(':').length == 2) { + const [valueId, length] = output.split(':'); + for (let i = 0; i < length; i++) { + result.push(`${valueId}#${i}`); + } + } else { + result.push(output); + } + } + return result; + } + + _parseOperationName() { + let value; + switch (this._current.type) { + case mlir.TokenType.STRING_LITERAL: + value = this._current.value; + 
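+                // generic form: the operation name is a quoted string literal, e.g. "tosa.conv2d"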
this._read(mlir.TokenType.STRING_LITERAL); + break; + case mlir.TokenType.IDENTIFIER: + value = this._current.value; + this._read(mlir.TokenType.IDENTIFIER); + if (this._current.type === mlir.TokenType.IDENTIFIER) { + value += this._current.value; + this._read(mlir.TokenType.IDENTIFIER); + } + break; + default: + throw new mlir.Error(`Unexpected operation name '${JSON.stringify(this._current)}' ${this._tokenizer.location()}`); + } + return value; + } + + _parseInputArguments() { + const inputs = []; + this._match(mlir.TokenType.LPAREN); + while (this._current.type !== mlir.TokenType.RPAREN && + this._current.type !== mlir.TokenType.COLON && + this._current.type !== mlir.TokenType.ARROW && + this._current.type !== mlir.TokenType.RBRACE && + this._current.type !== mlir.TokenType.IDENTIFIER && + this._current.type !== mlir.TokenType.STRING_LITERAL) { + const value = this._match(mlir.TokenType.VALUE_ID); + if (value) { + inputs.push(value.value); + } else { + const dense = this._match(mlir.TokenType.DENSE); + inputs.push(dense.value); + return { inputs }; + } + this._match(mlir.TokenType.COMMA); + } + this._match(mlir.TokenType.RPAREN); + return { inputs }; + } + + _parseInputArgumentTypes() { + const inputTypes = []; + this._match(mlir.TokenType.LPAREN); + while (this._current.type === mlir.TokenType.TYPE || (this._current.type === mlir.TokenType.IDENTIFIER && this._current.value === 'none')) { + inputTypes.push(this._current.value); + this._read(this._current.type); + this._match(mlir.TokenType.COMMA); + } + this._match(mlir.TokenType.RPAREN); + return inputTypes; + } + + _parseOutputArguments() { + const outputs = []; + const outputTypes = []; + this._read(mlir.TokenType.LPAREN); + while (!this._match(mlir.TokenType.RPAREN)) { + const value = this._match(mlir.TokenType.VALUE_ID); + if (value) { + outputs.push(value.value); + } + if (this._match(mlir.TokenType.COLON)) { + const type = this._read(mlir.TokenType.TYPE); + outputTypes.push(type.value); + } + this._match(mlir.TokenType.COMMA); + } + return { outputs, outputTypes }; + } + + _parseOutputType() { + const outputTypes = []; + if (this._match(mlir.TokenType.LPAREN)) { + while (!this._match(mlir.TokenType.RPAREN)) { + outputTypes.push(this._current.value); + if (!this._match(mlir.TokenType.TYPE)) { + if (this._current.type === mlir.TokenType.IDENTIFIER && (this._current.value === 'none' || /[^f\\d+$]/.test(this._current.value) || /[^i\\d+$]/.test(this._current.value))) { + this._read(mlir.TokenType.IDENTIFIER); + } + } + this._match(mlir.TokenType.COMMA); + } + } else { + outputTypes.push(this._current.value); + if (!this._match(mlir.TokenType.TYPE)) { + if (this._current.type === mlir.TokenType.IDENTIFIER && (this._current.value === 'none' || /[^f\\d+$]/.test(this._current.value) || /[^i\\d+$]/.test(this._current.value))) { + this._read(mlir.TokenType.IDENTIFIER); + } + } + } + return outputTypes; + } + + _parseAttribute() { + const attributes = {}; + if (this._match(mlir.TokenType.LBRACE)) { + while (!this._match(mlir.TokenType.RBRACE)) { + const name = this._read(mlir.TokenType.IDENTIFIER).value; + if (this._match(mlir.TokenType.EQUAL)) { + let value = ''; + let openingCount = 0; + while (openingCount !== 0 || (this._current.type !== mlir.TokenType.COMMA && this._current.type !== mlir.TokenType.RBRACE)) { + switch (this._current.type) { + case mlir.TokenType.LBRACKET: + case mlir.TokenType.LBRACE: + case mlir.TokenType.LPAREN: + openingCount++; + break; + case mlir.TokenType.RBRACKET: + case mlir.TokenType.RBRACE: + case 
mlir.TokenType.RPAREN: + openingCount--; + break; + default: + break; + } + value += `${this._current.value} `; + this._read(this._current.type); + } + attributes[name] = value.trim(); + } else { + attributes[name] = name; + } + this._match(mlir.TokenType.COMMA); + } + } + return attributes; + } + + _match(type, value) { + if (this._current.type === type && (!value || this._current.value === value)) { + return this._read(type, value); + } + return null; + } + + _read(type, value) { + if (this._current.type !== type) { + throw new mlir.Error(`Expected token of type '${type}', but got '${this._current.type}' ${this._tokenizer.location()}`); + } + if (value && this._current.value !== value) { + throw new mlir.Error(`Expected token with value '${value}', but got '${this._current.value}' ${this._tokenizer.location()}`); + } + const current = this._current; + this._current = this._tokenizer.read(); + return current; + } +}; + +mlir.Utility = class { + + static dataType(value) { + switch (value) { + case 'f16': return 'float16'; + case 'f32': return 'float32'; + case 'f64': return 'float64'; + case 'i16': return 'int16'; + case 'i32': return 'int32'; + case 'i64': return 'int64'; + case 'i1': return 'boolean'; + default: throw new mlir.Error(`Unknown data type '${value}'.`); + } + } +}; + +mlir.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading MLIR model.'; + } +}; + +export const ModelFactory = mlir.ModelFactory; + diff --git a/mlnet-metadata.json b/mlnet-metadata.json new file mode 100644 index 00000000000..005c177d076 --- /dev/null +++ b/mlnet-metadata.json @@ -0,0 +1,136 @@ +[ + { + "name": "CharToken", + "description": "Character-oriented tokenizer where text is considered a sequence of characters.", + "attributes": [ + { + "name": "UseMarkerChars", + "type": "boolean" + }, + { + "name": "IsSeparatorStartEnd", + "type": "boolean" + } + ] + }, + { + "name": "ConcatTransform", + "category": "Tensor", + "description": "Concatenates one or more columns of the same item type." + }, + { + "name": "CopyTransform", + "category": "Tensor", + "description": "Duplicates columns from the dataset." + }, + { + "name": "ImageLoaderTransform", + "description": "Load images from files.", + "attributes": [ + { + "name": "ImageFolder", + "type": "string", + "description": "Folder where to search for images" + } + ] + }, + { + "name": "ImagePixelExtractor", + "description": "Scales an image to specified dimensions using one of the three scale types: isotropic with padding, isotropic with cropping or anisotropic. In case of isotropic padding, transparent color is used to pad resulting image.", + "attributes": [ + { + "name": "ColorsToExtract", + "type": "ImagePixelExtractingTransformer.ColorBits" + }, + { + "name": "OrderOfExtraction", + "type": "ImagePixelExtractingTransformer.ColorsOrder" + }, + { + "name": "Planes", + "type": "uint8" + }, + { + "name": "OutputAsFloatArray", + "type": "boolean" + }, + { + "name": "OffsetImage", + "type": "float32" + }, + { + "name": "ScaleImage", + "type": "float32" + }, + { + "name": "InterleavePixelColors", + "type": "boolean" + } + ] + }, + { + "name": "ImageScalerTransform", + "description": "Scales an image to specified dimensions using one of the three scale types: isotropic with padding, isotropic with cropping or anisotropic. 
In case of isotropic padding, transparent color is used to pad resulting image.", + "attributes": [ + { + "name": "Width" + }, + { + "name": "Height" + }, + { + "name": "Resizing", + "type": "ImageResizingTransformer.ResizingKind" + }, + { + "name": "Anchor", + "type": "ImageResizingTransformer.Anchor" + } + ] + }, + { + "name": "SSAModel", + "attributes": [ + { + "name": "UseMarkerChars", + "type": "boolean" + } + ] + }, + { + "name": "TensorFlowTransform", + "description": "Transforms the data using the TensorFlow model.", + "attributes": [ + { + "name": "IsFrozen", + "type": "boolean" + }, + { + "name": "AddBatchDimensionInput", + "type": "boolean" + } + ] + }, + { + "name": "TextNormalizerTransform", + "description": "A text normalization transform that allows normalizing text case, removing diacritical marks, punctuation marks and/or numbers. The transform operates on text input as well as vector of tokens/text (vector of ReadOnlyMemory).", + "attributes": [ + { + "name": "CaseMode", + "type": "TextNormalizingTransformer.CaseMode" + }, + { + "name": "KeepDiacritics", + "type": "boolean" + }, + { + "name": "KeepPunctuations", + "type": "boolean" + }, + { + "name": "KeepNumbers", + "type": "boolean" + } + ] + } +] \ No newline at end of file diff --git a/mlnet.js b/mlnet.js new file mode 100644 index 00000000000..7e075e2a789 --- /dev/null +++ b/mlnet.js @@ -0,0 +1,2459 @@ + +// Experimental + +import * as base from './base.js'; + +const mlnet = {}; + +mlnet.ModelFactory = class { + + match(context) { + const entries = context.peek('zip'); + if (entries instanceof Map && entries.size > 0) { + const root = new Set([ 'TransformerChain', 'Predictor']); + if (Array.from(entries.keys()).some((name) => root.has(name.split('\\').shift().split('/').shift()))) { + return entries; + } + } + return null; + } + + async open(context, target) { + const metadata = await context.metadata('mlnet-metadata.json'); + const entries = target; + const reader = new mlnet.ModelReader(entries); + return new mlnet.Model(metadata, reader); + } +}; + +mlnet.Model = class { + + constructor(metadata, reader) { + this._format = "ML.NET"; + if (reader.version && reader.version.length > 0) { + this._format += ` v${reader.version}`; + } + this._graphs = []; + this._graphs.push(new mlnet.Graph(metadata, reader)); + } + + get format() { + return this._format; + } + + get graphs() { + return this._graphs; + } +}; + +mlnet.Graph = class { + + constructor(metadata, reader) { + this._inputs = []; + this._outputs = []; + this._nodes = []; + this._groups = false; + const values = new Map(); + values.map = (name, type) => { + if (!values.has(name)) { + values.set(name, new mlnet.Value(name, type || null)); + } else if (type) { + throw new mlnet.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + if (reader.schema && reader.schema.inputs) { + for (const input of reader.schema.inputs) { + const argument = new mlnet.Argument(input.name, [ values.map(input.name, new mlnet.TensorType(input.type)) ]); + this._inputs.push(argument); + } + } + const createNode = (scope, group, transformer) => { + if (transformer.inputs && transformer.outputs) { + for (const input of transformer.inputs) { + input.name = scope[input.name] ? 
scope[input.name].argument : input.name; + } + for (const output of transformer.outputs) { + if (scope[output.name]) { + scope[output.name].counter++; + const next = `${output.name}\n${scope[output.name].counter}`; // custom argument id + scope[output.name].argument = next; + output.name = next; + } else { + scope[output.name] = { + argument: output.name, + counter: 0 + }; + } + } + } + const node = new mlnet.Node(metadata, group, transformer, values); + this._nodes.push(node); + }; + /* eslint-disable no-use-before-define */ + const loadChain = (scope, name, chain) => { + this._groups = true; + const group = name.split('/').splice(1).join('/'); + for (const childTransformer of chain) { + loadTransformer(scope, group, childTransformer); + } + }; + const loadTransformer = (scope, group, transformer) => { + switch (transformer.__type__) { + case 'TransformerChain': + case 'Text': + loadChain(scope, transformer.__name__, transformer.chain); + break; + default: + createNode(scope, group, transformer); + break; + } + }; + /* eslint-enable no-use-before-define */ + const scope = new Map(); + if (reader.dataLoaderModel) { + loadTransformer(scope, '', reader.dataLoaderModel); + } + if (reader.predictor) { + loadTransformer(scope, '', reader.predictor); + } + if (reader.transformerChain) { + loadTransformer(scope, '', reader.transformerChain); + } + } + + get groups() { + return this._groups; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +mlnet.Argument = class { + + constructor(name, value) { + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +mlnet.Value = class { + + constructor(name, type) { + if (typeof name !== 'string') { + throw new mlnet.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this._name = name; + this._type = type; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } +}; + +mlnet.Node = class { + + constructor(metadata, group, transformer, values) { + this._metadata = metadata; + this._group = group; + this._name = transformer.__name__; + this._inputs = []; + this._outputs = []; + this._attributes = []; + const type = transformer.__type__; + this._type = metadata.type(type) || { name: type }; + if (transformer.inputs) { + let i = 0; + for (const input of transformer.inputs) { + const argument = new mlnet.Argument(i.toString(), [ values.map(input.name) ]); + this._inputs.push(argument); + i++; + } + } + if (transformer.outputs) { + let i = 0; + for (const output of transformer.outputs) { + const argument = new mlnet.Argument(i.toString(), [ values.map(output.name) ]); + this._outputs.push(argument); + i++; + } + } + for (const key of Object.keys(transformer).filter((key) => !key.startsWith('_') && key !== 'inputs' && key !== 'outputs')) { + const attribute = new mlnet.Attribute(metadata.attribute(type, this._name), key, transformer[key]); + this._attributes.push(attribute); + } + } + + get group() { + return this._group; + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } +}; + +mlnet.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + this._value = value; + if (schema) { + if (schema.type) { + this._type = schema.type; + } + if 
(this._type) { + let type = mlnet; + const id = this._type.split('.'); + while (type && id.length > 0) { + type = type[id.shift()]; + } + if (type) { + mlnet.Attribute._reverseMap = mlnet.Attribute._reverseMap || {}; + let reverse = mlnet.Attribute._reverseMap[this._type]; + if (!reverse) { + reverse = {}; + for (const key of Object.keys(type)) { + reverse[type[key.toString()]] = key; + } + mlnet.Attribute._reverseMap[this._type] = reverse; + } + if (Object.prototype.hasOwnProperty.call(reverse, this._value)) { + this._value = reverse[this._value]; + } + } + } + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +mlnet.TensorType = class { + + constructor(codec) { + mlnet.TensorType._map = mlnet.TensorType._map || new Map([ + [ 'Byte', 'uint8' ], + [ 'Boolean', 'boolean' ], + [ 'Single', 'float32' ], + [ 'Double', 'float64' ], + [ 'UInt32', 'uint32' ], + [ 'Int64', 'int64' ], + [ 'TextSpan', 'string' ] + ]); + this._dataType = '?'; + this._shape = new mlnet.TensorShape(null); + if (mlnet.TensorType._map.has(codec.name)) { + this._dataType = mlnet.TensorType._map.get(codec.name); + } else if (codec.name == 'VBuffer') { + if (mlnet.TensorType._map.has(codec.itemType.name)) { + this._dataType = mlnet.TensorType._map.get(codec.itemType.name); + } else { + throw new mlnet.Error(`Unsupported data type '${codec.itemType.name}'.`); + } + this._shape = new mlnet.TensorShape(codec.dims); + } else if (codec.name == 'Key2') { + this._dataType = 'key2'; + } else { + throw new mlnet.Error(`Unsupported data type '${codec.name}'.`); + } + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +mlnet.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (!this._dimensions || this._dimensions.length == 0) { + return ''; + } + return `[${this._dimensions.join(',')}]`; + } +}; + +mlnet.ModelReader = class { + + constructor(entries) { + + const catalog = new mlnet.ComponentCatalog(); + catalog.register('AffineNormExec', mlnet.AffineNormSerializationUtils); + catalog.register('AnomalyPredXfer', mlnet.AnomalyPredictionTransformer); + catalog.register('BinaryPredXfer', mlnet.BinaryPredictionTransformer); + catalog.register('BinaryLoader', mlnet.BinaryLoader); + catalog.register('CaliPredExec', mlnet.CalibratedPredictor); + catalog.register('CdfNormalizeFunction', mlnet.CdfColumnFunction); + catalog.register('CharToken', mlnet.TokenizingByCharactersTransformer); + catalog.register('ChooseColumnsTransform', mlnet.ColumnSelectingTransformer); + catalog.register('ClusteringPredXfer', mlnet.ClusteringPredictionTransformer); + catalog.register('ConcatTransform', mlnet.ColumnConcatenatingTransformer); + catalog.register('CopyTransform', mlnet.ColumnCopyingTransformer); + catalog.register('ConvertTransform', mlnet.TypeConvertingTransformer); + catalog.register('CSharpTransform', mlnet.CSharpTransform); + catalog.register('DropColumnsTransform', mlnet.DropColumnsTransform); + catalog.register('FAFMPredXfer', mlnet.FieldAwareFactorizationMachinePredictionTransformer); + catalog.register('FastForestBinaryExec', mlnet.FastForestClassificationPredictor); + catalog.register('FastTreeBinaryExec', mlnet.FastTreeBinaryModelParameters); + catalog.register('FastTreeTweedieExec', mlnet.FastTreeTweedieModelParameters); + 
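+        // Each signature string matches the 24-byte loader signature stored in a model
+        // section header; unknown signatures throw in ComponentCatalog.create().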
catalog.register('FastTreeRankerExec', mlnet.FastTreeRankingModelParameters); + catalog.register('FastTreeRegressionExec', mlnet.FastTreeRegressionModelParameters); + catalog.register('FeatWCaliPredExec', mlnet.FeatureWeightsCalibratedModelParameters); + catalog.register('FieldAwareFactMacPredict', mlnet.FieldAwareFactorizationMachineModelParameters); + catalog.register('GcnTransform', mlnet.LpNormNormalizingTransformer); + catalog.register('GenericScoreTransform', mlnet.GenericScoreTransform); + catalog.register('IidChangePointDetector', mlnet.IidChangePointDetector); + catalog.register('IidSpikeDetector', mlnet.IidSpikeDetector); + catalog.register('ImageClassificationTrans', mlnet.ImageClassificationTransformer); + catalog.register('ImageClassificationPred', mlnet.ImageClassificationModelParameters); + catalog.register('ImageLoaderTransform', mlnet.ImageLoadingTransformer); + catalog.register('ImageScalerTransform', mlnet.ImageResizingTransformer); + catalog.register('ImagePixelExtractor', mlnet.ImagePixelExtractingTransformer); + catalog.register('KeyToValueTransform', mlnet.KeyToValueMappingTransformer); + catalog.register('KeyToVectorTransform', mlnet.KeyToVectorMappingTransformer); + catalog.register('KMeansPredictor', mlnet.KMeansModelParameters); + catalog.register('LinearRegressionExec', mlnet.LinearRegressionModelParameters); + catalog.register('LightGBMRegressionExec', mlnet.LightGbmRegressionModelParameters); + catalog.register('LightGBMBinaryExec', mlnet.LightGbmBinaryModelParameters); + catalog.register('Linear2CExec', mlnet.LinearBinaryModelParameters); + catalog.register('LinearModelStats', mlnet.LinearModelParameterStatistics); + catalog.register('MaFactPredXf', mlnet.MatrixFactorizationPredictionTransformer); + catalog.register('MFPredictor', mlnet.MatrixFactorizationModelParameters); + catalog.register('MulticlassLinear', mlnet.LinearMulticlassModelParameters); + catalog.register('MultiClassLRExec', mlnet.MaximumEntropyModelParameters); + catalog.register('MultiClassNaiveBayesPred', mlnet.NaiveBayesMulticlassModelParameters); + catalog.register('MultiClassNetPredictor', mlnet.MultiClassNetPredictor); + catalog.register('MulticlassPredXfer', mlnet.MulticlassPredictionTransformer); + catalog.register('NAReplaceTransform', mlnet.MissingValueReplacingTransformer); + catalog.register('NgramTransform', mlnet.NgramExtractingTransformer); + catalog.register('NgramHashTransform', mlnet.NgramHashingTransformer); + catalog.register('NltTokenizeTransform', mlnet.NltTokenizeTransform); + catalog.register('Normalizer', mlnet.NormalizingTransformer); + catalog.register('NormalizeTransform', mlnet.NormalizeTransform); + catalog.register('OnnxTransform', mlnet.OnnxTransformer); + catalog.register('OptColTransform', mlnet.OptionalColumnTransform); + catalog.register('OVAExec', mlnet.OneVersusAllModelParameters); + catalog.register('pcaAnomExec', mlnet.PcaModelParameters); + catalog.register('PcaTransform', mlnet.PrincipalComponentAnalysisTransformer); + catalog.register('PipeDataLoader', mlnet.CompositeDataLoader); + catalog.register('PlattCaliExec', mlnet.PlattCalibrator); + catalog.register('PMixCaliPredExec', mlnet.ParameterMixingCalibratedModelParameters); + catalog.register('PoissonRegressionExec', mlnet.PoissonRegressionModelParameters); + catalog.register('ProtonNNMCPred', mlnet.ProtonNNMCPred); + catalog.register('RegressionPredXfer', mlnet.RegressionPredictionTransformer); + catalog.register('RowToRowMapper', mlnet.RowToRowMapperTransform); + catalog.register('SsaForecasting', 
mlnet.SsaForecastingTransformer); + catalog.register('SSAModel', mlnet.AdaptiveSingularSpectrumSequenceModelerInternal); + catalog.register('SelectColumnsTransform', mlnet.ColumnSelectingTransformer); + catalog.register('StopWordsTransform', mlnet.StopWordsTransform); + catalog.register('TensorFlowTransform', mlnet.TensorFlowTransformer); + catalog.register('TermLookupTransform', mlnet.ValueMappingTransformer); + catalog.register('TermTransform', mlnet.ValueToKeyMappingTransformer); + catalog.register('TermManager', mlnet.TermManager); + catalog.register('Text', mlnet.TextFeaturizingEstimator); + catalog.register('TextLoader', mlnet.TextLoader); + catalog.register('TextNormalizerTransform', mlnet.TextNormalizingTransformer); + catalog.register('TokenizeTextTransform', mlnet.WordTokenizingTransformer); + catalog.register('TransformerChain', mlnet.TransformerChain); + catalog.register('ValueMappingTransformer', mlnet.ValueMappingTransformer); + catalog.register('XGBoostMulticlass', mlnet.XGBoostMulticlass); + + const root = new mlnet.ModelHeader(catalog, entries, '', null); + + const version = root.openText('TrainingInfo/Version.txt'); + if (version) { + this.version = version.split(' ').shift().split('\r').shift(); + } + + const schemaReader = root.openBinary('Schema'); + if (schemaReader) { + this.schema = new mlnet.BinaryLoader(null, schemaReader).schema; + } + + const transformerChain = root.open('TransformerChain'); + if (transformerChain) { + this.transformerChain = transformerChain; + } + + const dataLoaderModel = root.open('DataLoaderModel'); + if (dataLoaderModel) { + this.dataLoaderModel = dataLoaderModel; + } + + const predictor = root.open('Predictor'); + if (predictor) { + this.predictor = predictor; + } + } +}; + +mlnet.ComponentCatalog = class { + + constructor() { + this._map = new Map(); + } + + register(signature, type) { + this._map.set(signature, type); + } + + create(signature, context) { + if (!this._map.has(signature)) { + throw new mlnet.Error(`Unsupported loader signature '${signature}'.`); + } + const type = this._map.get(signature); + return Reflect.construct(type, [ context ]); + } +}; + +mlnet.ModelHeader = class { + + constructor(catalog, entries, directory, data) { + + this._entries = entries; + this._catalog = catalog; + this._directory = directory; + + if (data) { + const reader = new mlnet.BinaryReader(data); + + const decoder = new TextDecoder('ascii'); + reader.assert('ML\0MODEL'); + this.versionWritten = reader.uint32(); + this.versionReadable = reader.uint32(); + + const modelBlockOffset = reader.uint64(); + /* let modelBlockSize = */ reader.uint64(); + const stringTableOffset = reader.uint64(); + const stringTableSize = reader.uint64(); + const stringCharsOffset = reader.uint64(); + /* let stringCharsSize = */ reader.uint64(); + this.modelSignature = decoder.decode(reader.read(8)); + this.modelVersionWritten = reader.uint32(); + this.modelVersionReadable = reader.uint32(); + this.loaderSignature = decoder.decode(reader.read(24).filter((c) => c != 0)); + this.loaderSignatureAlt = decoder.decode(reader.read(24).filter((c) => c != 0)); + const tailOffset = reader.uint64(); + /* let tailLimit = */ reader.uint64(); + const assemblyNameOffset = reader.uint64(); + const assemblyNameSize = reader.uint32(); + if (stringTableOffset != 0 && stringCharsOffset != 0) { + reader.seek(stringTableOffset); + const stringCount = stringTableSize >> 3; + const stringSizes = []; + let previousStringSize = 0; + for (let i = 0; i < stringCount; i++) { + const stringSize = 
reader.uint64(); + stringSizes.push(stringSize - previousStringSize); + previousStringSize = stringSize; + } + reader.seek(stringCharsOffset); + this.strings = []; + for (let i = 0; i < stringCount; i++) { + const cch = stringSizes[i] >> 1; + let sb = ''; + for (let ich = 0; ich < cch; ich++) { + sb += String.fromCharCode(reader.uint16()); + } + this.strings.push(sb); + } + } + if (assemblyNameOffset != 0) { + reader.seek(assemblyNameOffset); + this.assemblyName = decoder.decode(reader.read(assemblyNameSize)); + } + reader.seek(tailOffset); + reader.assert('LEDOM\0LM'); + + this._reader = reader; + this._reader.seek(modelBlockOffset); + } + } + + get reader() { + return this._reader; + } + + string(empty) { + const id = this.reader.int32(); + if (empty === null && id < 0) { + return null; + } + return this.strings[id]; + } + + open(name) { + const dir = this._directory.length > 0 ? `${this._directory}/` : this._directory; + name = dir + name; + const key = `${name}/Model.key`; + const stream = this._entries.get(key) || this._entries.get(key.replace(/\//g, '\\')); + if (stream) { + const buffer = stream.peek(); + const context = new mlnet.ModelHeader(this._catalog, this._entries, name, buffer); + const value = this._catalog.create(context.loaderSignature, context); + value.__type__ = value.__type__ || context.loaderSignature; + value.__name__ = name; + return value; + } + return null; + } + + openBinary(name) { + const dir = this._directory.length > 0 ? `${this._directory}/` : this._directory; + name = dir + name; + const stream = this._entries.get(name) || this._entries.get(name.replace(/\//g, '\\')); + if (stream) { + const buffer = stream.peek(); + return new mlnet.BinaryReader(buffer); + } + return null; + } + + openText(name) { + const dir = this._directory.length > 0 ? `${this._directory}/` : this._directory; + name = dir + name; + const stream = this._entries.get(name) || this._entries.get(name.replace(/\//g, '\\')); + if (stream) { + const buffer = stream.peek(); + const decoder = new TextDecoder(); + return decoder.decode(buffer); + } + return null; + } + + check(signature, verWrittenCur, verWeCanReadBack) { + return signature === this.modelSignature && verWrittenCur >= this.modelVersionReadable && verWeCanReadBack <= this.modelVersionWritten; + } +}; + +mlnet.BinaryReader = class extends base.BinaryReader { + + match(text) { + const position = this.position; + for (let i = 0; i < text.length; i++) { + if (this.byte() != text.charCodeAt(i)) { + this.seek(position); + return false; + } + } + return true; + } + + assert(text) { + if (!this.match(text)) { + throw new mlnet.Error(`Invalid '${text.split('\0').join('')}' signature.`); + } + } + + booleans(count) { + const values = []; + for (let i = 0; i < count; i++) { + values.push(this.boolean()); + } + return values; + } + + int32s(count) { + const values = []; + for (let i = 0; i < count; i++) { + values.push(this.int32()); + } + return values; + } + + uint32s(count) { + const values = []; + for (let i = 0; i < count; i++) { + values.push(this.uint32()); + } + return values; + } + + int64() { + const low = this.uint32(); + const hi = this.uint32(); + if (low == 0xffffffff && hi == 0x7fffffff) { + return Number.MAX_SAFE_INTEGER; + } + if (hi === 0xffffffff) { + return low - 0x100000000; // two's complement negative in the 32-bit range + } + if (hi !== 0) { + throw new mlnet.Error('Value not in 32-bit range.'); + } + return low; + } + + float32s(count) { + const values = []; + for (let i = 0; i < count; i++) { + values.push(this.float32()); + } + return values; + } + + float64s(count) { + const values = []; + for (let i = 0; i 
< count; i++) { + values.push(this.float64()); + } + return values; + } + + string() { + const size = this.leb128(); + const buffer = this.read(size); + return new TextDecoder('utf-8').decode(buffer); + } + + leb128() { + let result = 0; + let shift = 0; + let value; + do { + value = this.byte(); + result |= (value & 0x7F) << shift; + shift += 7; + } while ((value & 0x80) != 0); + return result; + } +}; + +mlnet.BinaryLoader = class { // 'BINLOADR' + + constructor(context, reader) { + if (context) { + if (context.modelVersionWritten >= 0x00010002) { + this.Threads = context.reader.int32(); + this.GeneratedRowIndexName = context.string(null); + } + this.ShuffleBlocks = context.modelVersionWritten >= 0x00010003 ? context.reader.float64() : 4; + reader = context.openBinary('Schema.idv'); + } + // https://github.com/dotnet/machinelearning/blob/master/docs/code/IdvFileFormat.md + reader.assert('CML\0DVB\0'); + reader.skip(8); // version + reader.skip(8); // compatibleVersion + const tableOfContentsOffset = reader.uint64(); + const tailOffset = reader.int64(); + reader.int64(); // rowCount + const columnCount = reader.int32(); + reader.seek(tailOffset); + reader.assert('\0BVD\0LMC'); + reader.seek(tableOfContentsOffset); + this.schema = {}; + this.schema.inputs = []; + for (let c = 0; c < columnCount; c ++) { + const input = {}; + input.name = reader.string(); + input.type = new mlnet.Codec(reader); + input.compression = reader.byte(); // None = 0, Deflate = 1 + input.rowsPerBlock = reader.leb128(); + input.lookupOffset = reader.int64(); + input.metadataTocOffset = reader.int64(); + this.schema.inputs.push(input); + } + } +}; + +mlnet.TransformerChain = class { + + constructor(context) { + const reader = context.reader; + const length = reader.int32(); + this.scopes = []; + this.chain = []; + for (let i = 0; i < length; i++) { + this.scopes.push(reader.int32()); // 0x01 = Training, 0x02 = Testing, 0x04 = Scoring + const dirName = `Transform_${(`00${i}`).slice(-3)}`; + const transformer = context.open(dirName); + this.chain.push(transformer); + } + } +}; + +mlnet.TransformBase = class { + + constructor(/* context */) { + + } +}; + +mlnet.RowToRowTransformBase = class extends mlnet.TransformBase { + + constructor(context) { + super(context); + } +}; + +mlnet.RowToRowTransformerBase = class { + + constructor(/* context */) { + } +}; + +mlnet.RowToRowMapperTransformBase = class extends mlnet.RowToRowTransformBase { + + constructor(context) { + super(context); + } +}; + +mlnet.OneToOneTransformerBase = class { + + constructor(context) { + const reader = context.reader; + const n = reader.int32(); + this.inputs = []; + this.outputs = []; + for (let i = 0; i < n; i++) { + const output = context.string(); + const input = context.string(); + this.outputs.push({ name: output }); + this.inputs.push({ name: input }); + } + } +}; + +mlnet.ColumnCopyingTransformer = class { + + constructor(context) { + const reader = context.reader; + const length = reader.uint32(); + this.inputs = []; + this.outputs = []; + for (let i = 0; i < length; i++) { + this.outputs.push({ name: context.string() }); + this.inputs.push({ name: context.string() }); + } + } +}; + +mlnet.ColumnConcatenatingTransformer = class { + + constructor(context) { + const reader = context.reader; + if (context.modelVersionReadable >= 0x00010003) { + const count = reader.int32(); + for (let i = 0; i < count; i++) { + this.outputs = []; + this.outputs.push({ name: context.string() }); + const n = reader.int32(); + this.inputs = []; + for (let j = 0; 
j < n; j++) { + const input = { + name: context.string() + }; + const alias = context.string(null); + if (alias) { + input.alias = alias; + } + this.inputs.push(input); + } + } + } else { + this.precision = reader.int32(); + const n = reader.int32(); + const names = []; + const inputs = []; + for (let i = 0; i < n; i++) { + names.push(context.string()); + const numSources = reader.int32(); + const input = []; + for (let j = 0; j < numSources; j++) { + input.push(context.string()); + } + inputs.push(input); + } + const aliases = []; + if (context.modelVersionReadable >= 0x00010002) { + for (let i = 0; i < n; i++) { + /* let length = */ inputs[i].length; + const alias = {}; + aliases.push(alias); + if (context.modelVersionReadable >= 0x00010002) { + for (;;) { + const j = reader.int32(); + if (j == -1) { + break; + } + alias[j] = context.string(); + } + } + } + } + + if (n > 1) { + throw new mlnet.Error(`Unsupported ColumnConcatenatingTransformer name count '${n}'.`); + } + + this.outputs = []; + for (let i = 0; i < n; i++) { + this.outputs.push({ + name: names[i] + }); + this.inputs = inputs[i]; + } + } + } +}; + +mlnet.PredictionTransformerBase = class { + + constructor(context) { + this.Model = context.open('Model'); + const trainSchemaReader = context.openBinary('TrainSchema'); + if (trainSchemaReader) { + new mlnet.BinaryLoader(null, trainSchemaReader).schema; + } + } +}; + +mlnet.MatrixFactorizationModelParameters = class { + + constructor(context) { + const reader = context.reader; + this.NumberOfRows = reader.int32(); + if (context.modelVersionWritten < 0x00010002) { + reader.uint64(); // mMin + } + this.NumberOfColumns = reader.int32(); + if (context.modelVersionWritten < 0x00010002) { + reader.uint64(); // nMin + } + this.ApproximationRank = reader.int32(); + + this._leftFactorMatrix = reader.float32s(this.NumberOfRows * this.ApproximationRank); + this._rightFactorMatrix = reader.float32s(this.NumberOfColumns * this.ApproximationRank); + } +}; + +mlnet.MatrixFactorizationPredictionTransformer = class extends mlnet.PredictionTransformerBase { + + constructor(context) { + super(context); + this.MatrixColumnIndexColumnName = context.string(); + this.MatrixRowIndexColumnName = context.string(); + // TODO + } +}; + +mlnet.FieldAwareFactorizationMachinePredictionTransformer = class extends mlnet.PredictionTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.inputs = []; + for (let i = 0; i < this.Model.FieldCount; i++) { + this.inputs.push({ name: context.string() }); + } + this.Threshold = reader.float32(); + this.ThresholdColumn = context.string(); + this.inputs.push({ name: this.ThresholdColumn }); + } +}; + +mlnet.SingleFeaturePredictionTransformerBase = class extends mlnet.PredictionTransformerBase { + + constructor(context) { + super(context); + const featureColumn = context.string(null); + this.inputs = []; + this.inputs.push({ name: featureColumn }); + this.outputs = []; + this.outputs.push({ name: featureColumn }); + } +}; + +mlnet.ClusteringPredictionTransformer = class extends mlnet.SingleFeaturePredictionTransformerBase { + + constructor(context) { + super(context); + } +}; + +mlnet.AnomalyPredictionTransformer = class extends mlnet.SingleFeaturePredictionTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Threshold = reader.float32(); + this.ThresholdColumn = context.string(); + } +}; + +mlnet.AffineNormSerializationUtils = class { + + constructor(context) { + const reader = 
context.reader; + /* cbFloat = */ reader.int32(); + this.NumFeatures = reader.int32(); + const morphCount = reader.int32(); + if (morphCount == -1) { + this.ScalesSparse = reader.float32s(reader.int32()); + this.OffsetsSparse = reader.float32s(reader.int32()); + } else { + // debugger; + } + } +}; + +mlnet.RegressionPredictionTransformer = class extends mlnet.SingleFeaturePredictionTransformerBase { + + constructor(context) { + super(context); + } +}; + +mlnet.BinaryPredictionTransformer = class extends mlnet.SingleFeaturePredictionTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Threshold = reader.float32(); + this.ThresholdColumn = context.string(); + } +}; + +mlnet.MulticlassPredictionTransformer = class extends mlnet.SingleFeaturePredictionTransformerBase { + + constructor(context) { + super(context); + this.TrainLabelColumn = context.string(null); + this.inputs.push({ name: this.TrainLabelColumn }); + } +}; + +mlnet.MissingValueReplacingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + for (let i = 0; i < this.inputs.length; i++) { + const codec = new mlnet.Codec(reader); + const count = reader.int32(); + this.values = codec.read(reader, count); + } + } +}; + +mlnet.PredictorBase = class { + + constructor(context) { + const reader = context.reader; + if (reader.int32() != 4) { + throw new mlnet.Error('Invalid float type size.'); + } + } +}; + +mlnet.ModelParametersBase = class { + + constructor(context) { + const reader = context.reader; + const cbFloat = reader.int32(); + if (cbFloat !== 4) { + throw new mlnet.Error('This file was saved by an incompatible version.'); + } + } +}; + +mlnet.ImageClassificationModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.classCount = reader.int32(); + this.imagePreprocessorTensorInput = reader.string(); + this.imagePreprocessorTensorOutput = reader.string(); + this.graphInputTensor = reader.string(); + this.graphOutputTensor = reader.string(); + this.modelFile = 'TFModel'; + // const modelBytes = context.openBinary('TFModel'); + // first uint32 is size of TensorFlow model + // inputType = new VectorDataViewType(uint8); + // outputType = new VectorDataViewType(float32, classCount); + } +}; + +mlnet.NaiveBayesMulticlassModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this._labelHistogram = reader.int32s(reader.int32()); + this._featureCount = reader.int32(); + this._featureHistogram = []; + for (let i = 0; i < this._labelHistogram.length; i++) { + if (this._labelHistogram[i] > 0) { + this._featureHistogram.push(reader.int32s(this._featureCount)); + } + } + this._absentFeaturesLogProb = reader.float64s(this._labelHistogram.length); + } +}; + +mlnet.LinearModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Bias = reader.float32(); + /* let len = */ reader.int32(); + this.Indices = reader.int32s(reader.int32()); + this.Weights = reader.float32s(reader.int32()); + } +}; + +mlnet.LinearBinaryModelParameters = class extends mlnet.LinearModelParameters { + + constructor(context) { + super(context); + if (context.modelVersionWritten > 0x00020001) { + this.Statistics = context.open('ModelStats'); + } + } +}; + +mlnet.ModelStatisticsBase = class 
{ + + constructor(context) { + const reader = context.reader; + this.ParametersCount = reader.int32(); + this.TrainingExampleCount = reader.int64(); + this.Deviance = reader.float32(); + this.NullDeviance = reader.float32(); + + } +}; + +mlnet.LinearModelParameterStatistics = class extends mlnet.ModelStatisticsBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (context.modelVersionWritten < 0x00010002) { + if (!reader.boolean()) { + return; + } + } + const stdErrorValues = reader.float32s(this.ParametersCount); + const length = reader.int32(); + if (length == this.ParametersCount) { + this._coeffStdError = stdErrorValues; + } else { + this.stdErrorIndices = reader.int32s(this.ParametersCount); + this._coeffStdError = stdErrorValues; + } + this._bias = reader.float32(); + const isWeightsDense = reader.byte(); + const weightsLength = reader.int32(); + const weightsValues = reader.float32s(weightsLength); + + if (isWeightsDense) { + this._weights = weightsValues; + } else { + this.weightsIndices = reader.int32s(weightsLength); + } + } +}; + +mlnet.LinearMulticlassModelParametersBase = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + const numberOfFeatures = reader.int32(); + const numberOfClasses = reader.int32(); + this.Biases = reader.float32s(numberOfClasses); + const numStarts = reader.int32(); + if (numStarts == 0) { + /* let numIndices = */ reader.int32(); + /* let numWeights = */ reader.int32(); + this.Weights = []; + for (let i = 0; i < numberOfClasses; i++) { + const w = reader.float32s(numberOfFeatures); + this.Weights.push(w); + } + } else { + + const starts = reader.int32s(reader.int32()); + /* let numIndices = */ reader.int32(); + const indices = []; + for (let i = 0; i < numberOfClasses; i++) { + indices.push(reader.int32s(starts[i + 1] - starts[i])); + } + /* let numValues = */ reader.int32(); + this.Weights = []; + for (let i = 0; i < numberOfClasses; i++) { + const values = reader.float32s(starts[i + 1] - starts[i]); + this.Weights.push(values); + } + } + + const labelNamesReader = context.openBinary('LabelNames'); + if (labelNamesReader) { + this.LabelNames = []; + for (let i = 0; i < numberOfClasses; i++) { + const id = labelNamesReader.int32(); + this.LabelNames.push(context.strings[id]); + } + } + + const statistics = context.open('ModelStats'); + if (statistics) { + this.Statistics = statistics; + } + } +}; + +mlnet.LinearMulticlassModelParameters = class extends mlnet.LinearMulticlassModelParametersBase { + + constructor(context) { + super(context); + } +}; + +mlnet.RegressionModelParameters = class extends mlnet.LinearModelParameters { + + constructor(context) { + super(context); + } +}; + +mlnet.PoissonRegressionModelParameters = class extends mlnet.RegressionModelParameters { + + constructor(context) { + super(context); + } +}; + +mlnet.LinearRegressionModelParameters = class extends mlnet.RegressionModelParameters { + + constructor(context) { + super(context); + } +}; + +mlnet.MaximumEntropyModelParameters = class extends mlnet.LinearMulticlassModelParametersBase { + + constructor(context) { + super(context); + } +}; + +mlnet.TokenizingByCharactersTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.UseMarkerChars = reader.boolean(); + this.IsSeparatorStartEnd = context.modelVersionReadable < 0x00010002 ? 
true : reader.boolean(); + } +}; + +mlnet.SequencePool = class { + + constructor(reader) { + this.idLim = reader.int32(); + this.start = reader.int32s(this.idLim + 1); + this.bytes = reader.read(this.start[this.idLim]); + } +}; + +mlnet.NgramExtractingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (this.inputs.length == 1) { + this._option(context, reader, this); + } else { + // debugger; + } + } + + _option(context, reader, option) { + const readWeighting = context.modelVersionReadable >= 0x00010002; + option.NgramLength = reader.int32(); + option.SkipLength = reader.int32(); + if (readWeighting) { + option.Weighting = reader.int32(); + } + option.NonEmptyLevels = reader.booleans(option.NgramLength); + option.NgramMap = new mlnet.SequencePool(reader); + if (readWeighting) { + option.InvDocFreqs = reader.float64s(reader.int32()); + } + } +}; + +// mlnet.NgramExtractingTransformer.WeightingCriteria + +mlnet.NgramHashingTransformer = class extends mlnet.RowToRowTransformerBase { + + constructor(context) { + super(context); + const loadLegacy = context.modelVersionWritten < 0x00010003; + const reader = context.reader; + if (loadLegacy) { + reader.int32(); // cbFloat + } + this.inputs = []; + this.outputs = []; + const columnsLength = reader.int32(); + if (loadLegacy) { + /* TODO + for (let i = 0; i < columnsLength; i++) { + this.Columns.push(new NgramHashingEstimator.ColumnOptions(context)); + } */ + } else { + for (let i = 0; i < columnsLength; i++) { + this.outputs.push(context.string()); + const csrc = reader.int32(); + for (let j = 0; j < csrc; j++) { + const src = context.string(); + this.inputs.push(src); + // TODO inputs[i][j] = src; + } + } + } + } +}; + +mlnet.WordTokenizingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (this.inputs.length == 1) { + this.Separators = []; + const count = reader.int32(); + for (let i = 0; i < count; i++) { + this.Separators.push(String.fromCharCode(reader.int16())); + } + } else { + // debugger; + } + } +}; + +mlnet.TextNormalizingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.CaseMode = reader.byte(); + this.KeepDiacritics = reader.boolean(); + this.KeepPunctuations = reader.boolean(); + this.KeepNumbers = reader.boolean(); + } +}; + +mlnet.TextNormalizingTransformer.CaseMode = { + Lower: 0, + Upper: 1, + None: 2 +}; + +mlnet.PrincipalComponentAnalysisTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (context.modelVersionReadable === 0x00010001) { + if (reader.int32() !== 4) { + throw new mlnet.Error('This file was saved by an incompatible version.'); + } + } + this.TransformInfos = []; + for (let i = 0; i < this.inputs.length; i++) { + const option = {}; + option.Dimension = reader.int32(); + option.Rank = reader.int32(); + option.Eigenvectors = []; + for (let j = 0; j < option.Rank; j++) { + option.Eigenvectors.push(reader.float32s(option.Dimension)); + } + option.MeanProjected = reader.float32s(reader.int32()); + this.TransformInfos.push(option); + } + } +}; + +mlnet.LpNormNormalizingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + + if (context.modelVersionWritten <= 0x00010002) { + 
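// models written as version 0x00010002 or earlier persist sizeof(float) (cbFloat, expected to be 4) ahead of the options; the value is read and discarded + 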
/* cbFloat */ reader.int32(); + } + // let normKindSerialized = context.modelVersionWritten >= 0x00010002; + if (this.inputs.length == 1) { + this.EnsureZeroMean = reader.boolean(); + this.Norm = reader.byte(); + this.Scale = reader.float32(); + } else { + // debugger; + } + } +}; + +mlnet.KeyToVectorMappingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (context.modelVersionWritten == 0x00010001) { + /* cbFloat = */ reader.int32(); + } + const columnsLength = this.inputs.length; + this.Bags = reader.booleans(columnsLength); + } +}; + +mlnet.TypeConvertingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + // debugger; + } +}; + +mlnet.ImageLoadingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + this.ImageFolder = context.string(null); + } +}; + +mlnet.ImageResizingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (this.inputs.length == 1) { + this._option(reader, this); + } else { + this.Options = []; + for (let i = 0; i < this.inputs.length; i++) { + const option = {}; + this._option(reader, option); + this.Options.push(option); + } + } + } + + _option(reader, option) { + option.Width = reader.int32(); + option.Height = reader.int32(); + option.Resizing = reader.byte(); + option.Anchor = reader.byte(); + } +}; + +mlnet.ImageResizingTransformer.ResizingKind = { + IsoPad: 0, + IsoCrop: 1, + Fill: 2 +}; + +mlnet.ImageResizingTransformer.Anchor = { + Right: 0, + Left: 1, + Top: 2, + Bottom: 3, + Center: 4 +}; + +mlnet.ImagePixelExtractingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (this.inputs.length == 1) { + this._option(context, reader, this); + } else { + this.Options = []; + for (let i = 0; i < this.inputs.length; i++) { + const option = {}; + this._option(context, reader, option); + this.Options.push(option); + } + } + } + + _option(context, reader, option) { + option.ColorsToExtract = reader.byte(); + option.OrderOfExtraction = context.modelVersionWritten <= 0x00010002 ? 
mlnet.ImagePixelExtractingTransformer.ColorsOrder.ARGB : reader.byte(); + let planes = option.ColorsToExtract; + planes = (planes & 0x05) + ((planes >> 1) & 0x05); + planes = (planes & 0x03) + ((planes >> 2) & 0x03); + option.Planes = planes & 0xFF; + option.OutputAsFloatArray = reader.boolean(); + option.OffsetImage = reader.float32(); + option.ScaleImage = reader.float32(); + option.InterleavePixelColors = reader.boolean(); + } +}; + +mlnet.ImagePixelExtractingTransformer.ColorBits = { + Alpha: 0x01, + Red: 0x02, + Green: 0x04, + Blue: 0x08, + Rgb: 0x0E, + All: 0x0F +}; + +mlnet.ImagePixelExtractingTransformer.ColorsOrder = { + ARGB: 1, + ARBG: 2, + ABRG: 3, + ABGR: 4, + AGRB: 5, + AGBR: 6 +}; + +mlnet.NormalizingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Options = []; + for (let i = 0; i < this.inputs.length; i++) { + let isVector = false; + let shape = 0; + let itemKind = ''; + if (context.modelVersionWritten < 0x00010002) { + isVector = reader.boolean(); + shape = [ reader.int32() ]; + itemKind = reader.byte(); + } else { + isVector = reader.boolean(); + itemKind = reader.byte(); + shape = reader.int32s(reader.int32()); + } + let itemType = ''; + switch (itemKind) { + case 9: itemType = 'float32'; break; + case 10: itemType = 'float64'; break; + default: throw new mlnet.Error(`Unsupported NormalizingTransformer item kind '${itemKind}'.`); + } + const type = itemType + (!isVector ? '' : `[${shape.map((dim) => dim.toString()).join(',')}]`); + const name = `Normalizer_${(`00${i}`).slice(-3)}`; + const func = context.open(name); + this.Options.push({ type: type, func: func }); + } + } +}; + +mlnet.KeyToValueMappingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + } +}; + +mlnet.ValueToKeyMappingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (context.modelVersionWritten >= 0x00010003) { + this.textMetadata = reader.booleans(this.outputs.length + this.inputs.length); + } else { + this.textMetadata = []; + for (let i = 0; i < this.columnPairs.length; i++) { + this.textMetadata.push(false); + } + } + const vocabulary = context.open('Vocabulary'); + if (vocabulary) { + this.termMap = vocabulary.termMap; + } + } +}; + +mlnet.TermMap = class { + + constructor(context) { + const reader = context.reader; + const mtype = reader.byte(); + switch (mtype) { + case 0: { // Text + this.values = []; + const cstr = reader.int32(); + for (let i = 0; i < cstr; i++) { + this.values.push(context.string()); + } + break; + } + case 1: { // Codec + const codec = new mlnet.Codec(reader); + const count = reader.int32(); + this.values = codec.read(reader, count); + break; + } + default: + throw new mlnet.Error(`Unsupported term map type '${mtype}'.`); + } + } +}; + +mlnet.TermManager = class { + + constructor(context) { + const reader = context.reader; + const cmap = reader.int32(); + this.termMap = []; + if (context.modelVersionWritten >= 0x00010002) { + for (let i = 0; i < cmap; ++i) { + this.termMap.push(new mlnet.TermMap(context)); + // debugger; + // termMap[i] = TermMap.Load(c, host, CodecFactory); + } + } else { + throw new mlnet.Error('Unsupported TermManager version.'); + // for (let i = 0; i < cmap; ++i) { + // debugger; + // // termMap[i] = TermMap.TextImpl.Create(c, host) + // } + } + } +}; + + +mlnet.ValueMappingTransformer = class extends 
mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + this.keyColumnName = 'Key'; + if (context.check('TXTLOOKT', 0x00010002, 0x00010002)) { + this.keyColumnName = 'Term'; + } + // TODO + } +}; + +mlnet.KeyToVectorTransform = class { + + constructor(/* context */) { + } +}; + +mlnet.GenericScoreTransform = class { + + constructor(/* context */) { + } +}; + +mlnet.CompositeDataLoader = class { + + constructor(context) { + /* let loader = */ context.open('Loader'); + const reader = context.reader; + // LoadTransforms + reader.int32(); // floatSize + const cxf = reader.int32(); + const tagData = []; + for (let i = 0; i < cxf; i++) { + let tag = ''; + let args = null; + if (context.modelVersionReadable >= 0x00010002) { + tag = context.string(); + args = context.string(null); + } + tagData.push([ tag, args ]); + } + this.chain = []; + for (let j = 0; j < cxf; j++) { + const name = `Transform_${(`00${j}`).slice(-3)}`; + const transform = context.open(name); + this.chain.push(transform); + } + } +}; + +mlnet.RowToRowMapperTransform = class extends mlnet.RowToRowTransformBase { + + constructor(context) { + super(context); + const mapper = context.open('Mapper'); + this.__type__ = mapper.__type__; + for (const key of Object.keys(mapper)) { + this[key] = mapper[key]; + } + } +}; + +mlnet.ImageClassificationTransformer = class extends mlnet.RowToRowTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.addBatchDimensionInput = reader.boolean(); + const numInputs = reader.int32(); + this.inputs = []; + for (let i = 0; i < numInputs; i++) { + this.inputs.push({ name: context.string() }); + } + this.outputs = []; + const numOutputs = reader.int32(); + for (let i = 0; i < numOutputs; i++) { + this.outputs.push({ name: context.string() }); + } + this.labelColumn = reader.string(); + this.checkpointName = reader.string(); + this.arch = reader.int32(); // Architecture + this.scoreColumnName = reader.string(); + this.predictedColumnName = reader.string(); + this.learningRate = reader.float32(); + this.classCount = reader.int32(); + this.keyValueAnnotations = []; + for (let i = 0; i < this.classCount; i++) { + this.keyValueAnnotations.push(context.string()); + } + this.predictionTensorName = reader.string(); + this.softMaxTensorName = reader.string(); + this.jpegDataTensorName = reader.string(); + this.resizeTensorName = reader.string(); + } +}; + +mlnet.OnnxTransformer = class extends mlnet.RowToRowTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.modelFile = 'OnnxModel'; + // const modelBytes = context.openBinary('OnnxModel'); + // first uint32 is size of .onnx model + const numInputs = context.modelVersionWritten > 0x00010001 ? reader.int32() : 1; + this.inputs = []; + for (let i = 0; i < numInputs; i++) { + this.inputs.push({ name: context.string() }); + } + const numOutputs = context.modelVersionWritten > 0x00010001 ? 
reader.int32() : 1; + this.outputs = []; + for (let i = 0; i < numOutputs; i++) { + this.outputs.push({ name: context.string() }); + } + if (context.modelVersionWritten > 0x0001000C) { + const customShapeInfosLength = reader.int32(); + this.LoadedCustomShapeInfos = []; + for (let i = 0; i < customShapeInfosLength; i++) { + this.LoadedCustomShapeInfos.push({ + name: context.string(), + shape: reader.int32s(reader.int32()) + }); + } + } + } +}; + +mlnet.OptionalColumnTransform = class extends mlnet.RowToRowMapperTransformBase { + + constructor(context) { + super(context); + } +}; + +mlnet.TensorFlowTransformer = class extends mlnet.RowToRowTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.IsFrozen = context.modelVersionReadable >= 0x00010002 ? reader.boolean() : true; + this.AddBatchDimensionInput = context.modelVersionReadable >= 0x00010003 ? reader.boolean() : true; + const numInputs = reader.int32(); + this.inputs = []; + for (let i = 0; i < numInputs; i++) { + this.inputs.push({ name: context.string() }); + } + const numOutputs = context.modelVersionReadable >= 0x00010002 ? reader.int32() : 1; + this.outputs = []; + for (let i = 0; i < numOutputs; i++) { + this.outputs.push({ name: context.string() }); + } + } +}; + +mlnet.OneVersusAllModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.UseDist = reader.boolean(); + const len = reader.int32(); + this.chain = []; + for (let i = 0; i < len; i++) { + const name = `SubPredictor_${(`00${i}`).slice(-3)}`; + const predictor = context.open(name); + this.chain.push(predictor); + } + } +}; + +mlnet.TextFeaturizingEstimator = class { + + constructor(context) { + + if (context.modelVersionReadable === 0x00010001) { + const reader = context.reader; + const n = reader.int32(); + this.chain = []; + /* let loader = */ context.open('Loader'); + for (let i = 0; i < n; i++) { + const name = `Step_${(`00${i}`).slice(-3)}`; + const transformer = context.open(name); + this.chain.push(transformer); + // debugger; + } + + // throw new mlnet.Error('Unsupported TextFeaturizingEstimator format.'); + } else { + const chain = context.open('Chain'); + this.chain = chain.chain; + } + } +}; + +mlnet.TextLoader = class { + + constructor(context) { + const reader = context.reader; + reader.int32(); // floatSize + this.MaxRows = reader.int64(); + this.Flags = reader.uint32(); + this.InputSize = reader.int32(); + const separatorCount = reader.int32(); + this.Separators = []; + for (let i = 0; i < separatorCount; i++) { + this.Separators.push(String.fromCharCode(reader.uint16())); + } + this.Bindings = new mlnet.TextLoader.Bindings(context); + } +}; + +mlnet.TextLoader.Bindings = class { + + constructor(context) { + const reader = context.reader; + const cinfo = reader.int32(); + for (let i = 0; i < cinfo; i++) { + // debugger; + } + } +}; + +mlnet.CalibratedPredictorBase = class { + + constructor(predictor, calibrator) { + this.SubPredictor = predictor; + this.Calibrator = calibrator; + } +}; + +mlnet.ValueMapperCalibratedPredictorBase = class extends mlnet.CalibratedPredictorBase { + + constructor(predictor, calibrator) { + super(predictor, calibrator); + } +}; + +mlnet.CalibratedModelParametersBase = class { + + constructor(context) { + this.Predictor = context.open('Predictor'); + this.Calibrator = context.open('Calibrator'); + } +}; + +mlnet.ValueMapperCalibratedModelParametersBase = class extends 
mlnet.CalibratedModelParametersBase { + + constructor(context) { + super(context); + // debugger; + } +}; + +mlnet.CalibratedPredictor = class extends mlnet.ValueMapperCalibratedPredictorBase { + + constructor(context) { + const predictor = context.open('Predictor'); + const calibrator = context.open('Calibrator'); + super(predictor, calibrator); + } +}; + +mlnet.ParameterMixingCalibratedModelParameters = class extends mlnet.ValueMapperCalibratedModelParametersBase { + + constructor(context) { + super(context); + } +}; + +mlnet.FieldAwareFactorizationMachineModelParameters = class { + + constructor(context) { + const reader = context.reader; + this.Norm = reader.boolean(); + this.FieldCount = reader.int32(); + this.FeatureCount = reader.int32(); + this.LatentDim = reader.int32(); + this.LinearWeights = reader.float32s(reader.int32()); + this.LatentWeights = reader.float32s(reader.int32()); + } +}; + +mlnet.KMeansModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.k = reader.int32(); + this.Dimensionality = reader.int32(); + this.Centroids = []; + for (let i = 0; i < this.k; i++) { + const count = context.modelVersionWritten >= 0x00010002 ? reader.int32() : this.Dimensionality; + const indices = count < this.Dimensionality ? reader.int32s(count) : null; + const values = reader.float32s(count); + this.Centroids.push({ indices: indices, values: values }); + } + // input type = float32[dimensionality] + // output type = float32[k] + } +}; + +mlnet.PcaModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Dimension = reader.int32(); + this.Rank = reader.int32(); + const center = reader.boolean(); + if (center) { + this.Mean = reader.float32s(this.Dimension); + } else { + this.Mean = []; + } + this.EigenVectors = []; + for (let i = 0; i < this.Rank; ++i) { + this.EigenVectors.push(reader.float32s(this.Dimension)); + } + // input type -> float32[Dimension] + } +}; + +mlnet.TreeEnsembleModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + const usingDefaultValues = context.modelVersionWritten >= this.VerDefaultValueSerialized; + const categoricalSplits = context.modelVersionWritten >= this.VerCategoricalSplitSerialized; + this.TrainedEnsemble = new mlnet.InternalTreeEnsemble(context, usingDefaultValues, categoricalSplits); + this.InnerOptions = context.string(null); + if (context.modelVersionWritten >= this.VerNumFeaturesSerialized) { + this.NumFeatures = reader.int32(); + } + + // input type -> float32[NumFeatures] + // output type -> float32 + } +}; + +mlnet.InternalTreeEnsemble = class { + + constructor(context, usingDefaultValues, categoricalSplits) { + const reader = context.reader; + this.Trees = []; + const numTrees = reader.int32(); + for (let i = 0; i < numTrees; i++) { + switch (reader.byte()) { + case mlnet.InternalTreeEnsemble.TreeType.Regression: + this.Trees.push(new mlnet.InternalRegressionTree(context, usingDefaultValues, categoricalSplits)); + break; + case mlnet.InternalTreeEnsemble.TreeType.FastForest: + this.Trees.push(new mlnet.InternalQuantileRegressionTree(context, usingDefaultValues, categoricalSplits)); + break; + case mlnet.InternalTreeEnsemble.TreeType.Affine: + // Affine regression trees do not actually work, nor is it clear how they ever + // could have worked within TLC, so the chance of this happening 
seems remote. + throw new mlnet.Error('Affine regression trees unsupported.'); + default: + throw new mlnet.Error('Unsupported ensemble tree type.'); + } + } + this.Bias = reader.float64(); + this.FirstInputInitializationContent = context.string(null); + } +}; + +mlnet.InternalRegressionTree = class { + + constructor(context, usingDefaultValue, categoricalSplits) { + const reader = context.reader; + this.NumLeaves = reader.int32(); + this.MaxOutput = reader.float64(); + this.Weight = reader.float64(); + this.LteChild = reader.int32s(reader.int32()); + this.GtChild = reader.int32s(reader.int32()); + this.SplitFeatures = reader.int32s(reader.int32()); + if (categoricalSplits) { + const categoricalNodeIndices = reader.int32s(reader.int32()); + if (categoricalNodeIndices.length > 0) { + this.CategoricalSplitFeatures = []; + this.CategoricalSplitFeatureRanges = []; + for (const index of categoricalNodeIndices) { + this.CategoricalSplitFeatures[index] = reader.int32s(reader.int32()); + this.CategoricalSplitFeatureRanges[index] = reader.int32s(2); + } + } + } + this.Thresholds = reader.uint32s(reader.int32()); + this.RawThresholds = reader.float32s(reader.int32()); + this.DefaultValueForMissing = usingDefaultValue ? reader.float32s(reader.int32()) : null; + this.LeafValues = reader.float64s(reader.int32()); + + this.SplitGain = reader.float64s(reader.int32()); + this.GainPValue = reader.float64s(reader.int32()); + this.PreviousLeafValue = reader.float64s(reader.int32()); + } +}; + +mlnet.InternalTreeEnsemble.TreeType = { + Regression: 0, + Affine: 1, + FastForest: 2 +}; + +mlnet.TreeEnsembleModelParametersBasedOnRegressionTree = class extends mlnet.TreeEnsembleModelParameters { + + constructor(context) { + super(context); + } +}; + +mlnet.FastTreeTweedieModelParameters = class extends mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { + return 0x00010001; + } + + get VerDefaultValueSerialized() { + return 0x00010002; + } + + get VerCategoricalSplitSerialized() { + return 0x00010003; + } +}; + +mlnet.FastTreeRankingModelParameters = class extends mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { + return 0x00010002; + } + + get VerDefaultValueSerialized() { + return 0x00010004; + } + + get VerCategoricalSplitSerialized() { + return 0x00010005; + } +}; + +mlnet.FastTreeBinaryModelParameters = class extends mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { + return 0x00010002; + } + + get VerDefaultValueSerialized() { + return 0x00010004; + } + + get VerCategoricalSplitSerialized() { + return 0x00010005; + } +}; + +mlnet.FastTreeRegressionModelParameters = class extends mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { + return 0x00010002; + } + + get VerDefaultValueSerialized() { + return 0x00010004; + } + + get VerCategoricalSplitSerialized() { + return 0x00010005; + } +}; + +mlnet.LightGbmRegressionModelParameters = class extends mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { + return 0x00010002; + } + + get VerDefaultValueSerialized() { + return 0x00010004; + } + + get VerCategoricalSplitSerialized() { + return 0x00010005; + } +}; + 
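// Illustrative sketch (not used by the loader) of how a parsed InternalRegressionTree is typically evaluated: ML.NET stores internal nodes in LteChild/GtChild and encodes a leaf as the bitwise complement of its index (child < 0 => leaf ~child); dense, non-categorical splits are assumed here. + // const evaluateTree = (tree, features) => { + //     let node = 0; + //     while (node >= 0) { + //         node = features[tree.SplitFeatures[node]] <= tree.RawThresholds[node] ? tree.LteChild[node] : tree.GtChild[node]; + //     } + //     return tree.LeafValues[~node]; + // }; + 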
+mlnet.LightGbmBinaryModelParameters = class extends mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { + return 0x00010002; + } + + get VerDefaultValueSerialized() { + return 0x00010004; + } + + get VerCategoricalSplitSerialized() { + return 0x00010005; + } +}; + +mlnet.FeatureWeightsCalibratedModelParameters = class extends mlnet.ValueMapperCalibratedModelParametersBase { + + constructor(context) { + super(context); + // debugger; + } +}; + +mlnet.FastTreePredictionWrapper = class { + + constructor(/* context */) { + } +}; + +mlnet.FastForestClassificationPredictor = class extends mlnet.FastTreePredictionWrapper { + constructor(context) { + super(context); + } +}; + +mlnet.PlattCalibrator = class { + + constructor(context) { + const reader = context.reader; + this.ParamA = reader.float64(); + this.ParamB = reader.float64(); + } +}; + +mlnet.Codec = class { + + constructor(reader) { + this.name = reader.string(); + const size = reader.leb128(); + const data = reader.read(size); + reader = new mlnet.BinaryReader(data); + switch (this.name) { + case 'Boolean': break; + case 'Single': break; + case 'Double': break; + case 'Byte': break; + case 'Int32': break; + case 'UInt32': break; + case 'Int64': break; + case 'TextSpan': break; + case 'VBuffer': + this.itemType = new mlnet.Codec(reader); + this.dims = reader.int32s(reader.int32()); + break; + case 'Key': + case 'Key2': + this.itemType = new mlnet.Codec(reader); + this.count = reader.uint64(); + break; + default: + throw new mlnet.Error(`Unsupported codec '${this.name}'.`); + } + } + + read(reader, count) { + const values = []; + switch (this.name) { + case 'Single': + for (let i = 0; i < count; i++) { + values.push(reader.float32()); + } + break; + case 'Int32': + for (let i = 0; i < count; i++) { + values.push(reader.int32()); + } + break; + case 'Int64': + for (let i = 0; i < count; i++) { + values.push(reader.int64()); + } + break; + default: + throw new mlnet.Error(`Unsupported codec read operation '${this.name}'.`); + } + return values; + } +}; + +mlnet.SequentialTransformerBase = class { + + constructor(context) { + const reader = context.reader; + this.WindowSize = reader.int32(); + this.InitialWindowSize = reader.int32(); + this.inputs = []; + this.inputs.push({ name: context.string() }); + this.outputs = []; + this.outputs.push({ name: context.string() }); + this.ConfidenceLowerBoundColumn = reader.string(); + this.ConfidenceUpperBoundColumn = reader.string(); + this.Type = new mlnet.Codec(reader); + } +}; + +mlnet.AnomalyDetectionStateBase = class { + + constructor(context) { + const reader = context.reader; + this.LogMartingaleUpdateBuffer = mlnet.AnomalyDetectionStateBase._deserializeFixedSizeQueueDouble(reader); + this.RawScoreBuffer = mlnet.AnomalyDetectionStateBase._deserializeFixedSizeQueueDouble(reader); + this.LogMartingaleValue = reader.float64(); + this.SumSquaredDist = reader.float64(); + this.MartingaleAlertCounter = reader.int32(); + } + + static _deserializeFixedSizeQueueDouble(reader) { + /* let capacity = */ reader.int32(); + const count = reader.int32(); + const queue = []; + for (let i = 0; i < count; i++) { + queue.push(reader.float64()); + } + return queue; + } +}; + +mlnet.SequentialAnomalyDetectionTransformBase = class extends mlnet.SequentialTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Martingale = reader.byte(); + this.ThresholdScore = reader.byte(); + 
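// Side (read next), together with Martingale and ThresholdScore above, are byte-coded ML.NET enums (AnomalySide, MartingaleType, AlertingScore); the numeric codes are assumed to follow ML.NET's declaration order. + 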
this.Side = reader.byte(); + this.PowerMartingaleEpsilon = reader.float64(); + this.AlertThreshold = reader.float64(); + this.State = new mlnet.AnomalyDetectionStateBase(context); + } +}; + +mlnet.TimeSeriesUtils = class { + + static deserializeFixedSizeQueueSingle(reader) { + /* const capacity = */ reader.int32(); + const count = reader.int32(); + const queue = []; + for (let i = 0; i < count; i++) { + queue.push(reader.float32()); + } + return queue; + } +}; + +mlnet.IidAnomalyDetectionBase = class extends mlnet.SequentialAnomalyDetectionTransformBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.WindowedBuffer = mlnet.TimeSeriesUtils.deserializeFixedSizeQueueSingle(reader); + this.InitialWindowedBuffer = mlnet.TimeSeriesUtils.deserializeFixedSizeQueueSingle(reader); + } +}; + +mlnet.IidAnomalyDetectionBaseWrapper = class { + + constructor(context) { + const internalTransform = new mlnet.IidAnomalyDetectionBase(context); + for (const key of Object.keys(internalTransform)) { + this[key] = internalTransform[key]; + } + } +}; + +mlnet.IidChangePointDetector = class extends mlnet.IidAnomalyDetectionBaseWrapper { + + constructor(context) { + super(context); + } +}; + +mlnet.IidSpikeDetector = class extends mlnet.IidAnomalyDetectionBaseWrapper { + + constructor(context) { + super(context); + } +}; + +mlnet.SequenceModelerBase = class { + + constructor(/* context */) { + } +}; + +mlnet.RankSelectionMethod = { + Fixed: 0, + Exact: 1, + Fact: 2 +}; + +mlnet.AdaptiveSingularSpectrumSequenceModelerInternal = class extends mlnet.SequenceModelerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this._seriesLength = reader.int32(); + this._windowSize = reader.int32(); + this._trainSize = reader.int32(); + this._rank = reader.int32(); + this._discountFactor = reader.float32(); + this._rankSelectionMethod = reader.byte(); // RankSelectionMethod + const isWeightSet = reader.byte(); + this._alpha = reader.float32s(reader.int32()); + if (context.modelVersionReadable >= 0x00010002) { + this._state = reader.float32s(reader.int32()); + } + this.ShouldComputeForecastIntervals = reader.byte(); + this._observationNoiseVariance = reader.float32(); + this._autoregressionNoiseVariance = reader.float32(); + this._observationNoiseMean = reader.float32(); + this._autoregressionNoiseMean = reader.float32(); + if (context.modelVersionReadable >= 0x00010002) { + this._nextPrediction = reader.float32(); + } + this._maxRank = reader.int32(); + this._shouldStablize = reader.byte(); + this._shouldMaintainInfo = reader.byte(); + this._maxTrendRatio = reader.float64(); + if (isWeightSet) { + this._wTrans = reader.float32s(reader.int32()); + this._y = reader.float32s(reader.int32()); + } + this._buffer = mlnet.TimeSeriesUtils.deserializeFixedSizeQueueSingle(reader); + } +}; + +mlnet.SequentialForecastingTransformBase = class extends mlnet.SequentialTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this._outputLength = reader.int32(); + } +}; + +mlnet.SsaForecastingBaseWrapper = class extends mlnet.SequentialForecastingTransformBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.IsAdaptive = reader.boolean(); + this.Horizon = reader.int32(); + this.ConfidenceLevel = reader.float32(); + this.WindowedBuffer = mlnet.TimeSeriesUtils.deserializeFixedSizeQueueSingle(reader); + this.InitialWindowedBuffer = mlnet.TimeSeriesUtils.deserializeFixedSizeQueueSingle(reader); + 
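// the trained SSA sequence model is a nested model entry; context.open resolves the 'SSA' sub-directory the same way 'Model' and 'Predictor' entries are resolved elsewhere + 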
this.Model = context.open('SSA'); + } +}; + +mlnet.SsaForecastingTransformer = class extends mlnet.SsaForecastingBaseWrapper { + + constructor(context) { + super(context); + } +}; + +mlnet.ColumnSelectingTransformer = class { + + constructor(context) { + const reader = context.reader; + if (context.check('DRPCOLST', 0x00010002, 0x00010002)) { + throw new mlnet.Error("'LoadDropColumnsTransform' not supported."); + } else if (context.check('CHSCOLSF', 0x00010001, 0x00010001)) { + reader.int32(); // cbFloat + this.KeepHidden = this._getHiddenOption(reader.byte()); + const count = reader.int32(); + this.inputs = []; + for (let colIdx = 0; colIdx < count; colIdx++) { + const dst = context.string(); + this.inputs.push({ name: dst }); + context.string(); // src + this._getHiddenOption(reader.byte()); // colKeepHidden + } + } else { + const keepColumns = reader.boolean(); + this.KeepHidden = reader.boolean(); + this.IgnoreMissing = reader.boolean(); + const length = reader.int32(); + this.inputs = []; + for (let i = 0; i < length; i++) { + this.inputs.push({ name: context.string() }); + } + if (keepColumns) { + this.ColumnsToKeep = this.inputs; + } else { + this.ColumnsToDrop = this.inputs; + } + } + } + + _getHiddenOption(value) { + switch (value) { + case 1: return true; + case 2: return false; + default: throw new mlnet.Error('Unsupported hide option specified.'); + } + } +}; + +mlnet.XGBoostMulticlass = class {}; + +mlnet.NltTokenizeTransform = class {}; + +mlnet.DropColumnsTransform = class {}; + +mlnet.StopWordsTransform = class {}; + +mlnet.CSharpTransform = class {}; + +mlnet.NormalizeTransform = class {}; + +mlnet.CdfColumnFunction = class { + + constructor(/* context, typeSrc */) { + // TODO + } +}; + +mlnet.MultiClassNetPredictor = class {}; + +mlnet.ProtonNNMCPred = class {}; + +mlnet.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading ML.NET model.'; + } +}; + +export const ModelFactory = mlnet.ModelFactory; diff --git a/mnn-metadata.json b/mnn-metadata.json new file mode 100644 index 00000000000..1d49282d85a --- /dev/null +++ b/mnn-metadata.json @@ -0,0 +1,624 @@ +[ + { + "name": "AbsVal", + "operator": 0 + }, + { + "name": "ArgMax", + "operator": 2 + }, + { + "name": "AsString", + "operator": 3, + "category": "Transform" + }, + { + "name": "BatchMatMul", + "operator": 106 + }, + { + "name": "BatchNorm", + "operator": 4, + "category": "Normalization" + }, + { + "name": "BatchToSpaceND", + "operator": 5, + "category": "Shape" + }, + { + "name": "Bias", + "operator": 6, + "category": "Layer" + }, + { + "name": "BinaryOp", + "operator": 7, + "attributes": [ + { "name": "activationType", "type": "FusedActivation" }, + { "name": "opType", "type": "BinaryOpOperation" }, + { "name": "T", "type": "DataType" } + ] + }, + { + "name": "Bnll", + "operator": 8 + }, + { + "name": "Broastcast", + "operator": 259, + "category": "Layer" + }, + { + "name": "Cast", + "operator": 9 + }, + { + "name": "Concat", + "operator": 10, + "category": "Tensor" + }, + { + "name": "Const", + "operator": 11, + "category": "Constant" + }, + { + "name": "Conv2DBackPropFilter", + "operator": 265, + "category": "Layer" + }, + { + "name": "ConvertTensor", + "operator": 129, + "attributes": [ + { "name": "source", "type": "MNN_DATA_FORMAT" }, + { "name": "dest", "type": "MNN_DATA_FORMAT" } + ] + }, + { + "name": "ConvInt8", + "operator": 513, + "category": "Layer" + }, + { + "name": "Convolution", + "operator": 12, + "category": "Layer", + 
"attributes": [ + { "name": "padMode", "type": "PadMode" } + ] + }, + { + "name": "ConvolutionDepthwise", + "operator": 13, + "category": "Layer", + "attributes": [ + { "name": "padMode", "type": "PadMode" } + ] + }, + { + "name": "Crop", + "operator": 14, + "category": "Data" + }, + { + "name": "CropAndResize", + "operator": 15, + "category": "Shape" + }, + { + "name": "Cubic", + "operator": 16, + "category": "Layer" + }, + { + "name": "Deconvolution", + "operator": 17, + "category": "Layer" + }, + { + "name": "DeconvolutionDepthwise", + "operator": 18, + "category": "Layer" + }, + { + "name": "DepthwiseConvInt8", + "operator": 515, + "category": "Layer" + }, + { + "name": "Dequantize", + "operator": 19 + }, + { + "name": "DetectionOutput", + "operator": 20 + }, + { + "name": "Dropout", + "operator": 21, + "category": "Dropout" + }, + { + "name": "Eltwise", + "operator": 22 + }, + { + "name": "ELU", + "operator": 23, + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32" } + ] + }, + { + "name": "Embed", + "operator": 24, + "category": "Transform" + }, + { + "name": "Exp", + "operator": 25 + }, + { + "name": "ExpandDims", + "operator": 26 + }, + { + "name": "Fill", + "operator": 27, + "category": "Data" + }, + { + "name": "Flatten", + "operator": 28, + "category": "Shape" + }, + { + "name": "FloatToInt8", + "operator": 517 + }, + { + "name": "FloorMod", + "operator": 29, + "category": "Layer" + }, + { + "name": "Gather", + "operator": 30, + "category": "Transform" + }, + { + "name": "GatherV2", + "operator": 31, + "category": "Data" + }, + { + "name": "Im2Seq", + "operator": 32, + "category": "Transform" + }, + { + "name": "InnerProduct", + "operator": 33, + "category": "Layer" + }, + { + "name": "Input", + "operator": 34 + }, + { + "name": "Int8ToFloat", + "operator": 514 + }, + { + "name": "Interp", + "operator": 35 + }, + { + "name": "Log", + "operator": 36, + "category": "Layer" + }, + { + "name": "LRN", + "operator": 37, + "category": "Normalization" + }, + { + "name": "LSTM", + "operator": 38, + "category": "Layer" + }, + { + "name": "MatMul", + "operator": 39, + "attributes": [ + { "name": "T", "type": "DataType" } + ] + }, + { + "name": "MaxLayerCount", + "operator": 128 + }, + { + "name": "Moments", + "operator": 104, + "category": "Layer" + }, + { + "name": "MVN", + "operator": 40 + }, + { + "name": "NonMaxSuppression", + "operator": 41, + "category": "Layer" + }, + { + "name": "NonMaxSuppressionV2", + "operator": 42, + "category": "Layer" + }, + { + "name": "Normalize", + "operator": 43, + "category": "Normalization" + }, + { + "name": "Pack", + "operator": 44 + }, + { + "name": "Padding", + "operator": 45, + "category": "Tensor" + }, + { + "name": "Permute", + "operator": 46, + "category": "Shape" + }, + { + "name": "PLUGIN", + "operator": 256, + "category": "Layer" + }, + { + "name": "PoolGrad", + "operator": 263, + "category": "Pool" + }, + { + "name": "Pooling", + "operator": 47, + "category": "Pool", + "attributes": [ + { "name": "isGlobal", "type": "boolean", "default": false }, + { "name": "type", "type": "PoolType" }, + { "name": "padType", "type": "PoolPadType" }, + { "name": "dataType", "type": "DataType" }, + { "name": "ceilModel", "type": "boolean", "default": true } + ] + }, + { + "name": "PoolInt8", + "operator": 516, + "category": "Layer" + }, + { + "name": "Power", + "operator": 48 + }, + { + "name": "PReLU", + "operator": 49, + "category": "Activation" + }, + { + "name": "PriorBox", + "operator": 50 + }, + { + "name": "Proposal", + 
"operator": 51 + }, + { + "name": "QuantizedAdd", + "operator": 1, + "attributes": [ + { "name": "activationType", "type": "FusedActivation" } + ] + }, + { + "name": "QuantizedAvgPool", + "operator": 52, + "category": "Pool" + }, + { + "name": "QuantizedBiasAdd", + "operator": 53 + }, + { + "name": "QuantizedConcat", + "operator": 54, + "category": "Tensor" + }, + { + "name": "QuantizedDepthwiseConv2D", + "operator": 55, + "category": "Layer" + }, + { + "name": "QuantizedLogistic", + "operator": 56, + "category": "Activation" + }, + { + "name": "QuantizedMatMul", + "operator": 57 + }, + { + "name": "QuantizedMaxPool", + "operator": 58, + "category": "Pool" + }, + { + "name": "QuantizedRelu", + "operator": 59, + "category": "Activation" + }, + { + "name": "QuantizedRelu6", + "operator": 60, + "category": "Activation" + }, + { + "name": "QuantizedReshape", + "operator": 61, + "category": "Shape" + }, + { + "name": "QuantizedSoftmax", + "operator": 62, + "category": "Activation" + }, + { + "name": "QuantizeMaxMin", + "operator": 63 + }, + { + "name": "QuantizeV2", + "operator": 64 + }, + { + "name": "Range", + "operator": 65 + }, + { + "name": "Rank", + "operator": 66 + }, + { + "name": "ReduceJoin", + "operator": 67 + }, + { + "name": "Reduction", + "operator": 68 + }, + { + "name": "ReLU", + "operator": 69, + "category": "Activation" + }, + { + "name": "ReLU6", + "operator": 70, + "category": "Activation" + }, + { + "name": "Relu6Grad", + "operator": 262, + "category": "Activation" + }, + { + "name": "ReluGrad", + "operator": 261, + "category": "Activation" + }, + { + "name": "RequantizationRange", + "operator": 71 + }, + { + "name": "Requantize", + "operator": 72 + }, + { + "name": "Reshape", + "operator": 73, + "category": "Shape", + "attributes": [ + { "name": "dims", "type": "int32[]" }, + { "name": "dimType", "type": "MNN_DATA_FORMAT" } + ] + }, + { + "name": "Resize", + "operator": 74, + "category": "Shape" + }, + { + "name": "RNN", + "operator": 75, + "category": "Layer" + }, + { + "name": "RNNSequenceGRU", + "operator": 105, + "category": "Layer" + }, + { + "name": "ROIPooling", + "operator": 76, + "category": "Pool" + }, + { + "name": "Scale", + "operator": 77, + "category": "Layer" + }, + { + "name": "Select", + "operator": 257, + "category": "Layer" + }, + { + "name": "Selu", + "operator": 78, + "category": "Activation" + }, + { + "name": "Seq2Out", + "operator": 79, + "category": "Transform" + }, + { + "name": "SetDiff1D", + "operator": 260, + "category": "Layer" + }, + { + "name": "Shape", + "operator": 80, + "category": "Shape" + }, + { + "name": "Sigmoid", + "operator": 81, + "category": "Activation" + }, + { + "name": "Size", + "operator": 82 + }, + { + "name": "Slice", + "operator": 83, + "category": "Tensor" + }, + { + "name": "SliceTf", + "operator": 84 + }, + { + "name": "Softmax", + "operator": 85, + "category": "Activation" + }, + { + "name": "SoftmaxGrad", + "operator": 264, + "category": "Activation" + }, + { + "name": "SpaceToBatchND", + "operator": 86, + "category": "Shape" + }, + { + "name": "SpatialProduct", + "operator": 87, + "category": "Layer" + }, + { + "name": "Split", + "operator": 88 + }, + { + "name": "SPP", + "operator": 89, + "category": "Layer" + }, + { + "name": "Squeeze", + "operator": 90, + "category": "Transform" + }, + { + "name": "StridedSlice", + "operator": 91, + "category": "Tensor", + "attributes": [ + { "name": "Index", "type": "DataType" }, + { "name": "T", "type": "DataType" } + ] + }, + { + "name": "StringJoin", + "operator": 92, + 
"category": "Transform" + }, + { + "name": "StringSplit", + "operator": 93, + "category": "Transform" + }, + { + "name": "StringToNumber", + "operator": 94, + "category": "Transform" + }, + { + "name": "TanH", + "operator": 95, + "category": "Activation" + }, + { + "name": "TfQuantizedConv2D", + "operator": 96, + "category": "Layer" + }, + { + "name": "Threshold", + "operator": 97, + "category": "Activation" + }, + { + "name": "Tile", + "operator": 98 + }, + { + "name": "TopKV2", + "operator": 99, + "category": "Layer" + }, + { + "name": "Transpose", + "operator": 100, + "category": "Transform" + }, + { + "name": "UnaryOp", + "operator": 101 + }, + { + "name": "Unpack", + "operator": 102 + }, + { + "name": "Unsqueeze", + "operator": 107 + }, + { + "name": "Where", + "operator": 103 + }, + { + "name": "ZerosLike", + "operator": 258, + "category": "Layer" + } +] \ No newline at end of file diff --git a/mnn-schema.js b/mnn-schema.js new file mode 100644 index 00000000000..79d3bcb2249 --- /dev/null +++ b/mnn-schema.js @@ -0,0 +1,1972 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const $root = flatbuffers.get('mnn'); + +$root.MNN = $root.MNN || {}; + +$root.MNN.NetSource = { + CAFFE: 0, + TENSORFLOW: 1, + TFLITE: 2, + ONNX: 3, + TORCH: 4 +}; + +$root.MNN.DataType = { + DT_INVALID: 0, + DT_FLOAT: 1, + DT_DOUBLE: 2, + DT_INT32: 3, + DT_UINT8: 4, + DT_INT16: 5, + DT_INT8: 6, + DT_STRING: 7, + DT_COMPLEX64: 8, + DT_INT64: 9, + DT_BOOL: 10, + DT_QINT8: 11, + DT_QUINT8: 12, + DT_QINT32: 13, + DT_BFLOAT16: 14, + DT_QINT16: 15, + DT_QUINT16: 16, + DT_UINT16: 17, + DT_COMPLEX128: 18, + DT_HALF: 19, + DT_RESOURCE: 20, + DT_VARIANT: 21 +}; + +$root.MNN.MNN_DATA_FORMAT = { + NCHW: 0, + NHWC: 1, + NC4HW4: 2, + NHWC4: 3, + UNKNOWN: 4 +}; + +$root.MNN.Blob = class Blob { + + static decode(reader, position) { + const $ = new $root.MNN.Blob(); + $.dims = reader.typedArray(position, 4, Int32Array); + $.dataFormat = reader.int8_(position, 6, 0); + $.dataType = reader.int32_(position, 8, 1); + $.uint8s = reader.typedArray(position, 10, Uint8Array); + $.int8s = reader.typedArray(position, 12, Int8Array); + $.int32s = reader.typedArray(position, 14, Int32Array); + $.int64s = reader.int64s_(position, 16); + $.float32s = reader.typedArray(position, 18, Float32Array); + $.strings = reader.strings_(position, 20); + $.external = reader.int64s_(position, 22); + return $; + } +}; + +$root.MNN.ListValue = class ListValue { + + static decode(reader, position) { + const $ = new $root.MNN.ListValue(); + $.s = reader.strings_(position, 4); + $.i = reader.typedArray(position, 6, Int32Array); + $.f = reader.typedArray(position, 8, Float32Array); + $.b = reader.bools_(position, 10); + $.type = reader.typedArray(position, 12, Int32Array); + return $; + } +}; + +$root.MNN.Attribute = class Attribute { + + static decode(reader, position) { + const $ = new $root.MNN.Attribute(); + $.s = reader.string_(position, 4, null); + $.i = reader.int32_(position, 6, 0); + $.b = reader.bool_(position, 8, false); + $.key = reader.string_(position, 10, null); + $.type = reader.int32_(position, 12, 0); + $.f = reader.float32_(position, 14, 0); + $.tensor = reader.table(position, 16, $root.MNN.Blob.decode); + $.list = reader.table(position, 18, $root.MNN.ListValue.decode); + $.func = reader.table(position, 20, $root.MNN.NamedAttrList.decode); + return $; + } +}; + +$root.MNN.NamedAttrList = class NamedAttrList { + + static decode(reader, position) { + const $ = new $root.MNN.NamedAttrList(); + $.name = reader.string_(position, 4, null); + 
$.attr = reader.tableArray(position, 6, $root.MNN.Attribute.decode); + return $; + } +}; + +$root.MNN.PadMode = { + CAFFE: 0, + VALID: 1, + SAME: 2 +}; + +$root.MNN.Convolution2DCommon = class Convolution2DCommon { + + static decode(reader, position) { + const $ = new $root.MNN.Convolution2DCommon(); + $.padX = reader.int32_(position, 4, 0); + $.padY = reader.int32_(position, 6, 0); + $.kernelX = reader.int32_(position, 8, 1); + $.kernelY = reader.int32_(position, 10, 1); + $.strideX = reader.int32_(position, 12, 1); + $.strideY = reader.int32_(position, 14, 1); + $.dilateX = reader.int32_(position, 16, 1); + $.dilateY = reader.int32_(position, 18, 1); + $.padMode = reader.int8_(position, 20, 0); + $.group = reader.int32_(position, 22, 1); + $.outputCount = reader.int32_(position, 24, 0); + $.inputCount = reader.int32_(position, 26, 0); + $.relu = reader.bool_(position, 28, false); + $.relu6 = reader.bool_(position, 30, false); + $.pads = reader.typedArray(position, 32, Int32Array); + $.outPads = reader.typedArray(position, 34, Int32Array); + $.hasOutputShape = reader.bool_(position, 36, false); + return $; + } +}; + +$root.MNN.Convolution3DCommon = class Convolution3DCommon { + + static decode(reader, position) { + const $ = new $root.MNN.Convolution3DCommon(); + $.dilates = reader.typedArray(position, 4, Int32Array); + $.strides = reader.typedArray(position, 6, Int32Array); + $.kernels = reader.typedArray(position, 8, Int32Array); + $.pads = reader.typedArray(position, 10, Int32Array); + $.padMode = reader.int8_(position, 12, 0); + $.inputCount = reader.int32_(position, 14, 0); + $.outputCount = reader.int32_(position, 16, 0); + $.relu = reader.bool_(position, 18, false); + $.relu6 = reader.bool_(position, 20, false); + $.group = reader.int32_(position, 22, 1); + $.outPads = reader.typedArray(position, 24, Int32Array); + $.hasOutputShape = reader.bool_(position, 26, false); + return $; + } +}; + +$root.MNN.SparseAlgo = { + RANDOM: 0, + SIMD_OC: 1 +}; + +$root.MNN.SparseCommon = class SparseCommon { + + static decode(reader, position) { + const $ = new $root.MNN.SparseCommon(); + $.method = reader.int8_(position, 4, 0); + $.args = reader.tableArray(position, 6, $root.MNN.Attribute.decode); + return $; + } +}; + +$root.MNN.IDSTQuan = class IDSTQuan { + + static decode(reader, position) { + const $ = new $root.MNN.IDSTQuan(); + $.buffer = reader.typedArray(position, 4, Int8Array); + $.alpha = reader.typedArray(position, 6, Float32Array); + $.type = reader.int32_(position, 8, 0); + $.useInt32 = reader.bool_(position, 10, false); + $.quantScale = reader.float32_(position, 12, 0); + $.scaleIn = reader.float32_(position, 14, 0); + $.scaleOut = reader.float32_(position, 16, 0); + $.aMax = reader.int32_(position, 18, 0); + $.aMin = reader.int32_(position, 20, 0); + $.readType = reader.int32_(position, 22, 0); + $.has_scaleInt = reader.bool_(position, 24, false); + $.shapeInt32 = reader.bool_(position, 26, false); + $.weightSize = reader.uint32_(position, 28, 0); + $.index = reader.typedArray(position, 30, Uint32Array); + return $; + } +}; + +$root.MNN.QuantizeAlgo = { + DEFAULT: 0, + OVERFLOW_AWARE: 1, + WINOGRAD_AWARE: 2 +}; + +$root.MNN.QuantizedFloatParam = class QuantizedFloatParam { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedFloatParam(); + $.weight = reader.typedArray(position, 4, Int8Array); + $.bias = reader.typedArray(position, 6, Int32Array); + $.scale = reader.typedArray(position, 8, Float32Array); + $.tensorScale = reader.typedArray(position, 10, 
Float32Array); + $.method = reader.int8_(position, 12, 0); + $.nbits = reader.int32_(position, 14, 8); + $.zeroPoint = reader.int8_(position, 16, 0); + $.outputZeroPoint = reader.int8_(position, 18, 0); + $.clampMin = reader.int8_(position, 20, -128); + $.clampMax = reader.int8_(position, 22, 127); + $.winogradAttr = reader.typedArray(position, 24, Int32Array); + $.outputDataType = reader.int32_(position, 26, 6); + return $; + } +}; + +$root.MNN.Convolution2D = class Convolution2D { + + static decode(reader, position) { + const $ = new $root.MNN.Convolution2D(); + $.common = reader.table(position, 4, $root.MNN.Convolution2DCommon.decode); + $.weight = reader.typedArray(position, 6, Float32Array); + $.bias = reader.typedArray(position, 8, Float32Array); + $.quanParameter = reader.table(position, 10, $root.MNN.IDSTQuan.decode); + $.symmetricQuan = reader.table(position, 12, $root.MNN.QuantizedFloatParam.decode); + $.sparseParameter = reader.table(position, 14, $root.MNN.SparseCommon.decode); + $.external = reader.int64s_(position, 16); + return $; + } +}; + +$root.MNN.Convolution3D = class Convolution3D { + + static decode(reader, position) { + const $ = new $root.MNN.Convolution3D(); + $.common = reader.table(position, 4, $root.MNN.Convolution3DCommon.decode); + $.weight = reader.typedArray(position, 6, Float32Array); + $.bias = reader.typedArray(position, 8, Float32Array); + $.external = reader.int64s_(position, 10); + return $; + } +}; + +$root.MNN.InnerProduct = class InnerProduct { + + static decode(reader, position) { + const $ = new $root.MNN.InnerProduct(); + $.outputCount = reader.int32_(position, 4, 0); + $.biasTerm = reader.int32_(position, 6, 0); + $.weightSize = reader.int32_(position, 8, 0); + $.weight = reader.typedArray(position, 10, Float32Array); + $.bias = reader.typedArray(position, 12, Float32Array); + $.axis = reader.int32_(position, 14, 0); + $.transpose = reader.bool_(position, 16, false); + $.quanParameter = reader.table(position, 18, $root.MNN.IDSTQuan.decode); + return $; + } +}; + +$root.MNN.PoolType = { + MAXPOOL: 0, + AVEPOOL: 1 +}; + +$root.MNN.PoolPadType = { + CAFFE: 0, + VALID: 1, + SAME: 2 +}; + +$root.MNN.AvgPoolCountType = { + DEFAULT: 0, + INCLUDE_PADDING: 1, + EXCLUDE_PADDING: 2 +}; + +$root.MNN.Pool = class Pool { + + static decode(reader, position) { + const $ = new $root.MNN.Pool(); + $.padX = reader.int32_(position, 4, 0); + $.padY = reader.int32_(position, 6, 0); + $.isGlobal = reader.bool_(position, 8, false); + $.kernelX = reader.int32_(position, 10, 0); + $.kernelY = reader.int32_(position, 12, 0); + $.strideX = reader.int32_(position, 14, 0); + $.strideY = reader.int32_(position, 16, 0); + $.type = reader.int8_(position, 18, 0); + $.padType = reader.int8_(position, 20, 0); + $.dataType = reader.int32_(position, 22, 1); + $.ceilModel = reader.bool_(position, 24, true); + $.pads = reader.typedArray(position, 26, Int32Array); + $.countType = reader.int8_(position, 28, 0); + return $; + } +}; + +$root.MNN.Pool3D = class Pool3D { + + static decode(reader, position) { + const $ = new $root.MNN.Pool3D(); + $.strides = reader.typedArray(position, 4, Int32Array); + $.kernels = reader.typedArray(position, 6, Int32Array); + $.pads = reader.typedArray(position, 8, Int32Array); + $.type = reader.int8_(position, 10, 0); + $.padType = reader.int8_(position, 12, 0); + $.isGlobal = reader.bool_(position, 14, false); + return $; + } +}; + +$root.MNN.Relu = class Relu { + + static decode(reader, position) { + const $ = new $root.MNN.Relu(); + $.slope = 
reader.float32_(position, 4, 0); + return $; + } +}; + +$root.MNN.Relu6 = class Relu6 { + + static decode(reader, position) { + const $ = new $root.MNN.Relu6(); + $.minValue = reader.float32_(position, 4, 0); + $.maxValue = reader.float32_(position, 6, 6); + return $; + } +}; + +$root.MNN.PRelu = class PRelu { + + static decode(reader, position) { + const $ = new $root.MNN.PRelu(); + $.slopeCount = reader.int32_(position, 4, 0); + $.slope = reader.typedArray(position, 6, Float32Array); + return $; + } +}; + +$root.MNN.ELU = class ELU { + + static decode(reader, position) { + const $ = new $root.MNN.ELU(); + $.alpha = reader.float32_(position, 4, 0); + return $; + } +}; + +$root.MNN.LRN = class LRN { + + static decode(reader, position) { + const $ = new $root.MNN.LRN(); + $.regionType = reader.int32_(position, 4, 0); + $.localSize = reader.int32_(position, 6, 0); + $.alpha = reader.float32_(position, 8, 0); + $.beta = reader.float32_(position, 10, 0); + $.bias = reader.float32_(position, 12, 1); + return $; + } +}; + +$root.MNN.ArgMax = class ArgMax { + + static decode(reader, position) { + const $ = new $root.MNN.ArgMax(); + $.outMaxVal = reader.int32_(position, 4, 0); + $.topK = reader.int32_(position, 6, 0); + $.axis = reader.int32_(position, 8, 0); + $.softmaxThreshold = reader.int32_(position, 10, 0); + return $; + } +}; + +$root.MNN.Axis = class Axis { + + static decode(reader, position) { + const $ = new $root.MNN.Axis(); + $.axis = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.MNN.Input = class Input { + + static decode(reader, position) { + const $ = new $root.MNN.Input(); + $.dims = reader.typedArray(position, 4, Int32Array); + $.dtype = reader.int32_(position, 6, 1); + $.dformat = reader.int8_(position, 8, 2); + return $; + } +}; + +$root.MNN.LSTM = class LSTM { + + static decode(reader, position) { + const $ = new $root.MNN.LSTM(); + $.outputCount = reader.int32_(position, 4, 0); + $.weightSize = reader.int32_(position, 6, 0); + $.clippingThreshold = reader.float32_(position, 8, 0); + $.weightI = reader.table(position, 10, $root.MNN.Blob.decode); + $.weightH = reader.table(position, 12, $root.MNN.Blob.decode); + $.bias = reader.table(position, 14, $root.MNN.Blob.decode); + $.weightIQ = reader.table(position, 16, $root.MNN.Blob.decode); + $.weightIA = reader.table(position, 18, $root.MNN.Blob.decode); + $.quantScale = reader.float32_(position, 20, 0); + return $; + } +}; + +$root.MNN.Slice = class Slice { + + static decode(reader, position) { + const $ = new $root.MNN.Slice(); + $.axis = reader.int32_(position, 4, 0); + $.slicePoints = reader.typedArray(position, 6, Int32Array); + $.sourceType = reader.int8_(position, 8, 0); + return $; + } +}; + +$root.MNN.BatchNorm = class BatchNorm { + + static decode(reader, position) { + const $ = new $root.MNN.BatchNorm(); + $.channels = reader.int32_(position, 4, 0); + $.slopeData = reader.typedArray(position, 6, Float32Array); + $.meanData = reader.typedArray(position, 8, Float32Array); + $.varData = reader.typedArray(position, 10, Float32Array); + $.biasData = reader.typedArray(position, 12, Float32Array); + $.Adata = reader.typedArray(position, 14, Float32Array); + $.Bdata = reader.typedArray(position, 16, Float32Array); + $.epsilon = reader.float32_(position, 18, 0.001); + return $; + } +}; + +$root.MNN.Scale = class Scale { + + static decode(reader, position) { + const $ = new $root.MNN.Scale(); + $.channels = reader.int32_(position, 4, 0); + $.scaleData = reader.typedArray(position, 6, Float32Array); + $.biasData = 
reader.typedArray(position, 8, Float32Array); + $.external = reader.int64s_(position, 10); + return $; + } +}; + +$root.MNN.EltwiseType = { + PROD: 0, + SUM: 1, + MAXIMUM: 2, + SUB: 3 +}; + +$root.MNN.Eltwise = class Eltwise { + + static decode(reader, position) { + const $ = new $root.MNN.Eltwise(); + $.type = reader.int8_(position, 4, 0); + $.coeff = reader.typedArray(position, 6, Float32Array); + return $; + } +}; + +$root.MNN.Flatten = class Flatten { + + static decode(reader, position) { + const $ = new $root.MNN.Flatten(); + $.axis = reader.int32_(position, 4, 0); + $.endAxis = reader.int32_(position, 6, 0); + return $; + } +}; + +$root.MNN.Permute = class Permute { + + static decode(reader, position) { + const $ = new $root.MNN.Permute(); + $.dims = reader.typedArray(position, 4, Int32Array); + return $; + } +}; + +$root.MNN.Reshape = class Reshape { + + static decode(reader, position) { + const $ = new $root.MNN.Reshape(); + $.dims = reader.typedArray(position, 4, Int32Array); + $.dimType = reader.int8_(position, 6, 0); + return $; + } +}; + +$root.MNN.DetectionOutput = class DetectionOutput { + + static decode(reader, position) { + const $ = new $root.MNN.DetectionOutput(); + $.classCount = reader.int32_(position, 4, 0); + $.nmsThresholdold = reader.float32_(position, 6, 0); + $.nmsTopK = reader.int32_(position, 8, 0); + $.keepTopK = reader.int32_(position, 10, 0); + $.confidenceThreshold = reader.float32_(position, 12, 0); + $.shareLocation = reader.int32_(position, 14, 0); + $.backgroundLable = reader.int32_(position, 16, 0); + $.varianceEncodedTarget = reader.int32_(position, 18, 0); + $.codeType = reader.int32_(position, 20, 0); + $.objectnessScore = reader.float32_(position, 22, 0.01); + return $; + } +}; + +$root.MNN.RoiParameters = class RoiParameters { + + static decode(reader, position) { + const $ = new $root.MNN.RoiParameters(); + $.pooledWidth = reader.int32_(position, 4, 0); + $.pooledHeight = reader.int32_(position, 6, 0); + $.spatialScale = reader.float32_(position, 8, 0); + $.samplingRatio = reader.int32_(position, 10, -1); + $.aligned = reader.bool_(position, 12, false); + $.poolType = reader.int8_(position, 14, 1); + $.outputGrad = reader.bool_(position, 16, false); + return $; + } +}; + +$root.MNN.Proposal = class Proposal { + + static decode(reader, position) { + const $ = new $root.MNN.Proposal(); + $.featStride = reader.int32_(position, 4, 0); + $.baseSize = reader.int32_(position, 6, 0); + $.preNmsTopN = reader.int32_(position, 8, 0); + $.afterNmsTopN = reader.int32_(position, 10, 0); + $.nmsThreshold = reader.float32_(position, 12, 0); + $.minSize = reader.int32_(position, 14, 0); + $.ratios = reader.table(position, 16, $root.MNN.Blob.decode); + $.scales = reader.table(position, 18, $root.MNN.Blob.decode); + $.anchors = reader.table(position, 20, $root.MNN.Blob.decode); + return $; + } +}; + +$root.MNN.CoordinateTransformationMode = { + NotSet: 0, + AlignCorners: 1, + HalfPixels: 2, + PytorchHalfPixels: 3, + Asymmetric: 4, + TensorflowHalfPixels: 5, + TensorflowCropAndResize: 6 +}; + +$root.MNN.Interp = class Interp { + + static decode(reader, position) { + const $ = new $root.MNN.Interp(); + $.widthScale = reader.float32_(position, 4, 0); + $.heightScale = reader.float32_(position, 6, 0); + $.outputWidth = reader.int32_(position, 8, 0); + $.outputHeight = reader.int32_(position, 10, 0); + $.resizeType = reader.int32_(position, 12, 0); + $.alignCorners = reader.bool_(position, 14, false); + $.halfPixelCenters = reader.bool_(position, 16, false); + 
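// ctm below indexes the CoordinateTransformationMode enum defined above. +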
$.widthOffset = reader.float32_(position, 18, 0); + $.heightOffset = reader.float32_(position, 20, 0); + $.cubicCoeffA = reader.float32_(position, 22, -0.75); + $.ctm = reader.int8_(position, 24, 0); + $.depthScale = reader.float32_(position, 26, 0); + $.outputDepth = reader.int32_(position, 28, 0); + $.depthOffset = reader.float32_(position, 30, 0); + return $; + } +}; + +$root.MNN.Resize = class Resize { + + static decode(reader, position) { + const $ = new $root.MNN.Resize(); + $.xScale = reader.float32_(position, 4, 0); + $.yScale = reader.float32_(position, 6, 0); + return $; + } +}; + +$root.MNN.PriorBox = class PriorBox { + + static decode(reader, position) { + const $ = new $root.MNN.PriorBox(); + $.minSizes = reader.typedArray(position, 4, Float32Array); + $.maxSizes = reader.typedArray(position, 6, Float32Array); + $.aspectRatios = reader.typedArray(position, 8, Float32Array); + $.variances = reader.typedArray(position, 10, Float32Array); + $.flip = reader.bool_(position, 12, false); + $.clip = reader.bool_(position, 14, false); + $.imageWidth = reader.int32_(position, 16, 0); + $.imageHeight = reader.int32_(position, 18, 0); + $.stepWidth = reader.int32_(position, 20, 0); + $.stepHeight = reader.int32_(position, 22, 0); + $.offset = reader.float32_(position, 24, 0); + return $; + } +}; + +$root.MNN.Normalize = class Normalize { + + static decode(reader, position) { + const $ = new $root.MNN.Normalize(); + $.acrossSpatial = reader.int32_(position, 4, 0); + $.channelShared = reader.int32_(position, 6, 0); + $.eps = reader.float32_(position, 8, 0); + $.scale = reader.typedArray(position, 10, Float32Array); + return $; + } +}; + +$root.MNN.EltwiseInt8 = class EltwiseInt8 { + + static decode(reader, position) { + const $ = new $root.MNN.EltwiseInt8(); + $.type = reader.int8_(position, 4, 0); + $.inputQuan0 = reader.table(position, 6, $root.MNN.QuantizedFloatParam.decode); + $.inputQuan1 = reader.table(position, 8, $root.MNN.QuantizedFloatParam.decode); + $.outputQuan = reader.table(position, 10, $root.MNN.QuantizedFloatParam.decode); + return $; + } +}; + +$root.MNN.CumSum = class CumSum { + + static decode(reader, position) { + const $ = new $root.MNN.CumSum(); + $.exclusive = reader.bool_(position, 4, false); + $.reverse = reader.bool_(position, 6, false); + return $; + } +}; + +$root.MNN.BinaryOpOperation = { + ADD: 0, + SUB: 1, + MUL: 2, + DIV: 3, + MAX_TEMP: 4, + MIN_TEMP: 5, + POW: 6, + REALDIV: 7, + MINIMUM: 8, + MAXIMUM: 9, + GREATER: 10, + GREATER_EQUAL: 11, + LESS: 12, + FLOORDIV: 13, + SquaredDifference: 14, + EQUAL: 15, + LESS_EQUAL: 16, + FLOORMOD: 17, + MOD: 19, + ATAN2: 20, + LOGICALOR: 21, + NOTEQUAL: 22, + BITWISE_AND: 23, + BITWISE_OR: 24, + BITWISE_XOR: 25, + LOGICALXOR: 26, + LEFTSHIFT: 27, + RIGHTSHIFT: 28 +}; + +$root.MNN.BinaryOp = class BinaryOp { + + static decode(reader, position) { + const $ = new $root.MNN.BinaryOp(); + $.opType = reader.int32_(position, 4, 0); + $.T = reader.int32_(position, 6, 1); + $.activationType = reader.int32_(position, 8, 0); + return $; + } +}; + +$root.MNN.PackParam = class PackParam { + + static decode(reader, position) { + const $ = new $root.MNN.PackParam(); + $.dataType = reader.int32_(position, 4, 0); + $.axis = reader.int32_(position, 6, 0); + return $; + } +}; + +$root.MNN.StridedSliceParam = class StridedSliceParam { + + static decode(reader, position) { + const $ = new $root.MNN.StridedSliceParam(); + $.Index = reader.int32_(position, 4, 0); + $.T = reader.int32_(position, 6, 0); + $.beginMask = reader.int32_(position, 
8, 0); + $.endMask = reader.int32_(position, 10, 0); + $.ellipsisMask = reader.int32_(position, 12, 0); + $.newAxisMask = reader.int32_(position, 14, 0); + $.shrinkAxisMask = reader.int32_(position, 16, 0); + $.fromType = reader.int32_(position, 18, 0); + return $; + } +}; + +$root.MNN.SqueezeParam = class SqueezeParam { + + static decode(reader, position) { + const $ = new $root.MNN.SqueezeParam(); + $.squeezeDims = reader.typedArray(position, 4, Int32Array); + return $; + } +}; + +$root.MNN.CastParam = class CastParam { + + static decode(reader, position) { + const $ = new $root.MNN.CastParam(); + $.srcT = reader.int32_(position, 4, 0); + $.dstT = reader.int32_(position, 6, 0); + return $; + } +}; + +$root.MNN.ReductionType = { + SUM: 0, + ASUM: 1, + SUMSQ: 2, + MEAN: 3, + MAXIMUM: 4, + MINIMUM: 5, + PROD: 6, + ANY: 7, + ALL: 8 +}; + +$root.MNN.ReductionParam = class ReductionParam { + + static decode(reader, position) { + const $ = new $root.MNN.ReductionParam(); + $.operation = reader.int8_(position, 4, 0); + $.dim = reader.typedArray(position, 6, Int32Array); + $.coeff = reader.float32_(position, 8, 0); + $.keepDims = reader.bool_(position, 10, false); + $.dType = reader.int32_(position, 12, 1); + return $; + } +}; + +$root.MNN.Gather = class Gather { + + static decode(reader, position) { + const $ = new $root.MNN.Gather(); + $.Tindices = reader.int32_(position, 4, 0); + $.Tparams = reader.int32_(position, 6, 0); + $.validateIndices = reader.bool_(position, 8, false); + $.axis = reader.int32_(position, 10, 0); + return $; + } +}; + +$root.MNN.ExpandDims = class ExpandDims { + + static decode(reader, position) { + const $ = new $root.MNN.ExpandDims(); + $.T = reader.int32_(position, 4, 0); + $.Tdim = reader.int32_(position, 6, 0); + $.axis = reader.int32_(position, 8, 0); + return $; + } +}; + +$root.MNN.Selu = class Selu { + + static decode(reader, position) { + const $ = new $root.MNN.Selu(); + $.scale = reader.float32_(position, 4, 0); + $.alpha = reader.float32_(position, 6, 0); + return $; + } +}; + +$root.MNN.AsString = class AsString { + + static decode(reader, position) { + const $ = new $root.MNN.AsString(); + $.T = reader.int32_(position, 4, 0); + $.precision = reader.int32_(position, 6, 0); + $.scientific = reader.bool_(position, 8, false); + $.shortest = reader.bool_(position, 10, false); + $.width = reader.int32_(position, 12, 0); + $.fillString = reader.string_(position, 14, null); + return $; + } +}; + +$root.MNN.ReduceJoin = class ReduceJoin { + + static decode(reader, position) { + const $ = new $root.MNN.ReduceJoin(); + $.keepDims = reader.bool_(position, 4, false); + $.separator = reader.string_(position, 6, null); + return $; + } +}; + +$root.MNN.UnaryOpOperation = { + ABS: 0, + NEG: 1, + FLOOR: 2, + CEIL: 3, + SQUARE: 4, + SQRT: 5, + RSQRT: 6, + EXP: 7, + LOG: 8, + SIN: 9, + COS: 10, + TAN: 11, + ASIN: 12, + ACOS: 13, + ATAN: 14, + RECIPROCAL: 15, + LOG1P: 16, + BNLL: 17, + ACOSH: 18, + SINH: 19, + ASINH: 20, + ATANH: 21, + SIGN: 22, + ROUND: 23, + COSH: 24, + ERF: 25, + ERFC: 26, + ERFINV: 27, + EXPM1: 28, + SIGMOID: 29, + TANH: 30, + HARDSWISH: 31, + GELU: 32, + GELU_STANDARD: 33 +}; + +$root.MNN.UnaryOp = class UnaryOp { + + static decode(reader, position) { + const $ = new $root.MNN.UnaryOp(); + $.opType = reader.int32_(position, 4, 0); + $.T = reader.int32_(position, 6, 0); + $.tableInt8 = reader.typedArray(position, 8, Int8Array); + return $; + } +}; + +$root.MNN.TopKV2 = class TopKV2 { + + static decode(reader, position) { + const $ = new $root.MNN.TopKV2(); 
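+ // defaults: T is DT_FLOAT (1) and largest is true, per the DataType enum above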
+ $.T = reader.int32_(position, 4, 1); + $.sorted = reader.bool_(position, 6, false); + $.largest = reader.bool_(position, 8, true); + return $; + } +}; + +$root.MNN.CropAndResizeMethod = { + BILINEAR: 0, + NEAREST: 1 +}; + +$root.MNN.CropAndResize = class CropAndResize { + + static decode(reader, position) { + const $ = new $root.MNN.CropAndResize(); + $.extrapolationValue = reader.float32_(position, 4, 0); + $.method = reader.int8_(position, 6, 0); + return $; + } +}; + +$root.MNN.Fill = class Fill { + + static decode(/* reader, position */) { + const $ = new $root.MNN.Fill(); + return $; + } +}; + +$root.MNN.GatherV2 = class GatherV2 { + + static decode(reader, position) { + const $ = new $root.MNN.GatherV2(); + $.Taxis = reader.int32_(position, 4, 0); + $.Tindices = reader.int32_(position, 6, 0); + $.Tparams = reader.int32_(position, 8, 0); + return $; + } +}; + +$root.MNN.NonMaxSuppressionV2 = class NonMaxSuppressionV2 { + + static decode(/* reader, position */) { + const $ = new $root.MNN.NonMaxSuppressionV2(); + return $; + } +}; + +$root.MNN.Range = class Range { + + static decode(reader, position) { + const $ = new $root.MNN.Range(); + $.Tidx = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.MNN.Rank = class Rank { + + static decode(/* reader, position */) { + const $ = new $root.MNN.Rank(); + return $; + } +}; + +$root.MNN.Size = class Size { + + static decode(reader, position) { + const $ = new $root.MNN.Size(); + $.outputDataType = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.MNN.Transpose = class Transpose { + + static decode(reader, position) { + const $ = new $root.MNN.Transpose(); + $.Tperm = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.MNN.SliceTf = class SliceTf { + + static decode(reader, position) { + const $ = new $root.MNN.SliceTf(); + $.T = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.MNN.QuantizeMaxMin = class QuantizeMaxMin { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizeMaxMin(); + $.T = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.MNN.Crop = class Crop { + + static decode(reader, position) { + const $ = new $root.MNN.Crop(); + $.axis = reader.int32_(position, 4, 2); + $.offset = reader.typedArray(position, 6, Int32Array); + return $; + } +}; + +$root.MNN.SpaceBatch = class SpaceBatch { + + static decode(reader, position) { + const $ = new $root.MNN.SpaceBatch(); + $.blockShape = reader.table(position, 4, $root.MNN.Blob.decode); + $.padding = reader.table(position, 6, $root.MNN.Blob.decode); + return $; + } +}; + +$root.MNN.MatMul = class MatMul { + + static decode(reader, position) { + const $ = new $root.MNN.MatMul(); + $.T = reader.int32_(position, 4, 0); + $.transposeA = reader.bool_(position, 6, false); + $.transposeB = reader.bool_(position, 8, false); + $.weight = reader.typedArray(position, 10, Float32Array); + $.bias = reader.typedArray(position, 12, Float32Array); + return $; + } +}; + +$root.MNN.MomentsParam = class MomentsParam { + + static decode(reader, position) { + const $ = new $root.MNN.MomentsParam(); + $.dim = reader.typedArray(position, 4, Int32Array); + $.keepDims = reader.bool_(position, 6, true); + $.dType = reader.int32_(position, 8, 1); + return $; + } +}; + +$root.MNN.RNNParam = class RNNParam { + + static decode(reader, position) { + const $ = new $root.MNN.RNNParam(); + $.numUnits = reader.int32_(position, 4, 0); + $.isBidirectionalRNN = reader.bool_(position, 6, false); + $.linearBeforeReset = reader.bool_(position, 8, false); + 
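// the fw*/bw* blobs below carry the forward and backward GRU parameters; the
+ // bw* tables are presumably populated only when isBidirectionalRNN is set. +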
$.keepAllOutputs = reader.bool_(position, 10, false); + $.fwGateWeight = reader.table(position, 12, $root.MNN.Blob.decode); + $.fwGateBias = reader.table(position, 14, $root.MNN.Blob.decode); + $.fwCandidateWeight = reader.table(position, 16, $root.MNN.Blob.decode); + $.fwCandidateBias = reader.table(position, 18, $root.MNN.Blob.decode); + $.fwRecurrentBias = reader.table(position, 20, $root.MNN.Blob.decode); + $.bwGateWeight = reader.table(position, 22, $root.MNN.Blob.decode); + $.bwGateBias = reader.table(position, 24, $root.MNN.Blob.decode); + $.bwCandidateWeight = reader.table(position, 26, $root.MNN.Blob.decode); + $.bwCandidateBias = reader.table(position, 28, $root.MNN.Blob.decode); + $.bwRecurrentBias = reader.table(position, 30, $root.MNN.Blob.decode); + return $; + } +}; + +$root.MNN.BatchMatMulParam = class BatchMatMulParam { + + static decode(reader, position) { + const $ = new $root.MNN.BatchMatMulParam(); + $.adjX = reader.bool_(position, 4, false); + $.adjY = reader.bool_(position, 6, false); + return $; + } +}; + +$root.MNN.DepthToSpaceMode = { + DCR: 0, + CRD: 1 +}; + +$root.MNN.DepthSpaceParam = class DepthSpaceParam { + + static decode(reader, position) { + const $ = new $root.MNN.DepthSpaceParam(); + $.blockSize = reader.int32_(position, 4, 0); + $.mode = reader.int8_(position, 6, 0); + return $; + } +}; + +$root.MNN.ReverseSequenceParam = class ReverseSequenceParam { + + static decode(reader, position) { + const $ = new $root.MNN.ReverseSequenceParam(); + $.batchDim = reader.int32_(position, 4, 0); + $.seqDim = reader.int32_(position, 6, 0); + return $; + } +}; + +$root.MNN.DetectionPostProcessParam = class DetectionPostProcessParam { + + static decode(reader, position) { + const $ = new $root.MNN.DetectionPostProcessParam(); + $.maxDetections = reader.int32_(position, 4, 0); + $.maxClassesPerDetection = reader.int32_(position, 6, 0); + $.detectionsPerClass = reader.int32_(position, 8, 0); + $.nmsScoreThreshold = reader.float32_(position, 10, 0); + $.iouThreshold = reader.float32_(position, 12, 0); + $.numClasses = reader.int32_(position, 14, 0); + $.useRegularNMS = reader.bool_(position, 16, false); + $.centerSizeEncoding = reader.typedArray(position, 18, Float32Array); + return $; + } +}; + +$root.MNN.OneHotParam = class OneHotParam { + + static decode(reader, position) { + const $ = new $root.MNN.OneHotParam(); + $.dType = reader.int32_(position, 4, 1); + $.axis = reader.int32_(position, 6, -1); + return $; + } +}; + +$root.MNN.PadValueMode = { + CONSTANT: 0, + REFLECT: 1, + SYMMETRIC: 2, + EDGE: 3 +}; + +$root.MNN.PadParam = class PadParam { + + static decode(reader, position) { + const $ = new $root.MNN.PadParam(); + $.mode = reader.int8_(position, 4, 0); + return $; + } +}; + +$root.MNN.LayerNorm = class LayerNorm { + + static decode(reader, position) { + const $ = new $root.MNN.LayerNorm(); + $.axis = reader.typedArray(position, 4, Int32Array); + $.epsilon = reader.float32_(position, 6, 0); + $.gamma = reader.typedArray(position, 8, Float32Array); + $.beta = reader.typedArray(position, 10, Float32Array); + $.group = reader.int32_(position, 12, 1); + $.external = reader.int64s_(position, 14); + return $; + } +}; + +$root.MNN.RandomUniform = class RandomUniform { + + static decode(reader, position) { + const $ = new $root.MNN.RandomUniform(); + $.seed = reader.int32_(position, 4, 0); + $.seed2 = reader.int32_(position, 6, 0); + $.type = reader.int32_(position, 8, 1); + $.low = reader.float32_(position, 10, 0); + $.high = reader.float32_(position, 12, 1); + return 
$; + } +}; + +$root.MNN.TensorArray = class TensorArray { + + static decode(reader, position) { + const $ = new $root.MNN.TensorArray(); + $.dynamic_size = reader.bool_(position, 4, false); + $.identical_element_shapes = reader.bool_(position, 6, false); + $.element_shape = reader.typedArray(position, 8, Int32Array); + $.T = reader.int32_(position, 10, 1); + $.axis = reader.int32_(position, 12, 0); + $.keepdims = reader.bool_(position, 14, true); + $.new_axis = reader.bool_(position, 16, false); + return $; + } +}; + +$root.MNN.LSTMBlockCell = class LSTMBlockCell { + + static decode(reader, position) { + const $ = new $root.MNN.LSTMBlockCell(); + $.cell_clip = reader.float32_(position, 4, 3); + $.forget_bias = reader.float32_(position, 6, 1); + $.use_peephole = reader.bool_(position, 8, false); + return $; + } +}; + +$root.MNN.FusedActivation = { + kTfLiteActNone: 0, + kTfLiteActRelu: 1, + kTfLiteActRelu1: 2, + kTfLiteActRelu6: 3, + kTfLiteActTanh: 4, + kTfLiteActSignBit: 5, + kTfLiteActSigmoid: 6 +}; + +$root.MNN.QuantizedParam = class QuantizedParam { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedParam(); + $.zeroPoint = reader.int32_(position, 4, 0); + $.scale = reader.float32_(position, 6, 0); + return $; + } +}; + +$root.MNN.QuantizedAdd = class QuantizedAdd { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedAdd(); + $.activationType = reader.int8_(position, 4, 0); + $.input1QuantizedParam = reader.table(position, 6, $root.MNN.QuantizedParam.decode); + $.input2QuantizedParam = reader.table(position, 8, $root.MNN.QuantizedParam.decode); + $.outputQuantizedParam = reader.table(position, 10, $root.MNN.QuantizedParam.decode); + return $; + } +}; + +$root.MNN.ModeFormat = { + TENSORFLOW: 0, + TFLITE: 1 +}; + +$root.MNN.QuantizeMode = { + MIN_COMBINED: 0, + MIN_FIRST: 1, + SCALED: 2 +}; + +$root.MNN.Dequantize = class Dequantize { + + static decode(reader, position) { + const $ = new $root.MNN.Dequantize(); + $.inputQuantizedParam = reader.table(position, 4, $root.MNN.QuantizedParam.decode); + $.mode = reader.int8_(position, 6, 0); + $.modelFormat = reader.int8_(position, 8, 0); + $.type = reader.int32_(position, 10, 0); + return $; + } +}; + +$root.MNN.QuantizedAvgPool = class QuantizedAvgPool { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedAvgPool(); + $.kernelX = reader.int32_(position, 4, 0); + $.kernelY = reader.int32_(position, 6, 0); + $.modelFormat = reader.int8_(position, 8, 0); + $.outputActivationMax = reader.int32_(position, 10, 0); + $.outputActivationMin = reader.int32_(position, 12, 0); + $.padType = reader.int8_(position, 14, 0); + $.padX = reader.int32_(position, 16, 0); + $.padY = reader.int32_(position, 18, 0); + $.strideX = reader.int32_(position, 20, 0); + $.strideY = reader.int32_(position, 22, 0); + $.type = reader.int32_(position, 24, 0); + return $; + } +}; + +$root.MNN.QuantizedBiasAdd = class QuantizedBiasAdd { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedBiasAdd(); + $.bias = reader.typedArray(position, 4, Int32Array); + $.inputType = reader.int32_(position, 6, 0); + $.max = reader.int32_(position, 8, 0); + $.min = reader.int32_(position, 10, 0); + $.outputType = reader.int32_(position, 12, 0); + return $; + } +}; + +$root.MNN.QuantizedConcat = class QuantizedConcat { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedConcat(); + $.activationType = reader.int8_(position, 4, 0); + $.axis = reader.int32_(position, 6, 0); + $.inputScale = 
reader.typedArray(position, 8, Float32Array); + $.inputZeroPoint = reader.typedArray(position, 10, Int32Array); + $.outputQuantizedParam = reader.table(position, 12, $root.MNN.QuantizedParam.decode); + return $; + } +}; + +$root.MNN.QuantizedLogistic = class QuantizedLogistic { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedLogistic(); + $.inputQuantizedParam = reader.table(position, 4, $root.MNN.QuantizedParam.decode); + $.outputQuantizedParam = reader.table(position, 6, $root.MNN.QuantizedParam.decode); + return $; + } +}; + +$root.MNN.QuantizedMatMul = class QuantizedMatMul { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedMatMul(); + $.transposeA = reader.bool_(position, 4, false); + $.transposeB = reader.bool_(position, 6, false); + return $; + } +}; + +$root.MNN.QuantizedMaxPool = class QuantizedMaxPool { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedMaxPool(); + $.kernelX = reader.int32_(position, 4, 0); + $.kernelY = reader.int32_(position, 6, 0); + $.modelFormat = reader.int8_(position, 8, 0); + $.outputActivationMax = reader.int32_(position, 10, 0); + $.outputActivationMin = reader.int32_(position, 12, 0); + $.padType = reader.int8_(position, 14, 0); + $.padX = reader.int32_(position, 16, 0); + $.padY = reader.int32_(position, 18, 0); + $.strideX = reader.int32_(position, 20, 0); + $.strideY = reader.int32_(position, 22, 0); + $.type = reader.int32_(position, 24, 0); + return $; + } +}; + +$root.MNN.QuantizedRelu = class QuantizedRelu { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedRelu(); + $.type = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.MNN.QuantizedRelu6 = class QuantizedRelu6 { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedRelu6(); + $.type = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.MNN.QuantizedReshape = class QuantizedReshape { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedReshape(); + $.dims = reader.typedArray(position, 4, Int32Array); + $.modelFormat = reader.int8_(position, 6, 0); + return $; + } +}; + +$root.MNN.QuantizedSoftmax = class QuantizedSoftmax { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizedSoftmax(); + $.beta = reader.float32_(position, 4, 0); + $.inputScale = reader.float32_(position, 6, 0); + return $; + } +}; + +$root.MNN.QuantizeRoundMode = { + HALF_AWAY_FROM_ZERO: 0, + HALF_TO_EVEN: 1 +}; + +$root.MNN.QuantizeV2 = class QuantizeV2 { + + static decode(reader, position) { + const $ = new $root.MNN.QuantizeV2(); + $.type = reader.int32_(position, 4, 0); + $.mode = reader.int8_(position, 6, 0); + $.roundMode = reader.int8_(position, 8, 0); + return $; + } +}; + +$root.MNN.RequantizationRange = class RequantizationRange { + + static decode(/* reader, position */) { + const $ = new $root.MNN.RequantizationRange(); + return $; + } +}; + +$root.MNN.Requantize = class Requantize { + + static decode(/* reader, position */) { + const $ = new $root.MNN.Requantize(); + return $; + } +}; + +$root.MNN.TfQuantizedConv2D = class TfQuantizedConv2D { + + static decode(reader, position) { + const $ = new $root.MNN.TfQuantizedConv2D(); + $.bias = reader.typedArray(position, 4, Int32Array); + $.biasflag = reader.bool_(position, 6, false); + $.common = reader.table(position, 8, $root.MNN.Convolution2DCommon.decode); + $.weight = reader.typedArray(position, 10, Uint8Array); + $.activationType = reader.int8_(position, 12, 0); + $.multiplier = 
reader.int32_(position, 14, 0); + $.outMax = reader.int32_(position, 16, 0); + $.outMin = reader.int32_(position, 18, 0); + $.shift = reader.int32_(position, 20, 0); + $.biasQuantizedParam = reader.table(position, 22, $root.MNN.QuantizedParam.decode); + $.depthMultiplier = reader.int32_(position, 24, 0); + $.filterQuantizedParam = reader.table(position, 26, $root.MNN.QuantizedParam.decode); + $.inputQuantizedParam = reader.table(position, 28, $root.MNN.QuantizedParam.decode); + $.modelFormat = reader.int8_(position, 30, 0); + $.outputQuantizedParam = reader.table(position, 32, $root.MNN.QuantizedParam.decode); + return $; + } +}; + +$root.MNN.ExtraInfo = class ExtraInfo { + + static decode(reader, position) { + const $ = new $root.MNN.ExtraInfo(); + $.buffer = reader.typedArray(position, 4, Int8Array); + $.name = reader.string_(position, 6, null); + $.version = reader.string_(position, 8, null); + return $; + } +}; + +$root.MNN.TensorConvertInfo = class TensorConvertInfo { + + static decode(reader, position) { + const $ = new $root.MNN.TensorConvertInfo(); + $.source = reader.int8_(position, 4, 0); + $.dest = reader.int8_(position, 6, 0); + return $; + } +}; + +$root.MNN.SampleMode = { + BILINEAR: 0, + NEAREST: 1 +}; + +$root.MNN.BorderMode = { + ZEROS: 0, + CLAMP: 1, + REFLECTION: 2, + CUBE: 3 +}; + +$root.MNN.GridSample = class GridSample { + + static decode(reader, position) { + const $ = new $root.MNN.GridSample(); + $.mode = reader.int8_(position, 4, 0); + $.paddingMode = reader.int8_(position, 6, 0); + $.alignCorners = reader.bool_(position, 8, false); + $.backward = reader.bool_(position, 10, false); + return $; + } +}; + +$root.MNN.ImageFormatType = { + RGBA: 0, + RGB: 1, + BGR: 2, + GRAY: 3, + BGRA: 4, + YCrCb: 5, + YUV: 6, + HSV: 7, + XYZ: 8, + BGR555: 9, + BGR565: 10, + YUV_NV21: 11, + YUV_NV12: 12, + YUV_I420: 13, + HSV_FULL: 14 +}; + +$root.MNN.FilterType = { + NEAREST: 0, + BILINEAR: 1, + BICUBIC: 2 +}; + +$root.MNN.WrapType = { + CLAMP_TO_EDGE: 0, + ZERO: 1, + REPEAT: 2 +}; + +$root.MNN.ImageProcessParam = class ImageProcessParam { + + static decode(reader, position) { + const $ = new $root.MNN.ImageProcessParam(); + $.filterType = reader.int8_(position, 4, 0); + $.sourceFormat = reader.int32_(position, 6, 0); + $.destFormat = reader.int32_(position, 8, 0); + $.wrap = reader.int8_(position, 10, 0); + $.mean = reader.typedArray(position, 12, Float32Array); + $.normal = reader.typedArray(position, 14, Float32Array); + $.transform = reader.typedArray(position, 16, Float32Array); + $.paddingValue = reader.int8_(position, 18, 0); + $.shape = reader.typedArray(position, 20, Int32Array); + $.outputType = reader.int32_(position, 22, 0); + $.draw = reader.bool_(position, 24, false); + return $; + } +}; + +$root.MNN.OpType = { + AbsVal: 0, + QuantizedAdd: 1, + ArgMax: 2, + AsString: 3, + InstanceNorm: 4, + BatchToSpaceND: 5, + Copy: 6, + BinaryOp: 7, + Bnll: 8, + Cast: 9, + Concat: 10, + Const: 11, + Convolution: 12, + ConvolutionDepthwise: 13, + Crop: 14, + CropAndResize: 15, + ImageProcess: 16, + Deconvolution: 17, + DeconvolutionDepthwise: 18, + Dequantize: 19, + DetectionOutput: 20, + Dropout: 21, + Eltwise: 22, + ELU: 23, + Unique: 24, + Exp: 25, + ExpandDims: 26, + Fill: 27, + Flatten: 28, + Im2Col: 29, + Gather: 30, + GatherV2: 31, + Im2Seq: 32, + InnerProduct: 33, + Input: 34, + Interp: 35, + Log: 36, + LRN: 37, + LSTM: 38, + MatMul: 39, + MVN: 40, + NonMaxSuppression: 41, + NonMaxSuppressionV2: 42, + Normalize: 43, + Pack: 44, + Padding: 45, + Permute: 46, + Pooling: 47, + 
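// numbering is sparse: 256+ are plugin/grad ops, 512+ are int8 ops, and
+ // 600+ are control flow and newer ops (see While/If/LayerNorm below). +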
Power: 48, + PReLU: 49, + PriorBox: 50, + Proposal: 51, + QuantizedAvgPool: 52, + QuantizedBiasAdd: 53, + QuantizedConcat: 54, + QuantizedDepthwiseConv2D: 55, + QuantizedLogistic: 56, + RasterAndInterpolate: 57, + QuantizedMaxPool: 58, + Texture: 59, + RasterDiff: 60, + QuantizedReshape: 61, + QuantizedSoftmax: 62, + QuantizeMaxMin: 63, + QuantizeV2: 64, + Range: 65, + Rank: 66, + ReduceJoin: 67, + Reduction: 68, + ReLU: 69, + ReLU6: 70, + RequantizationRange: 71, + Requantize: 72, + Reshape: 73, + Resize: 74, + RNN: 75, + ROIPooling: 76, + Scale: 77, + Selu: 78, + Seq2Out: 79, + Shape: 80, + Sigmoid: 81, + Size: 82, + Slice: 83, + SliceTf: 84, + Softmax: 85, + SpaceToBatchND: 86, + SpatialProduct: 87, + Col2Im: 88, + Segment: 89, + Squeeze: 90, + StridedSlice: 91, + StringJoin: 92, + StringSplit: 93, + StringToNumber: 94, + TanH: 95, + TfQuantizedConv2D: 96, + Threshold: 97, + Tile: 98, + TopKV2: 99, + Transpose: 100, + UnaryOp: 101, + Unpack: 102, + Where: 103, + Moments: 104, + RNNSequenceGRU: 105, + BatchMatMul: 106, + Unsqueeze: 107, + CosineSimilarity: 108, + DepthToSpace: 109, + SpaceToDepth: 110, + ReverseSequence: 111, + Pooling3D: 112, + Convolution3D: 113, + MatrixBandPart: 114, + GatherND: 115, + DetectionPostProcess: 116, + UnravelIndex: 117, + ScatterNd: 118, + OneHot: 119, + BroadcastTo: 120, + Dilation2D: 121, + Interp3D: 122, + Raster: 128, + ConvertTensor: 129, + ArgMin: 130, + LinSpace: 131, + RandomUniform: 132, + TensorArray: 133, + TensorArraySize: 134, + TensorArrayRead: 135, + TensorArrayWrite: 136, + TensorArrayGather: 137, + TensorArrayScatter: 138, + TensorArraySplit: 139, + TensorArrayConcat: 140, + LSTMBlockCell: 141, + Reverse: 142, + ROIAlign: 143, + RandomNormal: 144, + TensorArrayInsert: 145, + TensorArrayErase: 146, + EyeLike: 147, + CumSum: 148, + Det: 149, + CumProd: 150, + ScatterElements: 151, + GatherElements: 152, + Svd: 153, + Histogram: 154, + Plugin: 256, + Select: 257, + ZerosLike: 258, + Broastcast: 259, + SetDiff1D: 260, + ReluGrad: 261, + Identity: 262, + PoolGrad: 263, + SoftmaxGrad: 264, + Conv2DBackPropFilter: 265, + TrainableParam: 266, + BatchNorm: 267, + ConvTranspose3D: 268, + ZeroGrad: 269, + Extra: 512, + ConvInt8: 513, + Int8ToFloat: 514, + DepthwiseConvInt8: 515, + PoolInt8: 516, + FloatToInt8: 517, + EltwiseInt8: 518, + While: 600, + If: 601, + LayerNorm: 603, + GridSample: 604 +}; + +$root.MNN.Plugin = class Plugin { + + static decode(reader, position) { + const $ = new $root.MNN.Plugin(); + $.type = reader.string_(position, 4, null); + $.attr = reader.tableArray(position, 6, $root.MNN.Attribute.decode); + return $; + } +}; + +$root.MNN.Extra = class Extra { + + static decode(reader, position) { + const $ = new $root.MNN.Extra(); + $.type = reader.string_(position, 4, null); + $.engine = reader.string_(position, 6, null); + $.info = reader.typedArray(position, 8, Int8Array); + $.attr = reader.tableArray(position, 10, $root.MNN.Attribute.decode); + $.vector = reader.bool_(position, 12, false); + return $; + } +}; + +$root.MNN.StringVec = class StringVec { + + static decode(reader, position) { + const $ = new $root.MNN.StringVec(); + $.data = reader.strings_(position, 4); + return $; + } +}; + +$root.MNN.WhileParam = class WhileParam { + + static decode(reader, position) { + const $ = new $root.MNN.WhileParam(); + $.cond_graph = reader.string_(position, 4, null); + $.body_graph = reader.string_(position, 6, null); + $.aliases_inputs = reader.tableArray(position, 8, $root.MNN.StringVec.decode); + $.aliases_outputs = 
reader.strings_(position, 10); + $.aliases_updates = reader.tableArray(position, 12, $root.MNN.StringVec.decode); + return $; + } +}; + +$root.MNN.IfParam = class IfParam { + + static decode(reader, position) { + const $ = new $root.MNN.IfParam(); + $.then_graph = reader.string_(position, 4, null); + $.else_graph = reader.string_(position, 6, null); + $.aliases_inputs = reader.tableArray(position, 8, $root.MNN.StringVec.decode); + $.aliases_outputs = reader.tableArray(position, 10, $root.MNN.StringVec.decode); + return $; + } +}; + +$root.MNN.RegionCommand = class RegionCommand { + + static decode(reader, position) { + const $ = new $root.MNN.RegionCommand(); + $.op = reader.table(position, 4, $root.MNN.Op.decode); + $.steps = reader.typedArray(position, 6, Int32Array); + $.size = reader.typedArray(position, 8, Int32Array); + $.indexes = reader.typedArray(position, 10, Int32Array); + $.view = reader.tableArray(position, 12, $root.MNN.View.decode); + $.fuse = reader.int32_(position, 14, -1); + $.iterIndexes = reader.typedArray(position, 16, Int32Array); + return $; + } +}; + +$root.MNN.LoopParam = class LoopParam { + + static decode(reader, position) { + const $ = new $root.MNN.LoopParam(); + $.tensorNumber = reader.int32_(position, 4, 0); + $.outputIndexes = reader.typedArray(position, 6, Int32Array); + $.inputIndexes = reader.typedArray(position, 8, Int32Array); + $.extraTensorInfos = reader.tableArray(position, 10, $root.MNN.TensorDescribe.decode); + $.parallel = reader.bool_(position, 12, true); + $.loopNumber = reader.int32_(position, 14, 0); + $.commands = reader.tableArray(position, 16, $root.MNN.RegionCommand.decode); + $.initCommand = reader.tableArray(position, 18, $root.MNN.RegionCommand.decode); + return $; + } +}; + +$root.MNN.OpParameter = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.MNN.QuantizedAdd.decode(reader, position); + case 2: return $root.MNN.ArgMax.decode(reader, position); + case 3: return $root.MNN.AsString.decode(reader, position); + case 4: return $root.MNN.Axis.decode(reader, position); + case 5: return $root.MNN.BatchNorm.decode(reader, position); + case 6: return $root.MNN.BinaryOp.decode(reader, position); + case 7: return $root.MNN.Blob.decode(reader, position); + case 8: return $root.MNN.CastParam.decode(reader, position); + case 9: return $root.MNN.Convolution2D.decode(reader, position); + case 10: return $root.MNN.Crop.decode(reader, position); + case 11: return $root.MNN.CropAndResize.decode(reader, position); + case 12: return $root.MNN.Dequantize.decode(reader, position); + case 13: return $root.MNN.DetectionOutput.decode(reader, position); + case 14: return $root.MNN.Eltwise.decode(reader, position); + case 15: return $root.MNN.ExpandDims.decode(reader, position); + case 16: return $root.MNN.Fill.decode(reader, position); + case 17: return $root.MNN.Flatten.decode(reader, position); + case 18: return $root.MNN.Gather.decode(reader, position); + case 19: return $root.MNN.GatherV2.decode(reader, position); + case 20: return $root.MNN.InnerProduct.decode(reader, position); + case 21: return $root.MNN.Input.decode(reader, position); + case 22: return $root.MNN.Interp.decode(reader, position); + case 23: return $root.MNN.LRN.decode(reader, position); + case 24: return $root.MNN.LSTM.decode(reader, position); + case 25: return $root.MNN.MatMul.decode(reader, position); + case 26: return $root.MNN.NonMaxSuppressionV2.decode(reader, position); + case 27: return $root.MNN.Normalize.decode(reader, position); + 
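// each case index is the Op.main union discriminant; 0 (NONE) falls
+ // through to the default below. +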
case 28: return $root.MNN.PackParam.decode(reader, position); + case 29: return $root.MNN.Permute.decode(reader, position); + case 30: return $root.MNN.Plugin.decode(reader, position); + case 31: return $root.MNN.Pool.decode(reader, position); + case 32: return $root.MNN.PRelu.decode(reader, position); + case 33: return $root.MNN.PriorBox.decode(reader, position); + case 34: return $root.MNN.Proposal.decode(reader, position); + case 35: return $root.MNN.QuantizedAvgPool.decode(reader, position); + case 36: return $root.MNN.QuantizedBiasAdd.decode(reader, position); + case 37: return $root.MNN.QuantizedConcat.decode(reader, position); + case 38: return $root.MNN.QuantizedLogistic.decode(reader, position); + case 39: return $root.MNN.QuantizedMatMul.decode(reader, position); + case 40: return $root.MNN.QuantizedMaxPool.decode(reader, position); + case 41: return $root.MNN.QuantizedRelu.decode(reader, position); + case 42: return $root.MNN.QuantizedRelu6.decode(reader, position); + case 43: return $root.MNN.QuantizedReshape.decode(reader, position); + case 44: return $root.MNN.QuantizedSoftmax.decode(reader, position); + case 45: return $root.MNN.QuantizeMaxMin.decode(reader, position); + case 46: return $root.MNN.QuantizeV2.decode(reader, position); + case 47: return $root.MNN.Range.decode(reader, position); + case 48: return $root.MNN.Rank.decode(reader, position); + case 49: return $root.MNN.ReduceJoin.decode(reader, position); + case 50: return $root.MNN.ReductionParam.decode(reader, position); + case 51: return $root.MNN.Relu.decode(reader, position); + case 52: return $root.MNN.Relu6.decode(reader, position); + case 53: return $root.MNN.RequantizationRange.decode(reader, position); + case 54: return $root.MNN.Requantize.decode(reader, position); + case 55: return $root.MNN.Reshape.decode(reader, position); + case 56: return $root.MNN.Resize.decode(reader, position); + case 57: return $root.MNN.RoiParameters.decode(reader, position); + case 58: return $root.MNN.Scale.decode(reader, position); + case 59: return $root.MNN.Selu.decode(reader, position); + case 60: return $root.MNN.Size.decode(reader, position); + case 61: return $root.MNN.Slice.decode(reader, position); + case 62: return $root.MNN.SliceTf.decode(reader, position); + case 63: return $root.MNN.SpaceBatch.decode(reader, position); + case 64: return $root.MNN.SqueezeParam.decode(reader, position); + case 65: return $root.MNN.StridedSliceParam.decode(reader, position); + case 66: return $root.MNN.TensorConvertInfo.decode(reader, position); + case 67: return $root.MNN.TfQuantizedConv2D.decode(reader, position); + case 68: return $root.MNN.TopKV2.decode(reader, position); + case 69: return $root.MNN.Transpose.decode(reader, position); + case 70: return $root.MNN.UnaryOp.decode(reader, position); + case 71: return $root.MNN.MomentsParam.decode(reader, position); + case 72: return $root.MNN.RNNParam.decode(reader, position); + case 73: return $root.MNN.BatchMatMulParam.decode(reader, position); + case 74: return $root.MNN.QuantizedFloatParam.decode(reader, position); + case 75: return $root.MNN.DepthSpaceParam.decode(reader, position); + case 76: return $root.MNN.EltwiseInt8.decode(reader, position); + case 77: return $root.MNN.ReverseSequenceParam.decode(reader, position); + case 78: return $root.MNN.Extra.decode(reader, position); + case 79: return $root.MNN.Pool3D.decode(reader, position); + case 80: return $root.MNN.Convolution3D.decode(reader, position); + case 81: return $root.MNN.ELU.decode(reader, position); + case 82: 
return $root.MNN.DetectionPostProcessParam.decode(reader, position); + case 83: return $root.MNN.OneHotParam.decode(reader, position); + case 84: return $root.MNN.PadParam.decode(reader, position); + case 85: return $root.MNN.WhileParam.decode(reader, position); + case 86: return $root.MNN.IfParam.decode(reader, position); + case 87: return $root.MNN.RandomUniform.decode(reader, position); + case 88: return $root.MNN.LayerNorm.decode(reader, position); + case 89: return $root.MNN.TensorArray.decode(reader, position); + case 90: return $root.MNN.LSTMBlockCell.decode(reader, position); + case 91: return $root.MNN.GridSample.decode(reader, position); + case 92: return $root.MNN.LoopParam.decode(reader, position); + case 93: return $root.MNN.ImageProcessParam.decode(reader, position); + case 94: return $root.MNN.CumSum.decode(reader, position); + default: return undefined; + } + } +}; + +$root.MNN.Op = class Op { + + static decode(reader, position) { + const $ = new $root.MNN.Op(); + $.inputIndexes = reader.typedArray(position, 4, Int32Array); + $.main = reader.union(position, 6, $root.MNN.OpParameter.decode); + $.name = reader.string_(position, 10, null); + $.outputIndexes = reader.typedArray(position, 12, Int32Array); + $.type = reader.int32_(position, 14, 0); + $.defaultDimentionFormat = reader.int8_(position, 16, 1); + return $; + } +}; + +$root.MNN.View = class View { + + static decode(reader, position) { + const $ = new $root.MNN.View(); + $.offset = reader.int32_(position, 4, 0); + $.stride = reader.typedArray(position, 6, Int32Array); + return $; + } +}; + +$root.MNN.Region = class Region { + + static decode(reader, position) { + const $ = new $root.MNN.Region(); + $.src = reader.table(position, 4, $root.MNN.View.decode); + $.dst = reader.table(position, 6, $root.MNN.View.decode); + $.size = reader.typedArray(position, 8, Int32Array); + $.origin = reader.int32_(position, 10, 0); + return $; + } +}; + +$root.MNN.TensorDescribe = class TensorDescribe { + + static decode(reader, position) { + const $ = new $root.MNN.TensorDescribe(); + $.blob = reader.table(position, 4, $root.MNN.Blob.decode); + $.index = reader.int32_(position, 6, 0); + $.name = reader.string_(position, 8, null); + $.regions = reader.tableArray(position, 10, $root.MNN.Region.decode); + $.quantInfo = reader.table(position, 12, $root.MNN.TensorQuantInfo.decode); + return $; + } +}; + +$root.MNN.ForwardType = { + CPU: 0, + METAL: 1, + OPENCL: 2, + OPENGLES: 3, + VULKAN: 4 +}; + +$root.MNN.Usage = { + INFERENCE: 0, + TRAIN: 1, + INFERENCE_STATIC: 2 +}; + +$root.MNN.SubGraphProto = class SubGraphProto { + + static decode(reader, position) { + const $ = new $root.MNN.SubGraphProto(); + $.name = reader.string_(position, 4, null); + $.inputs = reader.typedArray(position, 6, Int32Array); + $.outputs = reader.typedArray(position, 8, Int32Array); + $.tensors = reader.strings_(position, 10); + $.nodes = reader.tableArray(position, 12, $root.MNN.Op.decode); + $.extraTensorDescribe = reader.tableArray(position, 14, $root.MNN.TensorDescribe.decode); + return $; + } +}; + +$root.MNN.TensorQuantInfo = class TensorQuantInfo { + + static decode(reader, position) { + const $ = new $root.MNN.TensorQuantInfo(); + $.scale = reader.float32_(position, 4, 0); + $.zero = reader.float32_(position, 6, 0); + $.min = reader.float32_(position, 8, -128); + $.max = reader.float32_(position, 10, 127); + $.type = reader.int32_(position, 12, 0); + return $; + } +}; + +$root.MNN.Net = class Net { + + static create(reader) { + return 
$root.MNN.Net.decode(reader, reader.root); + } + + static decode(reader, position) { + const $ = new $root.MNN.Net(); + $.bizCode = reader.string_(position, 4, null); + $.extraTensorDescribe = reader.tableArray(position, 6, $root.MNN.TensorDescribe.decode); + $.extraInfo = reader.table(position, 8, $root.MNN.ExtraInfo.decode); + $.oplists = reader.tableArray(position, 10, $root.MNN.Op.decode); + $.outputName = reader.strings_(position, 12); + $.preferForwardType = reader.int8_(position, 14, 0); + $.sourceType = reader.int8_(position, 16, 0); + $.tensorName = reader.strings_(position, 18); + $.tensorNumber = reader.int32_(position, 20, 0); + $.usage = reader.int8_(position, 22, 0); + $.subgraphs = reader.tableArray(position, 24, $root.MNN.SubGraphProto.decode); + $.mnn_uuid = reader.string_(position, 26, null); + return $; + } +}; diff --git a/mnn.js b/mnn.js new file mode 100644 index 00000000000..93f18b9f631 --- /dev/null +++ b/mnn.js @@ -0,0 +1,393 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const mnn = {}; + +mnn.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension == 'mnn') { + const stream = context.stream; + if (stream && stream.length >= 4) { + const buffer = stream.peek(4); + const reader = flatbuffers.BinaryReader.open(buffer); + if (reader.root === 0x00000018 || reader.root === 0x0000001C || reader.root === 0x00000020) { + return 'mnn.flatbuffers'; + } + } + } + return null; + } + + async open(context) { + await context.require('./mnn-schema'); + let net = null; + try { + mnn.schema = flatbuffers.get('mnn').MNN; + const stream = context.stream; + const reader = flatbuffers.BinaryReader.open(stream); + net = mnn.schema.Net.create(reader); + } catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new mnn.Error(`File format is not mnn.Net (${message.replace(/\.$/, '')}).`); + } + const metadata = await context.metadata('mnn-metadata.json'); + return new mnn.Model(metadata, net); + } +}; + +mnn.Model = class { + + constructor(metadata, net) { + this.format = 'MNN v2'; + const sources = new Map([ + [ mnn.schema.NetSource.CAFFE, 'Caffe' ], + [ mnn.schema.NetSource.TENSORFLOW, 'TensorFlow' ], + [ mnn.schema.NetSource.TFLITE, 'TensorFlow Lite' ], + [ mnn.schema.NetSource.ONNX, 'ONNX' ], + [ mnn.schema.NetSource.TORCH, 'Torch' ] + ]); + if (!sources.has(net.sourceType)) { + throw new mnn.Error(`Unsupported model source '${net.sourceType}'.`); + } + this.metadata = new Map(); + this.metadata.set('source', sources.get(net.sourceType)); + this.graphs = [ new mnn.Graph(metadata, net) ]; + } +}; + +mnn.Graph = class { + + constructor(metadata, net) { + this.name = ''; + this.nodes = []; + this.inputs = []; + this.outputs = []; + for (let i = 0; i < net.tensorName.length; i++) { + if (net.tensorName[i] === '') { + net.tensorName[i] = `\n${i}`; + } + } + const inputs = new Map(); + for (const op of net.oplists) { + for (const input of op.inputIndexes) { + inputs.set(input, (inputs.get(input) || 0) + 1); + } + } + const consts = new Map(); + const oplists = net.oplists.filter((op) => { + if (op.type === mnn.schema.OpType.Const && + op.inputIndexes.length === 0 && + op.outputIndexes.length === 1 && + op.main instanceof mnn.schema.Blob && + inputs.get(op.outputIndexes[0]) === 1) { + consts.set(op.outputIndexes[0], op); + return false; + } + return true; + }); + const values = new Map(); + values.map = (index) => { + if (!values.has(index)) { + const name = net.tensorName[index]; + const op = consts.get(index); + if (op) { + const tensor = op ? mnn.Utility.createTensor(op.main, 'Const') : null; + values.set(index, new mnn.Value(name, null, tensor)); + } else { + const extraTensorDescribe = net.extraTensorDescribe[index]; + const blob = extraTensorDescribe ? extraTensorDescribe.blob : null; + const type = blob && blob.dims && blob.dims.length > 0 ? 
new mnn.TensorType(blob.dataType, new mnn.TensorShape(blob.dims), blob.dataFormat) : null; + values.set(index, new mnn.Value(name, type, null)); + } + } + return values.get(index); + }; + + for (const op of oplists) { + if (op.type === mnn.schema.OpType.Input) { + const args = Array.from(op.outputIndexes).map((index) => values.map(index)); + const argument = new mnn.Argument(op.name, args); + this.inputs.push(argument); + } else { + const node = new mnn.Node(metadata, op, net, values); + this.nodes.push(node); + } + } + + for (let i = 0; i < net.tensorName.length; i++) { + if (!inputs.has(i)) { + const value = values.map(i); + const argument = new mnn.Argument(value.name, [ value ]); + this.outputs.push(argument); + } + } + } +}; + +mnn.Node = class { + + constructor(metadata, op, net, values) { + const type = mnn.Utility.enum('OpType', op.type) || `(${op.type})`; + this.type = metadata.type(type) || { name: type }; + this.name = op.name || ''; + this.attributes = []; + this.inputs = []; + this.outputs = []; + this.chains = []; + if (op.inputIndexes && op.inputIndexes.length > 0) { + const argument = new mnn.Argument('input', Array.from(op.inputIndexes).map((index) => values.map(index))); + this.inputs.push(argument); + } + if (op.outputIndexes && op.outputIndexes.length > 0) { + const argument = new mnn.Argument('output', Array.from(op.outputIndexes).map((index) => values.map(index))); + this.outputs.push(argument); + } + const param = op.main; + if (param) { + const parameters = [ param ]; + if (param instanceof mnn.schema.Blob) { + const tensor = mnn.Utility.createTensor(param, 'Blob'); + const value = new mnn.Value('', null, tensor); + const argument = new mnn.Argument('value', [ value ]); + this.inputs.push(argument); + parameters.splice(0, parameters.length); + } else if (param instanceof mnn.schema.Convolution2D) { + const common = param.common; + const outputCount = common.outputCount; + const inputCount = common.inputCount; + const kernelX = common.kernelX; + const kernelY = common.kernelY; + this._buildTensor('weight', mnn.schema.DataType.DT_FLOAT, [ outputCount, inputCount, kernelX, kernelY ], param.weight); + this._buildTensor('bias', mnn.schema.DataType.DT_FLOAT, [ outputCount ], param.bias); + delete param.weight; + delete param.bias; + delete param.quanParameter; + delete param.symmetricQuan; + } else if (param instanceof mnn.schema.InnerProduct) { + const outputCount = param.outputCount; + const inputCount = param.weightSize / outputCount; + this._buildTensor('weight', mnn.schema.DataType.DT_FLOAT, [ outputCount, inputCount ], param.weight); + this._buildTensor('bias', mnn.schema.DataType.DT_FLOAT, [ outputCount ], param.bias); + delete param.weight; + delete param.bias; + delete param.quanParameter; + } else if (param instanceof mnn.schema.Scale) { + const scaleDataCount = param.channels; + this._buildTensor('scale', mnn.schema.DataType.DT_FLOAT, [ scaleDataCount ], param.scaleData); + this._buildTensor('bias', mnn.schema.DataType.DT_FLOAT, [ scaleDataCount ], param.biasData); + delete param.scaleData; + delete param.biasData; + } else if (param instanceof mnn.schema.BatchNorm) { + const channels = param.channels; + this._buildTensor('mean', mnn.schema.DataType.DT_FLOAT, [ channels ], param.meanData); + this._buildTensor('slope', mnn.schema.DataType.DT_FLOAT, [ channels ], param.slopeData); + this._buildTensor('variance', mnn.schema.DataType.DT_FLOAT, [ channels ], param.varData); + this._buildTensor('bias', mnn.schema.DataType.DT_FLOAT, [ channels ], param.biasData); + 
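+ // The raw weight arrays consumed by _buildTensor above are deleted
+ // from the parameter object so that the generic attribute loop below
+ // does not surface them a second time as (potentially very large)
+ // node attributes.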
delete param.slopeData; + delete param.meanData; + delete param.varData; + delete param.biasData; + } else if (param instanceof mnn.schema.PRelu) { + this._buildTensor('slope', mnn.schema.DataType.DT_FLOAT, [ param.slopeCount ], param.slope); + delete param.slope; + } else if (param instanceof mnn.schema.Normalize) { + this._buildTensor('scale', mnn.schema.DataType.DT_FLOAT, [ param.scale.length ], param.scale); + delete param.scale; + } + while (parameters.length > 0) { + const parameter = parameters.shift(); + for (const [key, value] of Object.entries(parameter)) { + if (Object.keys(mnn.schema).find((key) => mnn.schema[key].prototype && value instanceof mnn.schema[key])) { + parameters.push(value); + continue; + } + const attribute = new mnn.Attribute(metadata.attribute(type, key), key, value); + this.attributes.push(attribute); + } + } + } + } + + _buildTensor(name, dataType, dimensions, value) { + const shape = new mnn.TensorShape(dimensions); + const type = new mnn.TensorType(dataType, shape); + const tensor = new mnn.Tensor('Weight', type, value); + const argument = new mnn.Argument(name, [ new mnn.Value('', null, tensor) ]); + this.inputs.push(argument); + } +}; + +mnn.Attribute = class { + + constructor(metadata, name, value, visible) { + this.type = null; + this.value = ArrayBuffer.isView(value) ? Array.from(value) : value; + this.name = name; + this.visible = visible !== false; + if (metadata && metadata.type) { + this.type = metadata.type; + switch (this.type) { + case 'DataType': + this.value = mnn.Utility.dataType(this.value); + break; + default: + this.value = mnn.Utility.enum(this.type, this.value); + break; + } + } + } +}; + +mnn.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +mnn.Value = class { + + constructor(name, type, initializer) { + this.name = name; + this.type = type ? type : initializer ? initializer.type : null; + this.initializer = initializer || null; + } +}; + +mnn.Tensor = class { + + constructor(category, type, data) { + this.category = category; + this.type = type; + switch (type.dataType) { + case 'int32': + case 'float32': + this.encoding = '|'; + this.values = data ? data.slice(0) : null; + break; + case 'uint8': + case 'float16': + this.encoding = '<'; + this.values = data ? data.slice(0) : null; + break; + default: + throw new mnn.Error(`Unsupported data type '${type.dataType}'.`); + } + } +}; + +mnn.TensorType = class { + + constructor(dataType, shape, format) { + this.dataType = mnn.Utility.dataType(dataType); + this.shape = shape; + if (format) { + switch (format) { + case mnn.schema.MNN_DATA_FORMAT.NCHW: this.denotation = 'NCHW'; break; + case mnn.schema.MNN_DATA_FORMAT.NHWC: this.denotation = 'NHWC'; break; + case mnn.schema.MNN_DATA_FORMAT.NC4HW4: this.denotation = 'NC4HW4'; break; + case mnn.schema.MNN_DATA_FORMAT.NHWC4: this.denotation = 'NHWC4'; break; + default: throw new mnn.Error(`Unsupported tensor type format '${format}'.`); + } + } + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +mnn.TensorShape = class { + + constructor(dimensions) { + this.dimensions = Array.from(dimensions); + } + + toString() { + if (this.dimensions && this.dimensions.length > 0) { + return `[${this.dimensions.map((dimension) => dimension ?
dimension.toString() : '?').join(',')}]`; + } + return ''; + } +}; + +mnn.Utility = class { + + static dataType(type) { + switch (type) { + case mnn.schema.DataType.DT_INVALID: return '?'; + case mnn.schema.DataType.DT_FLOAT: return 'float32'; + case mnn.schema.DataType.DT_DOUBLE: return 'float64'; + case mnn.schema.DataType.DT_INT32: return 'int32'; + case mnn.schema.DataType.DT_UINT8: return 'uint8'; + case mnn.schema.DataType.DT_INT16: return 'int16'; + case mnn.schema.DataType.DT_INT8: return 'int8'; + case mnn.schema.DataType.DT_STRING: return 'string'; + case mnn.schema.DataType.DT_COMPLEX64: return 'complex64'; + case mnn.schema.DataType.DT_INT64: return 'int64'; + case mnn.schema.DataType.DT_BOOL: return 'boolean'; + case mnn.schema.DataType.DT_QINT8: return 'qint8'; + case mnn.schema.DataType.DT_QUINT8: return 'quint8'; + case mnn.schema.DataType.DT_QINT32: return 'qint32'; + case mnn.schema.DataType.DT_BFLOAT16: return 'bfloat16'; + case mnn.schema.DataType.DT_QINT16: return 'qint16'; + case mnn.schema.DataType.DT_QUINT16: return 'quint16'; + case mnn.schema.DataType.DT_UINT16: return 'uint16'; + case mnn.schema.DataType.DT_COMPLEX128: return 'complex128'; + case mnn.schema.DataType.DT_HALF: return 'float16'; + case mnn.schema.DataType.DT_RESOURCE: return 'resource'; + case mnn.schema.DataType.DT_VARIANT: return 'variant'; + default: throw new mnn.Error(`Unsupported data type '${JSON.stringify(type)}'.`); + } + } + + static enum(name, value) { + const type = name && mnn.schema ? mnn.schema[name] : undefined; + if (type) { + mnn.Utility._enumKeyMap = mnn.Utility._enumKeyMap || new Map(); + if (!mnn.Utility._enumKeyMap.has(name)) { + const map = new Map(); + for (const key of Object.keys(type)) { + map.set(type[key], key); + } + mnn.Utility._enumKeyMap.set(name, map); + } + const map = mnn.Utility._enumKeyMap.get(name); + if (map.has(value)) { + return map.get(value); + } + } + return value.toString(); + } + + static createTensor(param, category) { + const shape = new mnn.TensorShape(param.dims); + const type = new mnn.TensorType(param.dataType, shape, param.dataFormat); + let data = null; + switch (type.dataType) { + case 'uint8': data = param.uint8s; break; + case 'int8': data = param.int8s; break; + case 'int32': data = param.int32s; break; + case 'int64': data = param.int64s; break; + case 'float16': data = param.uint8s; break; + case 'float32': data = param.float32s; break; + default: throw new mnn.Error(`Unsupported blob data type '${JSON.stringify(type.dataType)}'.`); + } + return new mnn.Tensor(category, type, data); + } +}; + +mnn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading MNN model.'; + } +}; + +export const ModelFactory = mnn.ModelFactory; + diff --git a/modular.js b/modular.js new file mode 100644 index 00000000000..33d73f90616 --- /dev/null +++ b/modular.js @@ -0,0 +1,154 @@ + +const modular = {}; + +modular.ModelFactory = class { + + match(context) { + const obj = context.peek('json'); + if (obj && obj.signature == "netron:modular") { + return obj; + } + return null; + } + + async open(context, target) { + return new modular.Model(target); + } +}; + +modular.Model = class { + + constructor(obj) { + this._graphs = obj.graphs.map((graph) => new modular.Graph(graph)); + } + + get format() { + return 'Modular'; + } + + get graphs() { + return this._graphs; + } +}; + +modular.Graph = class { + + constructor(graph) { + this._nodes = Array.from(graph.nodes.map((node) => new modular.Node(node))); + this._inputs = 
[]; + this._outputs = []; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +modular.Argument = class { + + constructor(name, value) { + this._name = name; + this._value = Array.from(value.map((value) => new modular.Value(value.toString(), name))); + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +modular.Value = class { + + constructor(name, value) { + if (typeof name !== 'string') { + throw new modular.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +modular.Node = class { + + constructor(node) { + this._name = node.type.name; + if (node.type.category == 'List') { + this._category = 'Data'; + } else if (node.type.category == 'ControlFlow') { + this._category = 'Control'; + } else { + this._category = node.type.category; + } + this._type = { name: this._name, category: this._category }; + this._attributes = node.attributes ? + Array.from(node.attributes).map((attribute) => new modular.Attribute(attribute.name, attribute.value)) : + []; + this._inputs = Array.from(node.inputs.map((input) => new modular.Argument(input.name, input.arguments))); + this._outputs = Array.from(node.outputs.map((output) => new modular.Argument(output.name, output.arguments))); + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } +}; + +modular.Attribute = class { + + constructor(name, value) { + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +modular.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Modular model.'; + } +}; + +export const ModelFactory = modular.ModelFactory; diff --git a/mslite-metadata.json b/mslite-metadata.json new file mode 100644 index 00000000000..f0e833c5a66 --- /dev/null +++ b/mslite-metadata.json @@ -0,0 +1,3173 @@ +[ + { + "name": "Abs", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "AbsGrad" + }, + { + "name": "Activation", + "category": "Activation", + "attributes": [ + { "name": "type", "type": "ActivationType" }, + { "name": "alpha", "type": "float32", "default": 0.2 }, + { "name": "min_val", "type": "float32", "default": -1 }, + { "name": "max_val", "type": "float32", "default": 1 }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" }, + { "name": "approximate", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ActivationGrad", + "attributes": [ + { "name": "type", "type": "ActivationType" }, + { "name": "alpha", "type": "float32", "default": 0.2 }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ], + "inputs": [ + { "name": "yt" }, + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Adam", + "attributes": [ + { "name": "useNesterov", "type": "boolean", "default": false }, + { "name": "use_locking", "type": "boolean", "default": false }, + { "name": "use_nesterov", "type": "boolean", "default": false } + ], +
"inputs": [ + { "name": "weight" }, + { "name": "m" }, + { "name": "v" }, + { "name": "beta1_power" }, + { "name": "beta22_power" }, + { "name": "learning_rate" }, + { "name": "beta1" }, + { "name": "beta2" }, + { "name": "eps" }, + { "name": "gradient" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "AdamWeightDecay", + "attributes": [ + { "name": "use_locking", "type": "boolean", "default": false } + ] + }, + { + "name": "Add", + "attributes": [ + { "name": "ActivationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "AdderFusion", + "attributes": [ + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "kernel_size", "type": "int64[]", "default": 0 }, + { "name": "stride", "type": "int64[]", "default": 0 }, + { "name": "dilation", "type": "int64[]", "default": 0 }, + { "name": "pad_mode", "type": "PadMode", "default": "PAD" }, + { "name": "pad_list", "type": "int64[]", "default": 0 }, + { "name": "group", "type": "int64", "default": 0 }, + { "name": "in_channel", "type": "int64", "default": 0 }, + { "name": "out_channel", "type": "int64", "default": 0 }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ] + }, + { + "name": "AddFold" + }, + { + "name": "AddFusion", + "attributes": [ + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ], + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "AddGrad", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "dx1" }, + { "name": "dx2" } + ] + }, + { + "name": "AddN", + "attributes": [ + { "name": "N", "type": "int32", "default": 0 } + ], + "outputs": [ + { "name": "sums" } + ] + }, + { + "name": "Affine", + "attributes": [ + { "name": "context", "type": "int64[]", "default": 0 }, + { "name": "output_dim", "type": "int64", "default": 0 }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" }, + { "name": "transpose_a", "type": "boolean", "default": false }, + { "name": "transpose_b", "type": "boolean", "default": false } + ] + }, + { + "name": "All", + "attributes": [ + { "name": "keepDims", "type": "int32", "default": 0 }, + { "name": "keep_dims", "type": "int64", "default": 0 } + ] + }, + { + "name": "AllGather", + "attributes": [ + { "name": "group", "type": "string", "default": null }, + { "name": "rank_size", "type": "int32", "default": 0 } + ] + }, + { + "name": "ApplyMomentum", + "attributes": [ + { "name": "gradientScale", "type": "float32", "default": 0 }, + { "name": "useNesterov", "type": "boolean", "default": false }, + { "name": "use_nesterov", "type": "boolean", "default": false }, + { "name": "use_locking", "type": "boolean", "default": false }, + { "name": "gradient_scale", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "weight" }, + { "name": "accumulate" }, + { "name": "learning_rate" }, + { "name": "gradient" }, + { "name": "moment" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ArgMax", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "outMaxValue", "type": "boolean", "default": false }, + { "name": "topK", "type": "int32", "default": 1 }, + { "name": "keepDims", "type": "boolean", "default": false }, + { "name": "axisType", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { 
"name": "output" } + ] + }, + { + "name": "ArgMaxFusion", + "attributes": [ + { "name": "axis", "type": "int64", "default": 0 }, + { "name": "top_k", "type": "int64", "default": 1 }, + { "name": "keep_dims", "type": "boolean", "default": false }, + { "name": "out_max_value", "type": "boolean", "default": false } + ] + }, + { + "name": "ArgMin", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "outMaxValue", "type": "boolean", "default": false }, + { "name": "topK", "type": "int32", "default": 1 }, + { "name": "keepDims", "type": "boolean", "default": false }, + { "name": "axisType", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ArgMinFusion", + "attributes": [ + { "name": "axis", "type": "int64", "default": 0 }, + { "name": "top_k", "type": "int64", "default": 0 }, + { "name": "keep_dims", "type": "boolean", "default": false }, + { "name": "out_max_value", "type": "boolean", "default": false } + ] + }, + { + "name": "Asin", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Assert", + "attributes": [ + { "name": "summarize", "type": "int64", "default": 0 } + ] + }, + { + "name": "Assign", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "AssignAdd" + }, + { + "name": "Atan", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Attention", + "attributes": [ + { "name": "head_num", "type": "int64", "default": 0 }, + { "name": "head_size", "type": "int64", "default": 0 }, + { "name": "cross", "type": "boolean", "default": false }, + { "name": "scale", "type": "float32", "default": 0 } + ] + }, + { + "name": "AudioSpectrogram", + "attributes": [ + { "name": "windowSize", "type": "int32", "default": 0 }, + { "name": "stride", "type": "int64", "default": 0 }, + { "name": "magSquare", "type": "boolean", "default": false }, + { "name": "window_size", "type": "int64", "default": 0 }, + { "name": "mag_square", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "spectrogram" } + ] + }, + { + "name": "AvgPoolFusion", + "category": "Pool", + "attributes": [ + { "name": "kernel_size", "type": "int64[]", "default": 0 }, + { "name": "strides", "type": "int64[]", "default": 0 }, + { "name": "pad", "type": "int64[]", "default": 0 }, + { "name": "pad_mode", "type": "PadMode", "default": "PAD" }, + { "name": "round_mode", "type": "RoundMode", "default": "FLOOR" }, + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "global", "type": "boolean", "default": false }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "AvgPoolGrad", + "attributes": [ + { "name": "kernel_size", "type": "int64[]", "default": 0 }, + { "name": "strides", "type": "int64[]", "default": 0 }, + { "name": "pad_mode", "type": "PadMode", "default": "PAD" }, + { "name": "format", "type": "Format", "default": "NCHW" } + ] + }, + { + "name": "BatchNorm", + "category": "Normalization", + "attributes": [ + { "name": "epsilon", "type": "float32", "default": 0.00001 }, + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "is_training", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "mean" }, 
+ { "name": "variance" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "BatchNormFold" + }, + { + "name": "BatchNormGrad", + "attributes": [ + { "name": "epsilon", "type": "float32", "default": 0 }, + { "name": "is_training", "type": "boolean", "default": false } + ] + }, + { + "name": "BatchToSpace", + "attributes": [ + { "name": "blockShape", "type": "int32[]" }, + { "name": "crops", "type": "Vec2D" }, + { "name": "block_size", "type": "int64[]", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "BatchToSpaceND", + "attributes": [ + { "name": "blockShape", "type": "int32[]" }, + { "name": "crops", "type": "Vec2D" }, + { "name": "block_shape", "type": "int64[]", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "BiasAdd", + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "int32[]" }, + { "name": "format", "type": "Format", "default": "NCHW" } + ], + "inputs": [ + { "name": "value" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "BiasAddGrad" + }, + { + "name": "BiasGrad", + "attributes": [ + { "name": "axis", "type": "int32[]" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "BinaryCrossEntropy", + "attributes": [ + { "name": "reduction", "type": "Reduction", "default": 1 } + ] + }, + { + "name": "BinaryCrossEntropyGrad", + "attributes": [ + { "name": "reduction", "type": "Reduction", "default": 1 } + ] + }, + { + "name": "BlackBox", + "attributes": [ + { "name": "id", "type": "string" }, + { "name": "size", "type": "int32", "default": 0 }, + { "name": "address", "type": "ubyte[]" } + ] + }, + { + "name": "BNGrad", + "attributes": [ + { "name": "eps", "type": "float32", "default": 0 }, + { "name": "momentum", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "yt" }, + { "name": "x" }, + { "name": "scale" } + ], + "outputs": [ + { "name": "dx" }, + { "name": "scale" }, + { "name": "bias" } + ] + }, + { + "name": "Broadcast", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "BroadcastTo", + "attributes": [ + { "name": "dst_shape", "type": "int32[]" }, + { "name": "shape", "type": "int64[]", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Call", + "attributes": [ + { "name": "is_tail_call", "type": "boolean", "default": true } + ] + }, + { + "name": "Cast", + "attributes": [ + { "name": "srcT", "type": "int32", "default": 0 }, + { "name": "dstT", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Ceil", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Clip", + "attributes": [ + { "name": "max", "type": "float32", "default": 0 }, + { "name": "min", "type": "float32", "default": 0 } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Concat", + "category": "Tensor", + "attributes": [ + { "name": "axis", "type": "int64", "default": 0 }, + { "name": "n", "type": "int32", "default": 0 } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Constant", + "category": "Constant", + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ConstantOfShape", + "attributes": [ + { "name": "dataType", "type": "int32", "default": 0 
}, + { "name": "value", "type": "float32[]", "default": 0 }, + { "name": "data_type", "type": "int64", "default": 0 } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ControlDepend", + "attributes": [ + { "name": "depend_mode", "type": "int64", "default": 0 } + ] + }, + { + "name": "Conv2D", + "category": "Layer", + "attributes": [ + { "name": "format", "type": "Format" }, + { "name": "group", "type": "int32", "default": 0 }, + { "name": "channelIn", "type": "int32", "default": 0 }, + { "name": "channelOut", "type": "int32", "default": 0 }, + { "name": "kernelW", "type": "int32", "default": 0 }, + { "name": "kernelH", "type": "int32", "default": 0 }, + { "name": "strideW", "type": "int32", "default": 0 }, + { "name": "strideH", "type": "int32", "default": 0 }, + { "name": "dilateW", "type": "int32", "default": 0 }, + { "name": "dilateH", "type": "int32", "default": 0 }, + { "name": "padMode", "type": "PadMode" }, + { "name": "padUp", "type": "int32", "default": 0 }, + { "name": "padDown", "type": "int32", "default": 0 }, + { "name": "padLeft", "type": "int32", "default": 0 }, + { "name": "padRight", "type": "int32", "default": 0 }, + { "name": "hasBias", "type": "boolean", "default": false }, + { "name": "activationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Conv2DBackpropFilterFusion", + "attributes": [ + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "kernel_size", "type": "int64[]", "default": 0 }, + { "name": "stride", "type": "int64[]", "default": 0 }, + { "name": "dilation", "type": "int64[]", "default": 0 }, + { "name": "pad_mode", "type": "PadMode", "default": "PAD" }, + { "name": "pad_list", "type": "int64[]", "default": 0 }, + { "name": "mode", "type": "int64", "default": 0 }, + { "name": "group", "type": "int64", "default": 0 }, + { "name": "in_channel", "type": "int64", "default": 0 }, + { "name": "out_channel", "type": "int64", "default": 0 }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ] + }, + { + "name": "Conv2DBackpropInputFusion", + "attributes": [ + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "kernel_size", "type": "int64[]", "default": 0 }, + { "name": "stride", "type": "int64[]", "default": 0 }, + { "name": "dilation", "type": "int64[]", "default": 0 }, + { "name": "pad_mode", "type": "PadMode", "default": "PAD" }, + { "name": "pad", "type": "int64[]", "default": 0 }, + { "name": "pad_list", "type": "int64[]", "default": 0 }, + { "name": "mode", "type": "int64", "default": 0 }, + { "name": "group", "type": "int64", "default": 0 }, + { "name": "in_channel", "type": "int64", "default": 0 }, + { "name": "out_channel", "type": "int64", "default": 0 }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ] + }, + { + "name": "Conv2DFusion", + "category": "Layer", + "attributes": [ + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "kernel_size", "type": "int64[]", "default": 0 }, + { "name": "stride", "type": "int64[]", "default": 0 }, + { "name": "dilation", "type": "int64[]", "default": 0 }, + { "name": "pad_mode", "type": "PadMode", "default": "PAD" }, + { "name": "pad_list", "type": "int64[]", "default": 0 }, + { "name": "mode", "type": "int64", "default": 0 }, + { "name": "group", "type": "int64", "default": 0 }, + { "name": "in_channel", "type": "int64", 
"default": 0 }, + { "name": "out_channel", "type": "int64", "default": 0 }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Conv2DGradFilter", + "attributes": [ + { "name": "format", "type": "Format" }, + { "name": "group", "type": "int32", "default": 0 }, + { "name": "channelIn", "type": "int32", "default": 0 }, + { "name": "channelOut", "type": "int32", "default": 0 }, + { "name": "kernelW", "type": "int32", "default": 0 }, + { "name": "kernelH", "type": "int32", "default": 0 }, + { "name": "strideW", "type": "int32", "default": 0 }, + { "name": "strideH", "type": "int32", "default": 0 }, + { "name": "dilateW", "type": "int32", "default": 0 }, + { "name": "dilateH", "type": "int32", "default": 0 }, + { "name": "padMode", "type": "PadMode" }, + { "name": "padUp", "type": "int32", "default": 0 }, + { "name": "padDown", "type": "int32", "default": 0 }, + { "name": "padLeft", "type": "int32", "default": 0 }, + { "name": "padRight", "type": "int32", "default": 0 }, + { "name": "hasBias", "type": "boolean", "default": false }, + { "name": "activationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "dy" }, + { "name": "x" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "dw" } + ] + }, + { + "name": "Conv2DGradInput", + "attributes": [ + { "name": "format", "type": "Format" }, + { "name": "group", "type": "int32", "default": 0 }, + { "name": "channelIn", "type": "int32", "default": 0 }, + { "name": "channelOut", "type": "int32", "default": 0 }, + { "name": "kernelW", "type": "int32", "default": 0 }, + { "name": "kernelH", "type": "int32", "default": 0 }, + { "name": "strideW", "type": "int32", "default": 0 }, + { "name": "strideH", "type": "int32", "default": 0 }, + { "name": "dilateW", "type": "int32", "default": 0 }, + { "name": "dilateH", "type": "int32", "default": 0 }, + { "name": "padMode", "type": "PadMode" }, + { "name": "padUp", "type": "int32", "default": 0 }, + { "name": "padDown", "type": "int32", "default": 0 }, + { "name": "padLeft", "type": "int32", "default": 0 }, + { "name": "padRight", "type": "int32", "default": 0 }, + { "name": "hasBias", "type": "boolean", "default": false }, + { "name": "activationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "dy" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "dx" } + ] + }, + { + "name": "Conv2dTransposeFusion", + "attributes": [ + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "kernel_size", "type": "int64[]", "default": 0 }, + { "name": "stride", "type": "int64[]", "default": 0 }, + { "name": "dilation", "type": "int64[]", "default": 0 }, + { "name": "pad_mode", "type": "PadMode", "default": "PAD" }, + { "name": "pad", "type": "int64[]", "default": 0 }, + { "name": "pad_list", "type": "int64[]", "default": 0 }, + { "name": "mode", "type": "int64", "default": 0 }, + { "name": "group", "type": "int64", "default": 0 }, + { "name": "in_channel", "type": "int64", "default": 0 }, + { "name": "out_channel", "type": "int64", "default": 0 }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" }, + { "name": "output_paddings", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "Cos", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Crop", + "category": "Data", + "attributes": [ + { "name": "axis", 
"type": "int64", "default": 0 }, + { "name": "offsets", "type": "int64[]", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "CropAndResize", + "attributes": [ + { "name": "method", "type": "ResizeMethod", "default": "LINEAR" }, + { "name": "extrapolation_value", "type": "float32", "default": 0 } + ] + }, + { + "name": "CumSum", + "attributes": [ + { "name": "exclusive", "type": "boolean", "default": false }, + { "name": "reverse", "type": "boolean", "default": false } + ] + }, + { + "name": "Custom", + "attributes": [ + { "name": "custom", "type": "ubyte[]" }, + { "name": "type", "type": "string", "default": null }, + { "name": "attr", "type": "Attribute[]" } + ] + }, + { + "name": "CustomExtractFeatures", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "label" }, + { "name": "weight" } + ] + }, + { + "name": "CustomNormalize", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "CustomPredict", + "attributes": [ + { "name": "outputNum", "type": "int32", "default": 0 }, + { "name": "weightThreshold", "type": "float32", "default": 0 }, + { "name": "output_num", "type": "int64", "default": 0 }, + { "name": "weight_threshold", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "key" }, + { "name": "label" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "label" }, + { "name": "weight" } + ] + }, + { + "name": "DeConv2D", + "category": "Layer", + "attributes": [ + { "name": "format", "type": "Format" }, + { "name": "group", "type": "int32", "default": 0 }, + { "name": "channelIn", "type": "int32", "default": 0 }, + { "name": "channelOut", "type": "int32", "default": 0 }, + { "name": "kernelW", "type": "int32", "default": 0 }, + { "name": "kernelH", "type": "int32", "default": 0 }, + { "name": "strideW", "type": "int32", "default": 0 }, + { "name": "strideH", "type": "int32", "default": 0 }, + { "name": "dilateW", "type": "int32", "default": 0 }, + { "name": "dilateH", "type": "int32", "default": 0 }, + { "name": "padMode", "type": "PadMode" }, + { "name": "padUp", "type": "int32", "default": 0 }, + { "name": "padDown", "type": "int32", "default": 0 }, + { "name": "padLeft", "type": "int32", "default": 0 }, + { "name": "padRight", "type": "int32", "default": 0 }, + { "name": "hasBias", "type": "boolean", "default": false }, + { "name": "activationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "DeConv2DGradFilter", + "attributes": [ + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "group", "type": "int64", "default": 0 }, + { "name": "channelIn", "type": "int32", "default": 0 }, + { "name": "channelOut", "type": "int32", "default": 0 }, + { "name": "kernelW", "type": "int32", "default": 0 }, + { "name": "kernelH", "type": "int32", "default": 0 }, + { "name": "strideW", "type": "int32", "default": 0 }, + { "name": "strideH", "type": "int32", "default": 0 }, + { "name": "dilateW", "type": "int32", "default": 0 }, + { "name": "dilateH", "type": "int32", "default": 0 }, + { "name": "padMode", "type": "PadMode" }, + { "name": "padUp", "type": "int32", "default": 0 }, + { "name": "padDown", "type": "int32", "default": 0 }, + { "name": "padLeft", "type": "int32", "default": 0 }, + { "name": "padRight", "type": "int32", "default": 0 }, + { "name": 
"hasBias", "type": "boolean", "default": false }, + { "name": "activationType", "type": "ActivationType" }, + { "name": "in_channel", "type": "int64", "default": 0 }, + { "name": "out_channel", "type": "int64", "default": 0 }, + { "name": "kernel_size", "type": "int64[]", "default": 0 }, + { "name": "pad_mode", "type": "PadMode", "default": "PAD" }, + { "name": "pad_list", "type": "int64[]", "default": 0 }, + { "name": "stride", "type": "int64[]", "default": 0 }, + { "name": "dilation", "type": "int64[]", "default": 0 }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ], + "inputs": [ + { "name": "dy" }, + { "name": "x" } + ], + "outputs": [ + { "name": "dw" } + ] + }, + { + "name": "DeDepthwiseConv2D", + "category": "Layer", + "attributes": [ + { "name": "format", "type": "Format" }, + { "name": "channelIn", "type": "int32", "default": 0 }, + { "name": "channelMultipiler", "type": "int32", "default": 0 }, + { "name": "kernelW", "type": "int32", "default": 0 }, + { "name": "kernelH", "type": "int32", "default": 0 }, + { "name": "strideW", "type": "int32", "default": 0 }, + { "name": "strideH", "type": "int32", "default": 0 }, + { "name": "dilateW", "type": "int32", "default": 0 }, + { "name": "dilateH", "type": "int32", "default": 0 }, + { "name": "padMode", "type": "PadMode" }, + { "name": "padUp", "type": "int32", "default": 0 }, + { "name": "padDown", "type": "int32", "default": 0 }, + { "name": "padLeft", "type": "int32", "default": 0 }, + { "name": "padRight", "type": "int32", "default": 0 }, + { "name": "hasBias", "type": "boolean", "default": false }, + { "name": "activationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Depend" + }, + { + "name": "DepthToSpace", + "attributes": [ + { "name": "blockSize", "type": "int32", "default": 0 }, + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "block_size", "type": "int64", "default": 0 }, + { "name": "mode", "type": "string", "default": null } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "DepthwiseConv2D", + "category": "Layer", + "attributes": [ + { "name": "format", "type": "Format" }, + { "name": "channelIn", "type": "int32", "default": 0 }, + { "name": "channelMultipiler", "type": "int32", "default": 0 }, + { "name": "kernelW", "type": "int32", "default": 0 }, + { "name": "kernelH", "type": "int32", "default": 0 }, + { "name": "strideW", "type": "int32", "default": 0 }, + { "name": "strideH", "type": "int32", "default": 0 }, + { "name": "dilateW", "type": "int32", "default": 0 }, + { "name": "dilateH", "type": "int32", "default": 0 }, + { "name": "padMode", "type": "PadMode" }, + { "name": "padUp", "type": "int32", "default": 0 }, + { "name": "padDown", "type": "int32", "default": 0 }, + { "name": "padLeft", "type": "int32", "default": 0 }, + { "name": "padRight", "type": "int32", "default": 0 }, + { "name": "hasBias", "type": "boolean", "default": false }, + { "name": "activationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "DetectionPostProcess", + "attributes": [ + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "inputSize", "type": "int32", "default": 0 }, + { "name": "hScale", "type": "float32", "default": 0 
}, + { "name": "wScale", "type": "float32", "default": 0 }, + { "name": "xScale", "type": "float32", "default": 0 }, + { "name": "yScale", "type": "float32", "default": 0 }, + { "name": "NmsIouThreshold", "type": "float32", "default": 0 }, + { "name": "NmsScoreThreshold", "type": "float32", "default": 0 }, + { "name": "MaxDetections", "type": "int64", "default": 0 }, + { "name": "DetectionsPerClass", "type": "int64", "default": 0 }, + { "name": "MaxClassesPerDetection", "type": "int64", "default": 0 }, + { "name": "NumClasses", "type": "int64", "default": 0 }, + { "name": "UseRegularNms", "type": "boolean", "default": false }, + { "name": "OutQuantized", "type": "boolean", "default": false }, + { "name": "input_size", "type": "int64", "default": 0 }, + { "name": "scale", "type": "float32[]", "default": 0 }, + { "name": "nms_iou_threshold", "type": "float32", "default": 0 }, + { "name": "nms_score_threshold", "type": "float32", "default": 0 }, + { "name": "max_detections", "type": "int64", "default": 0 }, + { "name": "detections_per_class", "type": "int64", "default": 0 }, + { "name": "max_classes_per_detection", "type": "int64", "default": 0 }, + { "name": "num_classes", "type": "int64", "default": 0 }, + { "name": "use_regular_nms", "type": "boolean", "default": false }, + { "name": "out_quantized", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "box" }, + { "name": "score" }, + { "name": "anchor" } + ], + "outputs": [ + { "name": "box" }, + { "name": "class" }, + { "name": "score" }, + { "name": "num" } + ] + }, + { + "name": "Div", + "attributes": [ + { "name": "activationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "DivFusion", + "attributes": [ + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ] + }, + { + "name": "DivGrad", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "dx1" }, + { "name": "dx2" } + ] + }, + { + "name": "Dropout", + "category": "Layer", + "attributes": [ + { "name": "ratio", "type": "float32", "default": 0.5 }, + { "name": "keep_prob", "type": "float32", "default": 0.5 } + ] + }, + { + "name": "DropoutGrad", + "attributes": [ + { "name": "ratio", "type": "float32", "default": 0.5 }, + { "name": "keep_prob", "type": "float32", "default": 0 } + ] + }, + { + "name": "DynamicQuant", + "attributes": [ + { "name": "symmetric", "type": "boolean", "default": false }, + { "name": "dst_type", "type": "int64", "default": 32 }, + { "name": "activation_channel", "type": "boolean", "default": false }, + { "name": "prefer_axis", "type": "int64", "default": 0 }, + { "name": "transpose", "type": "boolean", "default": false } + ] + }, + { + "name": "Eltwise", + "attributes": [ + { "name": "mode", "type": "EltwiseMode", "default": "PROD" } + ], + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "Elu", + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "EmbeddingLookup", + "attributes": [ + { "name": "maxNorm", "type": "float32", "default": 0 } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "EmbeddingLookupFusion", + "attributes": [ + { "name": "max_norm", "type": "float32", "default": 0 } + ] + }, + { + "name": "EmbeddingLookupSparse", + "attributes": 
[ + { "name": "spIds", "type": "int32[]" }, + { "name": "spWeights", "type": "float32[]" }, + { "name": "maxNortm", "type": "float32", "default": 0 } + ] + }, + { + "name": "Equal", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "Erf" + }, + { + "name": "Exp", + "attributes": [ + { "name": "base", "type": "float32", "default": -1 }, + { "name": "scale", "type": "float32", "default": 1 }, + { "name": "shift", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "ExpandDims", + "attributes": [ + { "name": "dim", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ExpFusion", + "attributes": [ + { "name": "base", "type": "float32", "default": -1 }, + { "name": "scale", "type": "float32", "default": 1 }, + { "name": "shift", "type": "float32", "default": 0 } + ] + }, + { + "name": "FakeQuantWithMinMax", + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "FakeQuantWithMinMaxPerChannel", + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "FakeQuantWithMinMaxVars", + "attributes": [ + { "name": "narrowRange", "type": "boolean", "default": false }, + { "name": "numBits", "type": "int32", "default": 0 }, + { "name": "num_bits", "type": "int64", "default": 0 }, + { "name": "narrow_range", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "FakeQuantWithMinMaxVarsPerChannel", + "attributes": [ + { "name": "num_bits", "type": "int64", "default": 0 }, + { "name": "narrow_range", "type": "boolean", "default": false } + ] + }, + { + "name": "FftImag" + }, + { + "name": "FftReal" + }, + { + "name": "Fill", + "attributes": [ + { "name": "dims", "type": "int32[]" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Flatten", + "attributes": [ + { "name": "axis", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "FlattenGrad" + }, + { + "name": "Floor", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "FloorDiv", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "FloorMod", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "FormatTranspose", + "attributes": [ + { "name": "src_format", "type": "Format", "default": "NHWC" }, + { "name": "dst_format", "type": "Format", "default": "NHWC" } + ] + }, + { + "name": "FullConnection", + "category": "Layer", + "attributes": [ + { "name": "hasBias", "type": "boolean", "default": false }, + { "name": "axis", "type": "int64", "default": 0 }, + { "name": "useAxis", "type": "boolean", "default": false }, + { "name": "activationType", "type": "ActivationType" }, + { "name": "has_bias", "type": "boolean", "default": false }, + { "name": "use_axis", "type": "boolean", "default": false }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "FusedBatchNorm", + "category": "Normalization", + "attributes": [ + { "name": 
"epsilon", "type": "float32", "default": 0.00001 }, + { "name": "momentum", "type": "float32", "default": 0.9 }, + { "name": "spatial", "type": "int32", "default": 1 }, + { "name": "mode", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "scale" }, + { "name": "bias" }, + { "name": "mean" }, + { "name": "var" } + ], + "outputs": [ + { "name": "output" }, + { "name": "scale" }, + { "name": "bias" }, + { "name": "mean" }, + { "name": "var" } + ] + }, + { + "name": "Gather", + "category": "Transform", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "batchDims", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "indices" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "GatherD" + }, + { + "name": "GatherNd", + "attributes": [ + { "name": "batchDims", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "indices" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "GenOP", + "attributes": [ + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" }, + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "min_val", "type": "float32", "default": 0 }, + { "name": "max_val", "type": "float32", "default": 0 }, + { "name": "is_training", "type": "boolean", "default": false }, + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "kernel_size", "type": "int64[]", "default": 0 }, + { "name": "stride", "type": "int64[]", "default": 0 }, + { "name": "dilation", "type": "int64[]", "default": 0 }, + { "name": "pad_mode", "type": "PadMode", "default": "PAD" }, + { "name": "pad_list", "type": "int64[]", "default": 0 }, + { "name": "mode", "type": "int64", "default": 0 }, + { "name": "group", "type": "int64", "default": 0 }, + { "name": "in_channel", "type": "int64", "default": 0 }, + { "name": "out_channel", "type": "int64", "default": 0 }, + { "name": "eltwise_mode", "type": "EltwiseMode", "default": "PROD" }, + { "name": "has_bias", "type": "boolean", "default": false }, + { "name": "use_axis", "type": "boolean", "default": false }, + { "name": "axis", "type": "int64", "default": 0 }, + { "name": "epsilon", "type": "float32", "default": 0.0001 }, + { "name": "momentum", "type": "float32", "default": 0.9 }, + { "name": "transpose_a", "type": "boolean", "default": false }, + { "name": "transpose_b", "type": "boolean", "default": false }, + { "name": "pad", "type": "int64[]", "default": 0 }, + { "name": "round_mode", "type": "RoundMode", "default": "FLOOR" }, + { "name": "global", "type": "boolean", "default": false }, + { "name": "channel_shared", "type": "boolean", "default": false }, + { "name": "axes", "type": "int64[]", "default": 0 }, + { "name": "keep_dims", "type": "boolean", "default": false }, + { "name": "reduce_mode", "type": "ReduceMode", "default": "ReduceMean" }, + { "name": "reduce_to_end", "type": "boolean", "default": false }, + { "name": "coeff", "type": "float32", "default": 0 } + ] + }, + { + "name": "GLU", + "attributes": [ + { "name": "axis", "type": "int64", "default": -1 } + ] + }, + { + "name": "Greater", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "GreaterEqual", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "GroupConv2DGradInput", + "attributes": [ + { "name": "format", "type": "Format" }, + { "name": "group", "type": 
"int32", "default": 0 }, + { "name": "channelIn", "type": "int32", "default": 0 }, + { "name": "channelOut", "type": "int32", "default": 0 }, + { "name": "kernelW", "type": "int32", "default": 0 }, + { "name": "kernelH", "type": "int32", "default": 0 }, + { "name": "strideW", "type": "int32", "default": 0 }, + { "name": "strideH", "type": "int32", "default": 0 }, + { "name": "dilateW", "type": "int32", "default": 0 }, + { "name": "dilateH", "type": "int32", "default": 0 }, + { "name": "padMode", "type": "PadMode" }, + { "name": "padUp", "type": "int32", "default": 0 }, + { "name": "padDown", "type": "int32", "default": 0 }, + { "name": "padLeft", "type": "int32", "default": 0 }, + { "name": "padRight", "type": "int32", "default": 0 }, + { "name": "hasBias", "type": "boolean", "default": false }, + { "name": "activationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "dy" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "dx" } + ] + }, + { + "name": "GroupNormFusion", + "attributes": [ + { "name": "num_groups", "type": "int64", "default": 0 }, + { "name": "epsilon", "type": "float32", "default": 0.00001 }, + { "name": "affine", "type": "boolean", "default": true } + ] + }, + { + "name": "GRU", + "attributes": [ + { "name": "bidirectional", "type": "boolean", "default": false } + ] + }, + { + "name": "HashtableLookup", + "inputs": [ + { "name": "input" }, + { "name": "key" }, + { "name": "value" } + ], + "outputs": [ + { "name": "output" }, + { "name": "hit" } + ] + }, + { + "name": "Identity", + "category": "Control" + }, + { + "name": "If" + }, + { + "name": "InstanceNorm", + "category": "Normalization", + "attributes": [ + { "name": "epsilon", "type": "float32", "default": 0.00001 } + ], + "inputs": [ + { "name": "src" }, + { "name": "scale" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "InvertPermutation" + }, + { + "name": "IsFinite" + }, + { + "name": "L2Norm", + "category": "Normalization", + "attributes": [ + { "name": "axis", "type": "int32[]" }, + { "name": "epsilon", "type": "float32", "default": 0 }, + { "name": "ActivationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "L2NormalizeFusion", + "attributes": [ + { "name": "axis", "type": "int64[]", "default": 0 }, + { "name": "epsilon", "type": "float32", "default": 0 }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ] + }, + { + "name": "LayerNorm", + "category": "Normalization", + "attributes": [ + { "name": "normalizedShape", "type": "int32[]" }, + { "name": "epsilon", "type": "float32", "default": 0.00001 }, + { "name": "elementwiseAffine", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "src" }, + { "name": "gamma" }, + { "name": "beta" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LayerNormFusion", + "attributes": [ + { "name": "begin_norm_axis", "type": "int64", "default": 0 }, + { "name": "epsilon", "type": "float32", "default": 0.00001 }, + { "name": "elementwise_affine", "type": "boolean", "default": false }, + { "name": "begin_params_axis", "type": "int64", "default": 0 } + ] + }, + { + "name": "LayerNormGrad", + "attributes": [ + { "name": "begin_norm_axis", "type": "int64", "default": 0 }, + { "name": "begin_params_axis", "type": "int64", "default": 0 } + ] + }, + { + "name": "LeakyRelu", + "attributes": [ + { "name": "negative_slope", "type": "float32", "default": 0 } + ] + 
}, + { + "name": "LeakyReLU", + "category": "Activation", + "attributes": [ + { "name": "negativeSlope", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Less", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "LessEqual", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "LinSpace" + }, + { + "name": "LocalResponseNormalization", + "category": "Normalization", + "attributes": [ + { "name": "depth_radius", "type": "int32", "default": 0 }, + { "name": "bias", "type": "float32", "default": 0 }, + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Log", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Log1p" + }, + { + "name": "LogGrad", + "inputs": [ + { "name": "dy" }, + { "name": "in_x" } + ], + "outputs": [ + { "name": "dx" } + ] + }, + { + "name": "LogicalAnd", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "LogicalNot", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "LogicalOr", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "LogicalXor", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "LogSoftmax", + "attributes": [ + { "name": "axis", "type": "int64", "default": 0 } + ] + }, + { + "name": "Loop", + "attributes": [ + { "name": "subGraphIndex", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LpNormalization", + "category": "Normalization", + "attributes": [ + { "name": "axis", "type": "int64", "default": 0 }, + { "name": "p", "type": "int64", "default": 0 } + ] + }, + { + "name": "Lrn", + "category": "Normalization", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0.0001 }, + { "name": "beta", "type": "float32", "default": 0.75 }, + { "name": "bias", "type": "float32", "default": 1 }, + { "name": "size", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LRN", + "attributes": [ + { "name": "depth_radius", "type": "int64", "default": 0 }, + { "name": "bias", "type": "float32", "default": 0 }, + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 }, + { "name": "norm_region", "type": "string", "default": null } + ] + }, + { + "name": "LshProjection", + "attributes": [ + { "name": "type", "type": "LshProjectionType", "default": "UNKNOWN" } + ], + "inputs": [ + { "name": "hash" }, + { "name": "in_data" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Lstm", + "category": "Layer", + "attributes": [ + { "name": "bidirection", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight_i" }, + { "name": "weight_h" }, + { "name": "bias" }, + { "name": "hidden_state" }, + { "name": "cell_state" } + ], + "outputs": [ + { "name": "output" }, + { "name": "hidden_state" }, + { "name": "cell_state" } + ] + }, + 
{ + "name": "LSTM", + "attributes": [ + { "name": "bidirectional", "type": "boolean", "default": false }, + { "name": "has_bias", "type": "boolean", "default": false }, + { "name": "input_size", "type": "int64", "default": 0 }, + { "name": "hidden_size", "type": "int64", "default": 0 }, + { "name": "num_layers", "type": "int64", "default": 0 }, + { "name": "num_directions", "type": "int64", "default": 0 }, + { "name": "dropout", "type": "float32", "default": 0 }, + { "name": "zoneout_cell", "type": "float32", "default": 0 }, + { "name": "zoneout_hidden", "type": "float32", "default": 0 } + ] + }, + { + "name": "LSTMGrad", + "attributes": [ + { "name": "bidirectional", "type": "boolean", "default": false }, + { "name": "has_bias", "type": "boolean", "default": false }, + { "name": "input_size", "type": "int64", "default": 0 }, + { "name": "hidden_size", "type": "int64", "default": 0 }, + { "name": "num_layers", "type": "int64", "default": 0 }, + { "name": "num_directions", "type": "int64", "default": 0 }, + { "name": "dropout", "type": "float32", "default": 0 }, + { "name": "zoneout_cell", "type": "float32", "default": 0 }, + { "name": "zoneout_hidden", "type": "float32", "default": 0 } + ] + }, + { + "name": "LSTMGradData", + "attributes": [ + { "name": "bidirectional", "type": "boolean", "default": false }, + { "name": "has_bias", "type": "boolean", "default": false }, + { "name": "input_size", "type": "int64", "default": 0 }, + { "name": "hidden_size", "type": "int64", "default": 0 }, + { "name": "num_layers", "type": "int64", "default": 0 }, + { "name": "num_directions", "type": "int64", "default": 0 }, + { "name": "dropout", "type": "float32", "default": 0 }, + { "name": "zoneout_cell", "type": "float32", "default": 0 }, + { "name": "zoneout_hidden", "type": "float32", "default": 0 } + ] + }, + { + "name": "LSTMGradWeight", + "attributes": [ + { "name": "bidirectional", "type": "boolean", "default": false }, + { "name": "has_bias", "type": "boolean", "default": false }, + { "name": "input_size", "type": "int64", "default": 0 }, + { "name": "hidden_size", "type": "int64", "default": 0 }, + { "name": "num_layers", "type": "int64", "default": 0 }, + { "name": "num_directions", "type": "int64", "default": 0 }, + { "name": "dropout", "type": "float32", "default": 0 }, + { "name": "zoneout_cell", "type": "float32", "default": 0 }, + { "name": "zoneout_hidden", "type": "float32", "default": 0 } + ] + }, + { + "name": "make_tuple" + }, + { + "name": "MakeTuple" + }, + { + "name": "MatMul", + "attributes": [ + { "name": "broadcast", "type": "boolean", "default": false }, + { "name": "transposeA", "type": "boolean", "default": false }, + { "name": "transposeB", "type": "boolean", "default": false }, + { "name": "transpose_a", "type": "boolean", "default": false }, + { "name": "transpose_b", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "A" }, + { "name": "B" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "MatMulFusion", + "attributes": [ + { "name": "transpose_a", "type": "boolean", "default": false }, + { "name": "transpose_b", "type": "boolean", "default": false }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ], + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "MatrixDiag", + "attributes": [ + { "name": "k", "type": "int32", "default": 0 }, + { "name": "numRows", "type": "int32", "default": 0 }, + { "name": "numCols", "type": 
"int32", "default": 0 }, + { "name": "paddingValue", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Maximum", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "MaximumGrad", + "attributes": [ + { "name": "grad_x", "type": "boolean", "default": false }, + { "name": "grad_y", "type": "boolean", "default": false } + ] + }, + { + "name": "MaxPoolFusion", + "category": "Pool", + "attributes": [ + { "name": "kernel_size", "type": "int64[]", "default": 0 }, + { "name": "strides", "type": "int64[]", "default": 0 }, + { "name": "pad", "type": "int64[]", "default": 0 }, + { "name": "pad_mode", "type": "PadMode", "default": "PAD" }, + { "name": "round_mode", "type": "RoundMode", "default": "FLOOR" }, + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "global", "type": "boolean", "default": false }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "MaxPoolGrad", + "attributes": [ + { "name": "kernel_size", "type": "int64[]", "default": 0 }, + { "name": "strides", "type": "int64[]", "default": 0 }, + { "name": "pad_mode", "type": "PadMode", "default": "PAD" }, + { "name": "format", "type": "Format", "default": "NCHW" } + ] + }, + { + "name": "Mean", + "attributes": [ + { "name": "axis", "type": "int32[]" }, + { "name": "keepDims", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Merge" + }, + { + "name": "Mfcc", + "attributes": [ + { "name": "freqUpperLimit", "type": "float32", "default": 0 }, + { "name": "freqLowerLimit", "type": "float32", "default": 0 }, + { "name": "filterBankChannelNum", "type": "int32", "default": 0 }, + { "name": "dctCoeffNum", "type": "int32", "default": 0 }, + { "name": "freq_upper_limit", "type": "float32", "default": 0 }, + { "name": "freq_lower_limit", "type": "float32", "default": 0 }, + { "name": "filter_bank_channel_num", "type": "int64", "default": 0 }, + { "name": "dct_coeff_num", "type": "int64", "default": 0 } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Min", + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Minimum", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "MinimumGrad", + "attributes": [ + { "name": "grad_x", "type": "boolean", "default": false }, + { "name": "grad_y", "type": "boolean", "default": false } + ] + }, + { + "name": "Mod" + }, + { + "name": "Mul", + "attributes": [ + { "name": "activationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "MulFold" + }, + { + "name": "MulFusion", + "attributes": [ + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ] + }, + { + "name": "MulGrad", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "dx1" }, + { "name": "dx2" } + ] + }, + { + "name": "Nchw2Nhwc", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Neg", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "NegGrad", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": 
"output" } + ] + }, + { + "name": "NetOutput" + }, + { + "name": "Nhwc2Nchw", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "NLLLoss", + "attributes": [ + { "name": "reduction", "type": "Reduction", "default": "REDUCTION_SUM" } + ] + }, + { + "name": "NLLLossGrad", + "attributes": [ + { "name": "reduction", "type": "Reduction", "default": "REDUCTION_SUM" } + ] + }, + { + "name": "NonMaxSuppression", + "attributes": [ + { "name": "centerPointBox", "type": "int32", "default": 0 }, + { "name": "center_point_box", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "box" }, + { "name": "score" }, + { "name": "max_output" }, + { "name": "iou_threshold" }, + { "name": "core_threshold" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "NonZero" + }, + { + "name": "NotEqual", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "OneHot", + "attributes": [ + { "name": "axis", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "indices" }, + { "name": "depth" }, + { "name": "on_value" }, + { "name": "off_value" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "OnesLike" + }, + { + "name": "OnnxInt8Dequantize" + }, + { + "name": "OnnxInt8Quantize" + }, + { + "name": "Pad", + "category": "Tensor", + "attributes": [ + { "name": "paddings", "type": "int32[]" }, + { "name": "paddingMode", "type": "PaddingMode" }, + { "name": "constantValue", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "paddings" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "PadFusion", + "attributes": [ + { "name": "paddings", "type": "Vec2D" }, + { "name": "padding_mode", "type": "PaddingMode", "default": "CONSTANT" }, + { "name": "constant_value", "type": "float32", "default": 0 } + ] + }, + { + "name": "Partial", + "attributes": [ + { "name": "subGraphIndex", "type": "int32", "default": 0 } + ] + }, + { + "name": "PartialFusion", + "attributes": [ + { "name": "sub_graph_index", "type": "int64", "default": 0 } + ] + }, + { + "name": "Permute", + "attributes": [ + { "name": "order", "type": "int64[]" } + ] + }, + { + "name": "Pooling", + "category": "Pool", + "attributes": [ + { "name": "format", "type": "Format" }, + { "name": "poolingMode", "type": "PoolMode" }, + { "name": "global", "type": "boolean", "default": false }, + { "name": "windowW", "type": "int32", "default": 0 }, + { "name": "windowH", "type": "int32", "default": 0 }, + { "name": "strideW", "type": "int32", "default": 0 }, + { "name": "strideH", "type": "int32", "default": 0 }, + { "name": "padMode", "type": "PadMode" }, + { "name": "padUp", "type": "int32", "default": 0 }, + { "name": "padDown", "type": "int32", "default": 0 }, + { "name": "padLeft", "type": "int32", "default": 0 }, + { "name": "padRight", "type": "int32", "default": 0 }, + { "name": "roundMode", "type": "RoundMode" }, + { "name": "activationType", "type": "ActivationType" }, + { "name": "avgMode", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "PoolingGrad", + "attributes": [ + { "name": "format", "type": "Format" }, + { "name": "poolingMode", "type": "PoolMode" }, + { "name": "global", "type": "boolean", "default": false }, + { "name": "windowW", "type": "int32", "default": 0 }, + { "name": "windowH", "type": "int32", "default": 0 }, + { "name": "strideW", "type": "int32", 
"default": 0 }, + { "name": "strideH", "type": "int32", "default": 0 }, + { "name": "padMode", "type": "PadMode" }, + { "name": "padUp", "type": "int32", "default": 0 }, + { "name": "padDown", "type": "int32", "default": 0 }, + { "name": "padLeft", "type": "int32", "default": 0 }, + { "name": "padRight", "type": "int32", "default": 0 }, + { "name": "roundMode", "type": "RoundMode" } + ], + "inputs": [ + { "name": "input" }, + { "name": "dx" }, + { "name": "dy" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Power", + "attributes": [ + { "name": "base", "type": "float32", "default": 0 }, + { "name": "scale", "type": "float32", "default": 0 }, + { "name": "shift", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "PowerGrad", + "attributes": [ + { "name": "power", "type": "float32", "default": 0 }, + { "name": "scale", "type": "float32", "default": 0 }, + { "name": "shift", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "dy" }, + { "name": "x" } + ], + "outputs": [ + { "name": "dx" } + ] + }, + { + "name": "PowFusion", + "attributes": [ + { "name": "scale", "type": "float32", "default": 1 }, + { "name": "shift", "type": "float32", "default": 0 } + ] + }, + { + "name": "Prelu", + "category": "Activation", + "attributes": [ + { "name": "channelShared", "type": "boolean", "default": false }, + { "name": "slope", "type": "float32[]" } + ], + "inputs": [ + { "name": "input" }, + { "name": "negative_slope" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "PReLUFusion", + "attributes": [ + { "name": "channel_shared", "type": "boolean", "default": false } + ] + }, + { + "name": "PriorBox", + "attributes": [ + { "name": "min_sizes", "type": "int64[]", "default": 0 }, + { "name": "max_sizes", "type": "int64[]", "default": 0 }, + { "name": "aspect_ratios", "type": "float32[]", "default": 0 }, + { "name": "variances", "type": "float32[]", "default": 0 }, + { "name": "image_size_w", "type": "int64", "default": 0 }, + { "name": "image_size_h", "type": "int64", "default": 0 }, + { "name": "step_w", "type": "float32", "default": 0 }, + { "name": "step_h", "type": "float32", "default": 0 }, + { "name": "clip", "type": "boolean", "default": true }, + { "name": "flip", "type": "boolean", "default": true }, + { "name": "offset", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "fmap" }, + { "name": "image" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Proposal", + "attributes": [ + { "name": "feat_stride", "type": "float32", "default": 0 }, + { "name": "base_size", "type": "float32", "default": 0 }, + { "name": "min_size", "type": "float32", "default": 0 }, + { "name": "ratio", "type": "float32[]" }, + { "name": "scale", "type": "float32[]" }, + { "name": "pre_nms_topn", "type": "int32", "default": 0 }, + { "name": "post_nms_topn", "type": "int32", "default": 0 }, + { "name": "nms_thresh", "type": "float32", "default": 0 } + ] + }, + { + "name": "QuantDTypeCast", + "attributes": [ + { "name": "srcT", "type": "int32", "default": 0 }, + { "name": "dstT", "type": "int32", "default": 0 }, + { "name": "src_t", "type": "int64", "default": 0 }, + { "name": "dst_t", "type": "int64", "default": 0 }, + { "name": "axis", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "RaggedRange" + }, + { + "name": "RandomNormal", + "attributes": [ + { 
"name": "seed", "type": "float32", "default": 0 }, + { "name": "mean", "type": "float32", "default": 0 }, + { "name": "scale", "type": "float32", "default": 0 } + ] + }, + { + "name": "RandomStandardNormal", + "attributes": [ + { "name": "seed", "type": "int64", "default": 0 }, + { "name": "seed2", "type": "int64", "default": 0 } + ] + }, + { + "name": "Range", + "attributes": [ + { "name": "dType", "type": "int32", "default": 0 }, + { "name": "start", "type": "int64", "default": 0 }, + { "name": "limit", "type": "int64", "default": 0 }, + { "name": "delta", "type": "int64", "default": 0 }, + { "name": "d_type", "type": "int64", "default": 0 } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Rank", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "RealDiv", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "Reciprocal" + }, + { + "name": "Reduce", + "attributes": [ + { "name": "axes", "type": "int32[]" }, + { "name": "keepDims", "type": "int32", "default": 0 }, + { "name": "mode", "type": "ReduceMode" }, + { "name": "reduceToEnd", "type": "boolean", "default": false }, + { "name": "coeff", "type": "float32", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ReduceFusion", + "attributes": [ + { "name": "keep_dims", "type": "boolean", "default": false }, + { "name": "mode", "type": "ReduceMode", "default": "ReduceMean" }, + { "name": "reduce_to_end", "type": "boolean", "default": false }, + { "name": "coeff", "type": "float32", "default": 0 } + ] + }, + { + "name": "ReduceScatter", + "attributes": [ + { "name": "group", "type": "string", "default": null }, + { "name": "mode", "type": "ReduceMode", "default": "ReduceMean" }, + { "name": "rank_size", "type": "int32", "default": 0 } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "attributes": [ + { "name": "format", "type": "Format" }, + { "name": "shape", "type": "int64[]" } + ], + "inputs": [ + { "name": "input" }, + { "name": "shape" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Resize", + "attributes": [ + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "method", "type": "ResizeMethod", "default": 0 }, + { "name": "newHeight", "type": "int64", "default": 0 }, + { "name": "newWidth", "type": "int64", "default": 0 }, + { "name": "alignCorners", "type": "boolean", "default": false }, + { "name": "preserveAspectRatio", "type": "boolean", "default": false }, + { "name": "coordinateTransformMode", "type": "CoordinateTransformMode" }, + { "name": "cubicCoeff", "type": "float32", "default": 0 }, + { "name": "excludeOutside", "type": "int32", "default": 0 }, + { "name": "extrapolationValue", "float32": "float32", "default": 0 }, + { "name": "nearestMode", "type": "NearestMode" }, + { "name": "new_height", "type": "int64", "default": 0 }, + { "name": "new_width", "type": "int64", "default": 0 }, + { "name": "preserve_aspect_ratio", "type": "boolean", "default": false }, + { "name": "coordinate_transform_mode", "type": "CoordinateTransformMode", "default": "ASYMMETRIC" }, + { "name": "cubic_coeff", "type": "float32", "default": 0 }, + { "name": "exclude_outside", "type": "int64", "default": 0 }, + { "name": "extrapolation_value", "type": "float32", "default": 0 }, + { "name": "nearest_mode", "type": "NearestMode", "default": "NORMAL" } + ], + "inputs": [ + { "name": "input" }, + { "name": 
"shape" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ResizeGrad", + "attributes": [ + { "name": "method", "type": "ResizeMethod", "default": "LINEAR" }, + { "name": "align_corners", "type": "boolean", "default": false } + ] + }, + { + "name": "Return" + }, + { + "name": "Reverse", + "attributes": [ + { "name": "axis", "type": "int32[]" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ReverseSequence", + "attributes": [ + { "name": "seqAxis", "type": "int32", "default": 0 }, + { "name": "batchAxis", "type": "int32", "default": 0 }, + { "name": "seq_dim", "type": "int64", "default": 0 }, + { "name": "batch_dim", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "seq_lengths" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ReverseV2", + "attributes": [ + { "name": "axis", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "Rfft", + "attributes": [ + { "name": "fftLength", "type": "int32", "default": 0 }, + { "name": "fft_length", "type": "int64", "default": 0 } + ] + }, + { + "name": "ROIPooling", + "attributes": [ + { "name": "pooledH", "type": "int32", "default": 0 }, + { "name": "pooledW", "type": "int32", "default": 0 }, + { "name": "scale", "type": "float32", "default": 0 }, + { "name": "pooled_h", "type": "int64", "default": 0 }, + { "name": "pooled_w", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "roi" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Round", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Rsqrt", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "RsqrtGrad" + }, + { + "name": "Scale", + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "activationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "input" }, + { "name": "scale" }, + { "name": "offset" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ScaleFusion", + "attributes": [ + { "name": "axis", "type": "int64", "default": 0 }, + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ] + }, + { + "name": "Scatter", + "inputs": [ + { "name": "shape" }, + { "name": "indices" }, + { "name": "update" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ScatterElements", + "attributes": [ + { "name": "axis", "type": "int64", "default": 0 } + ] + }, + { + "name": "ScatterNd" + }, + { + "name": "ScatterND", + "inputs": [ + { "name": "shape" }, + { "name": "indices" }, + { "name": "update" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ScatterNdUpdate" + }, + { + "name": "Select", + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Sgd", + "attributes": [ + { "name": "weightDecay", "type": "float32", "default": 0 }, + { "name": "dampening", "type": "float32", "default": 0 }, + { "name": "useNesterov", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "weight" }, + { "name": "gradient" }, + { "name": "learning_rate" }, + { "name": "accumulate" }, + { "name": "moment" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SGD", + "attributes": [ + { "name": "nesterov", "type": "boolean", "default": false }, + { "name": "dampening", "type": "float32", "default": 0 }, + { "name": "weight_decay", "type": "float32", "default": 0 } + ] + }, 
+ { + "name": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SigmoidCrossEntropyWithLogits" + }, + { + "name": "SigmoidCrossEntropyWithLogitsGrad" + }, + { + "name": "Sin", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Size" + }, + { + "name": "SkipGram", + "attributes": [ + { "name": "includeAllGrams", "type": "boolean", "default": false }, + { "name": "maxSkipSize", "type": "int32", "default": 0 }, + { "name": "ngramSize", "type": "int32", "default": 0 }, + { "name": "include_all_grams", "type": "boolean", "default": false }, + { "name": "max_skip_size", "type": "int64", "default": 0 }, + { "name": "ngram_size", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Slice", + "category": "Tensor", + "attributes": [ + { "name": "format", "type": "Format" }, + { "name": "axes", "type": "int32[]" }, + { "name": "begin", "type": "int32[]" }, + { "name": "size", "type": "int32[]" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SliceFusion", + "attributes": [ + { "name": "axes", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "SmoothL1Loss", + "attributes": [ + { "name": "beta", "type": "float32", "default": 0 } + ] + }, + { + "name": "SmoothL1LossGrad", + "attributes": [ + { "name": "beta", "type": "float32", "default": 0 } + ] + }, + { + "name": "Softmax", + "category": "Activation", + "attributes": [ + { "name": "axis", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "SoftMax", + "category": "Activation", + "attributes": [ + { "name": "axis", "type": "int32", "default": -1 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SoftmaxCrossEntropy", + "category": "Activation", + "attributes": [ + { "name": "axis", "type": "int32[]" } + ], + "inputs": [ + { "name": "input" }, + { "name": "labels" } + ], + "outputs": [ + { "name": "output" }, + { "name": "grads" } + ] + }, + { + "name": "SoftmaxCrossEntropyWithLogits", + "category": "Activation" + }, + { + "name": "SpaceToBatch", + "attributes": [ + { "name": "blockShape", "type": "int32[]" }, + { "name": "paddings", "type": "Vec2D" }, + { "name": "block_size", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "SpaceToBatchND", + "attributes": [ + { "name": "blockShape", "type": "int32[]" }, + { "name": "paddings", "type": "Vec2D" }, + { "name": "block_shape", "type": "int64[]", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SpaceToDepth", + "attributes": [ + { "name": "blockSize", "type": "int32", "default": 0 }, + { "name": "format", "type": "Format", "default": "NCHW" }, + { "name": "block_size", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SparseFillEmptyRows" + }, + { + "name": "SparseReshape" + }, + { + "name": "SparseSegmentSum" + }, + { + "name": "SparseSoftmaxCrossEntropyWithLogits", + "attributes": [ + { "name": "grad", "type": "boolean", "default": false }, + { "name": "is_grad", "type": "boolean", "default": false } + ] + }, + { + "name": "SparseToDense", + "attributes": [ + { "name": "validateIndices", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "sparse_indices" }, + { "name": "out_shape" }, + { "name": 
"sparse_values" }, + { "name": "default_value" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Splice", + "attributes": [ + { "name": "context", "type": "int64[]", "default": 0 }, + { "name": "forward_indexes", "type": "int64[]", "default": 0 }, + { "name": "output_dim", "type": "int64", "default": 0 } + ] + }, + { + "name": "Split", + "category": "Tensor", + "attributes": [ + { "name": "numberSplit", "type": "int32", "default": 0 }, + { "name": "sizeSplits", "type": "int32[]" }, + { "name": "splitDim", "type": "int32", "default": 0 }, + { "name": "output_num", "type": "int64", "default": 0 }, + { "name": "size_splits", "type": "int64[]", "default": 0 }, + { "name": "axis", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "SplitWithOverlap", + "attributes": [ + { "name": "number_split", "type": "int64", "default": 0 }, + { "name": "ratio", "type": "int64[]", "default": 0 }, + { "name": "extend_top", "type": "int64[]", "default": 0 }, + { "name": "extend_bottom", "type": "int64[]", "default": 0 }, + { "name": "split_dim", "type": "int64", "default": 0 }, + { "name": "stride", "type": "int64", "default": 0 }, + { "name": "pad_top", "type": "int64", "default": 0 }, + { "name": "trans_format", "type": "boolean", "default": false }, + { "name": "split_stride", "type": "int64", "default": 0 } + ] + }, + { + "name": "Sqrt", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SqrtGrad" + }, + { + "name": "Square", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "SquaredDifference", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "Squeeze", + "category": "Transform", + "attributes": [ + { "name": "axis", "type": "int64[]", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Stack", + "attributes": [ + { "name": "axis", "type": "int64", "default": 0 }, + { "name": "n", "type": "int32", "default": 0 }, + { "name": "isScale", "type": "int32[]" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "StridedSlice", + "category": "Tensor", + "attributes": [ + { "name": "beginMask", "type": "int32", "default": 0 }, + { "name": "endMask", "type": "int32", "default": 0 }, + { "name": "ellipsisMask", "type": "int32", "default": 0 }, + { "name": "newAxisMask", "type": "int32", "default": 0 }, + { "name": "shrinkAxisMask", "type": "int32", "default": 0 }, + { "name": "begin", "type": "int32[]" }, + { "name": "end", "type": "int32[]" }, + { "name": "stride", "type": "int32[]" }, + { "name": "isScale", "type": "int32[]" }, + { "name": "begin_mask", "type": "int64", "default": 0 }, + { "name": "end_mask", "type": "int64", "default": 0 }, + { "name": "ellipsis_mask", "type": "int64", "default": 0 }, + { "name": "new_axis_mask", "type": "int64", "default": 0 }, + { "name": "shrink_axis_mask", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "begins" }, + { "name": "ends" }, + { "name": "strides" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "StridedSliceGrad", + "attributes": [ + { "name": "begin_mask", "type": "int64", "default": 0 }, + { "name": "end_mask", "type": "int64", "default": 0 }, + { "name": "ellipsis_mask", "type": "int64", "default": 0 }, + { "name": "new_axis_mask", "type": "int64", "default": 0 }, + { "name": "shrink_axis_mask", 
"type": "int64", "default": 0 } + ] + }, + { + "name": "Sub", + "attributes": [ + { "name": "activationType", "type": "ActivationType" } + ], + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "SubFusion", + "attributes": [ + { "name": "activation_type", "type": "ActivationType", "default": "NO_ACTIVATION" } + ] + }, + { + "name": "SubGrad", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "dx1" }, + { "name": "dx2" } + ] + }, + { + "name": "Switch" + }, + { + "name": "SwitchLayer" + }, + { + "name": "Tan", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "TensorArray", + "attributes": [ + { "name": "dynamic_size", "type": "boolean", "default": false }, + { "name": "identical_element_shapes", "type": "boolean", "default": false }, + { "name": "element_shape", "type": "int32[]", "default": 0 }, + { "name": "data_type", "type": "int32", "default": 0 } + ] + }, + { + "name": "TensorArrayRead" + }, + { + "name": "TensorArrayWrite" + }, + { + "name": "TensorListFromTensor", + "attributes": [ + { "name": "element_dtype", "type": "int64", "default": 0 }, + { "name": "shape_type", "type": "int64", "default": 0 } + ] + }, + { + "name": "TensorListGetItem", + "attributes": [ + { "name": "elementDType", "type": "int32", "default": 0 }, + { "name": "element_dtype", "type": "int64", "default": 0 } + ] + }, + { + "name": "TensorListReserve", + "attributes": [ + { "name": "elementDType", "type": "int32", "default": 0 }, + { "name": "element_dtype", "type": "int64", "default": 0 }, + { "name": "shape_type", "type": "int64", "default": 0 } + ] + }, + { + "name": "TensorListSetItem", + "attributes": [ + { "name": "element_dtype", "type": "int64", "default": 0 } + ] + }, + { + "name": "TensorListStack", + "attributes": [ + { "name": "numElements", "type": "int32", "default": 0 }, + { "name": "elementDType", "type": "int32", "default": 0 }, + { "name": "num_elements", "type": "int64", "default": 0 }, + { "name": "element_dtype", "type": "int64", "default": 0 } + ] + }, + { + "name": "TensorScatterAdd" + }, + { + "name": "TfReduce", + "attributes": [ + { "name": "type", "type": "ReduceType" } + ] + }, + { + "name": "Tile", + "attributes": [ + { "name": "multiples", "type": "int32[]" }, + { "name": "dims", "type": "int32[]" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "TileFusion", + "attributes": [ + { "name": "dims", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "ToFormat", + "attributes": [ + { "name": "srcT", "type": "int32", "default": 0 }, + { "name": "dstT", "type": "int32", "default": 0 } + ] + }, + { + "name": "TopK", + "attributes": [ + { "name": "k", "type": "int32", "default": 0 }, + { "name": "sorted", "type": "boolean", "default": true } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output_data" }, + { "name": "output_index" } + ] + }, + { + "name": "TopKFusion", + "attributes": [ + { "name": "sorted", "type": "boolean", "default": true }, + { "name": "axis", "type": "int64", "default": 0 }, + { "name": "largest", "type": "int64", "default": 0 } + ] + }, + { + "name": "Transpose", + "category": "Transform", + "attributes": [ + { "name": "perm", "type": "int32[]" }, + { "name": "conjugate", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "perm" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": 
"Tril" + }, + { + "name": "Triu" + }, + { + "name": "TupleGetItem", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "UniformReal", + "attributes": [ + { "name": "seed", "type": "int64", "default": 0 }, + { "name": "seed2", "type": "int64", "default": 0 } + ] + }, + { + "name": "Unique", + "attributes": [ + { "name": "outType", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output0" }, + { "name": "output1" } + ] + }, + { + "name": "UnsortedSegmentSum", + "attributes": [ + { "name": "numSegments", "type": "int32", "default": 0 } + ] + }, + { + "name": "Unsqueeze", + "category": "Transform", + "attributes": [ + { "name": "axis", "type": "int64[]", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Unstack", + "attributes": [ + { "name": "num", "type": "int32", "default": 0 }, + { "name": "axis", "type": "int64", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "Upsample", + "category": "Data", + "attributes": [ + { "name": "mode", "type": "string" }, + { "name": "scales", "type": "float32[]" } + ], + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "Where", + "attributes": [ + { "name": "condition", "type": "boolean[]" } + ], + "inputs": [ + { "name": "input" }, + { "name": "input1" }, + { "name": "input2" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "While", + "attributes": [ + { "name": "condSubgraphIndex", "type": "int32", "default": 0 }, + { "name": "bodySubgraphIndex", "type": "int32", "default": 0 }, + { "name": "cond_subgraph_index", "type": "int64", "default": 0 }, + { "name": "body_subgraph_index", "type": "int64", "default": 0 } + ] + }, + { + "name": "ZerosLike", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } +] \ No newline at end of file diff --git a/mslite-schema.js b/mslite-schema.js new file mode 100644 index 00000000000..f33d7945b74 --- /dev/null +++ b/mslite-schema.js @@ -0,0 +1,4602 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const $root = flatbuffers.get('mslite'); + +$root.mindspore = $root.mindspore || {}; + +$root.mindspore.schema = $root.mindspore.schema || {}; + +$root.mindspore.schema.ResizeMethod = { + UNKNOWN: -1, + LINEAR: 0, + NEAREST: 1, + CUBIC: 2 +}; + +$root.mindspore.schema.CoordinateTransformMode = { + ASYMMETRIC: 0, + ALIGN_CORNERS: 1, + HALF_PIXEL: 2 +}; + +$root.mindspore.schema.NearestMode = { + NORMAL: 0, + ROUND_HALF_DOWN: 1, + ROUND_HALF_UP: 2, + FLOOR: 3, + CEIL: 4 +}; + +$root.mindspore.schema.Format = { + NCHW: 0, + NHWC: 1, + NHWC4: 2, + HWKC: 3, + HWCK: 4, + KCHW: 5, + CKHW: 6, + KHWC: 7, + CHWK: 8, + HW: 9, + HW4: 10, + NC: 11, + NC4: 12, + NC4HW4: 13, + NUM_OF_FORMAT: 14, + NCDHW: 15, + NWC: 16, + NCW: 17, + NC8HW8: 18 +}; + +$root.mindspore.schema.ActivationType = { + NO_ACTIVATION: 0, + RELU: 1, + SIGMOID: 2, + RELU6: 3, + ELU: 4, + LEAKY_RELU: 5, + ABS: 6, + RELU1: 7, + SOFTSIGN: 8, + SOFTPLUS: 9, + TANH: 10, + SELU: 11, + HSWISH: 12, + HSIGMOID: 13, + THRESHOLDRELU: 14, + LINEAR: 15, + HARD_TANH: 16, + SIGN: 17, + SWISH: 18, + GELU: 19, + FAST_GELU: 20, + UNKNOWN: 21 +}; + +$root.mindspore.schema.ReduceMode = { + ReduceMean: 0, + ReduceMax: 1, + ReduceMin: 2, + ReduceProd: 3, + ReduceSum: 4, + ReduceSumSquare: 5, + ReduceASum: 6, + ReduceAll: 7, + ReduceL2: 8 +}; + +$root.mindspore.schema.PoolMode = { + MAX_POOLING: 0, + MEAN_POOLING: 1 +}; + 
+$root.mindspore.schema.EltwiseMode = { + PROD: 0, + SUM: 1, + MAXIMUM: 2, + UNKNOWN: 3 +}; + +$root.mindspore.schema.PadMode = { + PAD: 0, + SAME: 1, + VALID: 2 +}; + +$root.mindspore.schema.RoundMode = { + FLOOR: 0, + CEIL: 1 +}; + +$root.mindspore.schema.PaddingMode = { + CONSTANT: 0, + REFLECT: 1, + SYMMETRIC: 2, + MODE_RESERVED: 3 +}; + +$root.mindspore.schema.LshProjectionType = { + UNKNOWN: 0, + SPARSE: 1, + DENSE: 2 +}; + +$root.mindspore.schema.Reduction = { + REDUCTION_SUM: 0, + MEAN: 1, + NONE: 2 +}; + +$root.mindspore.schema.Vec = class Vec { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Vec(); + $.data = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Vec(); + $.data = reader.array(json.data); + return $; + } +}; + +$root.mindspore.schema.Vec2D = class Vec2D { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Vec2D(); + $.data = reader.tableArray(position, 4, $root.mindspore.schema.Vec.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Vec2D(); + $.data = reader.objectArray(json.data, $root.mindspore.schema.Vec.decodeText); + return $; + } +}; + +$root.mindspore.schema.Attribute = class Attribute { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Attribute(); + $.name = reader.string_(position, 4, null); + $.data = reader.typedArray(position, 6, Uint8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Attribute(); + $.name = reader.value(json.name, null); + $.data = reader.typedArray(json.data, Uint8Array); + return $; + } +}; + +$root.mindspore.schema.PrimitiveType = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.mindspore.schema.Abs.decode(reader, position); + case 2: return $root.mindspore.schema.Activation.decode(reader, position); + case 3: return $root.mindspore.schema.ActivationGrad.decode(reader, position); + case 4: return $root.mindspore.schema.Adam.decode(reader, position); + case 5: return $root.mindspore.schema.AddFusion.decode(reader, position); + case 6: return $root.mindspore.schema.AdderFusion.decode(reader, position); + case 7: return $root.mindspore.schema.AddGrad.decode(reader, position); + case 8: return $root.mindspore.schema.AddN.decode(reader, position); + case 9: return $root.mindspore.schema.All.decode(reader, position); + case 10: return $root.mindspore.schema.ApplyMomentum.decode(reader, position); + case 11: return $root.mindspore.schema.ArgMaxFusion.decode(reader, position); + case 12: return $root.mindspore.schema.ArgMinFusion.decode(reader, position); + case 13: return $root.mindspore.schema.Assert.decode(reader, position); + case 14: return $root.mindspore.schema.Assign.decode(reader, position); + case 15: return $root.mindspore.schema.AssignAdd.decode(reader, position); + case 16: return $root.mindspore.schema.AudioSpectrogram.decode(reader, position); + case 17: return $root.mindspore.schema.AvgPoolFusion.decode(reader, position); + case 18: return $root.mindspore.schema.AvgPoolGrad.decode(reader, position); + case 19: return $root.mindspore.schema.BatchNorm.decode(reader, position); + case 20: return $root.mindspore.schema.BatchNormGrad.decode(reader, position); + case 21: return $root.mindspore.schema.BatchToSpace.decode(reader, position); + case 22: return $root.mindspore.schema.BatchToSpaceND.decode(reader, position); + case 23: return 
$root.mindspore.schema.BiasAdd.decode(reader, position); + case 24: return $root.mindspore.schema.BinaryCrossEntropy.decode(reader, position); + case 25: return $root.mindspore.schema.BinaryCrossEntropyGrad.decode(reader, position); + case 26: return $root.mindspore.schema.BiasAddGrad.decode(reader, position); + case 27: return $root.mindspore.schema.BroadcastTo.decode(reader, position); + case 28: return $root.mindspore.schema.Cast.decode(reader, position); + case 29: return $root.mindspore.schema.Ceil.decode(reader, position); + case 30: return $root.mindspore.schema.Clip.decode(reader, position); + case 31: return $root.mindspore.schema.Concat.decode(reader, position); + case 32: return $root.mindspore.schema.Attention.decode(reader, position); + case 33: return $root.mindspore.schema.Conv2DBackpropFilterFusion.decode(reader, position); + case 34: return $root.mindspore.schema.Conv2DBackpropInputFusion.decode(reader, position); + case 35: return $root.mindspore.schema.Conv2DFusion.decode(reader, position); + case 36: return $root.mindspore.schema.Conv2dTransposeFusion.decode(reader, position); + case 37: return $root.mindspore.schema.Cos.decode(reader, position); + case 38: return $root.mindspore.schema.ConstantOfShape.decode(reader, position); + case 39: return $root.mindspore.schema.Crop.decode(reader, position); + case 40: return $root.mindspore.schema.CustomExtractFeatures.decode(reader, position); + case 41: return $root.mindspore.schema.CustomNormalize.decode(reader, position); + case 42: return $root.mindspore.schema.CustomPredict.decode(reader, position); + case 43: return $root.mindspore.schema.DeConv2DGradFilter.decode(reader, position); + case 44: return $root.mindspore.schema.Depend.decode(reader, position); + case 45: return $root.mindspore.schema.DepthToSpace.decode(reader, position); + case 46: return $root.mindspore.schema.DetectionPostProcess.decode(reader, position); + case 47: return $root.mindspore.schema.DivFusion.decode(reader, position); + case 48: return $root.mindspore.schema.DivGrad.decode(reader, position); + case 49: return $root.mindspore.schema.Dropout.decode(reader, position); + case 50: return $root.mindspore.schema.DropoutGrad.decode(reader, position); + case 51: return $root.mindspore.schema.Elu.decode(reader, position); + case 52: return $root.mindspore.schema.Eltwise.decode(reader, position); + case 53: return $root.mindspore.schema.Equal.decode(reader, position); + case 54: return $root.mindspore.schema.EmbeddingLookupFusion.decode(reader, position); + case 55: return $root.mindspore.schema.ExpFusion.decode(reader, position); + case 56: return $root.mindspore.schema.ExpandDims.decode(reader, position); + case 57: return $root.mindspore.schema.FakeQuantWithMinMaxVars.decode(reader, position); + case 58: return $root.mindspore.schema.FakeQuantWithMinMaxVarsPerChannel.decode(reader, position); + case 59: return $root.mindspore.schema.FftReal.decode(reader, position); + case 60: return $root.mindspore.schema.FftImag.decode(reader, position); + case 61: return $root.mindspore.schema.Flatten.decode(reader, position); + case 62: return $root.mindspore.schema.FlattenGrad.decode(reader, position); + case 63: return $root.mindspore.schema.Floor.decode(reader, position); + case 64: return $root.mindspore.schema.FloorDiv.decode(reader, position); + case 65: return $root.mindspore.schema.FloorMod.decode(reader, position); + case 66: return $root.mindspore.schema.Fill.decode(reader, position); + case 67: return $root.mindspore.schema.FullConnection.decode(reader, 
position); + case 68: return $root.mindspore.schema.FusedBatchNorm.decode(reader, position); + case 69: return $root.mindspore.schema.Gather.decode(reader, position); + case 70: return $root.mindspore.schema.GatherNd.decode(reader, position); + case 71: return $root.mindspore.schema.Greater.decode(reader, position); + case 72: return $root.mindspore.schema.GreaterEqual.decode(reader, position); + case 73: return $root.mindspore.schema.HashtableLookup.decode(reader, position); + case 74: return $root.mindspore.schema.InstanceNorm.decode(reader, position); + case 75: return $root.mindspore.schema.LayerNormFusion.decode(reader, position); + case 76: return $root.mindspore.schema.LeakyRelu.decode(reader, position); + case 77: return $root.mindspore.schema.Less.decode(reader, position); + case 78: return $root.mindspore.schema.LessEqual.decode(reader, position); + case 79: return $root.mindspore.schema.Log.decode(reader, position); + case 80: return $root.mindspore.schema.LogGrad.decode(reader, position); + case 81: return $root.mindspore.schema.LogicalAnd.decode(reader, position); + case 82: return $root.mindspore.schema.LogicalNot.decode(reader, position); + case 83: return $root.mindspore.schema.LogicalOr.decode(reader, position); + case 84: return $root.mindspore.schema.LpNormalization.decode(reader, position); + case 85: return $root.mindspore.schema.LRN.decode(reader, position); + case 86: return $root.mindspore.schema.LshProjection.decode(reader, position); + case 87: return $root.mindspore.schema.LSTM.decode(reader, position); + case 88: return $root.mindspore.schema.L2NormalizeFusion.decode(reader, position); + case 89: return $root.mindspore.schema.MatMulFusion.decode(reader, position); + case 90: return $root.mindspore.schema.Maximum.decode(reader, position); + case 91: return $root.mindspore.schema.MaximumGrad.decode(reader, position); + case 92: return $root.mindspore.schema.MaxPoolFusion.decode(reader, position); + case 93: return $root.mindspore.schema.MaxPoolGrad.decode(reader, position); + case 94: return $root.mindspore.schema.SwitchLayer.decode(reader, position); + case 95: return $root.mindspore.schema.Mfcc.decode(reader, position); + case 96: return $root.mindspore.schema.Minimum.decode(reader, position); + case 97: return $root.mindspore.schema.MinimumGrad.decode(reader, position); + case 98: return $root.mindspore.schema.Mod.decode(reader, position); + case 99: return $root.mindspore.schema.MulFusion.decode(reader, position); + case 100: return $root.mindspore.schema.MulGrad.decode(reader, position); + case 101: return $root.mindspore.schema.Neg.decode(reader, position); + case 102: return $root.mindspore.schema.NegGrad.decode(reader, position); + case 103: return $root.mindspore.schema.NotEqual.decode(reader, position); + case 104: return $root.mindspore.schema.NonMaxSuppression.decode(reader, position); + case 105: return $root.mindspore.schema.OneHot.decode(reader, position); + case 106: return $root.mindspore.schema.OnesLike.decode(reader, position); + case 107: return $root.mindspore.schema.PadFusion.decode(reader, position); + case 108: return $root.mindspore.schema.PartialFusion.decode(reader, position); + case 109: return $root.mindspore.schema.PowerGrad.decode(reader, position); + case 110: return $root.mindspore.schema.PowFusion.decode(reader, position); + case 111: return $root.mindspore.schema.PriorBox.decode(reader, position); + case 112: return $root.mindspore.schema.PReLUFusion.decode(reader, position); + case 113: return 
$root.mindspore.schema.QuantDTypeCast.decode(reader, position); + case 114: return $root.mindspore.schema.Rank.decode(reader, position); + case 115: return $root.mindspore.schema.Range.decode(reader, position); + case 116: return $root.mindspore.schema.Reciprocal.decode(reader, position); + case 117: return $root.mindspore.schema.RealDiv.decode(reader, position); + case 118: return $root.mindspore.schema.ReduceFusion.decode(reader, position); + case 119: return $root.mindspore.schema.Reshape.decode(reader, position); + case 120: return $root.mindspore.schema.Resize.decode(reader, position); + case 121: return $root.mindspore.schema.ReverseSequence.decode(reader, position); + case 122: return $root.mindspore.schema.ReverseV2.decode(reader, position); + case 123: return $root.mindspore.schema.Rfft.decode(reader, position); + case 124: return $root.mindspore.schema.ROIPooling.decode(reader, position); + case 125: return $root.mindspore.schema.Round.decode(reader, position); + case 126: return $root.mindspore.schema.Rsqrt.decode(reader, position); + case 127: return $root.mindspore.schema.ScaleFusion.decode(reader, position); + case 128: return $root.mindspore.schema.ScatterNd.decode(reader, position); + case 129: return $root.mindspore.schema.SGD.decode(reader, position); + case 130: return $root.mindspore.schema.Shape.decode(reader, position); + case 131: return $root.mindspore.schema.SigmoidCrossEntropyWithLogits.decode(reader, position); + case 132: return $root.mindspore.schema.SigmoidCrossEntropyWithLogitsGrad.decode(reader, position); + case 133: return $root.mindspore.schema.Sin.decode(reader, position); + case 134: return $root.mindspore.schema.SkipGram.decode(reader, position); + case 135: return $root.mindspore.schema.SliceFusion.decode(reader, position); + case 136: return $root.mindspore.schema.SmoothL1Loss.decode(reader, position); + case 137: return $root.mindspore.schema.SmoothL1LossGrad.decode(reader, position); + case 138: return $root.mindspore.schema.Softmax.decode(reader, position); + case 139: return $root.mindspore.schema.SoftmaxCrossEntropyWithLogits.decode(reader, position); + case 140: return $root.mindspore.schema.SpaceToBatch.decode(reader, position); + case 141: return $root.mindspore.schema.SpaceToBatchND.decode(reader, position); + case 142: return $root.mindspore.schema.SpaceToDepth.decode(reader, position); + case 143: return $root.mindspore.schema.SparseSoftmaxCrossEntropyWithLogits.decode(reader, position); + case 144: return $root.mindspore.schema.SparseToDense.decode(reader, position); + case 145: return $root.mindspore.schema.Split.decode(reader, position); + case 146: return $root.mindspore.schema.Sqrt.decode(reader, position); + case 147: return $root.mindspore.schema.Squeeze.decode(reader, position); + case 148: return $root.mindspore.schema.Square.decode(reader, position); + case 149: return $root.mindspore.schema.SquaredDifference.decode(reader, position); + case 150: return $root.mindspore.schema.Stack.decode(reader, position); + case 151: return $root.mindspore.schema.StridedSlice.decode(reader, position); + case 152: return $root.mindspore.schema.SubFusion.decode(reader, position); + case 153: return $root.mindspore.schema.SubGrad.decode(reader, position); + case 154: return $root.mindspore.schema.Switch.decode(reader, position); + case 155: return $root.mindspore.schema.TensorListFromTensor.decode(reader, position); + case 156: return $root.mindspore.schema.TensorListGetItem.decode(reader, position); + case 157: return 
$root.mindspore.schema.TensorListReserve.decode(reader, position); + case 158: return $root.mindspore.schema.TensorListSetItem.decode(reader, position); + case 159: return $root.mindspore.schema.TensorListStack.decode(reader, position); + case 160: return $root.mindspore.schema.TileFusion.decode(reader, position); + case 161: return $root.mindspore.schema.TopKFusion.decode(reader, position); + case 162: return $root.mindspore.schema.Transpose.decode(reader, position); + case 163: return $root.mindspore.schema.Unique.decode(reader, position); + case 164: return $root.mindspore.schema.UnsortedSegmentSum.decode(reader, position); + case 165: return $root.mindspore.schema.Unsqueeze.decode(reader, position); + case 166: return $root.mindspore.schema.Unstack.decode(reader, position); + case 167: return $root.mindspore.schema.LSTMGrad.decode(reader, position); + case 168: return $root.mindspore.schema.Where.decode(reader, position); + case 169: return $root.mindspore.schema.ZerosLike.decode(reader, position); + case 170: return $root.mindspore.schema.Select.decode(reader, position); + case 171: return $root.mindspore.schema.ScatterNdUpdate.decode(reader, position); + case 172: return $root.mindspore.schema.GRU.decode(reader, position); + case 173: return $root.mindspore.schema.NonZero.decode(reader, position); + case 174: return $root.mindspore.schema.InvertPermutation.decode(reader, position); + case 175: return $root.mindspore.schema.Size.decode(reader, position); + case 176: return $root.mindspore.schema.RandomStandardNormal.decode(reader, position); + case 177: return $root.mindspore.schema.CropAndResize.decode(reader, position); + case 178: return $root.mindspore.schema.Erf.decode(reader, position); + case 179: return $root.mindspore.schema.StridedSliceGrad.decode(reader, position); + case 180: return $root.mindspore.schema.IsFinite.decode(reader, position); + case 181: return $root.mindspore.schema.LinSpace.decode(reader, position); + case 182: return $root.mindspore.schema.UniformReal.decode(reader, position); + case 183: return $root.mindspore.schema.AbsGrad.decode(reader, position); + case 184: return $root.mindspore.schema.RsqrtGrad.decode(reader, position); + case 185: return $root.mindspore.schema.SqrtGrad.decode(reader, position); + case 186: return $root.mindspore.schema.LayerNormGrad.decode(reader, position); + case 187: return $root.mindspore.schema.ResizeGrad.decode(reader, position); + case 188: return $root.mindspore.schema.Splice.decode(reader, position); + case 189: return $root.mindspore.schema.LogSoftmax.decode(reader, position); + case 190: return $root.mindspore.schema.Call.decode(reader, position); + case 191: return $root.mindspore.schema.Custom.decode(reader, position); + case 192: return $root.mindspore.schema.CumSum.decode(reader, position); + case 193: return $root.mindspore.schema.SplitWithOverlap.decode(reader, position); + case 194: return $root.mindspore.schema.GenOP.decode(reader, position); + case 195: return $root.mindspore.schema.RaggedRange.decode(reader, position); + case 196: return $root.mindspore.schema.GLU.decode(reader, position); + case 197: return $root.mindspore.schema.TensorArray.decode(reader, position); + case 198: return $root.mindspore.schema.TensorArrayRead.decode(reader, position); + case 199: return $root.mindspore.schema.TensorArrayWrite.decode(reader, position); + case 200: return $root.mindspore.schema.Affine.decode(reader, position); + case 201: return $root.mindspore.schema.AllGather.decode(reader, position); + case 202: return 
$root.mindspore.schema.ReduceScatter.decode(reader, position); + case 203: return $root.mindspore.schema.DynamicQuant.decode(reader, position); + case 204: return $root.mindspore.schema.LSTMGradData.decode(reader, position); + case 205: return $root.mindspore.schema.LSTMGradWeight.decode(reader, position); + case 206: return $root.mindspore.schema.RandomNormal.decode(reader, position); + case 207: return $root.mindspore.schema.NLLLoss.decode(reader, position); + case 208: return $root.mindspore.schema.NLLLossGrad.decode(reader, position); + case 209: return $root.mindspore.schema.FormatTranspose.decode(reader, position); + case 210: return $root.mindspore.schema.GatherD.decode(reader, position); + case 211: return $root.mindspore.schema.GroupNormFusion.decode(reader, position); + case 212: return $root.mindspore.schema.Log1p.decode(reader, position); + case 213: return $root.mindspore.schema.TensorScatterAdd.decode(reader, position); + case 214: return $root.mindspore.schema.SparseFillEmptyRows.decode(reader, position); + case 215: return $root.mindspore.schema.SparseReshape.decode(reader, position); + case 216: return $root.mindspore.schema.SparseSegmentSum.decode(reader, position); + case 217: return $root.mindspore.schema.ScatterElements.decode(reader, position); + case 218: return $root.mindspore.schema.Triu.decode(reader, position); + case 219: return $root.mindspore.schema.Tril.decode(reader, position); + case 220: return $root.mindspore.schema.AdamWeightDecay.decode(reader, position); + case 221: return $root.mindspore.schema.FillV2.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'Abs': return $root.mindspore.schema.Abs.decodeText(reader, json); + case 'Activation': return $root.mindspore.schema.Activation.decodeText(reader, json); + case 'ActivationGrad': return $root.mindspore.schema.ActivationGrad.decodeText(reader, json); + case 'Adam': return $root.mindspore.schema.Adam.decodeText(reader, json); + case 'AddFusion': return $root.mindspore.schema.AddFusion.decodeText(reader, json); + case 'AdderFusion': return $root.mindspore.schema.AdderFusion.decodeText(reader, json); + case 'AddGrad': return $root.mindspore.schema.AddGrad.decodeText(reader, json); + case 'AddN': return $root.mindspore.schema.AddN.decodeText(reader, json); + case 'All': return $root.mindspore.schema.All.decodeText(reader, json); + case 'ApplyMomentum': return $root.mindspore.schema.ApplyMomentum.decodeText(reader, json); + case 'ArgMaxFusion': return $root.mindspore.schema.ArgMaxFusion.decodeText(reader, json); + case 'ArgMinFusion': return $root.mindspore.schema.ArgMinFusion.decodeText(reader, json); + case 'Assert': return $root.mindspore.schema.Assert.decodeText(reader, json); + case 'Assign': return $root.mindspore.schema.Assign.decodeText(reader, json); + case 'AssignAdd': return $root.mindspore.schema.AssignAdd.decodeText(reader, json); + case 'AudioSpectrogram': return $root.mindspore.schema.AudioSpectrogram.decodeText(reader, json); + case 'AvgPoolFusion': return $root.mindspore.schema.AvgPoolFusion.decodeText(reader, json); + case 'AvgPoolGrad': return $root.mindspore.schema.AvgPoolGrad.decodeText(reader, json); + case 'BatchNorm': return $root.mindspore.schema.BatchNorm.decodeText(reader, json); + case 'BatchNormGrad': return $root.mindspore.schema.BatchNormGrad.decodeText(reader, json); + case 'BatchToSpace': return $root.mindspore.schema.BatchToSpace.decodeText(reader, json); + case 'BatchToSpaceND': return 
$root.mindspore.schema.BatchToSpaceND.decodeText(reader, json); + case 'BiasAdd': return $root.mindspore.schema.BiasAdd.decodeText(reader, json); + case 'BinaryCrossEntropy': return $root.mindspore.schema.BinaryCrossEntropy.decodeText(reader, json); + case 'BinaryCrossEntropyGrad': return $root.mindspore.schema.BinaryCrossEntropyGrad.decodeText(reader, json); + case 'BiasAddGrad': return $root.mindspore.schema.BiasAddGrad.decodeText(reader, json); + case 'BroadcastTo': return $root.mindspore.schema.BroadcastTo.decodeText(reader, json); + case 'Cast': return $root.mindspore.schema.Cast.decodeText(reader, json); + case 'Ceil': return $root.mindspore.schema.Ceil.decodeText(reader, json); + case 'Clip': return $root.mindspore.schema.Clip.decodeText(reader, json); + case 'Concat': return $root.mindspore.schema.Concat.decodeText(reader, json); + case 'Attention': return $root.mindspore.schema.Attention.decodeText(reader, json); + case 'Conv2DBackpropFilterFusion': return $root.mindspore.schema.Conv2DBackpropFilterFusion.decodeText(reader, json); + case 'Conv2DBackpropInputFusion': return $root.mindspore.schema.Conv2DBackpropInputFusion.decodeText(reader, json); + case 'Conv2DFusion': return $root.mindspore.schema.Conv2DFusion.decodeText(reader, json); + case 'Conv2dTransposeFusion': return $root.mindspore.schema.Conv2dTransposeFusion.decodeText(reader, json); + case 'Cos': return $root.mindspore.schema.Cos.decodeText(reader, json); + case 'ConstantOfShape': return $root.mindspore.schema.ConstantOfShape.decodeText(reader, json); + case 'Crop': return $root.mindspore.schema.Crop.decodeText(reader, json); + case 'CustomExtractFeatures': return $root.mindspore.schema.CustomExtractFeatures.decodeText(reader, json); + case 'CustomNormalize': return $root.mindspore.schema.CustomNormalize.decodeText(reader, json); + case 'CustomPredict': return $root.mindspore.schema.CustomPredict.decodeText(reader, json); + case 'DeConv2DGradFilter': return $root.mindspore.schema.DeConv2DGradFilter.decodeText(reader, json); + case 'Depend': return $root.mindspore.schema.Depend.decodeText(reader, json); + case 'DepthToSpace': return $root.mindspore.schema.DepthToSpace.decodeText(reader, json); + case 'DetectionPostProcess': return $root.mindspore.schema.DetectionPostProcess.decodeText(reader, json); + case 'DivFusion': return $root.mindspore.schema.DivFusion.decodeText(reader, json); + case 'DivGrad': return $root.mindspore.schema.DivGrad.decodeText(reader, json); + case 'Dropout': return $root.mindspore.schema.Dropout.decodeText(reader, json); + case 'DropoutGrad': return $root.mindspore.schema.DropoutGrad.decodeText(reader, json); + case 'Elu': return $root.mindspore.schema.Elu.decodeText(reader, json); + case 'Eltwise': return $root.mindspore.schema.Eltwise.decodeText(reader, json); + case 'Equal': return $root.mindspore.schema.Equal.decodeText(reader, json); + case 'EmbeddingLookupFusion': return $root.mindspore.schema.EmbeddingLookupFusion.decodeText(reader, json); + case 'ExpFusion': return $root.mindspore.schema.ExpFusion.decodeText(reader, json); + case 'ExpandDims': return $root.mindspore.schema.ExpandDims.decodeText(reader, json); + case 'FakeQuantWithMinMaxVars': return $root.mindspore.schema.FakeQuantWithMinMaxVars.decodeText(reader, json); + case 'FakeQuantWithMinMaxVarsPerChannel': return $root.mindspore.schema.FakeQuantWithMinMaxVarsPerChannel.decodeText(reader, json); + case 'FftReal': return $root.mindspore.schema.FftReal.decodeText(reader, json); + case 'FftImag': return 
$root.mindspore.schema.FftImag.decodeText(reader, json); + case 'Flatten': return $root.mindspore.schema.Flatten.decodeText(reader, json); + case 'FlattenGrad': return $root.mindspore.schema.FlattenGrad.decodeText(reader, json); + case 'Floor': return $root.mindspore.schema.Floor.decodeText(reader, json); + case 'FloorDiv': return $root.mindspore.schema.FloorDiv.decodeText(reader, json); + case 'FloorMod': return $root.mindspore.schema.FloorMod.decodeText(reader, json); + case 'Fill': return $root.mindspore.schema.Fill.decodeText(reader, json); + case 'FullConnection': return $root.mindspore.schema.FullConnection.decodeText(reader, json); + case 'FusedBatchNorm': return $root.mindspore.schema.FusedBatchNorm.decodeText(reader, json); + case 'Gather': return $root.mindspore.schema.Gather.decodeText(reader, json); + case 'GatherNd': return $root.mindspore.schema.GatherNd.decodeText(reader, json); + case 'Greater': return $root.mindspore.schema.Greater.decodeText(reader, json); + case 'GreaterEqual': return $root.mindspore.schema.GreaterEqual.decodeText(reader, json); + case 'HashtableLookup': return $root.mindspore.schema.HashtableLookup.decodeText(reader, json); + case 'InstanceNorm': return $root.mindspore.schema.InstanceNorm.decodeText(reader, json); + case 'LayerNormFusion': return $root.mindspore.schema.LayerNormFusion.decodeText(reader, json); + case 'LeakyRelu': return $root.mindspore.schema.LeakyRelu.decodeText(reader, json); + case 'Less': return $root.mindspore.schema.Less.decodeText(reader, json); + case 'LessEqual': return $root.mindspore.schema.LessEqual.decodeText(reader, json); + case 'Log': return $root.mindspore.schema.Log.decodeText(reader, json); + case 'LogGrad': return $root.mindspore.schema.LogGrad.decodeText(reader, json); + case 'LogicalAnd': return $root.mindspore.schema.LogicalAnd.decodeText(reader, json); + case 'LogicalNot': return $root.mindspore.schema.LogicalNot.decodeText(reader, json); + case 'LogicalOr': return $root.mindspore.schema.LogicalOr.decodeText(reader, json); + case 'LpNormalization': return $root.mindspore.schema.LpNormalization.decodeText(reader, json); + case 'LRN': return $root.mindspore.schema.LRN.decodeText(reader, json); + case 'LshProjection': return $root.mindspore.schema.LshProjection.decodeText(reader, json); + case 'LSTM': return $root.mindspore.schema.LSTM.decodeText(reader, json); + case 'L2NormalizeFusion': return $root.mindspore.schema.L2NormalizeFusion.decodeText(reader, json); + case 'MatMulFusion': return $root.mindspore.schema.MatMulFusion.decodeText(reader, json); + case 'Maximum': return $root.mindspore.schema.Maximum.decodeText(reader, json); + case 'MaximumGrad': return $root.mindspore.schema.MaximumGrad.decodeText(reader, json); + case 'MaxPoolFusion': return $root.mindspore.schema.MaxPoolFusion.decodeText(reader, json); + case 'MaxPoolGrad': return $root.mindspore.schema.MaxPoolGrad.decodeText(reader, json); + case 'SwitchLayer': return $root.mindspore.schema.SwitchLayer.decodeText(reader, json); + case 'Mfcc': return $root.mindspore.schema.Mfcc.decodeText(reader, json); + case 'Minimum': return $root.mindspore.schema.Minimum.decodeText(reader, json); + case 'MinimumGrad': return $root.mindspore.schema.MinimumGrad.decodeText(reader, json); + case 'Mod': return $root.mindspore.schema.Mod.decodeText(reader, json); + case 'MulFusion': return $root.mindspore.schema.MulFusion.decodeText(reader, json); + case 'MulGrad': return $root.mindspore.schema.MulGrad.decodeText(reader, json); + case 'Neg': return 
$root.mindspore.schema.Neg.decodeText(reader, json); + case 'NegGrad': return $root.mindspore.schema.NegGrad.decodeText(reader, json); + case 'NotEqual': return $root.mindspore.schema.NotEqual.decodeText(reader, json); + case 'NonMaxSuppression': return $root.mindspore.schema.NonMaxSuppression.decodeText(reader, json); + case 'OneHot': return $root.mindspore.schema.OneHot.decodeText(reader, json); + case 'OnesLike': return $root.mindspore.schema.OnesLike.decodeText(reader, json); + case 'PadFusion': return $root.mindspore.schema.PadFusion.decodeText(reader, json); + case 'PartialFusion': return $root.mindspore.schema.PartialFusion.decodeText(reader, json); + case 'PowerGrad': return $root.mindspore.schema.PowerGrad.decodeText(reader, json); + case 'PowFusion': return $root.mindspore.schema.PowFusion.decodeText(reader, json); + case 'PriorBox': return $root.mindspore.schema.PriorBox.decodeText(reader, json); + case 'PReLUFusion': return $root.mindspore.schema.PReLUFusion.decodeText(reader, json); + case 'QuantDTypeCast': return $root.mindspore.schema.QuantDTypeCast.decodeText(reader, json); + case 'Rank': return $root.mindspore.schema.Rank.decodeText(reader, json); + case 'Range': return $root.mindspore.schema.Range.decodeText(reader, json); + case 'Reciprocal': return $root.mindspore.schema.Reciprocal.decodeText(reader, json); + case 'RealDiv': return $root.mindspore.schema.RealDiv.decodeText(reader, json); + case 'ReduceFusion': return $root.mindspore.schema.ReduceFusion.decodeText(reader, json); + case 'Reshape': return $root.mindspore.schema.Reshape.decodeText(reader, json); + case 'Resize': return $root.mindspore.schema.Resize.decodeText(reader, json); + case 'ReverseSequence': return $root.mindspore.schema.ReverseSequence.decodeText(reader, json); + case 'ReverseV2': return $root.mindspore.schema.ReverseV2.decodeText(reader, json); + case 'Rfft': return $root.mindspore.schema.Rfft.decodeText(reader, json); + case 'ROIPooling': return $root.mindspore.schema.ROIPooling.decodeText(reader, json); + case 'Round': return $root.mindspore.schema.Round.decodeText(reader, json); + case 'Rsqrt': return $root.mindspore.schema.Rsqrt.decodeText(reader, json); + case 'ScaleFusion': return $root.mindspore.schema.ScaleFusion.decodeText(reader, json); + case 'ScatterNd': return $root.mindspore.schema.ScatterNd.decodeText(reader, json); + case 'SGD': return $root.mindspore.schema.SGD.decodeText(reader, json); + case 'Shape': return $root.mindspore.schema.Shape.decodeText(reader, json); + case 'SigmoidCrossEntropyWithLogits': return $root.mindspore.schema.SigmoidCrossEntropyWithLogits.decodeText(reader, json); + case 'SigmoidCrossEntropyWithLogitsGrad': return $root.mindspore.schema.SigmoidCrossEntropyWithLogitsGrad.decodeText(reader, json); + case 'Sin': return $root.mindspore.schema.Sin.decodeText(reader, json); + case 'SkipGram': return $root.mindspore.schema.SkipGram.decodeText(reader, json); + case 'SliceFusion': return $root.mindspore.schema.SliceFusion.decodeText(reader, json); + case 'SmoothL1Loss': return $root.mindspore.schema.SmoothL1Loss.decodeText(reader, json); + case 'SmoothL1LossGrad': return $root.mindspore.schema.SmoothL1LossGrad.decodeText(reader, json); + case 'Softmax': return $root.mindspore.schema.Softmax.decodeText(reader, json); + case 'SoftmaxCrossEntropyWithLogits': return $root.mindspore.schema.SoftmaxCrossEntropyWithLogits.decodeText(reader, json); + case 'SpaceToBatch': return $root.mindspore.schema.SpaceToBatch.decodeText(reader, json); + case 'SpaceToBatchND': return 
$root.mindspore.schema.SpaceToBatchND.decodeText(reader, json); + case 'SpaceToDepth': return $root.mindspore.schema.SpaceToDepth.decodeText(reader, json); + case 'SparseSoftmaxCrossEntropyWithLogits': return $root.mindspore.schema.SparseSoftmaxCrossEntropyWithLogits.decodeText(reader, json); + case 'SparseToDense': return $root.mindspore.schema.SparseToDense.decodeText(reader, json); + case 'Split': return $root.mindspore.schema.Split.decodeText(reader, json); + case 'Sqrt': return $root.mindspore.schema.Sqrt.decodeText(reader, json); + case 'Squeeze': return $root.mindspore.schema.Squeeze.decodeText(reader, json); + case 'Square': return $root.mindspore.schema.Square.decodeText(reader, json); + case 'SquaredDifference': return $root.mindspore.schema.SquaredDifference.decodeText(reader, json); + case 'Stack': return $root.mindspore.schema.Stack.decodeText(reader, json); + case 'StridedSlice': return $root.mindspore.schema.StridedSlice.decodeText(reader, json); + case 'SubFusion': return $root.mindspore.schema.SubFusion.decodeText(reader, json); + case 'SubGrad': return $root.mindspore.schema.SubGrad.decodeText(reader, json); + case 'Switch': return $root.mindspore.schema.Switch.decodeText(reader, json); + case 'TensorListFromTensor': return $root.mindspore.schema.TensorListFromTensor.decodeText(reader, json); + case 'TensorListGetItem': return $root.mindspore.schema.TensorListGetItem.decodeText(reader, json); + case 'TensorListReserve': return $root.mindspore.schema.TensorListReserve.decodeText(reader, json); + case 'TensorListSetItem': return $root.mindspore.schema.TensorListSetItem.decodeText(reader, json); + case 'TensorListStack': return $root.mindspore.schema.TensorListStack.decodeText(reader, json); + case 'TileFusion': return $root.mindspore.schema.TileFusion.decodeText(reader, json); + case 'TopKFusion': return $root.mindspore.schema.TopKFusion.decodeText(reader, json); + case 'Transpose': return $root.mindspore.schema.Transpose.decodeText(reader, json); + case 'Unique': return $root.mindspore.schema.Unique.decodeText(reader, json); + case 'UnsortedSegmentSum': return $root.mindspore.schema.UnsortedSegmentSum.decodeText(reader, json); + case 'Unsqueeze': return $root.mindspore.schema.Unsqueeze.decodeText(reader, json); + case 'Unstack': return $root.mindspore.schema.Unstack.decodeText(reader, json); + case 'LSTMGrad': return $root.mindspore.schema.LSTMGrad.decodeText(reader, json); + case 'Where': return $root.mindspore.schema.Where.decodeText(reader, json); + case 'ZerosLike': return $root.mindspore.schema.ZerosLike.decodeText(reader, json); + case 'Select': return $root.mindspore.schema.Select.decodeText(reader, json); + case 'ScatterNdUpdate': return $root.mindspore.schema.ScatterNdUpdate.decodeText(reader, json); + case 'GRU': return $root.mindspore.schema.GRU.decodeText(reader, json); + case 'NonZero': return $root.mindspore.schema.NonZero.decodeText(reader, json); + case 'InvertPermutation': return $root.mindspore.schema.InvertPermutation.decodeText(reader, json); + case 'Size': return $root.mindspore.schema.Size.decodeText(reader, json); + case 'RandomStandardNormal': return $root.mindspore.schema.RandomStandardNormal.decodeText(reader, json); + case 'CropAndResize': return $root.mindspore.schema.CropAndResize.decodeText(reader, json); + case 'Erf': return $root.mindspore.schema.Erf.decodeText(reader, json); + case 'StridedSliceGrad': return $root.mindspore.schema.StridedSliceGrad.decodeText(reader, json); + case 'IsFinite': return 
$root.mindspore.schema.IsFinite.decodeText(reader, json); + case 'LinSpace': return $root.mindspore.schema.LinSpace.decodeText(reader, json); + case 'UniformReal': return $root.mindspore.schema.UniformReal.decodeText(reader, json); + case 'AbsGrad': return $root.mindspore.schema.AbsGrad.decodeText(reader, json); + case 'RsqrtGrad': return $root.mindspore.schema.RsqrtGrad.decodeText(reader, json); + case 'SqrtGrad': return $root.mindspore.schema.SqrtGrad.decodeText(reader, json); + case 'LayerNormGrad': return $root.mindspore.schema.LayerNormGrad.decodeText(reader, json); + case 'ResizeGrad': return $root.mindspore.schema.ResizeGrad.decodeText(reader, json); + case 'Splice': return $root.mindspore.schema.Splice.decodeText(reader, json); + case 'LogSoftmax': return $root.mindspore.schema.LogSoftmax.decodeText(reader, json); + case 'Call': return $root.mindspore.schema.Call.decodeText(reader, json); + case 'Custom': return $root.mindspore.schema.Custom.decodeText(reader, json); + case 'CumSum': return $root.mindspore.schema.CumSum.decodeText(reader, json); + case 'SplitWithOverlap': return $root.mindspore.schema.SplitWithOverlap.decodeText(reader, json); + case 'GenOP': return $root.mindspore.schema.GenOP.decodeText(reader, json); + case 'RaggedRange': return $root.mindspore.schema.RaggedRange.decodeText(reader, json); + case 'GLU': return $root.mindspore.schema.GLU.decodeText(reader, json); + case 'TensorArray': return $root.mindspore.schema.TensorArray.decodeText(reader, json); + case 'TensorArrayRead': return $root.mindspore.schema.TensorArrayRead.decodeText(reader, json); + case 'TensorArrayWrite': return $root.mindspore.schema.TensorArrayWrite.decodeText(reader, json); + case 'Affine': return $root.mindspore.schema.Affine.decodeText(reader, json); + case 'AllGather': return $root.mindspore.schema.AllGather.decodeText(reader, json); + case 'ReduceScatter': return $root.mindspore.schema.ReduceScatter.decodeText(reader, json); + case 'DynamicQuant': return $root.mindspore.schema.DynamicQuant.decodeText(reader, json); + case 'LSTMGradData': return $root.mindspore.schema.LSTMGradData.decodeText(reader, json); + case 'LSTMGradWeight': return $root.mindspore.schema.LSTMGradWeight.decodeText(reader, json); + case 'RandomNormal': return $root.mindspore.schema.RandomNormal.decodeText(reader, json); + case 'NLLLoss': return $root.mindspore.schema.NLLLoss.decodeText(reader, json); + case 'NLLLossGrad': return $root.mindspore.schema.NLLLossGrad.decodeText(reader, json); + case 'FormatTranspose': return $root.mindspore.schema.FormatTranspose.decodeText(reader, json); + case 'GatherD': return $root.mindspore.schema.GatherD.decodeText(reader, json); + case 'GroupNormFusion': return $root.mindspore.schema.GroupNormFusion.decodeText(reader, json); + case 'Log1p': return $root.mindspore.schema.Log1p.decodeText(reader, json); + case 'TensorScatterAdd': return $root.mindspore.schema.TensorScatterAdd.decodeText(reader, json); + case 'SparseFillEmptyRows': return $root.mindspore.schema.SparseFillEmptyRows.decodeText(reader, json); + case 'SparseReshape': return $root.mindspore.schema.SparseReshape.decodeText(reader, json); + case 'SparseSegmentSum': return $root.mindspore.schema.SparseSegmentSum.decodeText(reader, json); + case 'ScatterElements': return $root.mindspore.schema.ScatterElements.decodeText(reader, json); + case 'Triu': return $root.mindspore.schema.Triu.decodeText(reader, json); + case 'Tril': return $root.mindspore.schema.Tril.decodeText(reader, json); + case 'AdamWeightDecay': return 
$root.mindspore.schema.AdamWeightDecay.decodeText(reader, json); + case 'FillV2': return $root.mindspore.schema.FillV2.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.mindspore.schema.Abs = class Abs { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Abs(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Abs(); + return $; + } +}; + +$root.mindspore.schema.Activation = class Activation { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Activation(); + $.activation_type = reader.int8_(position, 4, 0); + $.alpha = reader.float32_(position, 6, 0); + $.min_val = reader.float32_(position, 8, 0); + $.max_val = reader.float32_(position, 10, 0); + $.approximate = reader.bool_(position, 12, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Activation(); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + $.alpha = reader.value(json.alpha, 0); + $.min_val = reader.value(json.min_val, 0); + $.max_val = reader.value(json.max_val, 0); + $.approximate = reader.value(json.approximate, false); + return $; + } +}; + +$root.mindspore.schema.ActivationGrad = class ActivationGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ActivationGrad(); + $.activation_type = reader.int8_(position, 4, 0); + $.alpha = reader.float32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ActivationGrad(); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + $.alpha = reader.value(json.alpha, 0); + return $; + } +}; + +$root.mindspore.schema.Adam = class Adam { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Adam(); + $.use_locking = reader.bool_(position, 4, false); + $.use_nesterov = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Adam(); + $.use_locking = reader.value(json.use_locking, false); + $.use_nesterov = reader.value(json.use_nesterov, false); + return $; + } +}; + +$root.mindspore.schema.AddFusion = class AddFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.AddFusion(); + $.activation_type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.AddFusion(); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.AdderFusion = class AdderFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.AdderFusion(); + $.format = reader.int32_(position, 4, 0); + $.kernel_size = reader.int64s_(position, 6); + $.stride = reader.int64s_(position, 8); + $.dilation = reader.int64s_(position, 10); + $.pad_mode = reader.int8_(position, 12, 0); + $.pad_list = reader.int64s_(position, 14); + $.group = reader.int64_(position, 16, 0); + $.in_channel = reader.int64_(position, 18, 0); + $.out_channel = reader.int64_(position, 20, 0); + $.activation_type = reader.int8_(position, 22, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.AdderFusion(); + $.format = $root.mindspore.schema.Format[json.format]; + $.kernel_size = reader.array(json.kernel_size); + $.stride = reader.array(json.stride); + $.dilation = reader.array(json.dilation); + 
$.pad_mode = $root.mindspore.schema.PadMode[json.pad_mode]; + $.pad_list = reader.array(json.pad_list); + $.group = reader.value(json.group, 0); + $.in_channel = reader.value(json.in_channel, 0); + $.out_channel = reader.value(json.out_channel, 0); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.AddGrad = class AddGrad { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.AddGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.AddGrad(); + return $; + } +}; + +$root.mindspore.schema.AddN = class AddN { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.AddN(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.AddN(); + return $; + } +}; + +$root.mindspore.schema.All = class All { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.All(); + $.keep_dims = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.All(); + $.keep_dims = reader.value(json.keep_dims, 0); + return $; + } +}; + +$root.mindspore.schema.ApplyMomentum = class ApplyMomentum { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ApplyMomentum(); + $.use_nesterov = reader.bool_(position, 4, false); + $.use_locking = reader.bool_(position, 6, false); + $.gradient_scale = reader.float32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ApplyMomentum(); + $.use_nesterov = reader.value(json.use_nesterov, false); + $.use_locking = reader.value(json.use_locking, false); + $.gradient_scale = reader.value(json.gradient_scale, 0); + return $; + } +}; + +$root.mindspore.schema.ArgMaxFusion = class ArgMaxFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ArgMaxFusion(); + $.axis = reader.int64_(position, 4, 0); + $.top_k = reader.int64_(position, 6, 1); + $.keep_dims = reader.bool_(position, 8, false); + $.out_max_value = reader.bool_(position, 10, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ArgMaxFusion(); + $.axis = reader.value(json.axis, 0); + $.top_k = reader.value(json.top_k, 1); + $.keep_dims = reader.value(json.keep_dims, false); + $.out_max_value = reader.value(json.out_max_value, false); + return $; + } +}; + +$root.mindspore.schema.ArgMinFusion = class ArgMinFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ArgMinFusion(); + $.axis = reader.int64_(position, 4, 0); + $.top_k = reader.int64_(position, 6, 0); + $.keep_dims = reader.bool_(position, 8, false); + $.out_max_value = reader.bool_(position, 10, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ArgMinFusion(); + $.axis = reader.value(json.axis, 0); + $.top_k = reader.value(json.top_k, 0); + $.keep_dims = reader.value(json.keep_dims, false); + $.out_max_value = reader.value(json.out_max_value, false); + return $; + } +}; + +$root.mindspore.schema.Assert = class Assert { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Assert(); + $.summarize = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Assert(); + $.summarize = reader.value(json.summarize, 0); + return $; + } +}; + 
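+
+// Editor's sketch (an addition, not part of the generated output): the
+// decode/decodeText switches above are assumed to belong to this file's
+// union dispatcher, presumably $root.mindspore.schema.PrimitiveType, whose
+// opening lines fall outside this hunk. A hypothetical caller resolves a
+// node's attribute table like so:
+//
+//   // Binary path: `type` is the numeric union tag stored in the model,
+//   // `position` is the table offset inside the FlatBuffers `reader`.
+//   const attr = $root.mindspore.schema.PrimitiveType.decode(reader, position, 119); // 119 -> Reshape
+//
+//   // Text path: `type` is the union member name from the JSON form.
+//   const attrText = $root.mindspore.schema.PrimitiveType.decodeText(reader, json, 'Reshape');
+//
+// Within each generated class, reader.int8_/int32_/int64_/float32_/bool_
+// take (position, vtable_offset, default): offsets 4, 6, 8, ... map to the
+// fields in schema declaration order, and the default is returned when a
+// field was not serialized, per standard FlatBuffers vtable layout.
+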
+$root.mindspore.schema.Assign = class Assign { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Assign(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Assign(); + return $; + } +}; + +$root.mindspore.schema.AssignAdd = class AssignAdd { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.AssignAdd(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.AssignAdd(); + return $; + } +}; + +$root.mindspore.schema.AudioSpectrogram = class AudioSpectrogram { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.AudioSpectrogram(); + $.window_size = reader.int64_(position, 4, 0); + $.stride = reader.int64_(position, 6, 0); + $.mag_square = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.AudioSpectrogram(); + $.window_size = reader.value(json.window_size, 0); + $.stride = reader.value(json.stride, 0); + $.mag_square = reader.value(json.mag_square, false); + return $; + } +}; + +$root.mindspore.schema.AvgPoolFusion = class AvgPoolFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.AvgPoolFusion(); + $.kernel_size = reader.int64s_(position, 4); + $.strides = reader.int64s_(position, 6); + $.pad = reader.int64s_(position, 8); + $.pad_mode = reader.int8_(position, 10, 0); + $.round_mode = reader.int8_(position, 12, 0); + $.format = reader.int32_(position, 14, 0); + $.global = reader.bool_(position, 16, false); + $.activation_type = reader.int8_(position, 18, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.AvgPoolFusion(); + $.kernel_size = reader.array(json.kernel_size); + $.strides = reader.array(json.strides); + $.pad = reader.array(json.pad); + $.pad_mode = $root.mindspore.schema.PadMode[json.pad_mode]; + $.round_mode = $root.mindspore.schema.RoundMode[json.round_mode]; + $.format = $root.mindspore.schema.Format[json.format]; + $.global = reader.value(json.global, false); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.AvgPoolGrad = class AvgPoolGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.AvgPoolGrad(); + $.kernel_size = reader.int64s_(position, 4); + $.strides = reader.int64s_(position, 6); + $.pad_mode = reader.int8_(position, 8, 0); + $.format = reader.int32_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.AvgPoolGrad(); + $.kernel_size = reader.array(json.kernel_size); + $.strides = reader.array(json.strides); + $.pad_mode = $root.mindspore.schema.PadMode[json.pad_mode]; + $.format = $root.mindspore.schema.Format[json.format]; + return $; + } +}; + +$root.mindspore.schema.BatchNorm = class BatchNorm { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.BatchNorm(); + $.epsilon = reader.float32_(position, 4, 0); + $.format = reader.int32_(position, 6, 0); + $.is_training = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.BatchNorm(); + $.epsilon = reader.value(json.epsilon, 0); + $.format = $root.mindspore.schema.Format[json.format]; + $.is_training = reader.value(json.is_training, false); + return $; + } +}; + +$root.mindspore.schema.BatchNormGrad = class 
BatchNormGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.BatchNormGrad(); + $.epsilon = reader.float32_(position, 4, 0); + $.is_training = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.BatchNormGrad(); + $.epsilon = reader.value(json.epsilon, 0); + $.is_training = reader.value(json.is_training, false); + return $; + } +}; + +$root.mindspore.schema.BatchToSpace = class BatchToSpace { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.BatchToSpace(); + $.block_size = reader.int64s_(position, 4); + $.crops = reader.table(position, 6, $root.mindspore.schema.Vec2D.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.BatchToSpace(); + $.block_size = reader.array(json.block_size); + $.crops = reader.object(json.crops, $root.mindspore.schema.Vec2D.decodeText); + return $; + } +}; + +$root.mindspore.schema.BatchToSpaceND = class BatchToSpaceND { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.BatchToSpaceND(); + $.block_shape = reader.int64s_(position, 4); + $.crops = reader.table(position, 6, $root.mindspore.schema.Vec2D.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.BatchToSpaceND(); + $.block_shape = reader.array(json.block_shape); + $.crops = reader.object(json.crops, $root.mindspore.schema.Vec2D.decodeText); + return $; + } +}; + +$root.mindspore.schema.BiasAdd = class BiasAdd { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.BiasAdd(); + $.format = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.BiasAdd(); + $.format = $root.mindspore.schema.Format[json.format]; + return $; + } +}; + +$root.mindspore.schema.BinaryCrossEntropy = class BinaryCrossEntropy { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.BinaryCrossEntropy(); + $.reduction = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.BinaryCrossEntropy(); + $.reduction = $root.mindspore.schema.Reduction[json.reduction]; + return $; + } +}; + +$root.mindspore.schema.BinaryCrossEntropyGrad = class BinaryCrossEntropyGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.BinaryCrossEntropyGrad(); + $.reduction = reader.int8_(position, 4, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.BinaryCrossEntropyGrad(); + $.reduction = $root.mindspore.schema.Reduction[json.reduction]; + return $; + } +}; + +$root.mindspore.schema.BiasAddGrad = class BiasAddGrad { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.BiasAddGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.BiasAddGrad(); + return $; + } +}; + +$root.mindspore.schema.BroadcastTo = class BroadcastTo { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.BroadcastTo(); + $.shape = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.BroadcastTo(); + $.shape = reader.array(json.shape); + return $; + } +}; + +$root.mindspore.schema.Cast = class Cast { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Cast(); + return $; + } + + 
static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Cast(); + return $; + } +}; + +$root.mindspore.schema.Ceil = class Ceil { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Ceil(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Ceil(); + return $; + } +}; + +$root.mindspore.schema.Clip = class Clip { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Clip(); + $.max = reader.float32_(position, 4, 0); + $.min = reader.float32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Clip(); + $.max = reader.value(json.max, 0); + $.min = reader.value(json.min, 0); + return $; + } +}; + +$root.mindspore.schema.Concat = class Concat { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Concat(); + $.axis = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Concat(); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.mindspore.schema.Attention = class Attention { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Attention(); + $.head_num = reader.int64_(position, 4, 0); + $.head_size = reader.int64_(position, 6, 0); + $.cross = reader.bool_(position, 8, false); + $.scale = reader.float32_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Attention(); + $.head_num = reader.value(json.head_num, 0); + $.head_size = reader.value(json.head_size, 0); + $.cross = reader.value(json.cross, false); + $.scale = reader.value(json.scale, 0); + return $; + } +}; + +$root.mindspore.schema.Conv2DBackpropFilterFusion = class Conv2DBackpropFilterFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Conv2DBackpropFilterFusion(); + $.format = reader.int32_(position, 4, 0); + $.kernel_size = reader.int64s_(position, 6); + $.stride = reader.int64s_(position, 8); + $.dilation = reader.int64s_(position, 10); + $.pad_mode = reader.int8_(position, 12, 0); + $.pad_list = reader.int64s_(position, 14); + $.mode = reader.int64_(position, 16, 0); + $.group = reader.int64_(position, 18, 0); + $.in_channel = reader.int64_(position, 20, 0); + $.out_channel = reader.int64_(position, 22, 0); + $.activation_type = reader.int8_(position, 24, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Conv2DBackpropFilterFusion(); + $.format = $root.mindspore.schema.Format[json.format]; + $.kernel_size = reader.array(json.kernel_size); + $.stride = reader.array(json.stride); + $.dilation = reader.array(json.dilation); + $.pad_mode = $root.mindspore.schema.PadMode[json.pad_mode]; + $.pad_list = reader.array(json.pad_list); + $.mode = reader.value(json.mode, 0); + $.group = reader.value(json.group, 0); + $.in_channel = reader.value(json.in_channel, 0); + $.out_channel = reader.value(json.out_channel, 0); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.Conv2DBackpropInputFusion = class Conv2DBackpropInputFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Conv2DBackpropInputFusion(); + $.format = reader.int32_(position, 4, 0); + $.kernel_size = reader.int64s_(position, 6); + $.stride = reader.int64s_(position, 8); + $.dilation = reader.int64s_(position, 10); + 
$.pad_mode = reader.int8_(position, 12, 0); + $.pad = reader.int64s_(position, 14); + $.pad_list = reader.int64s_(position, 16); + $.mode = reader.int64_(position, 18, 0); + $.group = reader.int64_(position, 20, 0); + $.in_channel = reader.int64_(position, 22, 0); + $.out_channel = reader.int64_(position, 24, 0); + $.activation_type = reader.int8_(position, 26, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Conv2DBackpropInputFusion(); + $.format = $root.mindspore.schema.Format[json.format]; + $.kernel_size = reader.array(json.kernel_size); + $.stride = reader.array(json.stride); + $.dilation = reader.array(json.dilation); + $.pad_mode = $root.mindspore.schema.PadMode[json.pad_mode]; + $.pad = reader.array(json.pad); + $.pad_list = reader.array(json.pad_list); + $.mode = reader.value(json.mode, 0); + $.group = reader.value(json.group, 0); + $.in_channel = reader.value(json.in_channel, 0); + $.out_channel = reader.value(json.out_channel, 0); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.Conv2DFusion = class Conv2DFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Conv2DFusion(); + $.format = reader.int32_(position, 4, 0); + $.kernel_size = reader.int64s_(position, 6); + $.stride = reader.int64s_(position, 8); + $.dilation = reader.int64s_(position, 10); + $.pad_mode = reader.int8_(position, 12, 0); + $.pad_list = reader.int64s_(position, 14); + $.mode = reader.int64_(position, 16, 0); + $.group = reader.int64_(position, 18, 0); + $.in_channel = reader.int64_(position, 20, 0); + $.out_channel = reader.int64_(position, 22, 0); + $.activation_type = reader.int8_(position, 24, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Conv2DFusion(); + $.format = $root.mindspore.schema.Format[json.format]; + $.kernel_size = reader.array(json.kernel_size); + $.stride = reader.array(json.stride); + $.dilation = reader.array(json.dilation); + $.pad_mode = $root.mindspore.schema.PadMode[json.pad_mode]; + $.pad_list = reader.array(json.pad_list); + $.mode = reader.value(json.mode, 0); + $.group = reader.value(json.group, 0); + $.in_channel = reader.value(json.in_channel, 0); + $.out_channel = reader.value(json.out_channel, 0); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.Conv2dTransposeFusion = class Conv2dTransposeFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Conv2dTransposeFusion(); + $.format = reader.int32_(position, 4, 0); + $.kernel_size = reader.int64s_(position, 6); + $.stride = reader.int64s_(position, 8); + $.dilation = reader.int64s_(position, 10); + $.pad_mode = reader.int8_(position, 12, 0); + $.pad = reader.int64s_(position, 14); + $.pad_list = reader.int64s_(position, 16); + $.mode = reader.int64_(position, 18, 0); + $.group = reader.int64_(position, 20, 0); + $.in_channel = reader.int64_(position, 22, 0); + $.out_channel = reader.int64_(position, 24, 0); + $.activation_type = reader.int8_(position, 26, 0); + $.output_paddings = reader.int64s_(position, 28); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Conv2dTransposeFusion(); + $.format = $root.mindspore.schema.Format[json.format]; + $.kernel_size = reader.array(json.kernel_size); + $.stride = reader.array(json.stride); + $.dilation = reader.array(json.dilation); 
+ $.pad_mode = $root.mindspore.schema.PadMode[json.pad_mode]; + $.pad = reader.array(json.pad); + $.pad_list = reader.array(json.pad_list); + $.mode = reader.value(json.mode, 0); + $.group = reader.value(json.group, 0); + $.in_channel = reader.value(json.in_channel, 0); + $.out_channel = reader.value(json.out_channel, 0); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + $.output_paddings = reader.array(json.output_paddings); + return $; + } +}; + +$root.mindspore.schema.Cos = class Cos { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Cos(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Cos(); + return $; + } +}; + +$root.mindspore.schema.ConstantOfShape = class ConstantOfShape { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ConstantOfShape(); + $.data_type = reader.int64_(position, 4, 0); + $.value = reader.typedArray(position, 6, Float32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ConstantOfShape(); + $.data_type = reader.value(json.data_type, 0); + $.value = reader.typedArray(json.value, Float32Array); + return $; + } +}; + +$root.mindspore.schema.Crop = class Crop { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Crop(); + $.axis = reader.int64_(position, 4, 0); + $.offsets = reader.int64s_(position, 6); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Crop(); + $.axis = reader.value(json.axis, 0); + $.offsets = reader.array(json.offsets); + return $; + } +}; + +$root.mindspore.schema.CustomExtractFeatures = class CustomExtractFeatures { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.CustomExtractFeatures(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.CustomExtractFeatures(); + return $; + } +}; + +$root.mindspore.schema.CustomNormalize = class CustomNormalize { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.CustomNormalize(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.CustomNormalize(); + return $; + } +}; + +$root.mindspore.schema.CustomPredict = class CustomPredict { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.CustomPredict(); + $.output_num = reader.int64_(position, 4, 0); + $.weight_threshold = reader.float32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.CustomPredict(); + $.output_num = reader.value(json.output_num, 0); + $.weight_threshold = reader.value(json.weight_threshold, 0); + return $; + } +}; + +$root.mindspore.schema.DeConv2DGradFilter = class DeConv2DGradFilter { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.DeConv2DGradFilter(); + $.in_channel = reader.int64_(position, 4, 0); + $.out_channel = reader.int64_(position, 6, 0); + $.kernel_size = reader.int64s_(position, 8); + $.pad_mode = reader.int8_(position, 10, 0); + $.pad_list = reader.int64s_(position, 12); + $.stride = reader.int64s_(position, 14); + $.dilation = reader.int64s_(position, 16); + $.group = reader.int64_(position, 18, 0); + $.format = reader.int32_(position, 20, 0); + $.activation_type = reader.int8_(position, 22, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new 
$root.mindspore.schema.DeConv2DGradFilter(); + $.in_channel = reader.value(json.in_channel, 0); + $.out_channel = reader.value(json.out_channel, 0); + $.kernel_size = reader.array(json.kernel_size); + $.pad_mode = $root.mindspore.schema.PadMode[json.pad_mode]; + $.pad_list = reader.array(json.pad_list); + $.stride = reader.array(json.stride); + $.dilation = reader.array(json.dilation); + $.group = reader.value(json.group, 0); + $.format = $root.mindspore.schema.Format[json.format]; + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.Depend = class Depend { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Depend(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Depend(); + return $; + } +}; + +$root.mindspore.schema.DepthToSpace = class DepthToSpace { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.DepthToSpace(); + $.block_size = reader.int64_(position, 4, 0); + $.format = reader.int32_(position, 6, 0); + $.mode = reader.string_(position, 8, null); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.DepthToSpace(); + $.block_size = reader.value(json.block_size, 0); + $.format = $root.mindspore.schema.Format[json.format]; + $.mode = reader.value(json.mode, null); + return $; + } +}; + +$root.mindspore.schema.DetectionPostProcess = class DetectionPostProcess { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.DetectionPostProcess(); + $.format = reader.int32_(position, 4, 0); + $.input_size = reader.int64_(position, 6, 0); + $.scale = reader.typedArray(position, 8, Float32Array); + $.nms_iou_threshold = reader.float32_(position, 10, 0); + $.nms_score_threshold = reader.float32_(position, 12, 0); + $.max_detections = reader.int64_(position, 14, 0); + $.detections_per_class = reader.int64_(position, 16, 0); + $.max_classes_per_detection = reader.int64_(position, 18, 0); + $.num_classes = reader.int64_(position, 20, 0); + $.use_regular_nms = reader.bool_(position, 22, false); + $.out_quantized = reader.bool_(position, 24, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.DetectionPostProcess(); + $.format = $root.mindspore.schema.Format[json.format]; + $.input_size = reader.value(json.input_size, 0); + $.scale = reader.typedArray(json.scale, Float32Array); + $.nms_iou_threshold = reader.value(json.nms_iou_threshold, 0); + $.nms_score_threshold = reader.value(json.nms_score_threshold, 0); + $.max_detections = reader.value(json.max_detections, 0); + $.detections_per_class = reader.value(json.detections_per_class, 0); + $.max_classes_per_detection = reader.value(json.max_classes_per_detection, 0); + $.num_classes = reader.value(json.num_classes, 0); + $.use_regular_nms = reader.value(json.use_regular_nms, false); + $.out_quantized = reader.value(json.out_quantized, false); + return $; + } +}; + +$root.mindspore.schema.DivFusion = class DivFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.DivFusion(); + $.activation_type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.DivFusion(); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.DivGrad = class DivGrad { + + static decode(/* reader, position */) { + 
const $ = new $root.mindspore.schema.DivGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.DivGrad(); + return $; + } +}; + +$root.mindspore.schema.Dropout = class Dropout { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Dropout(); + $.keep_prob = reader.float32_(position, 4, 0.5); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Dropout(); + $.keep_prob = reader.value(json.keep_prob, 0.5); + return $; + } +}; + +$root.mindspore.schema.DropoutGrad = class DropoutGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.DropoutGrad(); + $.keep_prob = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.DropoutGrad(); + $.keep_prob = reader.value(json.keep_prob, 0); + return $; + } +}; + +$root.mindspore.schema.Elu = class Elu { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Elu(); + $.alpha = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Elu(); + $.alpha = reader.value(json.alpha, 0); + return $; + } +}; + +$root.mindspore.schema.Eltwise = class Eltwise { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Eltwise(); + $.mode = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Eltwise(); + $.mode = $root.mindspore.schema.EltwiseMode[json.mode]; + return $; + } +}; + +$root.mindspore.schema.Equal = class Equal { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Equal(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Equal(); + return $; + } +}; + +$root.mindspore.schema.EmbeddingLookupFusion = class EmbeddingLookupFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.EmbeddingLookupFusion(); + $.max_norm = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.EmbeddingLookupFusion(); + $.max_norm = reader.value(json.max_norm, 0); + return $; + } +}; + +$root.mindspore.schema.ExpFusion = class ExpFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ExpFusion(); + $.base = reader.float32_(position, 4, -1); + $.scale = reader.float32_(position, 6, 1); + $.shift = reader.float32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ExpFusion(); + $.base = reader.value(json.base, -1); + $.scale = reader.value(json.scale, 1); + $.shift = reader.value(json.shift, 0); + return $; + } +}; + +$root.mindspore.schema.ExpandDims = class ExpandDims { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.ExpandDims(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.ExpandDims(); + return $; + } +}; + +$root.mindspore.schema.FakeQuantWithMinMaxVars = class FakeQuantWithMinMaxVars { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.FakeQuantWithMinMaxVars(); + $.num_bits = reader.int64_(position, 4, 0); + $.narrow_range = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.FakeQuantWithMinMaxVars(); + $.num_bits = 
reader.value(json.num_bits, 0); + $.narrow_range = reader.value(json.narrow_range, false); + return $; + } +}; + +$root.mindspore.schema.FakeQuantWithMinMaxVarsPerChannel = class FakeQuantWithMinMaxVarsPerChannel { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.FakeQuantWithMinMaxVarsPerChannel(); + $.num_bits = reader.int64_(position, 4, 0); + $.narrow_range = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.FakeQuantWithMinMaxVarsPerChannel(); + $.num_bits = reader.value(json.num_bits, 0); + $.narrow_range = reader.value(json.narrow_range, false); + return $; + } +}; + +$root.mindspore.schema.FftReal = class FftReal { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.FftReal(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.FftReal(); + return $; + } +}; + +$root.mindspore.schema.FftImag = class FftImag { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.FftImag(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.FftImag(); + return $; + } +}; + +$root.mindspore.schema.Flatten = class Flatten { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Flatten(); + $.axis = reader.int64_(position, 4, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Flatten(); + $.axis = reader.value(json.axis, 1); + return $; + } +}; + +$root.mindspore.schema.FlattenGrad = class FlattenGrad { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.FlattenGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.FlattenGrad(); + return $; + } +}; + +$root.mindspore.schema.Floor = class Floor { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Floor(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Floor(); + return $; + } +}; + +$root.mindspore.schema.FloorDiv = class FloorDiv { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.FloorDiv(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.FloorDiv(); + return $; + } +}; + +$root.mindspore.schema.FloorMod = class FloorMod { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.FloorMod(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.FloorMod(); + return $; + } +}; + +$root.mindspore.schema.Fill = class Fill { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Fill(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Fill(); + return $; + } +}; + +$root.mindspore.schema.FullConnection = class FullConnection { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.FullConnection(); + $.has_bias = reader.bool_(position, 4, false); + $.use_axis = reader.bool_(position, 6, false); + $.axis = reader.int64_(position, 8, 0); + $.activation_type = reader.int8_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.FullConnection(); + $.has_bias = reader.value(json.has_bias, false); + $.use_axis = reader.value(json.use_axis, false); + $.axis = 
reader.value(json.axis, 0); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.FusedBatchNorm = class FusedBatchNorm { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.FusedBatchNorm(); + $.epsilon = reader.float32_(position, 4, 0.0001); + $.momentum = reader.float32_(position, 6, 0.9); + $.mode = reader.int64_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.FusedBatchNorm(); + $.epsilon = reader.value(json.epsilon, 0.0001); + $.momentum = reader.value(json.momentum, 0.9); + $.mode = reader.value(json.mode, 0); + return $; + } +}; + +$root.mindspore.schema.Gather = class Gather { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Gather(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Gather(); + return $; + } +}; + +$root.mindspore.schema.GatherNd = class GatherNd { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.GatherNd(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.GatherNd(); + return $; + } +}; + +$root.mindspore.schema.Greater = class Greater { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Greater(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Greater(); + return $; + } +}; + +$root.mindspore.schema.GreaterEqual = class GreaterEqual { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.GreaterEqual(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.GreaterEqual(); + return $; + } +}; + +$root.mindspore.schema.HashtableLookup = class HashtableLookup { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.HashtableLookup(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.HashtableLookup(); + return $; + } +}; + +$root.mindspore.schema.InstanceNorm = class InstanceNorm { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.InstanceNorm(); + $.epsilon = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.InstanceNorm(); + $.epsilon = reader.value(json.epsilon, 0); + return $; + } +}; + +$root.mindspore.schema.LayerNormFusion = class LayerNormFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.LayerNormFusion(); + $.begin_norm_axis = reader.int64_(position, 4, 0); + $.epsilon = reader.float32_(position, 6, 0.00001); + $.elementwise_affine = reader.bool_(position, 8, false); + $.begin_params_axis = reader.int64_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.LayerNormFusion(); + $.begin_norm_axis = reader.value(json.begin_norm_axis, 0); + $.epsilon = reader.value(json.epsilon, 0.00001); + $.elementwise_affine = reader.value(json.elementwise_affine, false); + $.begin_params_axis = reader.value(json.begin_params_axis, 0); + return $; + } +}; + +$root.mindspore.schema.LeakyRelu = class LeakyRelu { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.LeakyRelu(); + $.negative_slope = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new 
$root.mindspore.schema.LeakyRelu(); + $.negative_slope = reader.value(json.negative_slope, 0); + return $; + } +}; + +$root.mindspore.schema.Less = class Less { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Less(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Less(); + return $; + } +}; + +$root.mindspore.schema.LessEqual = class LessEqual { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.LessEqual(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.LessEqual(); + return $; + } +}; + +$root.mindspore.schema.Log = class Log { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Log(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Log(); + return $; + } +}; + +$root.mindspore.schema.LogGrad = class LogGrad { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.LogGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.LogGrad(); + return $; + } +}; + +$root.mindspore.schema.LogicalAnd = class LogicalAnd { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.LogicalAnd(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.LogicalAnd(); + return $; + } +}; + +$root.mindspore.schema.LogicalNot = class LogicalNot { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.LogicalNot(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.LogicalNot(); + return $; + } +}; + +$root.mindspore.schema.LogicalOr = class LogicalOr { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.LogicalOr(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.LogicalOr(); + return $; + } +}; + +$root.mindspore.schema.LpNormalization = class LpNormalization { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.LpNormalization(); + $.axis = reader.int64_(position, 4, 0); + $.p = reader.int64_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.LpNormalization(); + $.axis = reader.value(json.axis, 0); + $.p = reader.value(json.p, 0); + return $; + } +}; + +$root.mindspore.schema.LRN = class LRN { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.LRN(); + $.depth_radius = reader.int64_(position, 4, 0); + $.bias = reader.float32_(position, 6, 0); + $.alpha = reader.float32_(position, 8, 0); + $.beta = reader.float32_(position, 10, 0); + $.norm_region = reader.string_(position, 12, null); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.LRN(); + $.depth_radius = reader.value(json.depth_radius, 0); + $.bias = reader.value(json.bias, 0); + $.alpha = reader.value(json.alpha, 0); + $.beta = reader.value(json.beta, 0); + $.norm_region = reader.value(json.norm_region, null); + return $; + } +}; + +$root.mindspore.schema.LshProjection = class LshProjection { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.LshProjection(); + $.type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.LshProjection(); + $.type = 
$root.mindspore.schema.LshProjectionType[json.type]; + return $; + } +}; + +$root.mindspore.schema.LSTM = class LSTM { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.LSTM(); + $.bidirectional = reader.bool_(position, 4, false); + $.has_bias = reader.bool_(position, 6, false); + $.input_size = reader.int64_(position, 8, 0); + $.hidden_size = reader.int64_(position, 10, 0); + $.num_layers = reader.int64_(position, 12, 0); + $.num_directions = reader.int64_(position, 14, 0); + $.dropout = reader.float32_(position, 16, 0); + $.zoneout_cell = reader.float32_(position, 18, 0); + $.zoneout_hidden = reader.float32_(position, 20, 0); + $.proj_size = reader.int64_(position, 22, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.LSTM(); + $.bidirectional = reader.value(json.bidirectional, false); + $.has_bias = reader.value(json.has_bias, false); + $.input_size = reader.value(json.input_size, 0); + $.hidden_size = reader.value(json.hidden_size, 0); + $.num_layers = reader.value(json.num_layers, 0); + $.num_directions = reader.value(json.num_directions, 0); + $.dropout = reader.value(json.dropout, 0); + $.zoneout_cell = reader.value(json.zoneout_cell, 0); + $.zoneout_hidden = reader.value(json.zoneout_hidden, 0); + $.proj_size = reader.value(json.proj_size, 0); + return $; + } +}; + +$root.mindspore.schema.LSTMGrad = class LSTMGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.LSTMGrad(); + $.bidirectional = reader.bool_(position, 4, false); + $.has_bias = reader.bool_(position, 6, false); + $.input_size = reader.int64_(position, 8, 0); + $.hidden_size = reader.int64_(position, 10, 0); + $.num_layers = reader.int64_(position, 12, 0); + $.num_directions = reader.int64_(position, 14, 0); + $.dropout = reader.float32_(position, 16, 0); + $.zoneout_cell = reader.float32_(position, 18, 0); + $.zoneout_hidden = reader.float32_(position, 20, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.LSTMGrad(); + $.bidirectional = reader.value(json.bidirectional, false); + $.has_bias = reader.value(json.has_bias, false); + $.input_size = reader.value(json.input_size, 0); + $.hidden_size = reader.value(json.hidden_size, 0); + $.num_layers = reader.value(json.num_layers, 0); + $.num_directions = reader.value(json.num_directions, 0); + $.dropout = reader.value(json.dropout, 0); + $.zoneout_cell = reader.value(json.zoneout_cell, 0); + $.zoneout_hidden = reader.value(json.zoneout_hidden, 0); + return $; + } +}; + +$root.mindspore.schema.L2NormalizeFusion = class L2NormalizeFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.L2NormalizeFusion(); + $.axis = reader.int64s_(position, 4); + $.epsilon = reader.float32_(position, 6, 0); + $.activation_type = reader.int8_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.L2NormalizeFusion(); + $.axis = reader.array(json.axis); + $.epsilon = reader.value(json.epsilon, 0); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.MatMulFusion = class MatMulFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.MatMulFusion(); + $.transpose_a = reader.bool_(position, 4, false); + $.transpose_b = reader.bool_(position, 6, false); + $.activation_type = reader.int8_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + 
const $ = new $root.mindspore.schema.MatMulFusion(); + $.transpose_a = reader.value(json.transpose_a, false); + $.transpose_b = reader.value(json.transpose_b, false); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.Maximum = class Maximum { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Maximum(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Maximum(); + return $; + } +}; + +$root.mindspore.schema.MaximumGrad = class MaximumGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.MaximumGrad(); + $.grad_x = reader.bool_(position, 4, false); + $.grad_y = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.MaximumGrad(); + $.grad_x = reader.value(json.grad_x, false); + $.grad_y = reader.value(json.grad_y, false); + return $; + } +}; + +$root.mindspore.schema.MaxPoolFusion = class MaxPoolFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.MaxPoolFusion(); + $.kernel_size = reader.int64s_(position, 4); + $.strides = reader.int64s_(position, 6); + $.pad = reader.int64s_(position, 8); + $.pad_mode = reader.int8_(position, 10, 0); + $.round_mode = reader.int8_(position, 12, 0); + $.format = reader.int32_(position, 14, 0); + $.global = reader.bool_(position, 16, false); + $.activation_type = reader.int8_(position, 18, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.MaxPoolFusion(); + $.kernel_size = reader.array(json.kernel_size); + $.strides = reader.array(json.strides); + $.pad = reader.array(json.pad); + $.pad_mode = $root.mindspore.schema.PadMode[json.pad_mode]; + $.round_mode = $root.mindspore.schema.RoundMode[json.round_mode]; + $.format = $root.mindspore.schema.Format[json.format]; + $.global = reader.value(json.global, false); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.MaxPoolGrad = class MaxPoolGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.MaxPoolGrad(); + $.kernel_size = reader.int64s_(position, 4); + $.strides = reader.int64s_(position, 6); + $.pad_mode = reader.int8_(position, 8, 0); + $.format = reader.int32_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.MaxPoolGrad(); + $.kernel_size = reader.array(json.kernel_size); + $.strides = reader.array(json.strides); + $.pad_mode = $root.mindspore.schema.PadMode[json.pad_mode]; + $.format = $root.mindspore.schema.Format[json.format]; + return $; + } +}; + +$root.mindspore.schema.SwitchLayer = class SwitchLayer { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.SwitchLayer(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.SwitchLayer(); + return $; + } +}; + +$root.mindspore.schema.Mfcc = class Mfcc { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Mfcc(); + $.freq_upper_limit = reader.float32_(position, 4, 0); + $.freq_lower_limit = reader.float32_(position, 6, 0); + $.filter_bank_channel_num = reader.int64_(position, 8, 0); + $.dct_coeff_num = reader.int64_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Mfcc(); + $.freq_upper_limit 
= reader.value(json.freq_upper_limit, 0); + $.freq_lower_limit = reader.value(json.freq_lower_limit, 0); + $.filter_bank_channel_num = reader.value(json.filter_bank_channel_num, 0); + $.dct_coeff_num = reader.value(json.dct_coeff_num, 0); + return $; + } +}; + +$root.mindspore.schema.Minimum = class Minimum { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Minimum(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Minimum(); + return $; + } +}; + +$root.mindspore.schema.MinimumGrad = class MinimumGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.MinimumGrad(); + $.grad_x = reader.bool_(position, 4, false); + $.grad_y = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.MinimumGrad(); + $.grad_x = reader.value(json.grad_x, false); + $.grad_y = reader.value(json.grad_y, false); + return $; + } +}; + +$root.mindspore.schema.Mod = class Mod { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Mod(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Mod(); + return $; + } +}; + +$root.mindspore.schema.MulFusion = class MulFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.MulFusion(); + $.activation_type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.MulFusion(); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.MulGrad = class MulGrad { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.MulGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.MulGrad(); + return $; + } +}; + +$root.mindspore.schema.Neg = class Neg { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Neg(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Neg(); + return $; + } +}; + +$root.mindspore.schema.NegGrad = class NegGrad { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.NegGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.NegGrad(); + return $; + } +}; + +$root.mindspore.schema.NotEqual = class NotEqual { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.NotEqual(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.NotEqual(); + return $; + } +}; + +$root.mindspore.schema.NonMaxSuppression = class NonMaxSuppression { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.NonMaxSuppression(); + $.center_point_box = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.NonMaxSuppression(); + $.center_point_box = reader.value(json.center_point_box, 0); + return $; + } +}; + +$root.mindspore.schema.OneHot = class OneHot { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.OneHot(); + $.axis = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.OneHot(); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + 
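+
+// Reviewer note, illustrative only (not generated from the schema): every table in this
+// file follows one two-path pattern. decode(reader, position) resolves binary FlatBuffers
+// fields through the table's vtable -- slots 4, 6, 8, ... correspond to field ids 0, 1,
+// 2, ... -- and falls back to the declared default when a slot is absent, while
+// decodeText(reader, json) reads the same fields from a JSON dump. The BinaryReaderSketch
+// below is a hypothetical, minimal stand-in for the reader contract these calls assume;
+// the project's real reader is defined elsewhere.
+class BinaryReaderSketch {
+
+    constructor(data) {
+        // data: Uint8Array holding the FlatBuffers payload.
+        this._view = new DataView(data.buffer, data.byteOffset, data.byteLength);
+    }
+
+    // position points at a table; slot is the vtable entry for the field.
+    // Returns the field's byte offset within the table, or 0 if it was never written.
+    _offset(position, slot) {
+        const vtable = position - this._view.getInt32(position, true);
+        return slot < this._view.getUint16(vtable, true) ? this._view.getUint16(vtable + slot, true) : 0;
+    }
+
+    bool_(position, slot, defaultValue) {
+        const offset = this._offset(position, slot);
+        return offset ? this._view.getUint8(position + offset) !== 0 : defaultValue;
+    }
+
+    float32_(position, slot, defaultValue) {
+        const offset = this._offset(position, slot);
+        return offset ? this._view.getFloat32(position + offset, true) : defaultValue;
+    }
+}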
+$root.mindspore.schema.OnesLike = class OnesLike { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.OnesLike(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.OnesLike(); + return $; + } +}; + +$root.mindspore.schema.PadFusion = class PadFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.PadFusion(); + $.paddings = reader.table(position, 4, $root.mindspore.schema.Vec2D.decode); + $.padding_mode = reader.int8_(position, 6, 0); + $.constant_value = reader.float32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.PadFusion(); + $.paddings = reader.object(json.paddings, $root.mindspore.schema.Vec2D.decodeText); + $.padding_mode = $root.mindspore.schema.PaddingMode[json.padding_mode]; + $.constant_value = reader.value(json.constant_value, 0); + return $; + } +}; + +$root.mindspore.schema.PartialFusion = class PartialFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.PartialFusion(); + $.sub_graph_index = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.PartialFusion(); + $.sub_graph_index = reader.value(json.sub_graph_index, 0); + return $; + } +}; + +$root.mindspore.schema.PowerGrad = class PowerGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.PowerGrad(); + $.power = reader.float32_(position, 4, 0); + $.scale = reader.float32_(position, 6, 0); + $.shift = reader.float32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.PowerGrad(); + $.power = reader.value(json.power, 0); + $.scale = reader.value(json.scale, 0); + $.shift = reader.value(json.shift, 0); + return $; + } +}; + +$root.mindspore.schema.PowFusion = class PowFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.PowFusion(); + $.scale = reader.float32_(position, 4, 1); + $.shift = reader.float32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.PowFusion(); + $.scale = reader.value(json.scale, 1); + $.shift = reader.value(json.shift, 0); + return $; + } +}; + +$root.mindspore.schema.PriorBox = class PriorBox { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.PriorBox(); + $.min_sizes = reader.int64s_(position, 4); + $.max_sizes = reader.int64s_(position, 6); + $.aspect_ratios = reader.typedArray(position, 8, Float32Array); + $.variances = reader.typedArray(position, 10, Float32Array); + $.image_size_w = reader.int64_(position, 12, 0); + $.image_size_h = reader.int64_(position, 14, 0); + $.step_w = reader.float32_(position, 16, 0); + $.step_h = reader.float32_(position, 18, 0); + $.clip = reader.bool_(position, 20, false); + $.flip = reader.bool_(position, 22, false); + $.offset = reader.float32_(position, 24, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.PriorBox(); + $.min_sizes = reader.array(json.min_sizes); + $.max_sizes = reader.array(json.max_sizes); + $.aspect_ratios = reader.typedArray(json.aspect_ratios, Float32Array); + $.variances = reader.typedArray(json.variances, Float32Array); + $.image_size_w = reader.value(json.image_size_w, 0); + $.image_size_h = reader.value(json.image_size_h, 0); + $.step_w = reader.value(json.step_w, 0); + $.step_h = reader.value(json.step_h, 0); 
+ $.clip = reader.value(json.clip, false); + $.flip = reader.value(json.flip, false); + $.offset = reader.value(json.offset, 0); + return $; + } +}; + +$root.mindspore.schema.PReLUFusion = class PReLUFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.PReLUFusion(); + $.channel_shared = reader.bool_(position, 4, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.PReLUFusion(); + $.channel_shared = reader.value(json.channel_shared, false); + return $; + } +}; + +$root.mindspore.schema.Rank = class Rank { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Rank(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Rank(); + return $; + } +}; + +$root.mindspore.schema.Range = class Range { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Range(); + $.d_type = reader.int64_(position, 4, 0); + $.start = reader.int64_(position, 6, 0); + $.limit = reader.int64_(position, 8, 0); + $.delta = reader.int64_(position, 10, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Range(); + $.d_type = reader.value(json.d_type, 0); + $.start = reader.value(json.start, 0); + $.limit = reader.value(json.limit, 0); + $.delta = reader.value(json.delta, 1); + return $; + } +}; + +$root.mindspore.schema.Reciprocal = class Reciprocal { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Reciprocal(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Reciprocal(); + return $; + } +}; + +$root.mindspore.schema.RealDiv = class RealDiv { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.RealDiv(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.RealDiv(); + return $; + } +}; + +$root.mindspore.schema.ReduceFusion = class ReduceFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ReduceFusion(); + $.keep_dims = reader.bool_(position, 4, false); + $.mode = reader.int8_(position, 6, 0); + $.reduce_to_end = reader.bool_(position, 8, false); + $.coeff = reader.float32_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ReduceFusion(); + $.keep_dims = reader.value(json.keep_dims, false); + $.mode = $root.mindspore.schema.ReduceMode[json.mode]; + $.reduce_to_end = reader.value(json.reduce_to_end, false); + $.coeff = reader.value(json.coeff, 0); + return $; + } +}; + +$root.mindspore.schema.Reshape = class Reshape { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Reshape(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Reshape(); + return $; + } +}; + +$root.mindspore.schema.Resize = class Resize { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Resize(); + $.format = reader.int32_(position, 4, 0); + $.method = reader.int8_(position, 6, 0); + $.new_height = reader.int64_(position, 8, 0); + $.new_width = reader.int64_(position, 10, 0); + $.preserve_aspect_ratio = reader.bool_(position, 12, false); + $.coordinate_transform_mode = reader.int8_(position, 14, 0); + $.cubic_coeff = reader.float32_(position, 16, 0); + $.exclude_outside = reader.int64_(position, 18, 0); + $.extrapolation_value = reader.float32_(position, 20, 0); + 
$.nearest_mode = reader.int8_(position, 22, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Resize(); + $.format = $root.mindspore.schema.Format[json.format]; + $.method = $root.mindspore.schema.ResizeMethod[json.method]; + $.new_height = reader.value(json.new_height, 0); + $.new_width = reader.value(json.new_width, 0); + $.preserve_aspect_ratio = reader.value(json.preserve_aspect_ratio, false); + $.coordinate_transform_mode = $root.mindspore.schema.CoordinateTransformMode[json.coordinate_transform_mode]; + $.cubic_coeff = reader.value(json.cubic_coeff, 0); + $.exclude_outside = reader.value(json.exclude_outside, 0); + $.extrapolation_value = reader.value(json.extrapolation_value, 0); + $.nearest_mode = $root.mindspore.schema.NearestMode[json.nearest_mode]; + return $; + } +}; + +$root.mindspore.schema.ReverseSequence = class ReverseSequence { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ReverseSequence(); + $.seq_dim = reader.int64_(position, 4, 0); + $.batch_dim = reader.int64_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ReverseSequence(); + $.seq_dim = reader.value(json.seq_dim, 0); + $.batch_dim = reader.value(json.batch_dim, 0); + return $; + } +}; + +$root.mindspore.schema.ReverseV2 = class ReverseV2 { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ReverseV2(); + $.axis = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ReverseV2(); + $.axis = reader.array(json.axis); + return $; + } +}; + +$root.mindspore.schema.Rfft = class Rfft { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Rfft(); + $.fft_length = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Rfft(); + $.fft_length = reader.value(json.fft_length, 0); + return $; + } +}; + +$root.mindspore.schema.ROIPooling = class ROIPooling { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ROIPooling(); + $.pooled_h = reader.int64_(position, 4, 0); + $.pooled_w = reader.int64_(position, 6, 0); + $.scale = reader.float32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ROIPooling(); + $.pooled_h = reader.value(json.pooled_h, 0); + $.pooled_w = reader.value(json.pooled_w, 0); + $.scale = reader.value(json.scale, 0); + return $; + } +}; + +$root.mindspore.schema.Round = class Round { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Round(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Round(); + return $; + } +}; + +$root.mindspore.schema.Rsqrt = class Rsqrt { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Rsqrt(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Rsqrt(); + return $; + } +}; + +$root.mindspore.schema.QuantDTypeCast = class QuantDTypeCast { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.QuantDTypeCast(); + $.src_t = reader.int64_(position, 4, 0); + $.dst_t = reader.int64_(position, 6, 0); + $.axis = reader.int64_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.QuantDTypeCast(); + $.src_t = reader.value(json.src_t, 0); + $.dst_t = 
reader.value(json.dst_t, 0); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.mindspore.schema.ScaleFusion = class ScaleFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ScaleFusion(); + $.axis = reader.int64_(position, 4, 0); + $.activation_type = reader.int8_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ScaleFusion(); + $.axis = reader.value(json.axis, 0); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.ScatterNd = class ScatterNd { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.ScatterNd(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.ScatterNd(); + return $; + } +}; + +$root.mindspore.schema.SGD = class SGD { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SGD(); + $.nesterov = reader.bool_(position, 4, false); + $.dampening = reader.float32_(position, 6, 0); + $.weight_decay = reader.float32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.SGD(); + $.nesterov = reader.value(json.nesterov, false); + $.dampening = reader.value(json.dampening, 0); + $.weight_decay = reader.value(json.weight_decay, 0); + return $; + } +}; + +$root.mindspore.schema.Shape = class Shape { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Shape(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Shape(); + return $; + } +}; + +$root.mindspore.schema.SigmoidCrossEntropyWithLogits = class SigmoidCrossEntropyWithLogits { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.SigmoidCrossEntropyWithLogits(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.SigmoidCrossEntropyWithLogits(); + return $; + } +}; + +$root.mindspore.schema.SigmoidCrossEntropyWithLogitsGrad = class SigmoidCrossEntropyWithLogitsGrad { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.SigmoidCrossEntropyWithLogitsGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.SigmoidCrossEntropyWithLogitsGrad(); + return $; + } +}; + +$root.mindspore.schema.Sin = class Sin { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Sin(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Sin(); + return $; + } +}; + +$root.mindspore.schema.SkipGram = class SkipGram { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SkipGram(); + $.include_all_grams = reader.bool_(position, 4, false); + $.max_skip_size = reader.int64_(position, 6, 0); + $.ngram_size = reader.int64_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.SkipGram(); + $.include_all_grams = reader.value(json.include_all_grams, false); + $.max_skip_size = reader.value(json.max_skip_size, 0); + $.ngram_size = reader.value(json.ngram_size, 0); + return $; + } +}; + +$root.mindspore.schema.SliceFusion = class SliceFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SliceFusion(); + $.axes = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = 
new $root.mindspore.schema.SliceFusion(); + $.axes = reader.array(json.axes); + return $; + } +}; + +$root.mindspore.schema.SmoothL1Loss = class SmoothL1Loss { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SmoothL1Loss(); + $.beta = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.SmoothL1Loss(); + $.beta = reader.value(json.beta, 0); + return $; + } +}; + +$root.mindspore.schema.SmoothL1LossGrad = class SmoothL1LossGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SmoothL1LossGrad(); + $.beta = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.SmoothL1LossGrad(); + $.beta = reader.value(json.beta, 0); + return $; + } +}; + +$root.mindspore.schema.Softmax = class Softmax { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Softmax(); + $.axis = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Softmax(); + $.axis = reader.array(json.axis); + return $; + } +}; + +$root.mindspore.schema.SoftmaxCrossEntropyWithLogits = class SoftmaxCrossEntropyWithLogits { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.SoftmaxCrossEntropyWithLogits(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.SoftmaxCrossEntropyWithLogits(); + return $; + } +}; + +$root.mindspore.schema.SpaceToBatch = class SpaceToBatch { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SpaceToBatch(); + $.block_size = reader.int64s_(position, 4); + $.paddings = reader.table(position, 6, $root.mindspore.schema.Vec2D.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.SpaceToBatch(); + $.block_size = reader.array(json.block_size); + $.paddings = reader.object(json.paddings, $root.mindspore.schema.Vec2D.decodeText); + return $; + } +}; + +$root.mindspore.schema.SpaceToBatchND = class SpaceToBatchND { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SpaceToBatchND(); + $.block_shape = reader.int64s_(position, 4); + $.paddings = reader.table(position, 6, $root.mindspore.schema.Vec2D.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.SpaceToBatchND(); + $.block_shape = reader.array(json.block_shape); + $.paddings = reader.object(json.paddings, $root.mindspore.schema.Vec2D.decodeText); + return $; + } +}; + +$root.mindspore.schema.SpaceToDepth = class SpaceToDepth { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SpaceToDepth(); + $.block_size = reader.int64_(position, 4, 0); + $.format = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.SpaceToDepth(); + $.block_size = reader.value(json.block_size, 0); + $.format = $root.mindspore.schema.Format[json.format]; + return $; + } +}; + +$root.mindspore.schema.SparseSoftmaxCrossEntropyWithLogits = class SparseSoftmaxCrossEntropyWithLogits { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SparseSoftmaxCrossEntropyWithLogits(); + $.is_grad = reader.bool_(position, 4, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.SparseSoftmaxCrossEntropyWithLogits(); + $.is_grad = 
reader.value(json.is_grad, false); + return $; + } +}; + +$root.mindspore.schema.SparseToDense = class SparseToDense { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.SparseToDense(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.SparseToDense(); + return $; + } +}; + +$root.mindspore.schema.Split = class Split { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Split(); + $.output_num = reader.int64_(position, 4, 0); + $.size_splits = reader.int64s_(position, 6); + $.axis = reader.int64_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Split(); + $.output_num = reader.value(json.output_num, 0); + $.size_splits = reader.array(json.size_splits); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.mindspore.schema.Sqrt = class Sqrt { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Sqrt(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Sqrt(); + return $; + } +}; + +$root.mindspore.schema.Squeeze = class Squeeze { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Squeeze(); + $.axis = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Squeeze(); + $.axis = reader.array(json.axis); + return $; + } +}; + +$root.mindspore.schema.Square = class Square { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Square(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Square(); + return $; + } +}; + +$root.mindspore.schema.SquaredDifference = class SquaredDifference { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.SquaredDifference(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.SquaredDifference(); + return $; + } +}; + +$root.mindspore.schema.Stack = class Stack { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Stack(); + $.axis = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Stack(); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.mindspore.schema.StridedSlice = class StridedSlice { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.StridedSlice(); + $.begin_mask = reader.int64_(position, 4, 0); + $.end_mask = reader.int64_(position, 6, 0); + $.ellipsis_mask = reader.int64_(position, 8, 0); + $.new_axis_mask = reader.int64_(position, 10, 0); + $.shrink_axis_mask = reader.int64_(position, 12, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.StridedSlice(); + $.begin_mask = reader.value(json.begin_mask, 0); + $.end_mask = reader.value(json.end_mask, 0); + $.ellipsis_mask = reader.value(json.ellipsis_mask, 0); + $.new_axis_mask = reader.value(json.new_axis_mask, 0); + $.shrink_axis_mask = reader.value(json.shrink_axis_mask, 0); + return $; + } +}; + +$root.mindspore.schema.SubFusion = class SubFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SubFusion(); + $.activation_type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.SubFusion(); + $.activation_type = 
$root.mindspore.schema.ActivationType[json.activation_type]; + return $; + } +}; + +$root.mindspore.schema.SubGrad = class SubGrad { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.SubGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.SubGrad(); + return $; + } +}; + +$root.mindspore.schema.Switch = class Switch { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Switch(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Switch(); + return $; + } +}; + +$root.mindspore.schema.TensorListFromTensor = class TensorListFromTensor { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.TensorListFromTensor(); + $.element_dtype = reader.int64_(position, 4, 0); + $.shape_type = reader.int64_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.TensorListFromTensor(); + $.element_dtype = reader.value(json.element_dtype, 0); + $.shape_type = reader.value(json.shape_type, 0); + return $; + } +}; + +$root.mindspore.schema.TensorListGetItem = class TensorListGetItem { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.TensorListGetItem(); + $.element_dtype = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.TensorListGetItem(); + $.element_dtype = reader.value(json.element_dtype, 0); + return $; + } +}; + +$root.mindspore.schema.TensorListReserve = class TensorListReserve { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.TensorListReserve(); + $.element_dtype = reader.int64_(position, 4, 0); + $.shape_type = reader.int64_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.TensorListReserve(); + $.element_dtype = reader.value(json.element_dtype, 0); + $.shape_type = reader.value(json.shape_type, 0); + return $; + } +}; + +$root.mindspore.schema.TensorListSetItem = class TensorListSetItem { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.TensorListSetItem(); + $.element_dtype = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.TensorListSetItem(); + $.element_dtype = reader.value(json.element_dtype, 0); + return $; + } +}; + +$root.mindspore.schema.TensorListStack = class TensorListStack { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.TensorListStack(); + $.num_elements = reader.int64_(position, 4, 0); + $.element_dtype = reader.int64_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.TensorListStack(); + $.num_elements = reader.value(json.num_elements, 0); + $.element_dtype = reader.value(json.element_dtype, 0); + return $; + } +}; + +$root.mindspore.schema.TileFusion = class TileFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.TileFusion(); + $.dims = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.TileFusion(); + $.dims = reader.array(json.dims); + return $; + } +}; + +$root.mindspore.schema.TopKFusion = class TopKFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.TopKFusion(); + $.sorted = reader.bool_(position, 4, true); + $.axis = 
reader.int64_(position, 6, 0); + $.largest = reader.int64_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.TopKFusion(); + $.sorted = reader.value(json.sorted, true); + $.axis = reader.value(json.axis, 0); + $.largest = reader.value(json.largest, 0); + return $; + } +}; + +$root.mindspore.schema.Transpose = class Transpose { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Transpose(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Transpose(); + return $; + } +}; + +$root.mindspore.schema.Unique = class Unique { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Unique(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Unique(); + return $; + } +}; + +$root.mindspore.schema.UnsortedSegmentSum = class UnsortedSegmentSum { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.UnsortedSegmentSum(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.UnsortedSegmentSum(); + return $; + } +}; + +$root.mindspore.schema.Unsqueeze = class Unsqueeze { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Unsqueeze(); + $.axis = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Unsqueeze(); + $.axis = reader.array(json.axis); + return $; + } +}; + +$root.mindspore.schema.Unstack = class Unstack { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Unstack(); + $.axis = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Unstack(); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.mindspore.schema.Where = class Where { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Where(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Where(); + return $; + } +}; + +$root.mindspore.schema.ZerosLike = class ZerosLike { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.ZerosLike(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.ZerosLike(); + return $; + } +}; + +$root.mindspore.schema.Select = class Select { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Select(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Select(); + return $; + } +}; + +$root.mindspore.schema.GRU = class GRU { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.GRU(); + $.bidirectional = reader.bool_(position, 4, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.GRU(); + $.bidirectional = reader.value(json.bidirectional, false); + return $; + } +}; + +$root.mindspore.schema.NonZero = class NonZero { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.NonZero(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.NonZero(); + return $; + } +}; + +$root.mindspore.schema.InvertPermutation = class InvertPermutation { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.InvertPermutation(); + return $; + 
} + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.InvertPermutation(); + return $; + } +}; + +$root.mindspore.schema.Size = class Size { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Size(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Size(); + return $; + } +}; + +$root.mindspore.schema.RandomStandardNormal = class RandomStandardNormal { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.RandomStandardNormal(); + $.seed = reader.int64_(position, 4, 0); + $.seed2 = reader.int64_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.RandomStandardNormal(); + $.seed = reader.value(json.seed, 0); + $.seed2 = reader.value(json.seed2, 0); + return $; + } +}; + +$root.mindspore.schema.CropAndResize = class CropAndResize { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.CropAndResize(); + $.method = reader.int8_(position, 4, 0); + $.extrapolation_value = reader.float32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.CropAndResize(); + $.method = $root.mindspore.schema.ResizeMethod[json.method]; + $.extrapolation_value = reader.value(json.extrapolation_value, 0); + return $; + } +}; + +$root.mindspore.schema.Erf = class Erf { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Erf(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Erf(); + return $; + } +}; + +$root.mindspore.schema.StridedSliceGrad = class StridedSliceGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.StridedSliceGrad(); + $.begin_mask = reader.int64_(position, 4, 0); + $.end_mask = reader.int64_(position, 6, 0); + $.ellipsis_mask = reader.int64_(position, 8, 0); + $.new_axis_mask = reader.int64_(position, 10, 0); + $.shrink_axis_mask = reader.int64_(position, 12, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.StridedSliceGrad(); + $.begin_mask = reader.value(json.begin_mask, 0); + $.end_mask = reader.value(json.end_mask, 0); + $.ellipsis_mask = reader.value(json.ellipsis_mask, 0); + $.new_axis_mask = reader.value(json.new_axis_mask, 0); + $.shrink_axis_mask = reader.value(json.shrink_axis_mask, 0); + return $; + } +}; + +$root.mindspore.schema.IsFinite = class IsFinite { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.IsFinite(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.IsFinite(); + return $; + } +}; + +$root.mindspore.schema.LinSpace = class LinSpace { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.LinSpace(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.LinSpace(); + return $; + } +}; + +$root.mindspore.schema.UniformReal = class UniformReal { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.UniformReal(); + $.seed = reader.int64_(position, 4, 0); + $.seed2 = reader.int64_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.UniformReal(); + $.seed = reader.value(json.seed, 0); + $.seed2 = reader.value(json.seed2, 0); + return $; + } +}; + +$root.mindspore.schema.AbsGrad = class AbsGrad { + + static 
decode(/* reader, position */) { + const $ = new $root.mindspore.schema.AbsGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.AbsGrad(); + return $; + } +}; + +$root.mindspore.schema.RsqrtGrad = class RsqrtGrad { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.RsqrtGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.RsqrtGrad(); + return $; + } +}; + +$root.mindspore.schema.SqrtGrad = class SqrtGrad { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.SqrtGrad(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.SqrtGrad(); + return $; + } +}; + +$root.mindspore.schema.LayerNormGrad = class LayerNormGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.LayerNormGrad(); + $.begin_norm_axis = reader.int64_(position, 4, 0); + $.begin_params_axis = reader.int64_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.LayerNormGrad(); + $.begin_norm_axis = reader.value(json.begin_norm_axis, 0); + $.begin_params_axis = reader.value(json.begin_params_axis, 0); + return $; + } +}; + +$root.mindspore.schema.ResizeGrad = class ResizeGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ResizeGrad(); + $.method = reader.int8_(position, 4, 0); + $.align_corners = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ResizeGrad(); + $.method = $root.mindspore.schema.ResizeMethod[json.method]; + $.align_corners = reader.value(json.align_corners, false); + return $; + } +}; + +$root.mindspore.schema.Splice = class Splice { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Splice(); + $.context = reader.int64s_(position, 4); + $.forward_indexes = reader.int64s_(position, 6); + $.output_dim = reader.int64_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Splice(); + $.context = reader.array(json.context); + $.forward_indexes = reader.array(json.forward_indexes); + $.output_dim = reader.value(json.output_dim, 0); + return $; + } +}; + +$root.mindspore.schema.LogSoftmax = class LogSoftmax { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.LogSoftmax(); + $.axis = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.LogSoftmax(); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.mindspore.schema.Call = class Call { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Call(); + $.is_tail_call = reader.bool_(position, 4, true); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Call(); + $.is_tail_call = reader.value(json.is_tail_call, true); + return $; + } +}; + +$root.mindspore.schema.CumSum = class CumSum { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.CumSum(); + $.exclusive = reader.bool_(position, 4, false); + $.reverse = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.CumSum(); + $.exclusive = reader.value(json.exclusive, false); + $.reverse = reader.value(json.reverse, false); + return $; + } +}; + 
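+
+// Illustrative note (assumed, not generated code): on the JSON path enums arrive as names,
+// so decodeText maps them through the generated enum tables (ActivationType, ReduceMode,
+// Format, ...) to the same numeric values the binary path reads via int8_/int32_. A
+// self-contained example of that mapping; the enum values here are invented for the sketch
+// rather than copied from the schema:
+const exampleActivationType = { NO_ACTIVATION: 0, RELU: 1, SIGMOID: 2 };
+const exampleJson = { exclusive: false, activation_type: 'RELU' };
+// Name -> number, mirroring what reader.int8_(position, slot, 0) yields on the binary
+// path. Note that an unrecognized name produces undefined, not the schema default.
+const exampleActivation = exampleActivationType[exampleJson.activation_type]; // 1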
+$root.mindspore.schema.Custom = class Custom { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Custom(); + $.type = reader.string_(position, 4, null); + $.attr = reader.tableArray(position, 6, $root.mindspore.schema.Attribute.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Custom(); + $.type = reader.value(json.type, null); + $.attr = reader.objectArray(json.attr, $root.mindspore.schema.Attribute.decodeText); + return $; + } +}; + +$root.mindspore.schema.SplitWithOverlap = class SplitWithOverlap { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SplitWithOverlap(); + $.split_dim = reader.int64_(position, 4, 0); + $.number_split = reader.int64_(position, 6, 0); + $.ratio = reader.int64s_(position, 8); + $.extend_top = reader.int64s_(position, 10); + $.extend_bottom = reader.int64s_(position, 12); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.SplitWithOverlap(); + $.split_dim = reader.value(json.split_dim, 0); + $.number_split = reader.value(json.number_split, 0); + $.ratio = reader.array(json.ratio); + $.extend_top = reader.array(json.extend_top); + $.extend_bottom = reader.array(json.extend_bottom); + return $; + } +}; + +$root.mindspore.schema.GenOP = class GenOP { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.GenOP(); + $.activation_type = reader.int8_(position, 4, 0); + $.alpha = reader.float32_(position, 6, 0); + $.min_val = reader.float32_(position, 8, 0); + $.max_val = reader.float32_(position, 10, 0); + $.is_training = reader.bool_(position, 12, false); + $.format = reader.int32_(position, 14, 0); + $.kernel_size = reader.int64s_(position, 16); + $.stride = reader.int64s_(position, 18); + $.dilation = reader.int64s_(position, 20); + $.pad_mode = reader.int8_(position, 22, 0); + $.pad_list = reader.int64s_(position, 24); + $.mode = reader.int64_(position, 26, 0); + $.group = reader.int64_(position, 28, 0); + $.in_channel = reader.int64_(position, 30, 0); + $.out_channel = reader.int64_(position, 32, 0); + $.eltwise_mode = reader.int8_(position, 34, 0); + $.has_bias = reader.bool_(position, 36, false); + $.use_axis = reader.bool_(position, 38, false); + $.axis = reader.int64_(position, 40, 0); + $.epsilon = reader.float32_(position, 42, 0.0001); + $.momentum = reader.float32_(position, 44, 0.9); + $.transpose_a = reader.bool_(position, 46, false); + $.transpose_b = reader.bool_(position, 48, false); + $.pad = reader.int64s_(position, 50); + $.round_mode = reader.int8_(position, 52, 0); + $.global = reader.bool_(position, 54, false); + $.channel_shared = reader.bool_(position, 56, false); + $.axes = reader.int64s_(position, 58); + $.keep_dims = reader.bool_(position, 60, false); + $.reduce_mode = reader.int8_(position, 62, 0); + $.reduce_to_end = reader.bool_(position, 64, false); + $.coeff = reader.float32_(position, 66, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.GenOP(); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + $.alpha = reader.value(json.alpha, 0); + $.min_val = reader.value(json.min_val, 0); + $.max_val = reader.value(json.max_val, 0); + $.is_training = reader.value(json.is_training, false); + $.format = $root.mindspore.schema.Format[json.format]; + $.kernel_size = reader.array(json.kernel_size); + $.stride = reader.array(json.stride); + $.dilation = reader.array(json.dilation); + $.pad_mode = 
$root.mindspore.schema.PadMode[json.pad_mode]; + $.pad_list = reader.array(json.pad_list); + $.mode = reader.value(json.mode, 0); + $.group = reader.value(json.group, 0); + $.in_channel = reader.value(json.in_channel, 0); + $.out_channel = reader.value(json.out_channel, 0); + $.eltwise_mode = $root.mindspore.schema.EltwiseMode[json.eltwise_mode]; + $.has_bias = reader.value(json.has_bias, false); + $.use_axis = reader.value(json.use_axis, false); + $.axis = reader.value(json.axis, 0); + $.epsilon = reader.value(json.epsilon, 0.0001); + $.momentum = reader.value(json.momentum, 0.9); + $.transpose_a = reader.value(json.transpose_a, false); + $.transpose_b = reader.value(json.transpose_b, false); + $.pad = reader.array(json.pad); + $.round_mode = $root.mindspore.schema.RoundMode[json.round_mode]; + $.global = reader.value(json.global, false); + $.channel_shared = reader.value(json.channel_shared, false); + $.axes = reader.array(json.axes); + $.keep_dims = reader.value(json.keep_dims, false); + $.reduce_mode = $root.mindspore.schema.ReduceMode[json.reduce_mode]; + $.reduce_to_end = reader.value(json.reduce_to_end, false); + $.coeff = reader.value(json.coeff, 0); + return $; + } +}; + +$root.mindspore.schema.RaggedRange = class RaggedRange { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.RaggedRange(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.RaggedRange(); + return $; + } +}; + +$root.mindspore.schema.GLU = class GLU { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.GLU(); + $.axis = reader.int64_(position, 4, -1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.GLU(); + $.axis = reader.value(json.axis, -1); + return $; + } +}; + +$root.mindspore.schema.TensorArray = class TensorArray { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.TensorArray(); + $.dynamic_size = reader.bool_(position, 4, false); + $.identical_element_shapes = reader.bool_(position, 6, false); + $.element_shape = reader.typedArray(position, 8, Int32Array); + $.data_type = reader.int32_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.TensorArray(); + $.dynamic_size = reader.value(json.dynamic_size, false); + $.identical_element_shapes = reader.value(json.identical_element_shapes, false); + $.element_shape = reader.typedArray(json.element_shape, Int32Array); + $.data_type = reader.value(json.data_type, 0); + return $; + } +}; + +$root.mindspore.schema.TensorArrayRead = class TensorArrayRead { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.TensorArrayRead(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.TensorArrayRead(); + return $; + } +}; + +$root.mindspore.schema.TensorArrayWrite = class TensorArrayWrite { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.TensorArrayWrite(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.TensorArrayWrite(); + return $; + } +}; + +$root.mindspore.schema.Affine = class Affine { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Affine(); + $.context = reader.int64s_(position, 4); + $.output_dim = reader.int64_(position, 6, 0); + $.activation_type = reader.int8_(position, 8, 0); + $.transpose_a = reader.bool_(position, 10, false); + 
$.transpose_b = reader.bool_(position, 12, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Affine(); + $.context = reader.array(json.context); + $.output_dim = reader.value(json.output_dim, 0); + $.activation_type = $root.mindspore.schema.ActivationType[json.activation_type]; + $.transpose_a = reader.value(json.transpose_a, false); + $.transpose_b = reader.value(json.transpose_b, false); + return $; + } +}; + +$root.mindspore.schema.ScatterNdUpdate = class ScatterNdUpdate { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.ScatterNdUpdate(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.ScatterNdUpdate(); + return $; + } +}; + +$root.mindspore.schema.AllGather = class AllGather { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.AllGather(); + $.group = reader.string_(position, 4, null); + $.rank_size = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.AllGather(); + $.group = reader.value(json.group, null); + $.rank_size = reader.value(json.rank_size, 0); + return $; + } +}; + +$root.mindspore.schema.ReduceScatter = class ReduceScatter { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ReduceScatter(); + $.group = reader.string_(position, 4, null); + $.mode = reader.int8_(position, 6, 0); + $.rank_size = reader.int32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ReduceScatter(); + $.group = reader.value(json.group, null); + $.mode = $root.mindspore.schema.ReduceMode[json.mode]; + $.rank_size = reader.value(json.rank_size, 0); + return $; + } +}; + +$root.mindspore.schema.DynamicQuant = class DynamicQuant { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.DynamicQuant(); + $.symmetric = reader.bool_(position, 4, false); + $.dst_type = reader.int64_(position, 6, 32); + $.activation_channel = reader.bool_(position, 8, false); + $.prefer_axis = reader.int64_(position, 10, 0); + $.transpose = reader.bool_(position, 12, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.DynamicQuant(); + $.symmetric = reader.value(json.symmetric, false); + $.dst_type = reader.value(json.dst_type, 32); + $.activation_channel = reader.value(json.activation_channel, false); + $.prefer_axis = reader.value(json.prefer_axis, 0); + $.transpose = reader.value(json.transpose, false); + return $; + } +}; + +$root.mindspore.schema.LSTMGradData = class LSTMGradData { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.LSTMGradData(); + $.bidirectional = reader.bool_(position, 4, false); + $.has_bias = reader.bool_(position, 6, false); + $.input_size = reader.int64_(position, 8, 0); + $.hidden_size = reader.int64_(position, 10, 0); + $.num_layers = reader.int64_(position, 12, 0); + $.num_directions = reader.int64_(position, 14, 0); + $.dropout = reader.float32_(position, 16, 0); + $.zoneout_cell = reader.float32_(position, 18, 0); + $.zoneout_hidden = reader.float32_(position, 20, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.LSTMGradData(); + $.bidirectional = reader.value(json.bidirectional, false); + $.has_bias = reader.value(json.has_bias, false); + $.input_size = reader.value(json.input_size, 0); + $.hidden_size = 
reader.value(json.hidden_size, 0); + $.num_layers = reader.value(json.num_layers, 0); + $.num_directions = reader.value(json.num_directions, 0); + $.dropout = reader.value(json.dropout, 0); + $.zoneout_cell = reader.value(json.zoneout_cell, 0); + $.zoneout_hidden = reader.value(json.zoneout_hidden, 0); + return $; + } +}; + +$root.mindspore.schema.LSTMGradWeight = class LSTMGradWeight { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.LSTMGradWeight(); + $.bidirectional = reader.bool_(position, 4, false); + $.has_bias = reader.bool_(position, 6, false); + $.input_size = reader.int64_(position, 8, 0); + $.hidden_size = reader.int64_(position, 10, 0); + $.num_layers = reader.int64_(position, 12, 0); + $.num_directions = reader.int64_(position, 14, 0); + $.dropout = reader.float32_(position, 16, 0); + $.zoneout_cell = reader.float32_(position, 18, 0); + $.zoneout_hidden = reader.float32_(position, 20, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.LSTMGradWeight(); + $.bidirectional = reader.value(json.bidirectional, false); + $.has_bias = reader.value(json.has_bias, false); + $.input_size = reader.value(json.input_size, 0); + $.hidden_size = reader.value(json.hidden_size, 0); + $.num_layers = reader.value(json.num_layers, 0); + $.num_directions = reader.value(json.num_directions, 0); + $.dropout = reader.value(json.dropout, 0); + $.zoneout_cell = reader.value(json.zoneout_cell, 0); + $.zoneout_hidden = reader.value(json.zoneout_hidden, 0); + return $; + } +}; + +$root.mindspore.schema.RandomNormal = class RandomNormal { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.RandomNormal(); + $.seed = reader.float32_(position, 4, 0); + $.mean = reader.float32_(position, 6, 0); + $.scale = reader.float32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.RandomNormal(); + $.seed = reader.value(json.seed, 0); + $.mean = reader.value(json.mean, 0); + $.scale = reader.value(json.scale, 0); + return $; + } +}; + +$root.mindspore.schema.NLLLoss = class NLLLoss { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.NLLLoss(); + $.reduction = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.NLLLoss(); + $.reduction = $root.mindspore.schema.Reduction[json.reduction]; + return $; + } +}; + +$root.mindspore.schema.NLLLossGrad = class NLLLossGrad { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.NLLLossGrad(); + $.reduction = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.NLLLossGrad(); + $.reduction = $root.mindspore.schema.Reduction[json.reduction]; + return $; + } +}; + +$root.mindspore.schema.FormatTranspose = class FormatTranspose { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.FormatTranspose(); + $.src_format = reader.int32_(position, 4, 1); + $.dst_format = reader.int32_(position, 6, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.FormatTranspose(); + $.src_format = $root.mindspore.schema.Format[json.src_format]; + $.dst_format = $root.mindspore.schema.Format[json.dst_format]; + return $; + } +}; + +$root.mindspore.schema.GatherD = class GatherD { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.GatherD(); + return $; + } + + 
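// Operators without attributes still get the full decoder pair: decode() + // reads the binary FlatBuffers table and decodeText() its JSON rendering, + // so both paths yield the same (empty) schema object. + 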
static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.GatherD(); + return $; + } +}; + +$root.mindspore.schema.GroupNormFusion = class GroupNormFusion { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.GroupNormFusion(); + $.num_groups = reader.int64_(position, 4, 0); + $.epsilon = reader.float32_(position, 6, 0.00001); + $.affine = reader.bool_(position, 8, true); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.GroupNormFusion(); + $.num_groups = reader.value(json.num_groups, 0); + $.epsilon = reader.value(json.epsilon, 0.00001); + $.affine = reader.value(json.affine, true); + return $; + } +}; + +$root.mindspore.schema.Log1p = class Log1p { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Log1p(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Log1p(); + return $; + } +}; + +$root.mindspore.schema.TensorScatterAdd = class TensorScatterAdd { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.TensorScatterAdd(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.TensorScatterAdd(); + return $; + } +}; + +$root.mindspore.schema.SparseFillEmptyRows = class SparseFillEmptyRows { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.SparseFillEmptyRows(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.SparseFillEmptyRows(); + return $; + } +}; + +$root.mindspore.schema.SparseReshape = class SparseReshape { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.SparseReshape(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.SparseReshape(); + return $; + } +}; + +$root.mindspore.schema.SparseSegmentSum = class SparseSegmentSum { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.SparseSegmentSum(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.SparseSegmentSum(); + return $; + } +}; + +$root.mindspore.schema.ScatterElements = class ScatterElements { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ScatterElements(); + $.axis = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ScatterElements(); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.mindspore.schema.Triu = class Triu { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Triu(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Triu(); + return $; + } +}; + +$root.mindspore.schema.Tril = class Tril { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.Tril(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.Tril(); + return $; + } +}; + +$root.mindspore.schema.AdamWeightDecay = class AdamWeightDecay { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.AdamWeightDecay(); + $.use_locking = reader.bool_(position, 4, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.AdamWeightDecay(); + $.use_locking = reader.value(json.use_locking, false); + return $; + } +}; + 
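+// A minimal sketch of how these generated decoders are driven, assuming
+// the flatbuffers.BinaryReader API used by mslite.js below (variable
+// names are illustrative):
+//
+//   const reader = flatbuffers.BinaryReader.open(buffer);
+//   if ($root.mindspore.schema.MetaGraph.identifier(reader)) {
+//       const graph = $root.mindspore.schema.MetaGraph.create(reader);
+//       // graph.nodes[i].primitive.value is one of the tables above,
+//       // e.g. an AdamWeightDecay instance carrying its use_locking flag.
+//   }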
+$root.mindspore.schema.FillV2 = class FillV2 { + + static decode(/* reader, position */) { + const $ = new $root.mindspore.schema.FillV2(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.mindspore.schema.FillV2(); + return $; + } +}; + +$root.mindspore.schema.QuantParam = class QuantParam { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.QuantParam(); + $.scale = reader.float64_(position, 4, 1); + $.zeroPoint = reader.int32_(position, 6, 0); + $.min = reader.float64_(position, 8, 0); + $.max = reader.float64_(position, 10, 0); + $.narrowRange = reader.bool_(position, 12, true); + $.numBits = reader.int32_(position, 14, 8); + $.inited = reader.bool_(position, 16, false); + $.varCorr = reader.float32_(position, 18, 1); + $.meanCorr = reader.float32_(position, 20, 0); + $.dstDtype = reader.int32_(position, 22, 32); + $.roundType = reader.int32_(position, 24, 1); + $.multiplier = reader.int32_(position, 26, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.QuantParam(); + $.scale = reader.value(json.scale, 1); + $.zeroPoint = reader.value(json.zeroPoint, 0); + $.min = reader.value(json.min, 0); + $.max = reader.value(json.max, 0); + $.narrowRange = reader.value(json.narrowRange, true); + $.numBits = reader.value(json.numBits, 8); + $.inited = reader.value(json.inited, false); + $.varCorr = reader.value(json.varCorr, 1); + $.meanCorr = reader.value(json.meanCorr, 0); + $.dstDtype = reader.value(json.dstDtype, 32); + $.roundType = reader.value(json.roundType, 1); + $.multiplier = reader.value(json.multiplier, 1); + return $; + } +}; + +$root.mindspore.schema.WeightQuantCompressType = { + NONE: 0, + INDEXING: 1, + SPARSE: 2, + FSE: 3, + BITPACKING: 4, + FSE_INT: 5, + FSE_INFER: 6 +}; + +$root.mindspore.schema.ExternalData = class ExternalData { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.ExternalData(); + $.checkSum = reader.string_(position, 4, null); + $.location = reader.string_(position, 6, null); + $.offset = reader.int64_(position, 8, 0); + $.length = reader.int64_(position, 10, -1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.ExternalData(); + $.checkSum = reader.value(json.checkSum, null); + $.location = reader.value(json.location, null); + $.offset = reader.value(json.offset, 0); + $.length = reader.value(json.length, -1); + return $; + } +}; + +$root.mindspore.schema.Tensor = class Tensor { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Tensor(); + $.nodeType = reader.int32_(position, 4, 0); + $.dataType = reader.int32_(position, 6, 0); + $.dims = reader.typedArray(position, 8, Int32Array); + $.format = reader.int32_(position, 10, 0); + $.refCount = reader.int32_(position, 12, 0); + $.offset = reader.int32_(position, 14, 0); + $.data = reader.typedArray(position, 16, Uint8Array); + $.quantParams = reader.tableArray(position, 18, $root.mindspore.schema.QuantParam.decode); + $.quantClusters = reader.typedArray(position, 20, Float32Array); + $.name = reader.string_(position, 22, null); + $.enableHuffmanCode = reader.bool_(position, 24, false); + $.weightQuantCompressType = reader.int32_(position, 26, 0); + $.externalData = reader.tableArray(position, 28, $root.mindspore.schema.ExternalData.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Tensor(); + $.nodeType = reader.value(json.nodeType, 0); + $.dataType = 
reader.value(json.dataType, 0); + $.dims = reader.typedArray(json.dims, Int32Array); + $.format = $root.mindspore.schema.Format[json.format]; + $.refCount = reader.value(json.refCount, 0); + $.offset = reader.value(json.offset, 0); + $.data = reader.typedArray(json.data, Uint8Array); + $.quantParams = reader.objectArray(json.quantParams, $root.mindspore.schema.QuantParam.decodeText); + $.quantClusters = reader.typedArray(json.quantClusters, Float32Array); + $.name = reader.value(json.name, null); + $.enableHuffmanCode = reader.value(json.enableHuffmanCode, false); + $.weightQuantCompressType = $root.mindspore.schema.WeightQuantCompressType[json.weightQuantCompressType]; + $.externalData = reader.objectArray(json.externalData, $root.mindspore.schema.ExternalData.decodeText); + return $; + } +}; + +$root.mindspore.schema.QuantType = { + QUANT_NONE: 0, + AwareTraining: 1, + WeightQuant: 2, + PostTraining: 3, + QUANT_WEIGHT: 4, + QUANT_ALL: 5, + QUANT_DYNAMIC: 6 +}; + +$root.mindspore.schema.Primitive = class Primitive { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.Primitive(); + $.value = reader.union(position, 4, $root.mindspore.schema.PrimitiveType.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.Primitive(); + $.value = $root.mindspore.schema.PrimitiveType.decodeText(reader, json.value, json.value_type); + return $; + } +}; + +$root.mindspore.schema.CNode = class CNode { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.CNode(); + $.name = reader.string_(position, 4, null); + $.nodeType = reader.int32_(position, 6, 0); + $.primitive = reader.table(position, 8, $root.mindspore.schema.Primitive.decode); + $.inputIndex = reader.typedArray(position, 10, Uint32Array); + $.outputIndex = reader.typedArray(position, 12, Uint32Array); + $.quantType = reader.int32_(position, 14, 0); + $.deviceType = reader.int32_(position, 16, -1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.CNode(); + $.name = reader.value(json.name, null); + $.nodeType = reader.value(json.nodeType, 0); + $.primitive = reader.object(json.primitive, $root.mindspore.schema.Primitive.decodeText); + $.inputIndex = reader.typedArray(json.inputIndex, Uint32Array); + $.outputIndex = reader.typedArray(json.outputIndex, Uint32Array); + $.quantType = $root.mindspore.schema.QuantType[json.quantType]; + $.deviceType = reader.value(json.deviceType, -1); + return $; + } +}; + +$root.mindspore.schema.SubGraph = class SubGraph { + + static decode(reader, position) { + const $ = new $root.mindspore.schema.SubGraph(); + $.name = reader.string_(position, 4, null); + $.inputIndices = reader.typedArray(position, 6, Uint32Array); + $.outputIndices = reader.typedArray(position, 8, Uint32Array); + $.nodeIndices = reader.typedArray(position, 10, Uint32Array); + $.tensorIndices = reader.typedArray(position, 12, Uint32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.SubGraph(); + $.name = reader.value(json.name, null); + $.inputIndices = reader.typedArray(json.inputIndices, Uint32Array); + $.outputIndices = reader.typedArray(json.outputIndices, Uint32Array); + $.nodeIndices = reader.typedArray(json.nodeIndices, Uint32Array); + $.tensorIndices = reader.typedArray(json.tensorIndices, Uint32Array); + return $; + } +}; + +$root.mindspore.schema.MetaGraph = class MetaGraph { + + static identifier(reader) { + return reader.identifier === 'MSL2'; + } + + 
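// 'MSL2' is the FlatBuffers file_identifier stored at bytes 4-7 of the + // buffer, immediately after the root table offset; mslite.js verifies it + // before create() hands the root position to decode(). + 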
static create(reader) { + return $root.mindspore.schema.MetaGraph.decode(reader, reader.root); + } + + static createText(reader) { + return $root.mindspore.schema.MetaGraph.decodeText(reader, reader.root); + } + + static decode(reader, position) { + const $ = new $root.mindspore.schema.MetaGraph(); + $.name = reader.string_(position, 4, null); + $.version = reader.string_(position, 6, null); + $.fmkType = reader.int32_(position, 8, 0); + $.inputIndex = reader.typedArray(position, 10, Uint32Array); + $.outputIndex = reader.typedArray(position, 12, Uint32Array); + $.mempoolSize = reader.uint32_(position, 14, 0); + $.nodes = reader.tableArray(position, 16, $root.mindspore.schema.CNode.decode); + $.allTensors = reader.tableArray(position, 18, $root.mindspore.schema.Tensor.decode); + $.subGraph = reader.tableArray(position, 20, $root.mindspore.schema.SubGraph.decode); + $.obfuscate = reader.bool_(position, 22, false); + $.obfMetaData = reader.typedArray(position, 24, Uint8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.mindspore.schema.MetaGraph(); + $.name = reader.value(json.name, null); + $.version = reader.value(json.version, null); + $.fmkType = reader.value(json.fmkType, 0); + $.inputIndex = reader.typedArray(json.inputIndex, Uint32Array); + $.outputIndex = reader.typedArray(json.outputIndex, Uint32Array); + $.mempoolSize = reader.value(json.mempoolSize, 0); + $.nodes = reader.objectArray(json.nodes, $root.mindspore.schema.CNode.decodeText); + $.allTensors = reader.objectArray(json.allTensors, $root.mindspore.schema.Tensor.decodeText); + $.subGraph = reader.objectArray(json.subGraph, $root.mindspore.schema.SubGraph.decodeText); + $.obfuscate = reader.value(json.obfuscate, false); + $.obfMetaData = reader.typedArray(json.obfMetaData, Uint8Array); + return $; + } +}; diff --git a/mslite.js b/mslite.js new file mode 100644 index 00000000000..946fa2a138f --- /dev/null +++ b/mslite.js @@ -0,0 +1,354 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const mslite = {}; + +mslite.ModelFactory = class { + + match(context) { + const stream = context.stream; + if (stream && stream.length >= 8) { + const buffer = stream.peek(8); + const reader = flatbuffers.BinaryReader.open(buffer); + if (reader.identifier === '' || reader.identifier === 'MSL1' || reader.identifier === 'MSL2') { + return 'mslite'; + } + } + return ''; + } + + async open(context) { + await context.require('./mslite-schema'); + const stream = context.stream; + const reader = flatbuffers.BinaryReader.open(stream); + switch (reader.identifier) { + case '': { + throw new mslite.Error('MSL0 format is deprecated.'); + } + case 'MSL1': { + throw new mslite.Error('MSL1 format is deprecated.'); + } + case 'MSL2': + break; + default: + throw new mslite.Error(`Unsupported file identifier '${reader.identifier}'.`); + } + let model = null; + try { + mslite.schema = flatbuffers.get('mslite').mindspore.schema; + model = mslite.schema.MetaGraph.create(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new mslite.Error(`File format is not mslite.MetaGraph (${message.replace(/\.$/, '')}).`); + } + const metadata = await context.metadata('mslite-metadata.json'); + return new mslite.Model(metadata, model); + } +}; + +mslite.Model = class { + + constructor(metadata, model) { + this.name = model.name || ''; + this.graphs = []; + const version = model.version ? model.version.match(/^.*(\d\.\d\.\d)$/) : null; + this.format = `MindSpore Lite${version ? 
` v${version[1]}` : ''}`; + const subgraphs = model.subGraph; + if (!Array.isArray(subgraphs)) { + this.graphs.push(new mslite.Graph(metadata, model, model)); + } else { + for (const subgraph of subgraphs) { + this.graphs.push(new mslite.Graph(metadata, subgraph, model)); + } + } + } +}; + +mslite.Graph = class { + + constructor(metadata, subgraph, model) { + this.name = subgraph.name || ''; + this.inputs = []; + this.outputs = []; + this.nodes = []; + const values = model.allTensors.map((tensor, index) => { + const name = tensor.name || index.toString(); + const data = tensor.data; + const type = new mslite.TensorType(tensor.dataType, tensor.dims); + const initializer = (data && data.length > 0) ? new mslite.Tensor(type, tensor.data) : null; + return new mslite.Value(name, tensor, initializer); + }); + if (subgraph === model) { + for (let i = 0; i < subgraph.inputIndex.length; i++) { + const index = subgraph.inputIndex[i]; + this.inputs.push(new mslite.Argument(i.toString(), [ values[index] ])); + } + for (let i = 0; i < subgraph.outputIndex.length; i++) { + const index = subgraph.outputIndex[i]; + this.outputs.push(new mslite.Argument(i.toString(), [ values[index] ])); + } + for (let i = 0; i < subgraph.nodes.length; i++) { + this.nodes.push(new mslite.Node(metadata, subgraph.nodes[i], values)); + } + } else { + for (let i = 0; i < subgraph.inputIndices.length; i++) { + const index = subgraph.inputIndices[i]; + this.inputs.push(new mslite.Argument(i.toString(), [ values[index] ])); + } + for (let i = 0; i < subgraph.outputIndices.length; i++) { + const index = subgraph.outputIndices[i]; + this.outputs.push(new mslite.Argument(i.toString(), [ values[index] ])); + } + for (const name of subgraph.nodeIndices) { + const node = new mslite.Node(metadata, model.nodes[name], values); + this.nodes.push(node); + } + } + } +}; + +mslite.Node = class { + + constructor(metadata, op, values) { + this.name = op.name || ''; + this.type = { name: '?' }; + this.attributes = []; + this.inputs = []; + this.outputs = []; + const data = op.primitive.value; + if (data && data.constructor) { + const type = data.constructor.name; + this.type = metadata.type(type); + this.attributes = Object.keys(data).map((key) => new mslite.Attribute(metadata.attribute(type, key), key.toString(), data[key])); + } + + const input_num = op.inputIndex.length; + let i = 0; + if (this.type && this.type.inputs) { + for (const input of this.type.inputs) { + if (i >= input_num) { + break; + } + const index = op.inputIndex[i]; + this.inputs.push(new mslite.Argument(input.name, [ values[index] ])); + i += 1; + } + } + for (let j = i; j < input_num; j++) { + const index = op.inputIndex[j]; + this.inputs.push(new mslite.Argument(j.toString(), [ values[index] ])); + } + + const output_num = op.outputIndex.length; + i = 0; + if (this.type && this.type.outputs) { + for (const output of this.type.outputs) { + if (i >= output_num) { + break; + } + const index = op.outputIndex[i]; + const argument = new mslite.Argument(output.name, [ values[index] ]); + this.outputs.push(argument); + i += 1; + } + } + for (let j = i; j < output_num; j++) { + const index = op.outputIndex[j]; + const argument = new mslite.Argument(j.toString(), [ values[index] ]); + this.outputs.push(argument); + } + } +}; + +mslite.Attribute = class { + + constructor(metadata, name, value) { + this.type = null; + this.name = name; + this.visible = false; + this.value = ArrayBuffer.isView(value) ? 
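/* typed arrays from the FlatBuffers reader are copied to plain arrays so attribute values compare and render uniformly */ 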
Array.from(value) : value; + if (metadata && metadata.type) { + this.type = metadata.type; + if (this.type) { + this.value = mslite.Utility.enum(this.type, this.value); + } + } + } +}; + +mslite.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +mslite.Value = class { + + constructor(name, tensor, initializer) { + this.name = name; + this.type = initializer ? initializer.type : new mslite.TensorType(tensor.dataType, tensor.dims); + this.initializer = initializer || null; + if (tensor.quantParams) { + const list = []; + for (let i = 0; i < tensor.quantParams.length; i++) { + const param = tensor.quantParams[i]; + if (param.scale !== 0 || param.zeroPoint !== 0) { + const scale = param.scale; + const zeroPoint = param.zeroPoint; + let quantization = ''; + if (scale !== 1) { + quantization += `${scale} * `; + } + if (zeroPoint === 0) { + quantization += 'q'; + } else if (zeroPoint < 0) { + quantization += `(q + ${-zeroPoint})`; + } else if (zeroPoint > 0) { + quantization += `(q - ${zeroPoint})`; + } + list.push(quantization); + } + } + if (list.length > 0 && !list.every((value) => value === 'q')) { + this.quantization = list.length === 1 ? list[0] : list; + } + } + } +}; + +mslite.Tensor = class { + + constructor(type, data) { + this.type = type; + this.encoding = type.dataType === 'string' ? '|' : '<'; + this._data = data || null; + } + + get values() { + switch (this.type.dataType) { + case 'string': { + let offset = 0; + const data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + const count = data.getInt32(0, true); + offset += 4; + const offsetTable = []; + for (let j = 0; j < count; j++) { + offsetTable.push(data.getInt32(offset, true)); + offset += 4; + } + offsetTable.push(this._data.length); + const stringTable = []; + const utf8Decoder = new TextDecoder('utf-8'); + for (let k = 0; k < count; k++) { + const textArray = this._data.subarray(offsetTable[k], offsetTable[k + 1]); + stringTable.push(utf8Decoder.decode(textArray)); + } + return stringTable; + } + default: return this._data; + } + } +}; + +mslite.TensorType = class { + + constructor(dataType, dimensions) { + switch (dataType) { + case 0: this.dataType = "?"; break; + case 1: this.dataType = "type"; break; + case 2: this.dataType = "any"; break; + case 3: this.dataType = "object"; break; + case 4: this.dataType = "typetype"; break; + case 5: this.dataType = "problem"; break; + case 6: this.dataType = "external"; break; + case 7: this.dataType = "none"; break; + case 8: this.dataType = "null"; break; + case 9: this.dataType = "ellipsis"; break; + case 11: this.dataType = "number"; break; + case 12: this.dataType = "string"; break; + case 13: this.dataType = "list"; break; + case 14: this.dataType = "tuple"; break; + case 15: this.dataType = "slice"; break; + case 16: this.dataType = "keyword"; break; + case 17: this.dataType = "tensortype"; break; + case 18: this.dataType = "rowtensortype"; break; + case 19: this.dataType = "sparsetensortype"; break; + case 20: this.dataType = "undeterminedtype"; break; + case 21: this.dataType = "class"; break; + case 22: this.dataType = "dictionary"; break; + case 23: this.dataType = "function"; break; + case 24: this.dataType = "jtagged"; break; + case 25: this.dataType = "symbolickeytype"; break; + case 26: this.dataType = "envtype"; break; + case 27: this.dataType = "refkey"; break; + case 28: this.dataType = "ref"; break; + case 30: this.dataType = "boolean"; break; + case 31: this.dataType = "int"; 
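/* codes 30-45 below match MindSpore's kNumberType* TypeId values */ 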
break; + case 32: this.dataType = "int8"; break; + case 33: this.dataType = "int16"; break; + case 34: this.dataType = "int32"; break; + case 35: this.dataType = "int64"; break; + case 36: this.dataType = "uint"; break; + case 37: this.dataType = "uint8"; break; + case 38: this.dataType = "uint16"; break; + case 39: this.dataType = "uint32"; break; + case 40: this.dataType = "uint64"; break; + case 41: this.dataType = "float"; break; + case 42: this.dataType = "float16"; break; + case 43: this.dataType = "float32"; break; + case 44: this.dataType = "float64"; break; + case 45: this.dataType = "complex64"; break; + default: throw new mslite.Error(`Unsupported data type '${dataType}'.`); + } + this.shape = new mslite.TensorShape(Array.from(dimensions)); + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +mslite.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + if (this.dimensions && this.dimensions.length > 0) { + return `[${this.dimensions.map((dimension) => dimension ? dimension.toString() : '?').join(',')}]`; + } + return ''; + } +}; + +mslite.Utility = class { + + static enum(name, value) { + const type = name && mslite.schema ? mslite.schema[name] : undefined; + if (type) { + mslite.Utility._enumKeyMap = mslite.Utility._enumKeyMap || new Map(); + if (!mslite.Utility._enumKeyMap.has(name)) { + const entries = new Map(Object.entries(type).map(([key, value]) => [ value, key ])); + mslite.Utility._enumKeyMap.set(name, entries); + } + const map = mslite.Utility._enumKeyMap.get(name); + if (map.has(value)) { + return map.get(value); + } + } + return value; + } +}; + +mslite.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading MindSpore Lite model.'; + } +}; + +export const ModelFactory = mslite.ModelFactory; diff --git a/mxnet-metadata.json b/mxnet-metadata.json new file mode 100644 index 00000000000..13b499b379a --- /dev/null +++ b/mxnet-metadata.json @@ -0,0 +1,587 @@ +[ + { + "name": "_copy", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "_linalg_gemm2", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "_minus_scalar", + "attributes": [ + { "name": "scalar", "type": "float32" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "_minus_scalar", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "_mul", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "_mul_scalar", + "attributes": [ + { "name": "scalar", "type": "float32" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "_mul_scalar", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "_Plus", + "inputs": [ + { "name": "inputs", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "_plus_scalar", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "_rminus_scalar", + "attributes": [ + { "name": "scalar", "type": "float32" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "_sub", + "inputs": [ + { "name": "x" }, + 
{ "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "_zeros", + "category": "Constant", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Activation", + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "BatchNorm", + "category": "Normalization", + "attributes": [ + { "type": "int32", "default": 1, "name": "axis" }, + { "type": "float64", "default": 0.001, "name": "eps" }, + { "type": "float32", "default": 0.9, "name": "momentum" }, + { "type": "boolean", "default": true, "name": "fix_gamma" }, + { "type": "boolean", "default": false, "name": "use_global_stats" } + ], + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" }, + { "name": "mean" }, + { "name": "variance" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "broadcast_add", + "inputs": [ + { "name": "lhs" }, + { "name": "rhs" } + ], + "outputs": [ + { "name": "out" } + ] + }, + { + "name": "broadcast_div", + "inputs": [ + { "name": "lhs" }, + { "name": "rhs" } + ], + "outputs": [ + { "name": "out" } + ] + }, + { + "name": "broadcast_mul", + "inputs": [ + { "name": "lhs" }, + { "name": "rhs" } + ], + "outputs": [ + { "name": "out" } + ] + }, + { + "name": "Concat", + "category": "Tensor", + "attributes": [ + { "default": "1", "name": "dim" }, + { "visible": false, "name": "num_args" } + ], + "inputs": [ + { "name": "inputs", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Convolution", + "category": "Layer", + "attributes": [ + { "default": false, "name": "cudnn_off", "type": "boolean" }, + { "default": "off", "name": "cudnn_tune" }, + { "default": [ 1, null ], "name": "dilate", "type": "int32[]" }, + { "name": "kernel", "type": "int32[]" }, + { "visible": false, "name": "no_bias", "type": "boolean" }, + { "type": "int32", "default": 1, "name": "num_group" }, + { "type": "int32", "name": "num_filter" }, + { "default": [ 0, null ], "name": "pad", "type": "int32[]" }, + { "default": [ 1, null ], "name": "stride", "type": "int32[]" }, + { "type": "int32", "default": "1024", "name": "workspace" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias", "option": "optional" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "CuDNNBatchNorm", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Deconvolution", + "category": "Layer", + "attributes": [ + { "visible": false, "name": "no_bias" }, + { "default": "1", "name": "num_group" }, + { "type": "int32", "default": "1024", "name": "workspace" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Dropout", + "category": "Dropout", + "attributes": [ + { "type": "float32", "default": 0.5, "name": "p" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ElementWiseSum", + "category": "Normalization", + "inputs": [ + { "name": "inputs", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "elemwise_add", + "inputs": [ + { "name": "lhs" }, + { "name": "rhs" } + ], + "outputs": [ + { "name": "out" } + ] + }, + { + "name": "elemwise_div", + "inputs": [ + { "name": "lhs" }, + { "name": "rhs" } + ], 
+ "outputs": [ + { "name": "out" } + ] + }, + { + "name": "elemwise_sub", + "inputs": [ + { "name": "lhs" }, + { "name": "rhs" } + ], + "outputs": [ + { "name": "out" } + ] + }, + { + "name": "Embedding", + "category": "Transform", + "attributes": [ + { "type": "int32", "name": "input_dim" }, + { "type": "int32", "name": "output_dim" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Flatten", + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "FullyConnected", + "category": "Layer", + "attributes": [ + { "type": "boolean", "default": true, "name": "flatten" }, + { "type": "boolean", "visible": false, "name": "no_bias" }, + { "type": "int32", "name": "num_hidden" } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LayerNorm", + "category": "Normalization", + "attributes": [ + { "type": "int32", "default": 1, "name": "axis" }, + { "type": "float64", "default": 0.001, "name": "eps" }, + { "type": "float32", "default": 0.9, "name": "momentum" }, + { "type": "boolean", "default": true, "name": "fix_gamma" }, + { "type": "boolean", "default": false, "name": "use_global_stats" } + ], + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" }, + { "name": "mean" }, + { "name": "variance" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LeakyReLU", + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "log", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LRN", + "category": "Normalization", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0.0001 }, + { "name": "beta", "type": "float32", "default": 0.75 }, + { "name": "knorm", "type": "float32", "default": 2 }, + { "name": "nsize", "type": "int32" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "MakeLoss", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "mean", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Pad", + "category": "Tensor", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Pooling", + "category": "Pool", + "attributes": [ + { "default": false, "name": "cudnn_off" }, + { "default": false, "name": "global_pool" }, + { "name": "kernel", "type": "int32[]" }, + { "default": [ 0, null ], "name": "pad", "type": "int32[]" }, + { "default": "valid", "name": "pooling_convention" }, + { "default": "max", "name": "pool_type" }, + { "default": [ 1, null ], "name": "stride", "type": "int32[]" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "relu", + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "RNN", + "category": "Layer", + "attributes": [ + { "type": "boolean", "name": "bidirectional", "default": false }, + { "name": "lstm_parameters", "visible": false }, + { "type": "int32", "name": 
"num_layers" }, + { "type": "boolean", "default": false, "name": "state_outputs" }, + { "type": "int32", "name": "state_size" }, + { "type": "float32", "name": "p", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "state_0" }, + { "name": "state_1" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "slice_axis", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SliceChannel", + "inputs": [ + { "name": "inputs" } + ], + "outputs": [ + { "name": "outputs", "option": "variadic" } + ] + }, + { + "name": "softmax", + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SoftmaxActivation", + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SoftmaxOutput", + "category": "Activation", + "attributes": [ + { "default": "1", "name": "grad_scale" }, + { "default": "-1", "name": "ignore_label" }, + { "default": false, "name": "multi_output" }, + { "default": "null", "name": "normalization" }, + { "default": false, "name": "out_grad" }, + { "default": "0", "name": "smooth_alpha" }, + { "default": false, "name": "use_ignore" }, + { "default": false, "name": "preserve_shape" } + ], + "inputs": [ + { "name": "input" }, + { "name": "label" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "sqrt", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "square", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "sum", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "transpose", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } +] \ No newline at end of file diff --git a/mxnet.js b/mxnet.js new file mode 100644 index 00000000000..c4b8462920d --- /dev/null +++ b/mxnet.js @@ -0,0 +1,950 @@ + +import * as base from './base.js'; +import * as json from './json.js'; + +const mxnet = {}; + +mxnet.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'json') { + const obj = context.peek('json'); + if (obj && obj.nodes && obj.arg_nodes && obj.heads) { + return { name: 'mxnet.json', value: obj }; + } + } + if (extension === 'params') { + const stream = context.stream; + const signature = [ 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ]; + if (stream && stream.length > signature.length && stream.peek(signature.length).every((value, index) => value == signature[index])) { + return { name: 'mxnet.params', value: stream }; + } + } + return undefined; + } + + async open(context, target) { + const metadata = await context.metadata('mxnet-metadata.json'); + const basename = (base, identifier, extension, suffix, append) => { + if (!base) { + if (identifier.toLowerCase().endsWith(extension)) { + const items = identifier.substring(0, identifier.length - extension.length).split('-'); + if (items.length >= 2) { + const token = items.pop(); + if ((suffix && token === suffix) || /[a-zA-Z0-9]*/.exec(token)) { + return items.join('-') + append; + } + } + } + } + return base; + }; + const convertVersion = (value) => { + if (Array.isArray(value)) { + if (value.length === 2 && value[0] === 'int') { + const major = Math.floor(value[1] / 10000) % 100; + const minor = 
Math.floor(value[1] / 100) % 100; + const patch = Math.floor(value[1]) % 100; + return [ major.toString(), minor.toString(), patch.toString() ].join('.'); + } + } + return null; + }; + const requestManifest = async () => { + const parse = async (stream) => { + try { + const manifest = {}; + if (stream) { + const reader = json.TextReader.open(stream); + const obj = reader.read(); + if (obj.Model) { + const modelFormat = obj.Model['Model-Format']; + if (modelFormat && modelFormat !== 'MXNet-Symbolic') { + throw new mxnet.Error(`Model format '${modelFormat}' not supported.`); + } + manifest.format = 'MXNet Model Server'; + if (obj['Model-Archive-Version']) { + manifest.format += ` v${obj['Model-Archive-Version']}`; + } + if (!obj.Model.Symbol) { + throw new mxnet.Error('Manifest does not contain symbol entry.'); + } + manifest.symbol = obj.Model.Symbol; + if (obj.Model.Signature) { + manifest.signature = obj.Model.Signature; + } + if (obj.Model.Parameters) { + manifest.params = obj.Model.Parameters; + } + if (obj.Model['Model-Name']) { + manifest.name = obj.Model['Model-Name']; + } + if (obj.Model.Description && manifest.name !== obj.Model.Description) { + manifest.description = obj.Model.Description; + } + } else if (obj.model) { + manifest.format = 'MXNet Model Archive'; + if (obj.specificationVersion) { + manifest.format += ` v${obj.specificationVersion}`; + } + if (obj.model.modelName) { + manifest.symbol = `${obj.model.modelName}-symbol.json`; + } + if (obj.model.modelName) { + manifest.name = obj.model.modelName; + } + if (obj.model.modelVersion) { + manifest.version = obj.model.modelVersion; + } + if (obj.model.description && manifest.name != obj.model.description) { + manifest.description = obj.model.description; + } + } else { + throw new mxnet.Error('Manifest does not contain model.'); + } + if (obj.Engine && obj.Engine.MXNet) { + const version = convertVersion(obj.Engine.MXNet); + manifest.runtime = `MXNet v${version ? version : obj.Engine.MXNet}`; + } + if (obj.License) { + manifest.license = obj.License; + } + if (obj.runtime) { + manifest.runtime = obj.runtime; + } + if (obj.engine && obj.engine.engineName) { + const engine = obj.engine.engineVersion ? `${obj.engine.engineName} ${obj.engine.engineVersion}` : obj.engine.engineName; + manifest.runtime = manifest.runtime ? (`${manifest.runtime} (${engine})`) : engine; + } + if (obj.publisher && obj.publisher.author) { + manifest.author = obj.publisher.author; + if (obj.publisher.email) { + manifest.author = `${manifest.author} <${obj.publisher.email}>`; + } + } + if (obj.license) { + manifest.license = obj.license; + } + if (obj.Model && obj.Model.Signature) { + try { + const content = await context.fetch(obj.Model.Signature); + manifest.signature = content.read('json'); + return manifest; + } catch (error) { + return manifest; + } + } + } + return manifest; + } catch (err) { + throw new mxnet.Error(`Failed to read manifest. ${err.message}`); + } + }; + try { + const content = await context.fetch('MANIFEST.json'); + return parse(content.stream); + } catch (error) { + try { + const content = await context.fetch('MAR-INF/MANIFEST.json'); + return parse(content.stream); + } catch (error) { + return parse(null); + } + } + }; + const createModel = (metadata, manifest, symbol, params) => { + const parameters = new Map(); + if (params) { + try { + for (const [key, array] of mxnet.ndarray.load(params)) { + const name = (key.startsWith('arg:') || key.startsWith('aux:')) ? 
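/* 'arg:' marks trainable parameters and 'aux:' auxiliary states such as BatchNorm running statistics; both prefixes are four characters */ 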
key.substring(4) : key; + parameters.set(name, array); + } + } catch (error) { + // continue regardless of error + } + } + if (symbol) { + if (!manifest.format) { + const version = convertVersion(symbol.attrs && symbol.attrs.mxnet_version ? symbol.attrs.mxnet_version : null); + manifest.format = `MXNet${version ? ` v${version}` : ''}`; + } + if (symbol.nodes && symbol.nodes.some((node) => node && node.op == 'tvm_op')) { + manifest.producer = 'TVM'; + } + } + return new mxnet.Model(metadata, manifest, symbol, parameters); + }; + const identifier = context.identifier; + switch (target.name) { + case 'mxnet.json': { + let symbol = null; + try { + symbol = target.value; + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new mxnet.Error(`Failed to load symbol entry (${message.replace(/\.$/, '')}).`); + } + const requestParams = async (manifest) => { + const file = basename(manifest.params, identifier, '.json', 'symbol', '-0000.params'); + if (file) { + try { + const content = await context.fetch(file); + const buffer = content.stream.peek(); + return createModel(metadata, manifest, symbol, buffer); + } catch (error) { + return createModel(metadata, manifest, symbol, null); + } + } + return createModel(metadata, manifest, symbol, null); + }; + const manifest = await requestManifest(); + return requestParams(manifest); + } + case 'mxnet.params': { + const stream = target.value; + const params = stream.peek(); + const requestSymbol = async (manifest) => { + const name = basename(manifest.symbol, identifier, '.params', null, '-symbol.json'); + if (name) { + try { + const content = await context.fetch(name); + const symbol = content.read('json'); + return createModel(metadata, manifest, symbol, params); + } catch (error) { + return createModel(metadata, manifest, null, params); + } + } + return createModel(metadata, manifest, null, params); + }; + const manifest = await requestManifest(); + return requestSymbol(manifest); + } + default: { + throw new mxnet.Error(`Unsupported MXNet format '${target}'.`); + } + } + } +}; + +mxnet.Model = class { + + constructor(metadata, manifest, symbol, params) { + if (!symbol && !params) { + throw new mxnet.Error('JSON symbol data not available.'); + } + if (symbol) { + if (!Object.prototype.hasOwnProperty.call(symbol, 'nodes')) { + throw new mxnet.Error('JSON file does not contain an MXNet \'nodes\' property.'); + } + if (!Object.prototype.hasOwnProperty.call(symbol, 'arg_nodes')) { + throw new mxnet.Error('JSON file does not contain an MXNet \'arg_nodes\' property.'); + } + if (!Object.prototype.hasOwnProperty.call(symbol, 'heads')) { + throw new mxnet.Error('JSON file does not contain an MXNet \'heads\' property.'); + } + } + this._format = manifest.format || 'MXNet'; + this._producer = manifest.producer || ''; + this._name = manifest.name || ''; + this._version = manifest.version; + this._description = manifest.description || ''; + this._runtime = manifest.runtime || ''; + this._metadata = new Map(); + if (manifest.author) { + this._metadata.set('author', manifest.author); + } + if (manifest.license) { + this._metadata.set('license', manifest.license); + } + this._graphs = [ new mxnet.Graph(metadata, manifest, symbol, params) ]; + } + + get format() { + return this._format; + } + + get producer() { + return this._producer; + } + + get runtime() { + return this._runtime; + } + + get name() { + return this._name; + } + + get version() { + return this._version; + } + + get description() { + return 
this._description; + } + + get metadata() { + return this._metadata; + } + + get graphs() { + return this._graphs; + } +}; + +mxnet.Graph = class { + + constructor(metadata, manifest, symbol, params) { + this._metadata = metadata; + this._nodes = []; + this._inputs = []; + this._outputs = []; + const tensors = new Map(); + if (params) { + for (const [name, value] of params) { + const shape = new mxnet.TensorShape(value.shape); + const type = new mxnet.TensorType(value.dtype, shape); + const tensor = new mxnet.Tensor(name, type, value.data); + tensors.set(name, tensor); + } + } + const values = new Map(); + values.map = (name, type, tensor) => { + if (!values.has(name)) { + values.set(name, new mxnet.Value(name, type || null, tensor || null)); + } else if (type || (tensor && tensor !== values.get(name).initializer)) { + throw new mxnet.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + const updateOutput = (nodes, input) => { + const [nodeIndex, outputIndex] = input; + const node = nodes[nodeIndex]; + if (node) { + while (outputIndex >= node.outputs.length) { + node.outputs.push([ nodeIndex, node.outputs.length ]); + } + } + return [ nodeIndex, outputIndex ]; + }; + if (symbol) { + let nodes = symbol.nodes; + const inputs = {}; + if (manifest && manifest.signature && manifest.signature.inputs) { + for (const input of manifest.signature.inputs) { + inputs[input.data_name] = input; + } + } + const outputs = {}; + if (manifest && manifest.signature && manifest.signature.outputs) { + for (const output of manifest.signature.outputs) { + outputs[output.data_name] = output; + } + } + for (const node of nodes) { + node.outputs = []; + } + for (const node of nodes) { + node.inputs = node.inputs || []; + node.inputs = node.inputs.map((input) => updateOutput(nodes, input)); + } + const outputCountMap = {}; + for (const node of nodes) { + for (const output of node.outputs) { + outputCountMap[output] = (outputCountMap[output] || 0) + 1; + } + } + const arg_nodes = new Map(symbol.arg_nodes.map((index) => [ index, index < nodes.length ? nodes[index] : null ])); + for (let i = 0; i < symbol.heads.length; i++) { + const head = symbol.heads[i]; + const identifier = updateOutput(nodes, head); + const name = nodes[identifier[0]] ? nodes[identifier[0]].name : (`output${(i == 0) ? '' : (i + 1)}`); + const signature = outputs[name]; + const type = signature && signature.data_shape ? 
new mxnet.TensorType(-1, new mxnet.TensorShape(signature.data_shape)) : null; + const value = values.map(`[${identifier.join(',')}]`, type); + const argument = new mxnet.Argument(name, [ value ]); + this._outputs.push(argument); + } + nodes = nodes.filter((node, index) => !arg_nodes.has(index)); + const initializers = new Map(); + for (const node of nodes) { + if (node.op == 'RNN') { + node.inputs = node.inputs.filter((input) => { + const [index] = input; + const arg_node = arg_nodes.get(index); + if (arg_node && arg_node.op == 'null' && arg_node.name && arg_node.name.endsWith('_parameters') && arg_node.attr && arg_node.attr.__init__) { + let attr = node.attrs || node.attr || node.param; + if (!attr) { + node.attr = {}; + attr = node.attr; + } + attr[arg_node.name] = arg_node.attr.__init__; + arg_nodes.delete(index); + return false; + } + return true; + }); + } + for (const input of node.inputs) { + const identifier = `[${input.join(',')}]`; + if (!initializers.has(identifier)) { + const [index] = input; + const arg_node = arg_nodes.get(index); + if (arg_node && arg_node.name && (!arg_node.inputs || arg_node.inputs.length == 0) && (arg_node.outputs && arg_node.outputs.length == 1)) { + if (tensors.has(arg_node.name)) { + initializers.set(identifier, tensors.get(arg_node.name)); + arg_nodes.delete(index); + } else { + const prefix = node.name.endsWith('_fwd') ? node.name.slice(0, -3) : node.name; + if (arg_node.name && (arg_node.name.startsWith(`${prefix}_`) || arg_node.name.startsWith(`${prefix}.`))) { + let dataType = -1; + let shape = []; + if (arg_node.attrs && arg_node.attrs.__dtype__ && arg_node.attrs.__shape__) { + try { + dataType = parseInt(arg_node.attrs.__dtype__); + shape = JSON.parse(`[${arg_node.attrs.__shape__.replace('(', '').replace(')', '').split(' ').join('').split(',').map(((dimension) => dimension || '"?"')).join(',')}]`); + } catch (err) { + // continue regardless of error + } + } + const type = (dataType !== -1 || shape.length > 0) ? + new mxnet.TensorType(dataType, new mxnet.TensorShape(shape)) : + new mxnet.TensorType(-1, new mxnet.TensorShape(null)); + initializers.set(identifier, new mxnet.Tensor(arg_node.name, type, null)); + arg_nodes.delete(index); + } + } + } + } + } + if (node.params) { + for (const param of node.params) { + values.map(param.id, null, tensors.get(param.id)); + } + } + } + for (const [, arg_node] of arg_nodes) { + if (arg_node && (!arg_node.inputs || arg_node.inputs.length == 0) && (arg_node.outputs && arg_node.outputs.length == 1)) { + const identifier = `[${arg_node.outputs[0].join(',')}]`; + const name = arg_node.name; + const signature = inputs[name]; + const type = signature && signature.data_shape ? new mxnet.TensorType(-1, new mxnet.TensorShape(signature.data_shape)) : null; + const value = values.map(identifier, type, tensors.get(identifier)); + const argument = new mxnet.Argument(name, [ value ]); + this._inputs.push(argument); + } + } + for (const node of nodes) { + this._nodes.push(new mxnet.Node(this._metadata, node, initializers, values)); + } + } else if (params) { + const blocks = new Map(); + let separator = Array.from(params.keys()).every((key) => key.indexOf('_') != -1) ? '_' : ''; + if (separator.length == 0) { + separator = Array.from(params.keys()).every((key) => key.indexOf('.') != -1) ? '.' 
: ''; + } + if (separator.length > 0) { + for (const [key] of params) { + const parts = key.split(separator); + let argumentName = parts.pop(); + if (key.endsWith('moving_mean') || key.endsWith('moving_var')) { + argumentName = [ parts.pop(), argumentName ].join(separator); + } + const nodeName = parts.join(separator); + if (!blocks.has(nodeName)) { + blocks.set(nodeName, { name: nodeName, op: 'Weights', params: [] }); + } + blocks.get(nodeName).params.push({ name: argumentName, id: key }); + values.map(key, null, tensors.get(key)); + } + } else { + throw new mxnet.Error("Unsupported key format in params."); + } + + for (const block of blocks.values()) { + this._nodes.push(new mxnet.Node(metadata, block, new Map(), values)); + } + } + } + + get name() { + return ''; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +mxnet.Argument = class { + + constructor(name, value) { + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +mxnet.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new mxnet.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + if (this._initializer) { + return this._initializer.name; + } + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +mxnet.Node = class { + + constructor(metadata, node, initializers, values) { + let type = node.op; + this._name = node.name; + this._attributes = []; + this._inputs = []; + this._outputs = []; + const attrs = node.attrs || node.attr || node.param; + if (attrs) { + if (type == 'tvm_op' && attrs.func_name) { + type = attrs.func_name; + } + for (const [name, value] of Object.entries(attrs)) { + if (type != 'tvm_op' && name != 'func_name') { + const attribute = new mxnet.Attribute(metadata, type, name, value); + this._attributes.push(attribute); + } + } + } + this._type = metadata.type(type) || { name: type }; + if (node.inputs) { + const inputs = node.inputs; + let inputIndex = 0; + if (this._type && this._type.inputs) { + for (const inputDef of this._type.inputs) { + if (inputIndex < inputs.length || inputDef.option != 'optional') { + const count = (inputDef.option == 'variadic') ? 
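/* a 'variadic' input consumes all remaining input edges; any other definition binds exactly one */ 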
(inputs.length - inputIndex) : 1; + const list = []; + for (const input of inputs.slice(inputIndex, inputIndex + count)) { + const identifier = `[${input.join(',')}]`; + if (identifier !== '' || inputDef.option != 'optional') { + const value = values.map(identifier, inputDef.type, initializers.get(identifier)); + list.push(value); + } + } + const argument = new mxnet.Argument(inputDef.name, list); + this._inputs.push(argument); + inputIndex += count; + } + } + } + if (inputIndex < inputs.length) { + this._inputs.push(...inputs.slice(inputIndex).map((input, index) => { + const name = (inputIndex + index).toString(); + const identifier = `[${input.join(',')}]`; + const value = values.map(identifier, null, initializers.get(identifier)); + return new mxnet.Argument(name, [ value ]); + })); + } + } + if (node.outputs) { + const outputs = node.outputs; + let outputIndex = 0; + if (this._type && this._type.outputs) { + for (const outputDef of this._type.outputs) { + if (outputIndex < outputs.length || outputDef.option != 'optional') { + const list = []; + const count = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1; + for (const output of outputs.slice(outputIndex, outputIndex + count)) { + const value = values.map(`[${output.join(',')}]`); + list.push(value); + } + const argument = new mxnet.Argument(outputDef.name, list); + this._outputs.push(argument); + outputIndex += count; + } + } + } + if (outputIndex < outputs.length) { + this._outputs.push(...outputs.slice(outputIndex).map((output, index) => { + const name = (outputIndex + index).toString(); + const value = values.map(`[${output.join(',')}]`); + return new mxnet.Argument(name, [ value ]); + })); + } + } + if (node.params) { + for (const param of node.params) { + const value = values.map(param.id); + const argument = new mxnet.Argument(param.name, [ value ]); + this._inputs.push(argument); + } + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } +}; + +mxnet.Attribute = class { + + constructor(metadata, type, name, value) { + this._name = name; + this._value = value; + + let number; + metadata = metadata.attribute(type, name); + if (metadata && metadata.type) { + switch (metadata.type) { + case 'boolean': + switch (value) { + case 0: + case '0': + case 'False': + this._value = false; + break; + case 1: + case '1': + case 'True': + this._value = true; + break; + default: + throw new mxnet.Error(`Unsupported attribute boolean value '${value}'.`); + } + break; + case 'int32': + number = Number.parseInt(this._value, 10); + this._value = Number.isNaN(this._value - number) ? value : number; + break; + case 'float32': + case 'float64': + number = Number.parseFloat(this._value); + this._value = Number.isNaN(this._value - number) ? value : number; + break; + case 'int32[]': + if (this._value.length > 2 && this._value.startsWith('(') && this._value.endsWith(')')) { + let array = []; + const items = this._value.substring(1, this._value.length - 1).split(',') + .map((item) => item.trim()) + .map((item) => item.endsWith('L') ? 
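/* strip the Python 2 long suffix MXNet used in serialized tuples, e.g. '(1L, 1L)' */ 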
item.substring(0, item.length - 1) : item); + for (const item of items) { + number = Number.parseInt(item, 10); + if (Number.isNaN(item - number)) { + array = null; + } else if (array != null) { + array.push(number); + } + } + if (array != null) { + this._value = array; + } + } + break; + default: + throw new mxnet.Error(`Unsupported attribute type '${metadata.type}'.`); + } + } + if (metadata) { + if (metadata.visible === false) { + this._visible = false; + } else if (metadata.default !== undefined) { + let defaultValue = metadata.default; + if (this._value == defaultValue) { + this._visible = false; + } else if (Array.isArray(this._value) && Array.isArray(defaultValue)) { + defaultValue = defaultValue.slice(0, defaultValue.length); + if (defaultValue.length > 1 && defaultValue[defaultValue.length - 1] == null) { + defaultValue.pop(); + while (defaultValue.length < this._value.length) { + defaultValue.push(defaultValue[defaultValue.length - 1]); + } + } + if (this._value.every((item, index) => item == defaultValue[index])) { + this._visible = false; + } + } + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +mxnet.Tensor = class { + + constructor(name, type, data) { + this._name = name; + this._type = type; + this._data = data; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get encoding() { + return '<'; + } + + get values() { + return this._data; + } +}; + +mxnet.TensorType = class { + + constructor(dataType, shape) { + switch (dataType) { + case 0: this._dataType = 'float32'; break; + case 1: this._dataType = 'float64'; break; + case 2: this._dataType = 'float16'; break; + case 3: this._dataType = 'uint8'; break; + case 4: this._dataType = 'int32'; break; + case 5: this._dataType = 'int8'; break; + case 6: this._dataType = 'int64'; break; + case -1: this._dataType = '?'; break; + default: throw new mxnet.Error(`Unsupported type '${dataType}'.`); + } + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this._dataType + this._shape.toString(); + } +}; + +mxnet.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (this._dimensions) { + if (this._dimensions.length == 0) { + return ''; + } + return `[${this._dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } + return ''; + } +}; + +mxnet.ndarray = class { + + static load(buffer) { + // NDArray::Load(dmlc::Stream* fi, std::vector* data, std::vector* keys) + const map = new Map(); + const reader = new mxnet.BinaryReader(buffer); + if (reader.uint64() !== 0x112) { // kMXAPINDArrayListMagic + throw new mxnet.Error('Invalid signature.'); + } + if (reader.uint64() !== 0) { + throw new mxnet.Error('Invalid reserved block.'); + } + const data = new Array(reader.uint64()); + for (let i = 0; i < data.length; i++) { + data[i] = new mxnet.ndarray.NDArray(reader); + } + const decoder = new TextDecoder('ascii'); + const names = new Array(reader.uint64()); + for (let i = 0; i < names.length; i++) { + names[i] = decoder.decode(reader.read(reader.uint64())); + } + if (names.length != data.length) { + throw new mxnet.Error('Label count mismatch.'); + } + for (let i = 0; i < names.length; i++) { + map.set(names[i], data[i]); + } + 
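// Usage sketch (hypothetical; 'buffer' holds the raw .params file bytes): + //   const params = mxnet.ndarray.load(buffer); + //   params.get('arg:conv0_weight'); // MXNet checkpoints usually prefix keys with 'arg:' or 'aux:' + 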
return map; + } +}; + +mxnet.ndarray.NDArray = class { + + constructor(reader) { + mxnet.ndarray.NDArray._dataTypeSizeTable = [ 4, 8, 2, 1, 4, 1, 8 ]; + switch (reader.uint32()) { + case 0xf993faca: { // NDARRAY_V3_MAGIC + throw new mxnet.Error('mxnet.ndarray.NDArray v3 not supported.'); + } + case 0xf993fac9: { // NDARRAY_V2_MAGIC + const stype = reader.uint32(); + let num_aux_data = 0; + switch (stype) { + case 0: num_aux_data = 0; break; // kDefaultStorage + case 1: num_aux_data = 1; break; // kRowSparseStorage + case 2: num_aux_data = 2; break; // kCSRStorage + default: throw new mxnet.Error(`Unsupported NDArray type '${stype}'.`); + } + this.sshape = null; + if (num_aux_data > 0) { + this.sshape = reader.uint64s(); + } + this.shape = reader.uint64s(); + if (this.shape.length !== 0) { + this.context = new mxnet.context.Context(reader); + this.dtype = reader.uint32(); + if (num_aux_data > 0) { + throw new mxnet.Error('Not implemented.'); + } + const dataTypeSize = (this.dtype < mxnet.ndarray.NDArray._dataTypeSizeTable.length) ? mxnet.ndarray.NDArray._dataTypeSizeTable[this.dtype] : 0; + const size = dataTypeSize * this.size; + this.data = reader.read(size); + } + break; + } + case 0xf993fac8: { // NDARRAY_V1_MAGIC + this.shape = reader.uint64s(); + if (this.shape.length !== 0) { + this.context = new mxnet.context.Context(reader); + this.dtype = reader.uint32(); + const itemsize = (this.dtype < mxnet.ndarray.NDArray._dataTypeSizeTable.length) ? mxnet.ndarray.NDArray._dataTypeSizeTable[this.dtype] : 0; + const size = itemsize * this.size; + this.data = reader.read(size); + } + break; + } + default: { + reader.skip(-4); + this.shape = reader.uint32s(); + this.context = new mxnet.context.Context(reader); + this.dtype = reader.uint32(); + const itemsize = (this.dtype < mxnet.ndarray.NDArray._dataTypeSizeTable.length) ? 
mxnet.ndarray.NDArray._dataTypeSizeTable[this.dtype] : 0; + const size = itemsize * this.size; + this.data = reader.read(size); + break; + } + } + } + + get size() { + return this.shape.reduce((a, b) => a * b, 1); + } +}; + +mxnet.BinaryReader = class extends base.BinaryReader { + + uint32s() { + const count = this.uint32(); + const array = new Array(count); + for (let i = 0; i < array.length; i++) { + array[i] = this.uint32(); + } + return array; + } + + uint64s() { + const count = this.uint32(); + const array = new Array(count); + for (let i = 0; i < array.length; i++) { + array[i] = this.uint64(); + } + return array; + } +}; + +mxnet.context = {}; + +mxnet.context.Context = class { + + constructor(reader) { + this._deviceType = reader.uint32(); + this._deviceId = reader.uint32(); + } +}; + +mxnet.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading MXNet model.'; + } +}; + +export const ModelFactory = mxnet.ModelFactory; diff --git a/ncnn-metadata.json b/ncnn-metadata.json new file mode 100644 index 00000000000..f71f217ed07 --- /dev/null +++ b/ncnn-metadata.json @@ -0,0 +1,1029 @@ +[ + { + "name": "AbsVal", + "identifier": 0 + }, + { + "name": "ArgMax", + "identifier": 1 + }, + { + "name": "BatchNorm", + "identifier": 2, + "category": "Normalization", + "attributes": [ + { "name": "channels", "type": "int32", "default": 0 }, + { "name": "eps", "type": "float32", "default": 0 } + ] + }, + { + "name": "Bias", + "identifier": 3, + "category": "Layer", + "attributes": [ + { "name": "bias_data_size", "default": 0, "visible": false } + ] + }, + { + "name": "BinaryOp", + "identifier": 40, + "attributes": [ + { "name": "op_type", "type": "BinaryOpType", "default": 0 }, + { "name": "with_scalar", "type": "int32", "default": 0 }, + { "name": "b", "type": "float32", "default": 0 } + ] + }, + { + "name": "BNLL", + "identifier": 4 + }, + { + "name": "Cast", + "identifier": 64, + "attributes": [ + { "name": "type_from", "type": "CastOpType", "default": 0 }, + { "name": "type_to", "type": "CastOpType", "default": 0 } + ] + }, + { + "name": "Clip", + "identifier": 54, + "attributes": [ + { "name": "min", "type": "float32" }, + { "name": "max", "type": "float32" } + ] + }, + { + "name": "Concat", + "identifier": 5, + "category": "Tensor", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Convolution", + "identifier": 6, + "category": "Layer", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 0 }, + { "name": "kernel_w", "type": "int32", "default": 0 }, + { "name": "dilation_w", "type": "int32", "default": 1 }, + { "name": "stride_w", "type": "int32", "default": 1 }, + { "name": "pad_left", "type": "int32", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "type": "int32", "default": 0, "visible": false }, + { "name": "" }, + { "name": "int8_scale_term", "default": 0 }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "kernel_h", "type": "int32", "default": 0 }, + { "name": "dilation_h", "type": "int32", "default": 1 }, + { "name": "stride_h", "type": "int32", "default": 1 }, + { "name": "pad_top", "type": "int32", "default": 0 }, + { "name": "pad_right", "type": "int32", "default": 0 }, + { "name": "pad_bottom", "type": "int32", "default": 0 }, + { "name": "" }, + { 
"name": "pad_value", "type": "float32", "default": 0 }, + { "name": "dynamic_weight", "type": "int32", "default": 0 } + ] + }, + { + "name": "Convolution1D", + "identifier": 81, + "category": "Layer", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 0 }, + { "name": "kernel_w", "type": "int32", "default": 0 }, + { "name": "dilation_w", "type": "int32", "default": 1 }, + { "name": "stride_w", "type": "int32", "default": 1 }, + { "name": "pad_left", "type": "int32", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "type": "int32", "default": 0, "visible": false }, + { "name": "" }, + { "name": "" }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "pad_right", "type": "int32", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "pad_value", "type": "float32", "default": 0 }, + { "name": "dynamic_weight", "type": "int32", "default": 0 } + ] + }, + { + "name": "Convolution3D", + "identifier": 84, + "category": "Layer", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 0 }, + { "name": "kernel_w", "type": "int32", "default": 0 }, + { "name": "dilation_w", "type": "int32", "default": 1 }, + { "name": "stride_w", "type": "int32", "default": 1 }, + { "name": "pad_left", "type": "int32", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "type": "int32", "default": 0, "visible": false }, + { "name": "" }, + { "name": "" }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "kernel_h", "type": "int32", "default": 0 }, + { "name": "dilation_h", "type": "int32", "default": 1 }, + { "name": "stride_h", "type": "int32", "default": 1 }, + { "name": "pad_top", "type": "int32", "default": 0 }, + { "name": "pad_right", "type": "int32", "default": 0 }, + { "name": "pad_bottom", "type": "int32", "default": 0 }, + { "name": "pad_behind", "type": "int32", "default": 0 }, + { "name": "pad_value", "type": "float32", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "kernel_d", "type": "int32", "default": 0 }, + { "name": "dilation_d", "type": "int32", "default": 1 }, + { "name": "stride_d", "type": "int32", "default": 1 }, + { "name": "pad_front", "type": "int32", "default": 0 } + ] + }, + { + "name": "ConvolutionDepthWise", + "identifier": 42, + "category": "Layer", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "dilation_w", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_left", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "group", "default": 0 }, + { "name": "int8_scale_term", "default": 0 }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "kernel_h", "default": 0 }, + { "name": "dilation_h", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "pad_top", "type": "int32", "default": 0 }, + { "name": "pad_right", "type": "int32", "default": 0 }, + { "name": "pad_bottom", "type": "int32", "default": 0 }, + { "name": "" }, + { "name": "pad_value", "type": "float32", "default": 0 }, + { "name": "dynamic_weight", "type": "int32", "default": 0 } + ] + }, + { + "name": 
"ConvolutionDepthWise1D", + "identifier": 83, + "category": "Layer", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "dilation_w", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_left", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "group", "default": 0 }, + { "name": "" }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "pad_right", "type": "int32", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "pad_value", "type": "float32", "default": 0 }, + { "name": "dynamic_weight", "type": "int32", "default": 0 } + ] + }, + { + "name": "ConvolutionDepthWise3D", + "identifier": 85, + "category": "Layer", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "dilation_w", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_left", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "group", "default": 0 }, + { "name": "" }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "kernel_h", "default": 0 }, + { "name": "dilation_h", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "pad_top", "type": "int32", "default": 0 }, + { "name": "pad_right", "type": "int32", "default": 0 }, + { "name": "pad_bottom", "type": "int32", "default": 0 }, + { "name": "pad_behind", "type": "int32", "default": 0 }, + { "name": "pad_value", "type": "float32", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "kernel_d", "type": "int32", "default": 0 }, + { "name": "dilation_d", "type": "int32", "default": 1 }, + { "name": "stride_d", "type": "int32", "default": 1 }, + { "name": "pad_front", "type": "int32", "default": 0 } + ] + }, + { + "name": "Crop", + "identifier": 7, + "category": "Data", + "attributes": [ + { "name": "woffset", "default": 0 }, + { "name": "hoffset", "default": 0 }, + { "name": "coffset", "default": 0 }, + { "name": "outw", "default": 0 }, + { "name": "outh", "default": 0 }, + { "name": "outc", "default": 0 }, + { "name": "woffset2", "default": 0 }, + { "name": "hoffset2", "default": 0 }, + { "name": "coffset2", "default": 0 }, + { "name": "starts", "default": [] }, + { "name": "ends", "default": [] }, + { "name": "axes", "default": [] } + ] + }, + { + "name": "Deconvolution", + "identifier": 8, + "category": "Layer", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "dilation_w", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_w", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "" }, + { "name": "int8_scale_term", "default": 0 }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "kernel_h", "default": 0 }, + { "name": "dilation_h", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "pad_top", "default": 0 }, + { "name": "pad_right", "default": 0 }, + { "name": "" }, + { "name": "pad_bottom", "default": 0 }, + { "name": "" }, + { "name": 
"output_pad_right", "default": 0 }, + { "name": "output_pad_bottom", "default": 0 }, + { "name": "output_w", "default": 0 }, + { "name": "output_h", "default": 0 } + ] + }, + { + "name": "DeconvolutionDepthWise", + "identifier": 51, + "category": "Layer", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "dilation_w", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_w", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "group", "default": 0 }, + { "name": "int8_scale_term", "default": 0 }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "kernel_h", "default": 0 }, + { "name": "dilation_h", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "pad_top", "default": 0 }, + { "name": "pad_right", "default": 0 }, + { "name": "" }, + { "name": "pad_bottom", "default": 0 }, + { "name": "" }, + { "name": "output_pad_right", "default": 0 }, + { "name": "output_pad_bottom", "default": 0 }, + { "name": "output_w", "default": 0 }, + { "name": "output_h", "default": 0 } + ] + }, + { + "name": "DeepCopy", + "identifier": 70 + }, + { + "name": "Dequantize", + "identifier": 58, + "attributes": [ + { "name": "scale_data_size", "default": 1, "visible": false }, + { "name": "bias_data_size", "default": 0, "visible": false } + ] + }, + { + "name": "DetectionOutput", + "identifier": 49, + "attributes": [ + { "name": "num_class", "default": 0 }, + { "name": "nms_threshold", "default": 0.05 }, + { "name": "nms_top_k", "default": 300 }, + { "name": "keep_top_k", "default": 100 }, + { "name": "confidence_threshold", "default": 0.5 }, + { "name": "varainces0", "default": 0.1 }, + { "name": "varainces1", "default": 0.1 }, + { "name": "varainces2", "default": 0.2 }, + { "name": "varainces3", "default": 0.2 } + ] + }, + { + "name": "Dropout", + "identifier": 9, + "category": "Dropout", + "attributes": [ + { "name": "scale", "type": "float32", "default": 1 } + ] + }, + { + "name": "Eltwise", + "identifier": 10, + "attributes": [ + { "name": "op_type", "type": "EltwiseType", "default": 0 }, + { "name": "coeffs", "default": [] } + ], + "inputs": [ + { "name": "inputs", "option": "variadic" } + ] + }, + { + "name": "ELU", + "identifier": 11, + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0.1 } + ] + }, + { + "name": "Embed", + "identifier": 12, + "category": "Transform", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "input_dim", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "default": 0, "visible": false } + ] + }, + { + "name": "Exp", + "identifier": 13, + "attributes": [ + { "name": "base", "type": "float32", "default": -1 }, + { "name": "scale", "type": "float32", "default": 1 }, + { "name": "shift", "type": "float32", "default": 0 } + ] + }, + { + "name": "ExpandDims", + "identifier": 45, + "category": "Shape", + "attributes": [ + { "name": "expand_w", "default": 0 }, + { "name": "expand_h", "default": 0 }, + { "name": "expand_c", "default": 0 }, + { "name": "axes", "default": [] } + ] + }, + { + "name": "Flatten", + "identifier": 14, + "category": "Shape" + }, + { + "name": "GELU", + "identifier": 80, + "category": "Activation", + "attributes": [ + { "name": "fast_gelu", "type": "int32", "default": 0 } + ] + }, + { + 
"name": "Gemm", + "identifier": 74, + "category": "Layer", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 1 }, + { "name": "beta", "type": "float32", "default": 1 }, + { "name": "transA", "type": "int32", "default": 0 }, + { "name": "transB", "type": "int32", "default": 0 } + ] + }, + { + "name": "GroupNorm", + "identifier": 75, + "category": "Normalization", + "attributes": [ + { "name": "group", "default": 1 }, + { "name": "channels", "default": 0 }, + { "name": "eps", "type": "float32", "default": 0.001 }, + { "name": "affine", "default": 0 } + ] + }, + { + "name": "GRU", + "identifier": 78, + "category": "Layer", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "direction", "default": 1 } + ] + }, + { + "name": "HardSigmoid", + "identifier": 65, + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0.2 }, + { "name": "beta", "type": "float32", "default": 0.5 } + ] + }, + { + "name": "HardSwish", + "identifier": 67, + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0.2 }, + { "name": "beta", "type": "float32", "default": 0.5 } + ] + }, + { + "name": "InnerProduct", + "identifier": 15, + "category": "Layer", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "int8_scale_term", "default": 0 }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] } + ] + }, + { + "name": "Input", + "identifier": 16 + }, + { + "name": "InstanceNorm", + "identifier": 53, + "category": "Normalization", + "attributes": [ + { "name": "channels", "default": 0 }, + { "name": "eps", "type": "float32", "default": 0.001 }, + { "name": "affine", "default": 1 } + ] + }, + { + "name": "Interp", + "identifier": 50, + "attributes": [ + { "name": "resize_type", "type": "InterpResizeType", "default": 0 }, + { "name": "height_scale", "type": "float32", "default": 1 }, + { "name": "width_scale", "type": "float32", "default": 1 }, + { "name": "output_height", "default": 0 }, + { "name": "output_width", "default": 0 }, + { "name": "dynamic_target_size", "default": 0 }, + { "name": "align_corner", "default": 0 } + ] + }, + { + "name": "LayerNorm", + "identifier": 76, + "category": "Normalization", + "attributes": [ + { "name": "channels", "default": 0 }, + { "name": "eps", "type": "float32", "default": 0.001 }, + { "name": "affine", "type": "int32", "default": 1 } + ] + }, + { + "name": "Log", + "identifier": 17, + "attributes": [ + { "name": "base", "type": "float32", "default": -1 }, + { "name": "scale", "type": "float32", "default": 1 }, + { "name": "shift", "type": "float32", "default": 0 } + ] + }, + { + "name": "LRN", + "identifier": 18, + "category": "Normalization", + "attributes": [ + { "name": "region_type", "default": 0 }, + { "name": "local_size", "default": 5 }, + { "name": "alpha", "default": 1 }, + { "name": "beta", "default": 0.75 }, + { "name": "bias", "default": 1 } + ] + }, + { + "name": "LSTM", + "identifier": 39, + "category": "Layer", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "direction", "default": 1 } + ] + }, + { + "name": 
"MemoryData", + "identifier": 19, + "attributes": [ + { "name": "w", "default": 0 }, + { "name": "h", "default": 0 }, + { "name": "c", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "d", "default": 0 } + ] + }, + { + "name": "Mish", + "identifier": 71, + "category": "Activation" + }, + { + "name": "MultiHeadAttention", + "identifier": 79, + "category": "Layer", + "attributes": [ + { "name": "embed_dim", "default": 0 }, + { "name": "num_head", "default": 1 }, + { "name": "weight_data_size", "default": 0, "visible": false } + ] + }, + { + "name": "MVN", + "identifier": 20, + "category": "Normalization", + "attributes": [ + { "name": "normalize_variance", "default": 0 }, + { "name": "across_channels", "default": 0 }, + { "name": "eps", "type": "float32", "default": 0.0001 } + ] + }, + { + "name": "Noop", + "identifier": 68 + }, + { + "name": "Normalize", + "identifier": 46, + "category": "Normalization", + "attributes": [ + { "name": "across_spatial", "default": 0 }, + { "name": "channel_shared", "default": 0 }, + { "name": "eps", "type": "float32", "default": 0.0001 }, + { "name": "scale_data_size", "default": 0, "visible": false }, + { "name": "across_channel", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "eps_mode", "default": 0 } + ] + }, + { + "name": "Packing", + "identifier": 62, + "category": "Shape", + "attributes": [ + { "name": "out_elempack", "default": 1 }, + { "name": "use_padding", "default": 0 }, + { "name": "cast_type_from", "default": 0 }, + { "name": "cast_type_to", "default": 0 }, + { "name": "storage_type_from", "default": 0 }, + { "name": "storage_type_to", "default": 0 } + ] + }, + { + "name": "Padding", + "identifier": 43, + "category": "Layer", + "attributes": [ + { "name": "top", "default": 0 }, + { "name": "bottom", "default": 0 }, + { "name": "left", "default": 0 }, + { "name": "right", "default": 0 }, + { "name": "type", "type": "PaddingType", "default": 0 }, + { "name": "value", "type": "float32", "default": 0 }, + { "name": "per_channel_pad_data_size", "default": 0, "visible": false }, + { "name": "front", "default": 0 }, + { "name": "behind", "default": 0 } + ] + }, + { + "name": "Permute", + "identifier": 47, + "category": "Shape", + "attributes": [ + { "name": "order_type", "type": "PermuteOrderType", "default": 0 } + ] + }, + { + "name": "PixelShuffle", + "identifier": 69, + "category": "Shape", + "attributes": [ + { "name": "upscale_factor", "default": 1 }, + { "name": "mode", "default": 0 } + ] + }, + { + "name": "Pooling", + "identifier": 21, + "category": "Pool", + "attributes": [ + { "name": "pooling_type", "type": "PoolingType", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_left", "default": 0 }, + { "name": "global_pooling", "default": 0 }, + { "name": "pad_mode", "default": 0 }, + { "name": "avgpool_count_include_pad", "default": 0 }, + { "name": "adaptive_pooling", "default": 0 }, + { "name": "out_w", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "kernel_h", "default": 0 }, + { "name": "stride_h", "default": 1 }, + { "name": "pad_top", "default": 0 }, + { "name": "pad_right", "default": 0 }, + { "name": "pad_bottom", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "out_h", "default": 0 } + ] + }, + { + "name": "Pooling1D", + "identifier": 82, + "category": "Pool", + 
"attributes": [ + { "name": "pooling_type", "type": "PoolingType", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_left", "default": 0 }, + { "name": "global_pooling", "default": 0 }, + { "name": "pad_mode", "default": 0 }, + { "name": "avgpool_count_include_pad", "default": 0 }, + { "name": "adaptive_pooling", "default": 0 }, + { "name": "out_w", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "pad_right", "default": 0 } + ] + }, + { + "name": "Pooling3D", + "identifier": 86, + "category": "Pool", + "attributes": [ + { "name": "pooling_type", "type": "PoolingType", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_left", "default": 0 }, + { "name": "global_pooling", "default": 0 }, + { "name": "pad_mode", "default": 0 }, + { "name": "avgpool_count_include_pad", "default": 0 }, + { "name": "adaptive_pooling", "default": 0 }, + { "name": "out_w", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "kernel_h", "default": 0 }, + { "name": "stride_h", "default": 1 }, + { "name": "pad_top", "default": 0 }, + { "name": "pad_right", "default": 0 }, + { "name": "pad_bottom", "default": 0 }, + { "name": "pad_behind", "default": 0 }, + { "name": "" }, + { "name": "out_h", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "kernel_d", "default": 0 }, + { "name": "stride_d", "default": 1 }, + { "name": "pad_front", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "out_d", "default": 0 } + ] + }, + { + "name": "Power", + "identifier": 22, + "attributes": [ + { "name": "power", "type": "float32", "default": 1 }, + { "name": "scale", "type": "float32", "default": 1 }, + { "name": "shift", "type": "float32", "default": 0 } + ] + }, + { + "name": "PReLU", + "identifier": 23, + "category": "Activation", + "attributes": [ + { "name": "num_slope", "type": "int32", "default": 0, "visible": false } + ] + }, + { + "name": "PriorBox", + "identifier": 48, + "attributes": [ + { "name": "min_sizes", "default": [] }, + { "name": "max_sizes", "default": [] }, + { "name": "aspect_ratios", "default": [] }, + { "name": "varainces0", "type": "float32", "default": 0 }, + { "name": "varainces1", "type": "float32", "default": 0 }, + { "name": "varainces2", "type": "float32", "default": 0 }, + { "name": "varainces3", "type": "float32", "default": 0 }, + { "name": "flip", "default": 1 }, + { "name": "clip", "default": 0 }, + { "name": "image_width", "default": 0 }, + { "name": "image_height", "default": 0 }, + { "name": "step_width", "default": -233 }, + { "name": "step_height", "default": -233 }, + { "name": "offset", "default": 0 } + ] + }, + { + "name": "Proposal", + "identifier": 24 + }, + { + "name": "PSROIPooling", + "identifier": 60 + }, + { + "name": "Quantize", + "identifier": 57, + "attributes": [ + { "name": "scale_data_size", "default": 1, "visible": false } + ] + }, + { + "name": "Reduction", + "identifier": 25, + "attributes": [ + { "name": "op_type", "type": "ReductionOpType", "default": 0 }, + { "name": "reduce_all", "type": "int32", "default": 1 }, + { "name": "coeff", "type": "float32", "default": 1.0 }, + { "name": "axes", "default": [] }, + { "name": "keepdims", "type": "int32", "default": 0 } + ] + }, + { + "name": "ReLU", + "identifier": 26, + "category": "Activation", + "attributes": [ + { "name": "slope", "type": "float32", "default": 
0 } + ] + }, + { + "name": "ReLU6", + "category": "Activation" + }, + { + "name": "Reorg", + "identifier": 55, + "category": "Shape", + "attributes": [ + { "name": "stride", "default": 1 }, + { "name": "mode", "default": 0 } + ] + }, + { + "name": "Requantize", + "identifier": 63, + "attributes": [ + { "name": "scale_in_data_size", "default": 1, "visible": false }, + { "name": "scale_out_data_size", "default": 1, "visible": false }, + { "name": "bias_data_size", "default": 0, "visible": false }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] } + ] + }, + { + "name": "Reshape", + "identifier": 27, + "category": "Shape", + "attributes": [ + { "name": "w", "default": -233 }, + { "name": "h", "default": -233 }, + { "name": "c", "default": -233 }, + { "name": "permute", "default": 0 }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "" }, + { "name": "d", "default": -233 } + ] + }, + { + "name": "RNN", + "identifier": 38, + "category": "Layer", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "direction", "default": 1 } + ] + }, + { + "name": "ROIAlign", + "identifier": 61 + }, + { + "name": "ROIPooling", + "identifier": 28 + }, + { + "name": "Scale", + "identifier": 29, + "category": "Layer", + "attributes": [ + { "name": "scale_data_size", "default": 0, "visible": false }, + { "name": "bias_term", "default": 0, "visible": false } + ] + }, + { + "name": "SELU", + "identifier": 66, + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 1.67326324 }, + { "name": "lambda", "type": "float32", "default": 1.050700987 } + ] + }, + { + "name": "ShuffleChannel", + "identifier": 52, + "category": "Shape", + "attributes": [ + { "name": "group", "default": 1 }, + { "name": "reverse", "default": 0 } + ] + }, + { + "name": "Sigmoid", + "identifier": 30, + "category": "Activation" + }, + { + "name": "Slice", + "identifier": 31, + "category": "Tensor", + "attributes": [ + { "name": "slices", "default": [] }, + { "name": "axis", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output", "option": "variadic" } + ] + }, + { + "name": "Softmax", + "identifier": 32, + "category": "Activation", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "fixbug0", "type": "int32", "default": 0, "visible": false } + ] + }, + { + "name": "Softplus", + "identifier": 77, + "category": "Activation" + }, + { + "name": "Split", + "identifier": 33, + "category": "Tensor", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output", "option": "variadic" } + ] + }, + { + "name": "SPP", + "identifier": 34, + "category": "Activation" + }, + { + "name": "Squeeze", + "identifier": 44, + "category": "Shape", + "attributes": [ + { "name": "squeeze_w", "default": 0 }, + { "name": "squeeze_h", "default": 0 }, + { "name": "squeeze_c", "default": 0 }, + { "name": "axes", "default": [] } + ] + }, + { + "name": "StatisticsPooling", + "identifier": 72, + "category": "Pool" + }, + { + "name": "Swish", + "identifier": 73, + "category": "Activation" + }, + { + "name": "TanH", + "identifier": 35, + "category": "Activation" + }, + { + "name": "Threshold", + "identifier": 36 + }, + { + "name": "Tile", + "identifier": 37 + }, + { + "name": "UnaryOp", + "identifier": 41, + "attributes": [ + { "name": "op_type", "type": 
"UnaryOpType", "default": 0 } + ] + }, + { + "name": "YoloDetectionOutput", + "identifier": 56, + "attributes": [ + { "name": "num_class", "type": "int32", "default": 20 }, + { "name": "num_box", "type": "int32", "default": 5 }, + { "name": "confidence_threshold", "type": "float32", "default": 0.01 }, + { "name": "nms_threshold", "type": "float32", "default": 0.45 }, + { "name": "biases" } + ], + "inputs": [ + { "name": "input", "option": "variadic" } + ] + }, + { + "name": "Yolov3DetectionOutput", + "identifier": 59, + "attributes": [ + { "name": "num_class", "type": "int32", "default": 20 }, + { "name": "num_box", "type": "int32", "default": 5 }, + { "name": "confidence_threshold", "type": "float32", "default": 0.01 }, + { "name": "nms_threshold", "type": "float32", "default": 0.45 }, + { "name": "biases", "type": "float32[]" }, + { "name": "mask", "type": "float32[]" }, + { "name": "anchors_scale", "type": "float32[]" } + ], + "inputs": [ + { "name": "input", "option": "variadic" } + ] + } +] diff --git a/ncnn.js b/ncnn.js new file mode 100644 index 00000000000..17aa95c82d0 --- /dev/null +++ b/ncnn.js @@ -0,0 +1,928 @@ + +import * as base from './base.js'; +import * as text from './text.js'; + +const ncnn = {}; + +// https://github.com/Tencent/ncnn/wiki/param-and-model-file-structure +// https://github.com/Tencent/ncnn/wiki/operation-param-weight-table +// https://github.com/Tencent/ncnn/wiki/operators + +ncnn.ModelFactory = class { + + match(context) { + const identifier = context.identifier.toLowerCase(); + if (identifier.endsWith('.param.bin') || identifier.endsWith('.ncnnmodel')) { + const stream = context.stream; + if (stream.length > 4) { + const buffer = stream.peek(4); + const signature = (buffer[0] | buffer[1] << 8 | buffer[2] << 16 | buffer [3] << 24) >>> 0; + if (signature == 0x007685DD) { + return 'ncnn.model.bin'; + } + } + } + if (identifier.endsWith('.param') || identifier.endsWith('.cfg.ncnn')) { + try { + const reader = text.Reader.open(context.stream, 2048); + const signature = reader.read(); + if (signature !== undefined) { + if (signature.trim() === '7767517') { + return 'ncnn.model'; + } + const header = signature.trim().split(' '); + if (header.length === 2 && header.every((value) => value >>> 0 === parseFloat(value))) { + return 'ncnn.model'; + } + } + } catch (err) { + // continue regardless of error + } + } + if (identifier.endsWith('.bin') || identifier.endsWith('.weights.ncnn')) { + const stream = context.stream; + if (stream.length > 4) { + const buffer = stream.peek(4); + const signature = (buffer[0] | buffer[1] << 8 | buffer[2] << 16 | buffer [3] << 24) >>> 0; + if (signature === 0x00000000 || signature === 0x00000001 || + signature === 0x01306B47 || signature === 0x000D4B38 || signature === 0x0002C056) { + return 'ncnn.weights'; + } + } + } + return undefined; + } + + async open(context, target) { + const metadata = await context.metadata('ncnn-metadata.json'); + const openBinary = (param, bin) => { + const reader = new ncnn.BinaryParamReader(param); + return new ncnn.Model(metadata, reader, bin); + }; + const openText = (param, bin) => { + const reader = new ncnn.TextParamReader(param); + return new ncnn.Model(metadata, reader, bin); + }; + const identifier = context.identifier.toLowerCase(); + let bin = null; + switch (target) { + case 'ncnn.model': { + if (identifier.endsWith('.param')) { + bin = `${context.identifier.substring(0, context.identifier.length - 6)}.bin`; + } else if (identifier.endsWith('.cfg.ncnn')) { + bin = 
`${context.identifier.substring(0, context.identifier.length - 9)}.weights.ncnn`; + } + try { + const content = await context.fetch(bin); + const buffer = content.stream.peek(); + return openText(context.stream.peek(), buffer); + } catch (error) { + return openText(context.stream.peek(), null); + } + } + case 'ncnn.model.bin': { + bin = `${context.identifier.substring(0, context.identifier.length - 10)}.bin`; + try { + const content = await context.fetch(bin); + const buffer = content.stream.peek(); + return openBinary(context.stream.peek(), buffer); + } catch (error) { + return openBinary(context.stream.peek(), null); + } + } + case 'ncnn.weights': { + let file = null; + if (identifier.endsWith('.bin')) { + file = `${context.identifier.substring(0, context.identifier.length - 4)}.param`; + } else if (identifier.endsWith('.weights.ncnn')) { + file = `${context.identifier.substring(0, context.identifier.length - 13)}.cfg.ncnn`; + } + try { + const content = await context.fetch(file); + const buffer = content.stream.peek(); + return openText(buffer, context.stream.peek()); + } catch (error) { + const content = await context.fetch(`${file}.bin`); + const buffer = content.stream.peek(); + return openBinary(buffer, context.stream.peek()); + } + } + default: { + throw new ncnn.Error(`Unsupported ncnn format '${target}'.`); + } + } + } +}; + +ncnn.Model = class { + + constructor(metadata, param, bin) { + this._format = 'ncnn'; + this._graphs = [ new ncnn.Graph(metadata, param, bin) ]; + } + + get format() { + return this._format; + } + + get graphs() { + return this._graphs; + } +}; + +ncnn.Graph = class { + + constructor(metadata, param, bin) { + this._inputs = []; + this._outputs = []; + this._nodes = []; + const blobReader = new ncnn.BlobReader(bin); + const layers = param.layers; + const values = new Map(); + values.map = (name, type, tensor) => { + if (name.length === 0 && tensor) { + return new ncnn.Value(name, type, tensor); + } + if (!values.has(name)) { + values.set(name, new ncnn.Value(name, type || null, tensor || null)); + } else if (tensor || (type && !type.equals(values.get(name).type))) { + throw new ncnn.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + for (const layer of layers) { + const attributes = layer.attributes; + for (const [key, list] of attributes) { + if (key === '30' && Array.isArray(list)) { + const value = list.map((item) => parseInt(item, 10)); + for (const output of layer.outputs || []) { + if (value.length > 0 && value[0] <= value.length - 1) { + const shape = new Array(value.shift()); + for (let i = 0; i < shape.length; i++) { + shape[i] = value.shift(); + } + const type = new ncnn.TensorType('float32', new ncnn.TensorShape(shape)); + values.map(output, type); + } + } + attributes.delete(key); + } + } + } + for (const layer of layers) { + if (layer.type === 'Input' || layer.type === 16) { + const dimensions = Array.from(layer.attributes.values()).map((value) => !isNaN(parseInt(value, 10)) ? 
parseInt(value, 10) : value); + const shape = new ncnn.TensorShape(dimensions); + const type = new ncnn.TensorType('float32', shape); + const input = new ncnn.Argument(layer.name, layer.outputs.map((output) => values.map(output, type))); + this._inputs.push(input); + } else { + const node = new ncnn.Node(metadata, blobReader, layer, values); + this._nodes.push(node); + } + } + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +ncnn.Argument = class { + + constructor(name, value) { + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +ncnn.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new ncnn.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +ncnn.Node = class { + + constructor(metadata, blobReader, layer, values) { + this._inputs = []; + this._outputs = []; + this._chain = []; + this._name = layer.name || ''; + const type = layer.type; + this._type = metadata.type(type); + const attributeMetadata = this._type && this._type.attributes ? this._type.attributes : []; + const attributes = layer.attributes; + const inputs = layer.inputs || []; + let inputIndex = 0; + if (this._type && this._type.inputs) { + for (const inputDef of this._type.inputs) { + if (inputIndex < inputs.length || inputDef.option != 'optional') { + const inputCount = (inputDef.option == 'variadic') ? (inputs.length - inputIndex) : 1; + const inputArguments = inputs.slice(inputIndex, inputIndex + inputCount).filter((id) => id != '' || inputDef.option != 'optional').map((id) => values.map(id)); + this._inputs.push(new ncnn.Argument(inputDef.name, inputArguments)); + inputIndex += inputCount; + } + } + } + this._inputs.push(...inputs.slice(inputIndex).map((input, index) => { + const inputName = ((inputIndex + index) == 0) ? 'input' : (inputIndex + index).toString(); + return new ncnn.Argument(inputName, [ values.map(input) ]); + })); + + const outputs = layer.outputs || []; + let outputIndex = 0; + if (this._type && this._type.outputs) { + for (const outputDef of this._type.outputs) { + if (outputIndex < outputs.length || outputDef.option != 'optional') { + const outputCount = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1; + const outputArguments = outputs.slice(outputIndex, outputIndex + outputCount).map((id) => values.map(id)); + this._outputs.push(new ncnn.Argument(outputDef.name, outputArguments)); + outputIndex += outputCount; + } + } + } + this._outputs.push(...outputs.slice(outputIndex).map((output, index) => { + const outputName = ((outputIndex + index) == 0) ? 'output' : (outputIndex + index).toString(); + return new ncnn.Argument(outputName, [ values.map(output) ]); + })); + const weight = (blobReader, name, dimensions, dataType) => { + const blob = blobReader.read(dimensions, dataType); + dataType = blob ? (blob.dataType || '?') : (dataType || '?'); + const data = blob ? 
blob.data : null; + const type = new ncnn.TensorType(dataType, new ncnn.TensorShape(dimensions)); + const tensor = new ncnn.Tensor(type, data); + this._inputs.push(new ncnn.Argument(name, [ values.map('', null, tensor) ])); + }; + switch (this._type.name) { + case 'BatchNorm': { + const channels = parseInt(attributes.get('0') || 0, 10); + weight(blobReader, 'slope', [ channels ], 'float32'); + weight(blobReader, 'mean', [ channels ], 'float32'); + weight(blobReader, 'variance', [ channels ], 'float32'); + weight(blobReader, 'bias', [ channels ], 'float32'); + break; + } + case 'InnerProduct': { + const activation_names = [ '', 'ReLU', 'Leaky ReLU', 'Clip', 'Sigmoid', 'Mish', 'HardSwish' ]; + const activation_type = parseInt(attributes.get('9') || 0, 10); + if (activation_type > 0 && activation_type < activation_names.length) { + const layer = { + type: activation_names[activation_type], + attributes: new Map() + }; + this._chain.push(new ncnn.Node(metadata, blobReader, layer, values)); + } + const num_output = parseInt(attributes.get('0') || 0, 10); + const weight_data_size = parseInt(attributes.get('2') || 0, 10); + weight(blobReader, 'weight', [ num_output, weight_data_size / num_output ]); + if (parseInt(attributes.get('1') || 0, 10) === 1) { + weight(blobReader, 'bias', [ num_output ], 'float32'); + } + attributes.delete('2'); + break; + } + case 'Bias': { + const bias_data_size = parseInt(attributes.get('0') || 0, 10); + weight(blobReader, 'bias', [ bias_data_size ], 'float32'); + break; + } + case 'Embed': { + const num_output = parseInt(attributes.get('0') || 0, 10); + const weight_data_size = parseInt(attributes.get('3') || 0, 10); + weight(blobReader, 'weight', [ weight_data_size / num_output, num_output ]); + if (parseInt(attributes.get('2') || 0, 10) === 1) { + weight(blobReader, 'bias', [ num_output ], 'float32'); + } + attributes.delete('3'); + break; + } + case 'Convolution': + case 'ConvolutionDepthWise': + case 'Deconvolution': + case 'DeconvolutionDepthWise': { + const activation_names = [ '', 'ReLU', 'LeakyReLU', 'Clip', 'Sigmoid', 'Mish', 'HardSwish' ]; + const activation_type = parseInt(attributes.get('9') || 0, 10); + if (activation_type > 0 && activation_type < activation_names.length) { + const layer = { + type: activation_names[activation_type], + attributes: new Map() + }; + this._chain.push(new ncnn.Node(metadata, blobReader, layer, values)); + } + const num_output = parseInt(attributes.get('0') || 0, 10); + const kernel_w = parseInt(attributes.get('1') || 0, 10); + const kernel_h = parseInt(attributes.get('11') || kernel_w, 10); + const weight_data_size = parseInt(attributes.get('6') || 0, 10); + weight(blobReader, 'weight', [ num_output, weight_data_size / (num_output * kernel_w * kernel_h), kernel_h, kernel_w ]); + if (parseInt(attributes.get('5') || 0, 10) === 1) { + weight(blobReader, 'bias', [ num_output ], 'float32'); + } + attributes.delete('6'); + break; + } + case 'Convolution1D': + case 'ConvolutionDepthWise1D': { + const activation_names = [ '', 'ReLU', 'LeakyReLU', 'Clip', 'Sigmoid', 'Mish', 'HardSwish' ]; + const activation_type = parseInt(attributes.get('9') || 0, 10); + if (activation_type > 0 && activation_type < activation_names.length) { + const layer = { + type: activation_names[activation_type], + attributes: new Map() + }; + this._chain.push(new ncnn.Node(metadata, blobReader, layer, values)); + } + const num_output = parseInt(attributes.get('0') || 0, 10); + const kernel_w = parseInt(attributes.get('1') || 0, 10); + const weight_data_size = 
parseInt(attributes.get('6') || 0, 10); + weight(blobReader, 'weight', [ num_output, weight_data_size / (num_output * kernel_w), kernel_w ]); + if (parseInt(attributes.get('5') || 0, 10) === 1) { + weight(blobReader, 'bias', [ num_output ], 'float32'); + } + attributes.delete('6'); + break; + } + case 'Convolution3D': + case 'ConvolutionDepthWise3D': { + const activation_names = [ '', 'ReLU', 'LeakyReLU', 'Clip', 'Sigmoid', 'Mish', 'HardSwish' ]; + const activation_type = parseInt(attributes.get('9') || 0, 10); + if (activation_type > 0 && activation_type < activation_names.length) { + const layer = { + type: activation_names[activation_type], + attributes: new Map() + }; + this._chain.push(new ncnn.Node(metadata, blobReader, layer, values)); + } + const num_output = parseInt(attributes.get('0') || 0, 10); + const kernel_w = parseInt(attributes.get('1') || 0, 10); + const kernel_h = parseInt(attributes.get('11') || kernel_w, 10); + const kernel_d = parseInt(attributes.get('21') || kernel_w, 10); + const weight_data_size = parseInt(attributes.get('6') || 0, 10); + weight(blobReader, 'weight', [ num_output, weight_data_size / (num_output * kernel_w * kernel_h * kernel_d), kernel_d, kernel_h, kernel_w ]); + if (parseInt(attributes.get('5') || 0, 10) === 1) { + weight(blobReader, 'bias', [ num_output ], 'float32'); + } + attributes.delete('6'); + break; + } + case 'Quantize': { + const scale_data_size = parseInt(attributes.get('0') || 1, 10); + weight(blobReader, 'scale', [ scale_data_size ], 'float32'); + break; + } + case 'Dequantize': { + const scale_data_size = parseInt(attributes.get('0') || 1, 10); + const bias_data_size = parseInt(attributes.get('1') || 0, 10); + weight(blobReader, 'scale', [ scale_data_size ], 'float32'); + weight(blobReader, 'bias', [ bias_data_size ], 'float32'); + break; + } + case 'Requantize': { + const scale_in_data_size = parseInt(attributes.get('0') || 1, 10); + const scale_out_data_size = parseInt(attributes.get('1') || 1, 10); + const bias_data_size = parseInt(attributes.get('2') || 0, 10); + weight(blobReader, 'scale_in', [ scale_in_data_size ], 'float32'); + weight(blobReader, 'scale_out', [ scale_out_data_size ], 'float32'); + weight(blobReader, 'bias', [ bias_data_size ], 'float32'); + break; + } + case 'InstanceNorm': { + const affine = parseInt(attributes.get('2') || 1, 10); + if (affine === 1) { + const channels = parseInt(attributes.get('0') || 0, 10); + weight(blobReader, 'gamma', [ channels ], 'float32'); + weight(blobReader, 'beta', [ channels ], 'float32'); + } + break; + } + case 'Scale': { + const scale_data_size = parseInt(attributes.get('0') || 0, 10); + if (scale_data_size != -233) { + weight(blobReader, 'scale', [ scale_data_size ], 'float32'); + if (attributes.get('1') == '1') { + weight(blobReader, 'bias', [ scale_data_size ], 'float32'); + } + } + break; + } + case 'Normalize': { + const scale_data_size = parseInt(attributes.get('3') || 0, 10); + weight(blobReader, 'scale', [ scale_data_size ], 'float32'); + break; + } + case 'PReLU': { + const num_slope = parseInt(attributes.get('0') || 0, 10); + weight(blobReader, 'slope', [ num_slope ], 'float32'); + break; + } + case 'Padding': { + const per_channel_pad_data_size = parseInt(attributes.get('6') || 0, 10); + weight(blobReader, 'per_channel_pad_data', [ per_channel_pad_data_size ], 'float32'); + break; + } + case 'MemoryData': { + const w = parseInt(attributes.get('0') || 0, 10); + const h = parseInt(attributes.get('1') || 0, 10); + const d = parseInt(attributes.get('11') || 0, 10); + 
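// MemoryData stores the extents of its constant blob in param ids 0 (w), 1 (h), 11 (d) and 2 (c); + // the branches below pick the highest non-zero rank, e.g. h=28, w=28, c=d=0 is read back as a [ 28, 28 ] tensor. + 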
const c = parseInt(attributes.get('2') || 0, 10); + if (d != 0) { + weight(blobReader, 'data', [ c, d, h, w ], 'float32'); + } else if (c != 0) { + weight(blobReader, 'data', [ c, h, w ], 'float32'); + } else if (h != 0) { + weight(blobReader, 'data', [ h, w ], 'float32'); + } else if (w != 0) { + weight(blobReader, 'data', [ w ], 'float32'); + } else { + weight(blobReader, 'data', [ 1 ], 'float32'); + } + break; + } + case 'GroupNorm': { + const affine = parseInt(attributes.get('3') || 1, 10); + if (affine === 1) { + const channels = parseInt(attributes.get('1') || 0, 10); + weight(blobReader, 'gamma', [ channels ], 'float32'); + weight(blobReader, 'beta', [ channels ], 'float32'); + } + break; + } + case 'LayerNorm': { + const channels = parseInt(attributes.get('0') || 0, 10); + weight(blobReader, 'gamma', [ channels ], 'float32'); + weight(blobReader, 'beta', [ channels ], 'float32'); + break; + } + case 'RNN': { + const num_output = parseInt(attributes.get('0') || 0, 10); + const weight_data_size = parseInt(attributes.get('1') || 0, 10); + const direction = parseInt(attributes.get('2') || 0, 10); + const num_directions = direction == 2 ? 2 : 1; + weight(blobReader, 'weight_xc', [ num_directions, num_output, weight_data_size / num_directions / num_output ]); + weight(blobReader, 'bias_c', [ num_directions, num_output ]); + weight(blobReader, 'weight_hc', [ num_directions, num_output, num_output ]); + attributes.delete('1'); + break; + } + case 'LSTM': { + const num_output = parseInt(attributes.get('0') || 0, 10); + const weight_data_size = parseInt(attributes.get('1') || 0, 10); + const direction = parseInt(attributes.get('2') || 0, 10); + const num_directions = direction == 2 ? 2 : 1; + weight(blobReader, 'weight_xc', [ num_directions, 4, num_output, weight_data_size / num_directions / num_output / 4 ]); + weight(blobReader, 'bias_c', [ num_directions, 4, num_output ]); + weight(blobReader, 'weight_hc', [ num_directions, 4, num_output, num_output ]); + attributes.delete('1'); + break; + } + case 'GRU': { + const num_output = parseInt(attributes.get('0') || 0, 10); + const weight_data_size = parseInt(attributes.get('1') || 0, 10); + const direction = parseInt(attributes.get('2') || 0, 10); + const num_directions = direction == 2 ? 
2 : 1; + weight(blobReader, 'weight_xc', [ num_directions, 3, num_output, weight_data_size / num_directions / num_output / 3 ]); + weight(blobReader, 'bias_c', [ num_directions, 4, num_output ]); + weight(blobReader, 'weight_hc', [ num_directions, 3, num_output, num_output ]); + attributes.delete('1'); + break; + } + case 'MultiHeadAttention': { + const embed_dim = parseInt(attributes.get('0') || 0, 10); + // const num_head = parseInt(attributes.get('1') || 0, 10); + // const weight_data_size = parseInt(attributes.get('2') || 0, 10); + weight(blobReader, 'weight_q', [ embed_dim, embed_dim ]); + weight(blobReader, 'bias_q', [ embed_dim ], 'float32'); + weight(blobReader, 'weight_k', [ embed_dim, embed_dim ]); + weight(blobReader, 'bias_k', [ embed_dim ], 'float32'); + weight(blobReader, 'weight_v', [ embed_dim, embed_dim ]); + weight(blobReader, 'bias_v', [ embed_dim ], 'float32'); + weight(blobReader, 'weight_out', [ embed_dim, embed_dim ]); + weight(blobReader, 'bias_out', [ embed_dim ], 'float32'); + attributes.delete('2'); + break; + } + default: { + break; + } + } + + this._attributes = Array.from(attributes).map(([key, value]) => { + const metadata = attributeMetadata[key]; + return new ncnn.Attribute(metadata, key, value); + }); + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get chain() { + return this._chain; + } +}; + +ncnn.Attribute = class { + + constructor(metadata, key, value) { + this._type = ''; + this._name = key; + this._value = value; + if (metadata) { + this._name = metadata.name; + if (metadata.type) { + this._type = metadata.type; + } + switch (this._type) { + case 'int32': { + this._value = parseInt(this._value, 10); + break; + } + case 'float32': { + this._value = parseFloat(this._value); + break; + } + case 'float32[]': { + this._value = this._value.map((v) => parseFloat(v)); + break; + } + default: { + if (this._type) { + this._value = ncnn.Utility.value(this._value, this._type); + } + break; + } + } + if (metadata && metadata.visible === false) { + this._visible = false; + } else if (Object.prototype.hasOwnProperty.call(metadata, 'default')) { + if (this._value == metadata.default || (this._value && this._value.toString() == metadata.default.toString())) { + this._visible = false; + } + } + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? 
false : true; + } +}; + +ncnn.Tensor = class { + + constructor(type, data) { + this._type = type; + this._data = data; + } + + get category() { + return 'Weights'; + } + + get type() { + return this._type; + } + + get values() { + return this._data; + } +}; + +ncnn.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType || '?'; + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + equals(obj) { + return obj && this._dataType === obj.dataType && this._shape && this._shape.equals(obj.shape); + } + + toString() { + return this._dataType + this._shape.toString(); + } +}; + +ncnn.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + equals(obj) { + return obj && Array.isArray(obj.dimensions) && + Array.isArray(this._dimensions) && this._dimensions.length === obj.dimensions.length + && obj.dimensions.every((value, index) => this._dimensions[index] === value); + } + + toString() { + return this._dimensions ? (`[${this._dimensions.map((dimension) => dimension ? dimension.toString() : '?').join(',')}]`) : ''; + } +}; + +ncnn.Utility = class { + + static value(value, type) { + ncnn.Utility._enum = ncnn.Utility._enum || new Map([ + [ 'BinaryOpType', [ 'Add', 'Sub', 'Mul', 'Div', 'Max', 'Min', 'Pow', 'RSub', 'RDiv' ] ], + [ 'CastOpType', [ 'Auto', 'Float32', 'Float16', 'Int8', 'BFloat16' ] ], + [ 'EltwiseType', [ 'Prod', 'Sum', 'Max' ] ], + [ 'PaddingType', [ 'Constant', 'Replicate', 'Reflect' ] ], + [ 'PoolingType', [ 'Max', 'Average' ] ], + [ 'InterpResizeType', [ '', 'Nearest', 'Bilinear', 'Bicubic' ] ], + [ 'PermuteOrderType', [ 'WH WHC WHDC', 'HW HWC HWDC', 'WCH WDHC', 'CWH DWHC', 'HCW HDWC', 'CHW DHWC', 'WHCD', 'HWCD', 'WCHD', 'CWHD', 'HCWD', 'CHWD', 'WDCH', 'DWCH', 'WCDH', 'CWDH', 'DCWH', 'CDWH', 'HDCW', 'DHCW', 'HCDW', 'CHDW', 'DCHW', 'CDHW' ] ], + [ 'ReductionOpType', [ 'Sum', 'ASum', 'SumSq', 'Mean', 'Max', 'Min', 'Prod', 'L1', 'L2', 'LogSum', 'LogSumExp' ] ], + [ 'UnaryOpType', [ 'Abs', 'Neg', 'Floor', 'Ceil', 'Square', 'Sqrt', 'Rsq', 'Exp', 'Log', 'Sin', 'Cos', 'Tan', 'ASin', 'ACos', 'ATan', 'Reciprocal', 'Tanh' ] ] + ]); + if (this._enum.has(type) && typeof value === 'string') { + const index = parseInt(value, 10); + const list = this._enum.get(type); + if (Number.isInteger(index) && index < list.length) { + return list[index]; + } + } + return value; + } +}; + +ncnn.TextParamReader = class { + + constructor(buffer) { + const reader = text.Reader.open(buffer); + const lines = []; + for (;;) { + const line = reader.read(); + if (line === undefined) { + break; + } + lines.push(line.trim()); + } + const signature = lines.shift(); + const header = (signature !== '7767517' ? 
signature : lines.shift()).split(' '); + if (header.length !== 2 || !header.every((value) => value >>> 0 === parseFloat(value))) { + throw new ncnn.Error('Invalid header.'); + } + const layers = []; + while (lines.length > 0) { + const line = lines.shift(); + if (line.length > 0) { + const columns = line.split(' ').filter((s) => s.length != 0); + const layer = {}; + layer.type = columns.shift(); + layer.name = columns.shift(); + const inputCount = parseInt(columns.shift(), 10); + const outputCount = parseInt(columns.shift(), 10); + layer.inputs = columns.splice(0, inputCount); + layer.outputs = columns.splice(0, outputCount); + layer.attributes = new Map(); + const attributes = layer.attributes; + let index = 0; + for (const column of columns) { + const parts = column.split('='); + if (parts.length > 2) { + throw new ncnn.Error(`Invalid attribute '${column}'.`); + } + let key = (parts.length === 2) ? parts[0].trim() : index.toString(); + let value = (parts.length === 2) ? parts[1].trim() : parts[0].trim(); + const keyInt = parseInt(key, 10); + if (keyInt < 0) { + value = value.split(',').map((v) => v.trim()); + value.shift(); + key = (-(keyInt + 23300)).toString(); + } + attributes.set(key, value); + index++; + } + layers.push(layer); + } + } + this._layers = layers; + } + + get layers() { + return this._layers; + } +}; + +ncnn.BinaryParamReader = class { + + constructor(buffer) { + const reader = new base.BinaryReader(buffer); + if (reader.int32() !== 0x007685DD) { + throw new ncnn.Error('Invalid signature.'); + } + const layerCount = reader.int32(); + /* const blobCount = */ reader.int32(); + this._layers = []; + for (let i = 0; i < layerCount; i++) { + const layer = { + type: reader.int32(), + name: i.toString(), + attributes: new Map(), + inputs: [], + outputs: [] + }; + const inputCount = reader.int32(); + const outputCount = reader.int32(); + for (let j = 0; j < inputCount; j++) { + layer.inputs.push(reader.int32().toString()); + } + for (let j = 0; j < outputCount; j++) { + layer.outputs.push(reader.int32().toString()); + } + const attributes = layer.attributes; + let id = reader.int32(); + while (id != -233) { + const isArray = id <= -23300; + if (isArray) { + id = -id - 23300; + } + const key = id.toString(); + if (isArray) { + const length = reader.int32(); + const values = []; + for (let i = 0; i < length; i++) { + values.push(reader.int32()); + } + attributes.set(key, values); + } else { + const value = reader.int32(); + attributes.set(key, value); + } + id = reader.int32(); + } + this._layers.push(layer); + } + } + + get layers() { + return this._layers; + } +};
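Both param readers above share ncnn's attribute-key convention: scalar attributes use non-negative integer ids, while array-valued attributes are stored under `-23300 - id`, with the element count leading the values. A minimal standalone sketch of that decoding, assuming a hypothetical attribute column from a text `.param` layer line:

```javascript
// Minimal sketch of the attribute-key convention handled by the two readers
// above (text form: 'key=value'; array keys stored as -23300 - id with the
// first comma-separated element being the count). Sample columns are hypothetical.
const decode = (column, index) => {
    const parts = column.split('=');
    let key = parts.length === 2 ? parts[0] : index.toString();
    let value = parts.length === 2 ? parts[1] : parts[0];
    if (parseInt(key, 10) < 0) {
        value = value.split(',').slice(1); // first element is the count
        key = (-(parseInt(key, 10) + 23300)).toString();
    }
    return [ key, value ];
};
console.log(decode('0=0', 0));          // [ '0', '0' ] - scalar attribute 0
console.log(decode('-23303=2,0,1', 1)); // [ '3', [ '0', '1' ] ] - array attribute 3
```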
ncnn.BlobReader = class { + + constructor(buffer) { + this._buffer = buffer; + this._position = 0; + } + + read(shape, dataType) { + if (this._buffer) { + if (!dataType) { + if (this._buffer && this._position + 4 < this._buffer.length) { + const f0 = this._buffer[this._position++]; + const f1 = this._buffer[this._position++]; + const f2 = this._buffer[this._position++]; + const f3 = this._buffer[this._position++]; + const type = f0 | f1 << 8 | f2 << 16 | f3 << 24; + switch (type) { + case 0x00000000: + dataType = 'float32'; + break; + case 0x01306B47: + dataType = 'float16'; + break; + case 0x000D4B38: + dataType = 'int8'; + break; + case 0x00000001: + dataType = 'qint8'; + break; + case 0x0002C056: // size * sizeof(float) - raw data with extra scaling + default: + throw new ncnn.Error(`Unsupported weight type '${type}'.`); + } + } else { + this._buffer = null; + } + } + let data = null; + let size = 1; + if (shape) { + for (const dimension of shape) { + size *= dimension; + } + } else { + this._buffer = null; + } + if (this._buffer) { + if (dataType) { + const position = this._position; + switch (dataType) { + case 'float32': + size *= 4; + this._position += size; + data = this._buffer.subarray(position, this._position); + break; + case 'float16': + size *= 2; + this._position += size; + data = this._buffer.subarray(position, this._position); + break; + case 'int8': + this._position += size; + data = this._buffer.subarray(position, this._position); + break; + case 'qint8': + this._position += size + 1024; + data = null; + break; + default: + throw new ncnn.Error(`Unsupported weight type '${dataType}'.`); + } + } + } + return { dataType: dataType, data: data }; + } + return null; + } +}; + +ncnn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading ncnn model.'; + } +}; + +export const ModelFactory = ncnn.ModelFactory; + diff --git a/nnabla-metadata.json b/nnabla-metadata.json new file mode 100644 index 00000000000..7a2ff8f5bf6 --- /dev/null +++ b/nnabla-metadata.json @@ -0,0 +1,8014 @@ +[ + { + "name": "Affine", + "description": "Affine layer, also called the fully connected layer. It calculates:\n\n.. math::\n {\\mathbf y} = {\\mathbf A} {\\mathbf x} + {\\mathbf b}.\n\nwhere :math:`{\\mathbf x}` is the input and :math:`{\\mathbf y}` is the output.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input N-D array with shape (:math:`M_0 \\times ... \\times M_{B-1} \\times D_B \\times ... \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": "Weight matrix with shape (:math:`(D_B \\times ... \\times D_N) \\times L_{0} \\times \\ldots \\times L_{I}`)" + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias vector (:math:`L_{0} \\times \\ldots \\times L_{I}`)" + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Base axis of Affine operation. Dimensions up to base_axis are treated as sample dimensions." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": ":math:`(B + 1)`-D array. (:math:`M_0 \\times ... \\times M_{B-1} \\times L_{0} \\times \\ldots \\times L_{I}`)" + } + ], + "category": "Layer" + },
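The `base_axis` semantics described in the Affine entry above reduce to a simple shape rule: sample dimensions before `base_axis` pass through, everything from `base_axis` on is flattened into the matrix product. A hedged sketch (helper name and shapes are illustrative, not nnabla API):

```javascript
// Sketch of the Affine output-shape rule stated above: dimensions before
// base_axis are kept, the rest map to the weight's output dimensions.
const affineOutputShape = (xShape, weightOutDims, baseAxis = 1) => {
    const sample = xShape.slice(0, baseAxis); // (M_0, ..., M_{B-1})
    return sample.concat(weightOutDims);      // (M_0, ..., M_{B-1}, L_0, ..., L_I)
};
// A (8, 3, 32, 32) input with base_axis=1 and a weight mapping 3*32*32 -> (100,):
console.log(affineOutputShape([ 8, 3, 32, 32 ], [ 100 ], 1)); // [ 8, 100 ]
```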
+ { + "name": "RNN", + "description": "RNN function implements Elman RNN with nonlinearity to input sequence.\nRNN function is defined as follows:\n\n.. math::\n {\\mathbf h_t} = {\\mathbf \\tanh}( {\\mathbf w_{ih}} *{\\mathbf x_t} + {\\mathbf b_{ih}} + {\\mathbf w_{hh}}* {\\mathbf h_{(t-1)}} + {\\mathbf b_{hh}}).\n\nWe use the following notations to describe the inputs and outputs below.\n:math:`T`: sequence length, :math:`B`: batch size, :math:`I`: input size, :math:`L`: number of layers, :math:`D`: number of directions, can be either 1 or 2, :math:`H`: hidden size.\n\nReferences:\n * `Jeffrey Elman, Finding Structure in Time. `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input N-D array with shape :math:`(T, B, I)`." + }, + { + "name": "h", + "type": "nnabla.Variable", + "description": "Input N-D array with shape :math:`(L, D, B, H)`." + }, + { + "name": "weight_l0", + "type": "nnabla.Variable", + "description": "Input N-D array with shape :math:`(D, H, I + H)`." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "option": "optional", + "description": "Input N-D array with shape :math:`(L-1, D, H, D * H + H)`." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Input N-D array with shape :math:`(L, D, H)`." + } + ], + "attributes": [ + { + "name": "num_layers", + "type": "int64", + "default": 1, + "description": "Number of layers in the network. If set to 1, only the weights for the first layer will be invoked. Default is 1." + }, + { + "name": "nonlinearity", + "type": "string", + "default": "tanh", + "description": "Type of nonlinearity applied to input sequence. Must be either tanh or relu. Default is tanh." + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0, + "description": "Dropout ratio applied to parameters. Default is 0.0." + }, + { + "name": "bidirectional", + "type": "boolean", + "default": false, + "description": "If True, bidirectional computation will be performed in each layer. Default is False." + }, + { + "name": "training", + "type": "boolean", + "default": true, + "description": "Backpropagation will be performed only when it is True. Default is True." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output :math:`y` with shape :math:`(T, B, D * H)`" + }, + { + "name": "h_n", + "type": "nnabla.Variable", + "description": "Output :math:`h_n` with shape :math:`(L, D, B, H)`" + } + ], + "category": "Layer" + },
+ { + "name": "LSTM", + "description": "N-Step LSTM layer.\n\n.. math::\n {\\mathbf f_t} &=& {\\mathbf \\sigma}( {\\mathbf W_f} *{\\mathbf x_t} + {\\mathbf U_f}* {\\mathbf h_{(t-1)}} + {\\mathbf b_f})\\\\\n {\\mathbf i_t} &=& {\\mathbf \\sigma}( {\\mathbf W_i} *{\\mathbf x_t} + {\\mathbf U_i}* {\\mathbf h_{(t-1)}} + {\\mathbf b_i})\\\\\n {\\mathbf o_t} &=& {\\mathbf \\sigma}( {\\mathbf W_o} *{\\mathbf x_t} + {\\mathbf U_o}* {\\mathbf h_{(t-1)}} + {\\mathbf b_o})\\\\\n {\\mathbf c_t} &=& {\\mathbf f_t}\\odot {\\mathbf c_{(t-1)}} + {\\mathbf i_t}\\odot {\\mathbf \\tanh}({\\mathbf W_c}*{\\mathbf x_t} + {\\mathbf U_c} *{\\mathbf h_{(t-1)}} + {\\mathbf b_c})\\\\\n {\\mathbf h_t} &=& {\\mathbf o_t} \\odot {\\mathbf \\tanh}({\\mathbf c_t}).\n\nWe use the following notations to describe the inputs and outputs below.\n:math:`T`: sequence length, :math:`B`: batch size, :math:`I`: input size, :math:`L`: number of layers, :math:`D`: number of directions, can be either 1 or 2, :math:`H`: hidden size.\n\nReferences:\n * `S. Hochreiter and J. Schmidhuber, Long Short-Term Memory. `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input N-D array with shape :math:`(T, B, I)`." + }, + { + "name": "h", + "type": "nnabla.Variable", + "description": "Input N-D array with shape :math:`(L, D, B, H)`." + }, + { + "name": "c", + "type": "nnabla.Variable", + "description": "Input N-D array with shape :math:`(L, D, B, H)`." + }, + { + "name": "weight_l0", + "type": "nnabla.Variable", + "description": "weight parameters for the first layer. Shape is :math:`(D, 4, H, I + H)`." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "option": "optional", + "description": "weight parameters for the second layer and above. Shape is :math:`(L-1, D, 4, H, D * H + H)`." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias vector (:math:`L`). Shape is :math:`(L, D, 4, H)`." + } + ], + "attributes": [ + { + "name": "num_layers", + "type": "int64", + "default": 1, + "description": "Number of layers in the network. If set to 1, only the weights for the first layer will be invoked. Default is 1." + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0, + "description": "Dropout ratio applied to parameters. Default is 0.0." + }, + { + "name": "bidirectional", + "type": "boolean", + "default": false, + "description": "If True, bidirectional computation will be performed in each layer. Default is False." + }, + { + "name": "training", + "type": "boolean", + "default": true, + "description": "Backpropagation will be performed only when it is True. Default is True." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output :math:`y` with shape :math:`(T, B, D * H)`. Its memory layout can be reshaped as :math:`(T, B, D, H)`." + }, + { + "name": "h_n", + "type": "nnabla.Variable", + "description": "Output :math:`h_n` with shape :math:`(L, D, B, H)`" + }, + { + "name": "c_n", + "type": "nnabla.Variable", + "description": "Output :math:`c_n` with shape :math:`(L, D, B, H)`" + } + ], + "category": "Layer" + },
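The recurrent entries above and the GRU entry below all state their `weight_l0` shapes in terms of the gate count: `(D, H, I + H)` for RNN, `(D, 4, H, I + H)` for LSTM (four gates), `(D, 3, H, I + H)` for GRU (three gates). A small illustrative helper (not nnabla API) that reproduces those stated shapes:

```javascript
// Reproduces the weight_l0 shapes stated in the RNN/LSTM/GRU metadata above.
// Helper name and call style are illustrative only.
const weightL0Shape = (type, inputSize, hiddenSize, bidirectional = false) => {
    const D = bidirectional ? 2 : 1;                 // number of directions
    const gates = { RNN: 1, LSTM: 4, GRU: 3 }[type]; // gates per step
    return gates === 1
        ? [ D, hiddenSize, inputSize + hiddenSize ]
        : [ D, gates, hiddenSize, inputSize + hiddenSize ];
};
console.log(weightL0Shape('LSTM', 128, 256, true)); // [ 2, 4, 256, 384 ]
```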
+ { + "name": "GRU", + "description": "N-Step GRU layer.\n\n.. math::\n {\\mathbf r_t} &=& {\\mathbf \\sigma}( {\\mathbf W_r} *{\\mathbf x_t} + {\\mathbf U_r}* {\\mathbf h_{(t-1)}} + {\\mathbf b_r})\\\\\n {\\mathbf z_t} &=& {\\mathbf \\sigma}( {\\mathbf W_z} *{\\mathbf x_t} + {\\mathbf U_z}* {\\mathbf h_{(t-1)}} + {\\mathbf b_z})\\\\\n {\\mathbf n_t} &=& {\\mathbf \\tanh}( {\\mathbf W_n}{\\mathbf x_t}+ {\\mathbf b_{in}}+ {\\mathbf r_t}\\odot( {\\mathbf U_n}{\\mathbf h_{t-1}}+ {\\mathbf b_{hn}})) \\\\\n {\\mathbf h_t} &=& (1- {\\mathbf z_t})\\odot {\\mathbf n_t} + {\\mathbf z_t}\\odot {\\mathbf h_{t-1}}.\n\nWe use the following notations to describe the inputs and outputs below.\n:math:`T`: sequence length, :math:`B`: batch size, :math:`I`: input size, :math:`L`: number of layers, :math:`D`: number of directions, can be either 1 or 2, :math:`H`: hidden size.\n\nReferences:\n\n * `K. Cho et al., Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation. `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input N-D array with shape :math:`(T, B, I)`." + }, + { + "name": "h", + "type": "nnabla.Variable", + "description": "Input N-D array with shape :math:`(L, D, B, H)`." + }, + { + "name": "weight_l0", + "type": "nnabla.Variable", + "description": "weight parameters for the first layer. Shape is :math:`(D, 3, H, I + H)`." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "option": "optional", + "description": "weight parameters for the second layer and above. Shape is :math:`(L-1, D, 3, H, D * H + H)`." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias vector (:math:`L`). Shape is :math:`(L, D, 4, H)`." + } + ], + "attributes": [ + { + "name": "num_layers", + "type": "int64", + "default": 1, + "description": "Number of layers in the network. If set to 1, only the weights for the first layer will be invoked. Default is 1." + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0, + "description": "Dropout ratio applied to parameters. Default is 0.0." + }, + { + "name": "bidirectional", + "type": "boolean", + "default": false, + "description": "If True, bidirectional computation will be performed in each layer. Default is False." + }, + { + "name": "training", + "type": "boolean", + "default": true, + "description": "Backpropagation will be performed only when it is True. Default is True." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output :math:`y` with shape :math:`(T, B, D * H)`. Its memory layout can be reshaped as :math:`(T, B, D, H)`." + }, + { + "name": "h_n", + "type": "nnabla.Variable", + "description": "Output :math:`h_n` with shape :math:`(L, D, B, H)`" + } + ], + "category": "Layer" + }, + { + "name": "Convolution", + "description": "N-D Convolution with bias.\n\nSee references for dilated convolution (a.k.a. atrous convolution).\n\nReferences:\n\n * `Chen et al., DeepLab: Semantic Image Segmentation with Deep Convolutional\n Nets, Atrous Convolution, and Fully Connected CRFs.\n `_\n\n * `Yu et al., Multi-Scale Context Aggregation by Dilated Convolutions.\n `_\n\nNote:\n\n Convolution is a computationally intensive operation that\n should preferably be run with the `cudnn` backend. NNabla\n then uses CuDNN library functions to determine and cache the\n fastest algorithm for the given set of convolution parameters,\n which results in additional memory consumption which may pose\n a problem for GPUs with insufficient memory size. In that\n case, the `NNABLA_CUDNN_WORKSPACE_LIMIT` environment variable\n can be used to restrict the choice of algorithms to those that\n fit the given workspace memory limit, expressed in bytes. In\n some cases it may also be desired to restrict the automatic\n search to algorithms that produce deterministic (reproducible)\n results. This can be requested by setting the environment\n variable `NNABLA_CUDNN_DETERMINISTIC` to a non-zero value.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C \\times L_1 \\times ... \\times L_N`)." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": ":math:`(2 + N)`-D array (:math:`C' \\times C \\times K_1 \\times ... \\times K_N`)." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias vector (:math:`C'`)." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "base axis :math:`B`." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * (len(x.shape) - (base_axis+1))", + "description": "Padding sizes for dimensions." + }, + { + "name": "stride", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Stride sizes for dimensions." + }, + { + "name": "dilation", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Dilation sizes for dimensions." + }, + { + "name": "group", + "type": "int64", + "default": 1, + "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C' \\times L'_1 \\times ... \\times L'_N`).\n\nA spatial size of the output is calculated as\n\n.. math::\n\n L'_i = \\frac{L_i + 2 p_i - d_i (k_i - 1) - 1}{s_i} + 1,\n\nwhere :math:`L_i` is the spatial size, :math:`p_i` is the padding, :math:`d_i` is the dilation, :math:`k_i` is the kernel size, and :math:`s_i` is the stride for :math:`i`-th spatial dimension. The same calculation can also be applied to the other spatial dimensions." + } + ], + "category": "Layer" + },
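The spatial-size formula in the Convolution entry above can be checked numerically. A minimal sketch, assuming the division truncates toward zero as in the usual floor convention:

```javascript
// Worked example of the spatial-size formula stated above:
// L'_i = floor((L_i + 2*p_i - d_i*(k_i - 1) - 1) / s_i) + 1
const convOutputSize = (L, k, { pad = 0, stride = 1, dilation = 1 } = {}) =>
    Math.floor((L + 2 * pad - dilation * (k - 1) - 1) / stride) + 1;
// 224 input, 3x3 kernel, pad 1, stride 2 -> 112:
console.log(convOutputSize(224, 3, { pad: 1, stride: 2 })); // 112
```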
+ { + "name": "FusedConvolution", + "description": "Fused operation of Pad, Convolution, Batch Normalization, Add2 and Activation.\n\nThis is an equivalent operation to the following,\nbut may be more computationally efficient depending on the backend implementation\n(currently we don't provide an efficient implementation on any backend).\n\n.. code-block:: python\n\n h = F.pad(x, *pad_opts)\n h = F.convolution(h, weight, bias, pad=(0, ...), *conv_opts)\n h = F.batch_normalization(h, beta, gamma, mean, variance, *bn_opts)\n y = F.relu(h + z)\n\nYou can optionally disable either of pad, batch normalization, residual addition and activation.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of input." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": "`weight` in :meth:`~nnabla.functions.convolution`." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "`bias` in :meth:`~nnabla.functions.convolution`." + }, + { + "name": "beta", + "type": "nnabla.Variable", + "option": "optional", + "description": "`beta` in :meth:`~nnabla.functions.batch_normalization`." + }, + { + "name": "gamma", + "type": "nnabla.Variable", + "option": "optional", + "description": "`gamma` in :meth:`~nnabla.functions.batch_normalization`." + }, + { + "name": "mean", + "type": "nnabla.Variable", + "option": "optional", + "description": "`mean` in :meth:`~nnabla.functions.batch_normalization`." + }, + { + "name": "variance", + "type": "nnabla.Variable", + "option": "optional", + "description": "`variance` in :meth:`~nnabla.functions.batch_normalization`." + }, + { + "name": "z", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of a residual input. By specifying None, the activation function will follow immediately after BN operation." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "`base_axis` in :meth:`~nnabla.functions.convolution`. Note that the batch normalization `axes` is determined by this and `channel_last` option." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * (len(x.shape) - (base_axis+1))", + "description": "`pad_width` in :meth:`~nnabla.functions.pad`.\nIf `len(pad) == (len(x.shape) - (base_axis+1))`, considered as `pad` in :meth:`~nnabla.functions.convolution`." + }, + { + "name": "stride", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "`stride` in :meth:`~nnabla.functions.convolution`." + }, + { + "name": "dilation", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "`dilation` in :meth:`~nnabla.functions.convolution`." + }, + { + "name": "group", + "type": "int64", + "default": 1, + "description": "`group` in :meth:`~nnabla.functions.convolution`."
+ }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "`channel_last` in :meth:`~nnabla.functions.convolution`.group" + }, + { + "name": "decay_rate", + "type": "float32", + "default": 0.9, + "description": "`decay_rate` in :meth:`~nnabla.functions.batch_normalization`." + }, + { + "name": "eps", + "type": "float32", + "default": 1e-05, + "description": "`eps` in :meth:`~nnabla.functions.batch_normalization`." + }, + { + "name": "batch_stat", + "type": "boolean", + "default": true, + "description": "`batch_stat` in :meth:`~nnabla.functions.batch_normalization`." + }, + { + "name": "nonlinearity", + "type": "string", + "default": "relu", + "description": "Activation type as string. The following is a list of available activation types\nand optional parameters specified as a vector of float by `nonlinearity_args`.\n\n=============== ===============================\nActivation type Arguments (`nonlinearity_args`)\n=============== ===============================\nidentity No argument\nrelu No argument\nsigmoid No argument\ntanh No argument\nleaky_relu [alpha] (see LeakyReLU doc)\nelu [alpha] (see ELU doc)\nrelu6 No argument\n=============== ===============================" + }, + { + "name": "nonlinearity_args", + "type": "float32[]", + "default": "list()", + "description": "Optional arguments of nonlinearity as a vector of float.\nSee the description of the `nonlinearity` argument." + }, + { + "name": "pad_mode", + "type": "string", + "default": "constant", + "description": "`mode` in :meth:`~nnabla.functions.pad`." + }, + { + "name": "constant_value", + "type": "float32", + "default": 0.0, + "description": "`constant_value` in :meth:`~nnabla.functions.pad`." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Layer" + }, + { + "name": "DepthwiseConvolution", + "description": "N-D Depthwise Convolution with bias.\n\nReferences:\n\n * `F. Chollet. Xception: Deep Learning with Depthwise Separable Convolutions.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C \\times L_1 \\times ... \\times L_N`)." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": ":math:`(1 + N)`-D array (:math:`C \\times K_1 \\times ... \\times K_N`)." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias vector (:math:`C'`)." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "base axis :math:`B`." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * (len(x.shape) - (base_axis+1))", + "description": "Padding sizes for dimensions." + }, + { + "name": "stride", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Stride sizes for dimensions." + }, + { + "name": "dilation", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Dilation sizes for dimensions." + }, + { + "name": "multiplier", + "type": "int64", + "default": 1, + "description": "Number of output feature maps per input feature map." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C' \\times L'_1 \\times ... \\times L'_N`).\n\nThe output map size :math:`C'` is :math:`C` multiplied by :math:`m`\n\n.. 
math::\n\n C' = m \\times C,\n\nwhere :math:`m` is the multiplier.\n\nA spatial size of the output is calculated as\n\n.. math::\n\n L'_i = \\frac{L_i + 2 p_i - d_i (k_i - 1) - 1}{s_i} + 1,\n\nwhere :math:`L_i` is the spatial size, :math:`p_i` is the padding, :math:`d_i` is the dilation, :math:`k_i` is the kernel size, and :math:`s_i` is the stride for :math:`i`-th spatial dimension. The same calculation can also be applied to the other spatial dimensions." + } + ], + "category": "Layer" + }, + { + "name": "Deconvolution", + "description": "N-D deconvolution, also known as transposed convolution, with bias operates backward convolution (derivative of the output w.r.t. the input) plus channel-wise learned bias.\n\nThe weights are specified in the same manner as :meth:`~nnabla.functions.convolution` , as if it was an ordinary convolution function.\nThe forward operation of :meth:`~nnabla.functions.deconvolution` will then be operationally equivalent to the backward pass of :meth:`~nnabla.functions.convolution` .\nTherefore, the number of input channels (can be seen as output channels of forward convolution) is specified in the first dimension, and the number of the output channels divided by the number of groups is specified in the second dimension.\n\nFor `stride > 1`, a parameter-wise identical deconvolution on the output\nof a convolution may not produce the same output shape as the input to\nthe convolution if, due to striding, the convolution did not fully cover\nthe input spatial dimension. The `output_padding` parameter can then be\nused to appropriately increase the calculated output shape. Note that\nthis is used to find the output shape for the deconvolution operation,\nbut not to add zero-padding to the output.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C \\times L_1 \\times ... \\times L_N`)." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": ":math:`(2 + N)`-D array (:math:`C \\times C' \\times K_1 \\times ... \\times K_N`)." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias vector (:math:`C'`)." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "base axis :math:`B`." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * (len(x.shape) - (base_axis+1))", + "description": "Padding sizes for dimensions." + }, + { + "name": "stride", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Stride sizes for dimensions." + }, + { + "name": "dilation", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Dilation sizes for dimensions." + }, + { + "name": "group", + "type": "int64", + "default": 1, + "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order." + }, + { + "name": "output_padding", + "type": "shape", + "default": "(0,) * (len(x.shape) - (base_axis+1))", + "description": "Additional size added to the output shape." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... 
\\times M_B \\times C' \\times L'_1 \\times ... \\times L'_N`).\n\nA spatial size of the output is calculated as\n\n.. math::\n\n L'_i =s_i (L_i - 1) - 2 p_i + d_i (k_i - 1) + 1,\n\nwhere :math:`s_i` is the stride, :math:`L_i` is the spatial size, :math:`p_i` is the padding, :math:`d_i` is the dilation, and :math:`k_i` is the kernel size for :math:`i`-th spatial dimension. The same calculation can also be applied to the other spatial dimensions." + } + ], + "category": "Layer" + }, + { + "name": "DepthwiseDeconvolution", + "description": "Depthwise deconvolution computes the transposed depthwise convolution with bias for one-dimensional and two-dimensional input data.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C \\times L_1 \\times ... \\times L_N`)." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": ":math:`(1 + N)`-D array (:math:`C \\times K_1 \\times ... \\times K_N`)." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias vector (:math:`C'`)." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "base axis :math:`B`." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * (len(x.shape) - (base_axis+1))", + "description": "Padding sizes for dimensions." + }, + { + "name": "stride", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Stride sizes for dimensions." + }, + { + "name": "dilation", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Dilation sizes for dimensions." + }, + { + "name": "divisor", + "type": "int64", + "default": 1, + "description": "Number of input feature maps per output feature map." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C' \\times L'_1 \\times ... \\times L'_N`).\n\nThe output map size :math:`C'` is :math:`C` multiplied by :math:`m`\n\n.. math::\n\n C' = \\frac{C}{d},\n\nwhere :math:`d` is the divisor.\n\nA spatial size of the output is calculated as\n\n.. math::\n L'_i =s_i (L_i - 1) - 2 p_i + d_i (k_i - 1) + 1,\n\nwhere :math:`s_i` is the stride, :math:`L_i` is the spatial size, :math:`p_i` is the padding, :math:`d_i` is the dilation, and :math:`k_i` is the kernel size for :math:`i`-th spatial dimension. The same calculation can also be applied to the other spatial dimensions." + } + ], + "category": "Layer" + }, + { + "name": "DeformableConvolution", + "description": "2-D Deformable Convolution with bias.\nAnother convolution with fixed output channels must be passed externally to calculate the offsets and mask.\nMask should be normalized to :math:`[0,1]` interval.\n\n.. math::\n \\begin{eqnarray}\n y(p) = \\sum_{k=1}^{K} w_k \\cdot x(p + p_k + \\Delta p_k) \\cdot \\Delta m_k,\n \\end{eqnarray}\n\nwhere :math:`x` and :math:`y` are input and output, :math:`w_k` is the weight, :math:`p` is the pixel location of interest, :math:`p_k` is the fixed displacement e.g., :math:`p_k \\in \\{(-1, -1), (-1, 0), \\ldots (1, 1)\\}` for the 2D 3x3 receptive field, :math:`\\Delta p_k` is the learnable displacement, and :math:`\\Delta m_k` is the learnable scale normalized in :math:`[0, 1]` by a function like the sigmoid. 
Note that :math:`\\Delta p_k` and :math:`\\Delta m_k` are sample-dependent, location-dependent, and feature-independent.\n\nReferences:\n\n * `Dai et al., Deformable Convolutional Networks.\n `_\n\n * `Zhu et al., Deformable ConvNets v2: More Deformable, Better Results.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C \\times L_1 \\times ... \\times L_N`)." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": ":math:`(2 + N)`-D array (:math:`C' \\times C \\times K_1 \\times ... \\times K_N`)." + }, + { + "name": "offset", + "type": "nnabla.Variable", + "description": "Offsets for deformable convolutions. Shape is fixed to :math:`(N, deformable{\\_}group \\times 2 \\times Kh \\times Kw, H, W)`. Offsets must be calculated externally through a separate convolution layer." + }, + { + "name": "mask", + "type": "nnabla.Variable", + "option": "optional", + "description": "Normalized mask for deformable convolutions v2. Shape is fixed to :math:`(N, deformable{\\_}group \\times Kh \\times Kw, H, W)`. Masks must be calculated externally together with the offsets through a separate convolution layer." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias vector (:math:`C'`)." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "base axis :math:`B`." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * (len(x.shape) - (base_axis+1))", + "description": "Padding sizes for dimensions." + }, + { + "name": "stride", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Stride sizes for dimensions." + }, + { + "name": "dilation", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Dilation sizes for dimensions." + }, + { + "name": "group", + "type": "int64", + "default": 1, + "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction." + }, + { + "name": "deformable_group", + "type": "int64", + "default": 1, + "description": "Number of deformable groups of channels." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C' \\times L'_1 \\times ... \\times L'_N`).\n\nA spatial size of the output is calculated as\n\n.. math::\n\n L'_i = \\frac{L_i + 2 p_i - d_i (k_i - 1) - 1}{s_i} + 1,\n\nwhere :math:`L_i` is the spatial size, :math:`p_i` is the padding, :math:`d_i` is the dilation, :math:`k_i` is the kernel size, and :math:`s_i` is the stride for :math:`i`-th spatial dimension. The same calculation can also be applied to the other spatial dimensions." 
+ } + ], + "category": "Layer" + }, + { + "name": "AdaptiveSeparableConvolution", + "description": "2-D Adaptive Separable Convolution for NCHW (the channel-first tensor).\nSample and pixel dependent vertical and horizontal kernels are dynamically generated ones,\nwhich are used for approximating a feature-independent 2-D kernel in this function.\nThus, the kernel used in this function is dependent on samples and pixels but independent on features.\n\nIf the padding is needed, use the pad function to the input :math:`x` before this function.\n\nAdaptive separable convolution is formulated as\n\n.. math::\n\n \\tilde{I}(c, h, w) = \\sum_{j, i} K_v(j, h, w) \\times K_h(i, h, w) \\times I(c, h + j, w + i),\n\nwhere :math:`I(c, h, w)` and :math:`\\tilde{I}(c, h, w)` are the input and output images\nat :math:`c`-th channel, :math:`h`-th height, :math:`w`-th width.\n:math:`K_V(:, h, w)` and :math:`K_h(:, h, w)` are vertical and horizontal 1-D kernels\nat :math:`h`-th height and :math:`w`-th width.\n\nReferences:\n\n * `Simon Niklaus, Long Mai, Feng Liu,\n Video Frame Interpolation via Adaptive Separable Convolution,\n `_\n\n * `Mart Kartasev, Carlo Rapisarda, Dominik Fay,\n Implementing Adaptive Separable Convolution for Video Frame Interpolation,\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": ":math:`4-D` array (:math:`B \\times C \\times H \\times W`)" + }, + { + "name": "vertical_kernel", + "type": "nnabla.Variable", + "description": ":math:`4-D` array (:math:`B \\times K_v \\times H \\times W`)" + }, + { + "name": "horizontal_kernel", + "type": "nnabla.Variable", + "description": ":math:`4-D` array (:math:`B \\times K_h \\times H \\times W`)" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": ":math:`4-D` array (:math:`B \\times C \\times H - K_v + 1 \\times W - K_h + 1`)" + } + ], + "category": "Layer" + }, + { + "name": "MaxPooling", + "description": "Max pooling. It pools the maximum values inside the scanning kernel:\n\n.. math::\n y_{i_1, i_2} = \\max_{k_1, k_2 \\in K} (x_{i_1 + k_1, i_2 + k_2})\n\nwhere :math:`x_{i_1 + k_1, i_2 + k_2}` is the input and :math:`y_{i_1, i_2}` is the output.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable." + } + ], + "attributes": [ + { + "name": "kernel", + "required": true, + "type": "shape", + "description": "Kernel sizes for each spatial axis." + }, + { + "name": "stride", + "type": "shape", + "default": "kernel", + "description": "Subsampling factors for each spatial axis." + }, + { + "name": "ignore_border", + "type": "boolean", + "default": true, + "description": "If false, kernels covering borders are also considered for the output." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * len(kernel)", + "description": "Border padding values for each spatial axis. Padding will be added both sides of the dimension." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Maximum values variable" + } + ], + "category": "Pool" + }, + { + "name": "AveragePooling", + "description": "Average pooling. It pools the averaged values inside the scanning kernel:\n\n.. 
math::\n y_{i_1, i_2} = \\frac{1}{K_1 K_2} \\sum_{k1} \\sum_{k2} x_{i_1 + k_1, i_2 + k_2}\n\nwhere :math:`x_{i_1 + k_1, i_2 + k_2}` is the input and :math:`y_{i_1, i_2}` is the output.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable." + } + ], + "attributes": [ + { + "name": "kernel", + "required": true, + "type": "shape", + "description": "Kernel sizes for each spatial axis." + }, + { + "name": "stride", + "type": "shape", + "default": "kernel", + "description": "Subsampling factors for each spatial axis." + }, + { + "name": "ignore_border", + "type": "boolean", + "default": true, + "description": "If false, kernels covering borders are also considered for the output." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * len(kernel)", + "description": "Border padding values for each spatial axis. Padding will be added both sides of the dimension." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order." + }, + { + "name": "including_pad", + "type": "boolean", + "default": true, + "description": "If true, border padding values are considered for the output." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Average values variable" + } + ], + "category": "Pool" + }, + { + "name": "GlobalAveragePooling", + "description": ".. WARNING::\n This function is experimental support, so please do not actively use it.\n\nGlobal average pooling. It pools an averaged value from the whole image", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Average values variable" + } + ], + "category": "Pool" + }, + { + "name": "SumPooling", + "description": "Sum pooling. It pools the summed values inside the scanning kernel:\n\n.. math::\n y_{i_1, i_2} = \\sum_{k1} \\sum_{k2} x_{i_1 + k_1, i_2 + k_2}\n\nwhere :math:`x_{i_1 + k_1, i_2 + k_2}` is the input and :math:`y_{i_1, i_2}` is the output.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable." + } + ], + "attributes": [ + { + "name": "kernel", + "required": true, + "type": "shape", + "description": "Kernel sizes for each spatial axis." + }, + { + "name": "stride", + "type": "shape", + "default": "kernel", + "description": "Subsampling factors for each spatial axis." + }, + { + "name": "ignore_border", + "type": "boolean", + "default": true, + "description": "If false, kernels covering borders are also considered for the output." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * len(kernel)", + "description": "Border padding values for each spatial axis. Padding will be added both sides of the dimension." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Summed values variable" + } + ], + "category": "Pool" + }, + { + "name": "Unpooling", + "description": "Inverse operation of pooling. It spreads the input values:\n\n.. 
math::\n y_{k_1 i_1 + j_1, k_2 i_2 + j_2} = x_{i_1, i_2}\n\nwhere :math:`x_{i_1, i_2}` is the input and :math:`y_{k_1 i_1 + j_1, k_2 i_2 + j_2}` is the output.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable." + } + ], + "attributes": [ + { + "name": "kernel", + "required": true, + "type": "shape", + "description": "Kernel sizes for each spatial axis." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Spread values variable" + } + ], + "category": "Layer" + }, + { + "name": "Embed", + "description": "Embed slices of a matrix/tensor with indexing array/tensor.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Indices with shape :math:`(I_0, ..., I_N)`" + }, + { + "name": "w", + "type": "nnabla.Variable", + "description": "Weights with shape :math:`(W_0, ..., W_M)`" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output with shape :math:`(I_0, ..., I_N, W_1, ..., W_M)`" + } + ], + "category": "Layer" + }, + { + "name": "RoiAlign", + "description": "Map Regions of Interest (RoI) defined by bounding `boxes` to features\n of `output_size` height and width using bilinear interpolation with\n `sampling_ratio` points in the interpolation grid.\n\n >>> import numpy as np, nnabla as nn, nnabla.functions as F\n >>> nn.set_auto_forward(True)\n >>> input = F.pad(F.constant(1, (1, 1, 2, 2)) * 2, (1, 1, 1, 1), \"constant\", 1)\n >>> print(input.d)\n [[[[1. 1. 1. 1.]\n [1. 2. 2. 1.]\n [1. 2. 2. 1.]\n [1. 1. 1. 1.]]]]\n >>> boxes = nn.Variable.from_numpy_array([[0, 0, 0, 4, 4], [0, 1, 1, 3, 3]])\n >>> output = F.roi_align(input, boxes, (2, 2))\n >>> print(output.d[0])\n [[[[1.25 1.25]\n [1.25 1.25]]]\n >>> print(output.d[1])\n [[[2. 2. ]\n [2. 2. ]]]]\n\n The `spatial_scale` argument tuple may be used to appropriately scale\n the box coordinates, for example, to scale normalized box coordinate to\n the input height and width dimensions.\n\n >>> input = F.reshape(F.arange(1, 13), (1, 1, 3, 4))\n >>> print(input.d)\n >>> boxes = nn.Variable.from_numpy_array([[0, 1/4, 1/3, 3/4, 2/30]])\n >>> output = F.roi_align(input, boxes, (1, 2), spatial_scale=(3, 4))\n >>> print(input.d)\n [[[[6. 7.]]]]\n\n References:\n\n * `He et al., Mask R-CNN. `_", + "inputs": [ + { + "name": "input", + "type": "nnabla.Variable", + "description": "N-D array with shape :math:`(N, H, W, C)` or :math:`(N, C, H, W)`." + }, + { + "name": "boxes", + "type": "nnabla.Variable", + "description": "N-D array with shape :math:`(K, 5)` containing box coordinates in (b, x1, y1, x2, y2) format where b is the batch index. Note that an invalid (out-of-range) batch index will generate an error only when running on CPU; when using a GPU context the batch index values are clipped to the range of input samples." + } + ], + "attributes": [ + { + "name": "output_size", + "required": true, + "type": "shape", + "description": "the height and width of the output feature maps." + }, + { + "name": "spatial_scale", + "type": "float32[]", + "default": "(1.0, 1.0)", + "description": "Scaling factor from box to input coordinates, as (x, y)." + }, + { + "name": "sampling_ratio", + "type": "int64", + "default": -1, + "description": "The number of sampling points used for interpolation.
Computed as `ceil((y2 - y1) / output_size[0])` for height and likewise for width if `sampling_ratio <= 0`." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with shape :math:`(K, C, output\\_size[0], output\\_size[1])`\nor :math:`(K, output\\_size[0], output\\_size[1], C)`." + } + ], + "category": "Layer" + }, + { + "name": "Sigmoid", + "description": "Element-wise sigmoid function.\n\n.. math::\n\n f(x) = \\frac{1}{1 + \\exp(-x)},", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output" + } + ], + "category": "Activation" + }, + { + "name": "Swish", + "description": "Element-wise swish function, by Ramachandran et al. (2017).\n\n.. math::\n\n y_i = \\frac{x_i}{1 + \\exp(-x_i)},\n\nReferences:\n * `Prajit Ramachandran, Barret Zoph, and Quoc V. Le, Swish: a Self-Gated Activation Function, arXiv:1710.05941 [cs.NE]\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output" + } + ], + "category": "Activation" + }, + { + "name": "Tanh", + "description": "Element-wise hyperbolic tangent (tanh) function.\n\n.. math::\n y_i = \\tanh (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "ReLU", + "description": "Element-wise Rectified Linear Unit (ReLU) function.\n\n.. math::\n y_i = \\max (0, x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "LeakyReLU", + "description": "Element-wise Leaky Rectified Linear Unit (ReLU) function.\n\nIt is defined as:\n\n.. math::\n y_i = \\alpha * \\min(0, x_i) + \\max (0, x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "alpha", + "type": "float32", + "default": 0.1, + "description": "The slope value multiplied to negative numbers. :math:`\\alpha` in the definition." + }, + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "Softmax", + "description": "Softmax normalization. Calculates\n\n.. math::\n y_i = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}\n\nalong the dimension specified by `axis`, where :math:`x_i` is the input and :math:`y_i` is the output.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array. Typically indicates a score." 
+ } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": "len(x.shape) - 1", + "description": "Axis normalization is taken." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "LogSoftmax", + "description": "Fused operation of Softmax normalization followed by log, which is defined as\n\n.. math::\n y_i = \\log \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)},\n\nwhere :math:`x_i` is the input and :math:`y_i` is the output at i-th channel.\nAn advantage of this fusion is reducing the numerical instability due to the log application.\n\nThe original definition can be rewritten as\n\n.. math::\n y_i = x_i - \\max_j(x_j) - \\log\\left(\\sum_j \\exp(x_j - \\max_k(x_k))\\right).\n\nIt is more stable as a log is always applied to a value :math:`\\ge 1`, while a log can be evaluated for 0 in the non-fused operation.\n\nAlso, backward gradient computation is more stable than the original one as it doesn't perform division by x due to a gradient of log. The definition is as following.\n\n.. math::\n dx_i = dy_i - y_i * \\sum_j dy_j\n\nwhere :math:`dx_i` and :math:`dy_i` denote gradients of loss\nwrt :math:`x_i` and :math:`y_i` respectively.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array. Typically indicates a score." + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": "len(x.shape) - 1", + "description": "Axis normalization is taken." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "ELU", + "description": "Element-wise Exponential Linear Unit (ELU) function.\n\n.. math::\n y_i= \\left\\{\n \\begin{array}{ll}\n x_i & (x > 0)\\\\\n \\alpha (\\exp(x_i) - 1) & (x \\leq 0)\n \\end{array} \\right..\n\nReferences:\n * `Clevert et al., Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs).\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "alpha", + "type": "float64", + "default": 1.0, + "description": "Coefficient for negative outputs. :math:`\\alpha` in definition" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "SELU", + "description": "Element-wise Scaled Exponential Linear Unit (SELU) function by Klambauer et al. (2017).\n\n.. math::\n y_i= \\lambda \\left\\{\n \\begin{array}{ll}\n x_i & (x > 0)\\\\\n \\alpha (\\exp(x_i) - 1) & (x \\leq 0)\n \\end{array} \\right..\n\nThe coefficients :math:`\\lambda` and :math:`\\alpha` default to the following values :math:`\\lambda_{01}` and :math:`\\alpha_{01}`, respectively, provided by Klambauer et al. (2017):\n\n.. math::\n \\begin{array}{lll}\n \\lambda_{01} &=& \\left( 1 - \\operatorname{erfc}\\left( \\frac{1}{\\sqrt{2}} \\right) \\sqrt{e} \\right)\n \\sqrt{2 \\pi} \\\\\n && \\left(\n 2 \\operatorname{erfc} \\left( \\sqrt{2} \\right) e^2\n + \\pi \\operatorname{erfc}\\left( \\frac{1}{\\sqrt{2}} \\right)^2 e\n \\right.
\\\\\n && \\left.\n - 2(2 + \\pi) \\operatorname{erfc} \\left( \\frac{1}{\\sqrt{2}} \\right) \\sqrt{e}\n + \\pi + 2\n \\right)^{-1/2} \\\\\n &\\approx& 1.0507 \\\\\n \\alpha_{01} &=& - \\frac\n {\\sqrt {\\frac {2}{\\pi}}}\n {\\operatorname{erfc} \\left( \\frac{1}{\\sqrt{2}} \\right) \\exp \\left(\\frac {1} {2} \\right) - 1} \\\\\n &\\approx& 1.67326\n \\end{array}\n\n\nReferences:\n * `Klambauer, G., Unterthiner, T., Mayr, A., & Hochreiter, S. (2017).\n Self-Normalizing Neural Networks. In Advances in Neural Information\n Processing Systems (NIPS). `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "scale", + "type": "float64", + "default": 1.05070098735548, + "description": "The coefficient :math:`\\lambda` in the definition." + }, + { + "name": "alpha", + "type": "float64", + "default": 1.673263242354377, + "description": "The coefficient :math:`\\alpha` in the definition." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "CReLU", + "description": "Element-wise Concatenated Rectified Linear Unit (CReLU) function.\nThis function calculates the ReLU of :math:`x` and :math:`-x` , then concatenates the results together at a specified axis,\nand returns the resulting array.\n\n\nReferences:\n * `Wenling Shang, Kihyuk Sohn, Diogo Almeida, Honglak Lee.\n Understanding and Improving Convolutional Neural Networks\n via Concatenated Rectified Linear Units.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": 1, + "description": "The ReLU activations of positive inputs and negative inputs are concatenated at axis." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array where axis dimension is doubled by concatenating." + } + ], + "category": "Activation" + }, + { + "name": "CELU", + "description": "Element-wise Concatenated Exponential Linear Unit (CELU) function.\nConcatenates ELU outputs of positive and negative inputs together at specified axis.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "alpha", + "type": "float64", + "default": 1.0, + "description": "Coefficient for negative outputs. :math:`\\alpha` in definition." + }, + { + "name": "axis", + "type": "int64", + "default": 1, + "description": "The ELU activations of positive inputs and negative inputs are concatenated at axis." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array where axis dimension is doubled by concatenating." + } + ], + "category": "Activation" + }, + { + "name": "PReLU", + "description": "Element-wise Parametrized Rectified Linear Unit function. Calculates:\n\n.. math::\n y_i = \\max(0, x_i) + w_i \\min(0, x_i)\n\nwhere negative slope :math:`w` is learned and can vary across channels (an\naxis specified with `base_axis`).", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "(N-D array) Input" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "(N-D array) Weights" + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Dimensions up to base_axis is treated as sample dimension." 
+ } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "category": "Activation" + }, + { + "name": "GELU", + "description": "Gaussian Error Linear Unit (GELU) function.\n\n.. math::\n GELU(x) = xP(X \\leq x) = x \\Phi (x)\n\nwhich is approximated by\n\n.. math::\n GELU(x) = 0.5x (1 + \\tanh ( \\sqrt{2/\\pi}(x + 0.044715x^3) ))\n\nReferences:\n * `Dan Hendrycks and Kevin Gimpel.\n Gaussian Error Linear Units (GELUs).\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "Mish", + "description": "Mish activation function.\n\n.. math::\n Mish(x) = x \\tanh(\\log(1+\\exp(x_i)))\n\n\nReferences:\n * `Diganta Misra.\n Mish: A Self Regularized Non-Monotonic Neural Activation Function.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "ReLU6", + "description": "Element-wise ReLU6 function.\nCapping ReLU activation to 6 is often observed to learn sparse features earlier.\n\n.. math::\n ReLU6(x) = \\min(\\max(0,x),6)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "HardSigmoid", + "description": "Segment-wise linear approximation of sigmoid.\nPreferable when speed of computation is more important than precision.\nReturns :math:`0` if :math:`x < -2.5`.\nReturns :math:`1` if :math:`x > 2.5`.\nReturns :math:`0.2x + 0.5` if :math:`-2.5 <= x <= 2.5`.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "HardTanh", + "description": "Element-wise HardTanh function.\nComputationally cheaper than Tanh function.\nReturns :math:`1` if :math:`x > 1`.\nReturns :math:`-1` if :math:`x < -1`.\nReturns :math:`x` otherwise.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "LogSigmoid", + "description": "Element-wise LogSigmoid function.\n\n.. math::\n LogSigmoid(x) = \\log(1/(1+\\exp(-x_i)))", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + },
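Several of the activations above are plain piecewise formulas; here are two of them transcribed directly from their stated definitions (illustrative only, not the nnabla implementation):

```javascript
// HardSigmoid per the entry above: 0 if x < -2.5, 1 if x > 2.5, else 0.2x + 0.5.
const hardSigmoid = (x) => x < -2.5 ? 0 : x > 2.5 ? 1 : 0.2 * x + 0.5;
// HardTanh per the entry above: clamp to [-1, 1].
const hardTanh = (x) => Math.max(-1, Math.min(1, x));
console.log(hardSigmoid(0)); // 0.5
console.log(hardTanh(3.2));  // 1
```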
math::\n SoftPlus(x) = \\frac{1}{\\beta} * \\log(1+\\exp(\\beta * x))", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "beta", + "type": "float64", + "default": 1.0, + "description": "The `beta` value for the SoftPlus formulation" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "SoftSign", + "description": "Element-wise SoftSign.\nCan be used in place of Tanh function.\nWhile Tanh converges exponentially, SoftSign converges polynomially.\n\n.. math::\n SoftSign(x) = x/(1+|x|)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "TanhShrink", + "description": "Element-wise TanhShrink function.\n\n.. math::\n TanhShrink(x) = x - \\tanh(x)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "Sinc", + "description": "Element-wise Sinc function.\nUnlike other popular activation functions, it has rises and falls.\nReturns :math:`1` if :math:`x = 0`.\nReturns :math:`\\sin(x)/x` otherwise.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Activation" + }, + { + "name": "FusedBatchNormalization", + "description": "Batch normalization fused with add2 (adding a residual input) and activation.\n\nThis is an equivalent operation to the following,\nbut is more computationally efficient:\n\n.. code-block:: python\n\n h = F.batch_normalization(x, beta, gamma, mean, variance, *opts)\n y = F.relu(h + z)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of input." + }, + { + "name": "beta", + "type": "nnabla.Variable", + "description": "N-D array of beta which is learned." + }, + { + "name": "gamma", + "type": "nnabla.Variable", + "description": "N-D array of gamma which is learned." + }, + { + "name": "mean", + "type": "nnabla.Variable", + "description": "N-D array of running mean (modified during forward execution)." + }, + { + "name": "variance", + "type": "nnabla.Variable", + "description": "N-D array of running variance (modified during forward execution)." + }, + { + "name": "z", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of a residual input. By specifying None, the activation function will follow immediately after BN operation." + } + ], + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "default": "(1,)", + "description": "Axes mean and variance are taken." + }, + { + "name": "decay_rate", + "type": "float32", + "default": 0.9, + "description": "Decay rate of running mean and variance." + }, + { + "name": "eps", + "type": "float32", + "default": 1e-05, + "description": "Tiny value to avoid zero division by std." + }, + { + "name": "batch_stat", + "type": "boolean", + "default": true, + "description": "Use mini-batch statistics rather than running ones."
+ }, + { + "name": "nonlinearity", + "type": "string", + "default": "relu", + "description": "Activation chosen from ('relu')." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Normalization" + }, + { + "name": "BatchNormalization", + "description": "Batch normalization.\n\n.. math::\n \\begin{eqnarray}\n \\mu &=& \\frac{1}{M} \\sum x_i \\\\\n \\sigma^2 &=& \\frac{1}{M} \\left(\\sum x_i - \\mu\\right)^2 \\\\\n \\hat{x}_i &=& \\frac{x_i - \\mu}{\\sqrt{\\sigma^2 + \\epsilon}} \\\\\n y_i &=& \\hat{x}_i \\gamma + \\beta.\n \\end{eqnarray}\n\n\nAt testing time, the mean and variance values used are those that were computed during training by moving average.\n\nReferences:\n\n * `Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of input." + }, + { + "name": "beta", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of beta which is learned." + }, + { + "name": "gamma", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of gamma which is learned." + }, + { + "name": "mean", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of running mean (modified during forward execution)." + }, + { + "name": "variance", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of running variance (modified during forward execution)." + } + ], + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "default": "(1,)", + "description": "Axes mean and variance are taken." + }, + { + "name": "decay_rate", + "type": "float32", + "default": 0.9, + "description": "Decay rate of running mean and variance." + }, + { + "name": "eps", + "type": "float32", + "default": 1e-05, + "description": "Tiny value to avoid zero division by std." + }, + { + "name": "batch_stat", + "type": "boolean", + "default": true, + "description": "Use mini-batch statistics rather than running ones." + }, + { + "name": "no_scale", + "type": "boolean", + "default": false, + "description": "If `True`, the scale term is omitted." + }, + { + "name": "no_bias", + "type": "boolean", + "default": false, + "description": "If `True`, the bias term is omitted." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Normalization" + }, + { + "name": "GroupNormalization", + "description": "Applies Group Normalization over an input tensor, which is defined as:\n\n.. 
math::\n \\begin{eqnarray}\n \\mu^g &=& \\frac{1}{H} \\sum_{i=1}^{H} x_i^g \\\\\n \\sigma^g &=& \\sqrt{\\frac{1}{H} \\sum_{i=1}^{H} \\left(x_i^g - \\mu^g\\right)^2 + \\epsilon} \\\\\n y &=& \\frac{x - \\mu^g}{\\sigma^g} \\gamma + \\beta\n \\end{eqnarray}\n\nwhere :math:`x` and :math:`y` are the input and output variables,\n:math:`\\mu^g` and :math:`\\sigma^g` are the mean and std of each group, which contains `num_channels / num_groups` channels,\nand :math:`\\gamma` and :math:`\\beta` are adaptive gains and biases.\n\nThe input channels, specified by :attr:`channel_axis`, are separated into :attr:`num_groups` groups,\nand the mean and std are calculated over each group.\nFor example, if the input shape is [B, C, H, W] (= channel_axis=1, batch_axis=0),\nan input variable is first reshaped to [B, num_groups, C / num_groups, H, W]\nand standardized by its mean and std, whose shapes are [B, num_groups, 1, 1, 1].\nFinally, an output variable is reshaped again to the original input shape (= [B, C, H, W] in the case above).\n\nReferences:\n\n * `Yuxin Wu, Kaiming He, Group Normalization.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of input." + }, + { + "name": "beta", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of beta which is learned." + }, + { + "name": "gamma", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of gamma which is learned." + } + ], + "attributes": [ + { + "name": "num_groups", + "type": "int64", + "default": 1, + "description": "The number of groups. The channel dim of 'x' must be an integer multiple of `num_groups`." + }, + { + "name": "channel_axis", + "type": "int64", + "default": 1, + "description": "Channel axis." + }, + { + "name": "batch_axis", + "type": "int64[]", + "default": "(0,)", + "description": "Axes mean and variance are taken." + }, + { + "name": "eps", + "type": "float32", + "default": 1e-05, + "description": "Tiny value to avoid zero division by std." + }, + { + "name": "no_scale", + "type": "boolean", + "default": false, + "description": "If `True`, the scale term is omitted." + }, + { + "name": "no_bias", + "type": "boolean", + "default": false, + "description": "If `True`, the bias term is omitted." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Normalization" + }, + { + "name": "InstanceNormalization", + "description": "Applies Instance Normalization over an input tensor, which is defined as\n\n.. math::\n \\begin{eqnarray}\n \\mu^i &=& \\frac{1}{H} \\sum_{j=1}^{H} x_j^i \\\\\n \\sigma^i &=& \\sqrt{\\frac{1}{H} \\sum_{j=1}^{H} \\left(x_j^i - \\mu^i\\right)^2 + \\epsilon} \\\\\n y &=& \\frac{x - \\mu^i}{\\sigma^i} \\gamma + \\beta\n \\end{eqnarray}\n\nwhere :math:`x` and :math:`y` are the input and output variables,\n:math:`\\mu^i` and :math:`\\sigma^i` are the mean and std of each instance which is separately calculated for each batch and channel,\nand :math:`\\gamma` and :math:`\\beta` are adaptive gains and biases.\n\nIf the input shape is [B, C, H, W] (= channel_axis=1, batch_axis=0), the shapes of the calculated mean and std are [B, C, 1, 1].\n\nReferences:\n\n * `Dmitry Ulyanov, Andrea Vedaldi, Victor Lempitsky, Instance Normalization: The Missing Ingredient for Fast Stylization.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of input."
+ }, + { + "name": "beta", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of beta which is learned." + }, + { + "name": "gamma", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of gamma which is learned." + } + ], + "attributes": [ + { + "name": "channel_axis", + "type": "int64", + "default": 1, + "description": "Channel axis." + }, + { + "name": "batch_axis", + "type": "int64[]", + "default": "(0,)", + "description": "Axes mean and variance are taken." + }, + { + "name": "eps", + "type": "float32", + "default": 1e-05, + "description": "Tiny value to avoid zero division by std." + }, + { + "name": "no_scale", + "type": "boolean", + "default": false, + "description": "If `True`, the scale term is omitted." + }, + { + "name": "no_bias", + "type": "boolean", + "default": false, + "description": "If `True`, the bias term is omitted." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Normalization" + }, + { + "name": "LayerNormalization", + "description": "Applies Layer Normalization over an input tensor, which is defined as\n\n.. math::\n \\begin{eqnarray}\n \\mu^l &=& \\frac{1}{H} \\sum_{i=1}^{H} x_i^l \\\\\n \\sigma^l &=& \\sqrt{\\frac{1}{H} \\sum_{i=1}^{H} \\left(x_i^l - \\mu^l\\right)^2 + \\epsilon} \\\\\n y &=& \\frac{x - \\mu^l}{\\sigma^l} \\gamma + \\beta\n \\end{eqnarray}\n\nwhere :math:`x` and :math:`y` are input and output variable,\n:math:`\\mu^l` and :math:`\\sigma^l` are the mean and std of each layer which is separately calculated for each batch,\nand :math:`\\beta` and :math:`\\gamma` are adaptive biases and gains.\n\nIf the input shape is [B, C, H, W] (= batch_axis=0), the shape of calculated mean and std are [B, 1, 1, 1]\n\nReferences:\n\n * `Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton, Layer Normalization.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of input." + }, + { + "name": "beta", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of beta which is learned." + }, + { + "name": "gamma", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of gamma which is learned." + } + ], + "attributes": [ + { + "name": "batch_axis", + "type": "int64[]", + "default": "(0,)", + "description": "Axes mean and variance are taken." + }, + { + "name": "eps", + "type": "float32", + "default": 1e-05, + "description": "Tiny value to avoid zero division by std." + }, + { + "name": "no_scale", + "type": "boolean", + "default": false, + "description": "If `True`, the scale term is omitted." + }, + { + "name": "no_bias", + "type": "boolean", + "default": false, + "description": "If `True`, the bias term is omitted." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Normalization" + }, + { + "name": "NormNormalization", + "description": "Norm normalization.\n \n.. math::\n y = \\frac{x_i}{\\|x\\|_p}", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "p", + "type": "float32", + "default": 2.0, + "description": "Order of the norm." + }, + { + "name": "axes", + "type": "int64[]", + "default": "range(x.ndim)", + "description": "Axes to be reduced. If empty list is given, all dimensions are reduced." 
+ }, + { + "name": "eps", + "type": "float32", + "default": 1e-12, + "description": "Epsilon for the normalization. This `eps` is added before taking the p-th root in the norm computation." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Normalization" + }, + { + "name": "SyncBatchNormalization", + "description": "Synchronized Batch Normalization:\n\nFor some tasks (e.g., semantic segmentation), batch size will be too small and BatchNormalization layer might not work well.\nSyncBatchNorlization layer solves these problems by synchronizing batch stats (mean and var) between multiple processes.\n\n.. math::\n \\begin{eqnarray}\n \\mu &=& \\frac{1}{M} \\sum x_i \\\\\n \\sigma^2 &=& \\frac{1}{M} \\left(\\sum x_i - \\mu\\right)^2 \\\\\n \\hat{x}_i &=& \\frac{x_i - \\mu}{\\sqrt{\\sigma^2 + \\epsilon}} \\\\\n y_i &=& \\hat{x}_i \\gamma + \\beta.\n \\end{eqnarray}\n\nReferences:\n\n * Implementing Synchronized Multi-GPU Batch Normalization https://hangzhang.org/PyTorch-Encoding/notes/syncbn.html\n\nNote:\n Since v1.32.0, the gradients of beta and gamma are not synchronized after backward computation (they had been synchronized previously).\n Users are responsible for synchronizing the gradients of beta and gamma by performing all-reduce,\n which is naturally done by performing all-reduce for gradients of all the parameters as we do usually in data parallel distributed training.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of input." + }, + { + "name": "beta", + "type": "nnabla.Variable", + "description": "N-D array of beta which is learned." + }, + { + "name": "gamma", + "type": "nnabla.Variable", + "description": "N-D array of gamma which is learned." + }, + { + "name": "mean", + "type": "nnabla.Variable", + "description": "N-D array of running mean (modified during forward execution)." + }, + { + "name": "variance", + "type": "nnabla.Variable", + "description": "N-D array of running variance (modified during forward execution)." + } + ], + "attributes": [ + { + "name": "comm", + "required": true, + "description": "The communicator" + }, + { + "name": "group", + "type": "string", + "default": "world", + "description": "The name of the communicator group" + }, + { + "name": "axes", + "type": "int64[]", + "default": "(1,)", + "description": "Axes mean and variance are taken." + }, + { + "name": "decay_rate", + "type": "float32", + "default": 0.9, + "description": "Decay rate of running mean and variance." + }, + { + "name": "eps", + "type": "float32", + "default": 1e-05, + "description": "Tiny value to avoid zero division by std." + }, + { + "name": "batch_stat", + "type": "boolean", + "default": true, + "description": "Use mini-batch statistics rather than running ones." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Normalization" + }, + { + "name": "TensorNormalization", + "description": "General tensor normalization.\nInput variable `x` is normalized by mean and std calculated by `x` itself.\nMean and variance are calculated along `axes`.\nFor example, if the input shape is (B, C, H, W) and axes is [0, 1],\nthe shape of calculated mean and std are (B, C, 1 ,1).", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of input." + }, + { + "name": "beta", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of beta which is learned." 
+ }, + { + "name": "gamma", + "type": "nnabla.Variable", + "option": "optional", + "description": "N-D array of gamma which is learned." + } + ], + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "default": "(1,)", + "description": "Axes mean and variance are taken." + }, + { + "name": "eps", + "type": "float32", + "default": 1e-05, + "description": "Tiny value to avoid zero division by std." + }, + { + "name": "no_scale", + "type": "boolean", + "default": false, + "description": "If `True`, the scale term is omitted." + }, + { + "name": "no_bias", + "type": "boolean", + "default": false, + "description": "If `True`, the bias term is omitted." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Normalization" + }, + { + "name": "WeightNormalization", + "description": "Weight normalization.\n\n.. math::\n \\mathbf{w}_{WN} = g \\dfrac{\\mathbf{w}}{\\|\\mathbf{w}\\|}\n\nwhere :math:`\\mathbf{w}` is the input weights to be normalized.\nand :math:`g` is learnable multiplication factors each of which is applied to each data at `dim`.\n\nReferences:\n * `Tim Salimans, Diederik P. Kingma, Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks. `_", + "inputs": [ + { + "name": "w", + "type": "nnabla.Variable", + "description": "N-D array of learnable weights." + }, + { + "name": "g", + "type": "nnabla.Variable", + "description": "1-D array of learnable scales." + } + ], + "attributes": [ + { + "name": "dim", + "type": "int64", + "default": 0, + "description": "Output dimension. For the other dimensions, the norms are computed." + }, + { + "name": "eps", + "type": "float32", + "default": 1e-12, + "description": "Epsilon for the normalization. This `eps` is added before taking the sqrt in the norm computation." + } + ], + "outputs": [ + { + "name": "w_wn", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Normalization" + }, + { + "name": "WeightStandardization", + "description": "Applies Weight Standardization over an input weight, which is defined as\n\n.. math::\n \\begin{eqnarray}\n \\mu_{W_i} &=& \\frac{1}{I} \\sum_{j=1}^{I} W_{ij} \\\\\n \\sigma_{W_i} &=& \\sqrt{\\frac{1}{I} \\sum_{i=1}^{I} \\left(W_{ij} - \\mu_{W_{i}}\\right)^2 + \\epsilon} \\\\\n \\hat{W_{ij}} &=& \\frac{W_{ij} - \\mu_{W_i}}{\\sigma_{W_i}} \\\\\n y &=& \\hat{W} \\ast x\n \\end{eqnarray}\n\nReferences:\n\n * `Siyuan Qiao, Huiyu Wang, Chenxi Liu, Wei Shen, Alan Yuille, Weight Standardization\n `_", + "inputs": [ + { + "name": "w", + "type": "nnabla.Variable", + "description": "N-D array of learnable weights." + } + ], + "attributes": [ + { + "name": "channel_axis", + "type": "int64", + "default": 0, + "description": "An axis for output channel. Default value is 0 which assumes the weights of convolution." + }, + { + "name": "eps", + "type": "float32", + "default": 1e-05, + "description": "Tiny value to avoid zero division by std." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Normalization" + }, + { + "name": "SpectralNorm", + "description": "Spectral Normalization.\n\n.. math::\n\n W_{sn} = \\frac{W}{\\sigma(W)}\n\nwhere :math:`W` is the input matrix, and the :math:`\\sigma(W)` is the spectral norm of :math:`W`. 
The spectral norm is approximately computed by the power iteration.\n\nReferences:\n\n Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida, \n \"Spectral Normalization for Generative Adversarial Networks\", \n International Conference on Learning Representations. 2018.", + "inputs": [ + { + "name": "w", + "type": "nnabla.Variable", + "description": "N-D array of learnable weights. This is normally a network parameter." + }, + { + "name": "u", + "type": "nnabla.Variable", + "description": "1-D array of singular vector. When `test == False`, the data region of `u` will be updated during forward calculation." + } + ], + "attributes": [ + { + "name": "dim", + "type": "int64", + "default": 0, + "description": "Output dimension. Default is 0. If the dimension is not 0, then the specified dimension becomes the left-most dimension by transposing." + }, + { + "name": "itr", + "type": "int64", + "default": 1, + "description": "Number of power iterations. Default is 1." + }, + { + "name": "eps", + "type": "float32", + "default": 1e-12, + "description": "Epsilon for the normalization. This `eps` is added before taking the sqrt in the norm computation." + }, + { + "name": "test", + "type": "boolean", + "default": false, + "description": "When `True`, `u` will not be updated. Default is `False`." + }, + { + "name": "output_u", + "type": "boolean", + "default": false, + "description": "Output the original `u` or not. `u` is updated when `test == False`, but you can get the original `u` as an output with this option. Default is `False`." + } + ], + "outputs": [ + { + "name": "w_sn", + "type": "nnabla.Variable", + "description": "Spectrally normalized :math:`W_{sn}` with the same shape as :math:`W`." + } + ], + "category": "Normalization" + }, + { + "name": "MeanSubtraction", + "description": "It subtracts the mean of the elements of the input array,\nnormalizing the mean to :math:`0`. Preprocessing arrays with this function has the effect of improving accuracy\nin various tasks such as image classification.\n\nAt training time, this function is defined as\n\n.. math::\n \\begin{eqnarray}\n \\mu &=& \\frac{1}{M} \\sum x_i \\\\\n y_i &=& x_i - \\mu\n \\end{eqnarray}\n\nAt testing time, the mean values used are those that were computed during training by moving average.\n\nNote:\n The backward performs an approximated differentiation that takes into account only the latest mini-batch.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of input." + }, + { + "name": "rmean", + "type": "nnabla.Variable", + "description": "N-D array of running mean (modified during forward execution)." + }, + { + "name": "t", + "type": "nnabla.Variable", + "description": "Scalar of num of iteration of running mean (modified during forward execution)." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Base axis of Mean Subtraction operation. Dimensions up to base_axis are treated as sample dimensions." + }, + { + "name": "update_running_mean", + "type": "boolean", + "default": true, + "description": "Update running mean during forward execution." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "category": "Normalization" + }, + { + "name": "ClipGradByValue", + "description": "In the forward pass, the function behaves as the identity.\n\nIn the backward pass,\n\n .. 
math::\n g_x = \\begin{cases}\n max & (g_y > max) \\\\\n g_y & (otherwise) \\\\\n min & (g_y < min)\n \\end{cases}.\n\nA typical case for use is to prevent the gradient explosion through a whole computational graph.\nFor example, if you want to clip gradient values for each feature map,\n\n.. code-block:: python\n\n x = nn.Variable([16, 3, 32, 32])\n min = F.broadcast(nn.Variable.from_numpy_array(np.asarray([-1.0]).reshape((1, 1, 1, 1))), (16, 3, 32, 32))\n max = F.broadcast(nn.Variable.from_numpy_array(np.asarray([1.0]).reshape((1, 1, 1, 1))), (16, 3, 32, 32))\n c = F.clip_grad_by_value(x, min=min, max=max)\n h = PF.convolution(c, 64, (3, 3), pad=(1, 1))", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of input." + }, + { + "name": "min", + "type": "nnabla.Variable", + "description": "N-D array of minimum input value by which the gradients of the `y` are clipped. Note that the shape of `min` must be the same as `x`'s and the backward to `min` is not performed." + }, + { + "name": "max", + "type": "nnabla.Variable", + "description": "N-D array of maximum input value by which the gradients of the `y` are clipped. Note that the shape of `max` must be the same as `x`'s and the backward to `max` is not performed." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "category": "Normalization" + }, + { + "name": "ClipGradByNorm", + "description": "In the forward pass, the function behaves like the identity.\n\nIn the backward pass,\n\n.. math::\n\n g_x = N \\times \\frac{g_y}{\\|g_y\\|_2}.\n\nwhere :math:`g_x` is the gradient w.r.t. the input, :math:`g_y` is the gradient w.r.t. the output,\nand :math:`N` is `clip_norm`, the value that the norm of :math:`g_y` becomes. This is the case when `axes` is not set.\nWhen `axes` is set, the norm is computed over `axes`.\n\nA typical case for use is to prevent the gradient explosion through a whole computational graph.\nFor example, if you want to normalize gradient values over the feature axis,\n\n.. code-block:: python\n\n x = nn.Variable([16, 3, 32, 32])\n c = F.clip_grad_by_norm(x, axes=(1, ))\n h = PF.convolution(c, 64, (3, 3), pad=(1, 1))", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of input." + } + ], + "attributes": [ + { + "name": "clip_norm", + "type": "float32", + "default": 1.0, + "description": "Clip the norm of the gradient to `clip_norm` in the backward pass." + }, + { + "name": "axes", + "type": "int64[]", + "default": "range(x.ndim)", + "description": "Axes to be reduced. If empty list is given, all dimensions are reduced to scalar. This is used in the forward pass." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "category": "Normalization" + }, + { + "name": "Sum", + "description": "Reduces a matrix along a specified axis with the sum function.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "default": "range(x.ndim)", + "description": "Axes to be reduced. If empty list is given, all dimensions are reduced to scalar." + }, + { + "name": "keep_dims", + "type": "boolean", + "default": false, + "description": "Flag whether the reduced axis is kept."
+ } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "CumSum", + "description": "Cumulative sum along a given axis.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": 0, + "description": "Axis along which cumulative sum is to be calculated" + }, + { + "name": "exclusive", + "type": "boolean", + "default": false, + "description": "If True, perform exclusive cumsum" + }, + { + "name": "reverse", + "type": "boolean", + "default": false, + "description": "If True, perform cumsum in reverse direction" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "Mean", + "description": "Reduces a matrix along a specified axis with the mean function.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "default": "range(x.ndim)", + "description": "Axes to be reduced." + }, + { + "name": "keep_dims", + "type": "boolean", + "default": false, + "description": "Flag whether the reduced axis is kept." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "Max", + "description": "Reduction along axis or axes with max operation.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "default": "range(x.ndim)", + "description": "Axes to be reduced." + }, + { + "name": "keep_dims", + "type": "boolean", + "default": false, + "description": "Flag whether the reduced axis is kept." + }, + { + "name": "with_index", + "type": "boolean", + "default": false, + "description": "Return values and indices." + }, + { + "name": "only_index", + "type": "boolean", + "default": false, + "description": "Return only indices." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "Min", + "description": "Reduction along axis or axes with min operation.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "default": "range(x.ndim)", + "description": "Axes to be reduced." + }, + { + "name": "keep_dims", + "type": "boolean", + "default": false, + "description": "Flag whether the reduced axis is kept." + }, + { + "name": "with_index", + "type": "boolean", + "default": false, + "description": "Return values and indices." + }, + { + "name": "only_index", + "type": "boolean", + "default": false, + "description": "Return only indices." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "Norm", + "description": "Reduction along axis or axes with norm operation.\n\n.. math::\n y = \\|x\\|_p = \\left( \\sum_i |x_i|^p \\right)^{\\frac{1}{p}}", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "p", + "type": "float32", + "default": 2.0, + "description": "Order of the norm." + }, + { + "name": "axes", + "type": "int64[]", + "default": "range(x.ndim)", + "description": "Axes to be reduced. 
If empty list is given, all dimensions are reduced to scalar." + }, + { + "name": "keep_dims", + "type": "boolean", + "default": false, + "description": "Flag whether the reduced axis is kept." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "Prod", + "description": "Reduction along axis or axes with product operation.\n\nNote:\n Backward computation is not accurate when the input contains zero values.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "default": "range(x.ndim)", + "description": "Axes to be reduced." + }, + { + "name": "keep_dims", + "type": "boolean", + "default": false, + "description": "Flag whether the reduced axis is kept." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "CumProd", + "description": "Cumulative product along a given axis.\n\nNote:\n Backward computation is not accurate when the input contains zero values.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": 0, + "description": "Axis along which cumulative product is to be calculated" + }, + { + "name": "exclusive", + "type": "boolean", + "default": false, + "description": "If True, perform exclusive cumprod" + }, + { + "name": "reverse", + "type": "boolean", + "default": false, + "description": "If True, perform cumprod in reverse direction" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "ReduceSum", + "description": "Reduction along an axis with sum operation.\n\nNote:\n This is deprecated. Use ``sum`` instead.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "ReduceMean", + "description": "Reduction by mean along an axis.\n\nNote:\n This is deprecated. Use ``mean`` instead.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "Add2", + "description": "Element-wise addition.\n\n.. math::\n y_i = x^{(0)}_i + x^{(1)}_i", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "AddN", + "description": "Element-wise addition.\n\n.. math::\n y_i = x^{(0)}_i + . . . 
+ x^{(n-1)}_i", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "list": true, + "description": "N-D arrays" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "BcAdd2", + "description": "Note: This shouldn't be called by users.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "Sub2", + "description": "Element-wise subtraction.\n\n.. math::\n y_i = x^{(0)}_i - x^{(1)}_i", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "Mul2", + "description": "Element-wise multiplication.\n\n.. math::\n y_i = x^{(0)}_i x^{(1)}_i", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "MulN", + "description": "Element-wise multiplication.\n\n.. math::\n y_i = x^{(0)}_i . . . x^{(n-1)}_i", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "list": true, + "description": "N-D arrays" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "Div2", + "description": "Element-wise division.\n\n.. math::\n y_i = \\frac{x^{(0)}_i} {x^{(1)}_i}", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "Pow2", + "description": "Element-wise power function.\n\n.. math::\n y_i = {(x^{(0)}_i)} ^ {x^{(1)}_i}", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." 
+ } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "AddScalar", + "description": "Element-wise scalar addition.\n\n.. math::\n y_i = x_i + v", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + }, + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "MulScalar", + "description": "Element-wise scalar multiplication.\n\n.. math::\n y_i = v x_i", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + }, + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "PowScalar", + "description": "Element-wise scalar power function.\n\n.. math::\n y_i = (x_i) ^ v", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + }, + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "RSubScalar", + "description": "Element-wise scalar subtraction.\n\n.. math::\n y_i = v - x_i", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "RDivScalar", + "description": "Element-wise scalar division.\n\n.. math::\n y_i = \\frac{v}{x_i}", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "RPowScalar", + "description": "Element-wise scalar power function.\n\n.. 
math::\n y_i = v ^ {x_i}", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "Sign", + "description": "Element-wise sign function.\n\nIn the forward pass, it is defined as\n\n.. math::\n\n f(x) = \\begin{cases}\n 1 & (x > 0) \\\\\n -1 & (x < 0) \\\\\n \\alpha & (x = 0)\n \\end{cases}.\n\nIn the backward pass, it is defined as\n\n.. math::\n \\frac{\\partial f(x)}{\\partial x} = 1,\n\nor in other words, it behaves as the identity function for the gradient in the backward pass.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input" + } + ], + "attributes": [ + { + "name": "alpha", + "type": "float32", + "default": 1.0, + "description": "Value in case of :math:`x = 0`." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "Minimum2", + "description": "Element-wise minimum.\n\n.. math::\n y_i = \\min(x^{(0)}_i, x^{(1)}_i)", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array of min value" + } + ], + "category": "Logic" + }, + { + "name": "Maximum2", + "description": "Element-wise maximum.\n\n.. math::\n y_i = \\max(x^{(0)}_i, x^{(1)}_i)", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array of max value" + } + ], + "category": "Logic" + }, + { + "name": "MinimumScalar", + "description": "Element-wise scalar minimum.\n\n.. math::\n y_i = \\min(x_i, v)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "MaximumScalar", + "description": "Element-wise scalar maximum.\n\n.. math::\n y_i = \\max (x_i, v)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "LogicalAnd", + "description": "Elementwise logical AND.\n\n.. 
math::\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i \\neq 0 \\;\\&\\; x^{(1)}_i \\neq 0) \\\\\n 0 & otherwise\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "No Description" + } + ], + "category": "Logic" + }, + { + "name": "LogicalOr", + "description": "Elementwise logical OR.\n\n.. math::\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 0 & (x^{(0)}_i = 0 \\;\\&\\; x^{(1)}_i = 0) \\\\\n 1 & otherwise\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "No Description" + } + ], + "category": "Logic" + }, + { + "name": "LogicalXor", + "description": "Elementwise logical XOR.\n\n.. math::\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i = 0 \\;\\&\\; x^{(1)}_i = 0) \\\\\n 1 & (x^{(0)}_i \\neq 0 \\;\\&\\; x^{(1)}_i \\neq 0) \\\\\n 0 & otherwise\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "No Description" + } + ], + "category": "Logic" + }, + { + "name": "Equal", + "description": "Element wise 'equal'\n\n.. math::\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i = x^{(1)}_i) \\\\\n 0 & otherwise\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "No Description" + } + ], + "category": "Logic" + }, + { + "name": "NotEqual", + "description": "Element wise 'not equal'\n\n.. math::\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 0 & (x^{(0)}_i = x^{(1)}_i) \\\\\n 1 & otherwise\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "No Description" + } + ], + "category": "Logic" + }, + { + "name": "GreaterEqual", + "description": "Element wise comparison. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i \\geq x^{(1)}_i) \\\\\n 0 & (x^{(0)}_i < x^{(1)}_i)\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "No Description" + } + ], + "category": "Logic" + }, + { + "name": "Greater", + "description": "Element wise comparison. The :math:`i^{th}` element of the output is:\n\n.. 
math::\n\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i > x^{(1)}_i) \\\\\n 0 & (x^{(0)}_i \\leq x^{(1)}_i)\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "No Description" + } + ], + "category": "Logic" + }, + { + "name": "LessEqual", + "description": "Element wise comparison. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i \\leq x^{(1)}_i) \\\\\n 0 & (x^{(0)}_i > x^{(1)}_i)\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "No Description" + } + ], + "category": "Logic" + }, + { + "name": "Less", + "description": "Element wise comparison. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i < x^{(1)}_i) \\\\\n 0 & (x^{(0)}_i \\geq x^{(1)}_i)\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "No Description" + } + ], + "category": "Logic" + }, + { + "name": "SearchSorted", + "description": "Finds indices in the innermost dimension of a sorted sequence where values must be inserted in order to keep the sequence sorted", + "inputs": [ + { + "name": "sorted_sequence", + "type": "nnabla.Variable", + "description": "N-D array of sorted sequence where search is to be performed. Note that this must be a sorted array." + }, + { + "name": "values", + "type": "nnabla.Variable", + "description": "N-D array of search values" + } + ], + "attributes": [ + { + "name": "right", + "type": "boolean", + "default": false, + "description": "If True, given a value v, the function returns index i such that sorted_sequence[i-1] <= v < sorted_sequence[i] (index of closest upper bound of v). By default, this is false so the function returns index i such that sorted_sequence[i-1] < v <= sorted_sequence[i] (index of closest lower bound of v)" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array containing the required indices" + } + ], + "category": "Logic" + }, + { + "name": "LogicalAndScalar", + "description": "Elementwise logical AND with scalar.\n\n.. math::\n f(x_i,v) = \\begin{cases}\n 1 & (x_i \\neq 0 \\;\\&\\; v \\neq 0) \\\\\n 0 & otherwise\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "required": true, + "type": "boolean", + "description": "No Description" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "LogicalOrScalar", + "description": "Elementwise logical OR with scalar.\n\n.. 
math::\n f(x_i,v) = \\begin{cases}\n 0 & (x_i = 0 \\;\\&\\; v = 0) \\\\\n 1 & otherwise\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "required": true, + "type": "boolean", + "description": "No Description" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "LogicalXorScalar", + "description": "Elementwise logical XOR with scalar.\n\n.. math::\n f(x_i,v) = \\begin{cases}\n 1 & (x_i = 0 \\;\\&\\; v = 0) \\\\\n 1 & (x_i \\neq 0 \\;\\&\\; v \\neq 0) \\\\\n 0 & otherwise\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "required": true, + "type": "boolean", + "description": "No Description" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "EqualScalar", + "description": "Element wise 'equal' with a scalar\n\n.. math::\n f(x_i,v) = \\begin{cases}\n 1 & (x_i = v) \\\\\n 0 & otherwise\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "NotEqualScalar", + "description": "Element wise 'not equal' with a scalar\n\n.. math::\n f(x_i,v) = \\begin{cases}\n 0 & (x_i = v) \\\\\n 1 & otherwise\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "GreaterEqualScalar", + "description": "Element wise comparison with a scalar. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,v) = \\begin{cases}\n 1 & (x^{(0)}_i \\geq v) \\\\\n 0 & (x^{(0)}_i < v)\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "GreaterScalar", + "description": "Element wise comparison with a scalar. The :math:`i^{th}` element of the output is:\n\n.. 
math::\n\n f(x^{(0)}_i,v) = \\begin{cases}\n 1 & (x^{(0)}_i > v) \\\\\n 0 & (x^{(0)}_i \\leq v)\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "LessEqualScalar", + "description": "Element wise comparison with a scalar. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,v) = \\begin{cases}\n 1 & (x^{(0)}_i \\leq v) \\\\\n 0 & (x^{(0)}_i > v)\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "LessScalar", + "description": "Element wise comparison with a scalar. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,v) = \\begin{cases}\n 1 & (x^{(0)}_i < v) \\\\\n 0 & (x^{(0)}_i \\geq v)\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 1.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "LogicalNot", + "description": "Element-wise logical NOT operation\n\n.. 
math::\n f(x_i) = \\begin{cases}\n 1 & (x_i = 0) \\\\\n 0 & otherwise\n \\end{cases}.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "IsNaN", + "description": "Test element-wise for NaN and return a ``0/1`` array.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "IsInf", + "description": "Test element-wise for ``inf/-inf`` and return a ``0/1`` array.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "ResetNaN", + "description": "Replace NaNs with a scalar value specified by ``val``.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 0.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "ResetInf", + "description": "Replace ``-inf/inf`` with a scalar value specified by ``val``.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "attributes": [ + { + "name": "val", + "type": "float64", + "default": 0.0, + "description": "Value of the scalar" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ], + "category": "Logic" + }, + { + "name": "Where", + "description": "Return elements, either from ``x_true`` or ``x_false``, depending on ``condition``.\n\nIf rank of ``condition`` is higher than those of ``x_true`` and ``x_false``, the first dimensions of ``x_true`` and ``x_false`` must match the dimensions of ``condition``.\n\nExample:\n\n.. code-block:: python\n\n import numpy as np\n import nnabla as nn\n import nnabla.functions as F\n\n a = nn.Variable.from_numpy_array(np.random.rand(2, 3))\n x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4))\n y = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4))\n z = F.where(F.greater_scalar(a, 0.5), x, y)\n z.forward()\n\n # Numpy equivalent\n z_numpy = np.where(a.d > 0.5, x.d, y.d)\n assert np.allclose(z_numpy, z.d)", + "inputs": [ + { + "name": "condition", + "type": "nnabla.Variable", + "description": "N-d array. For all i, when ``condition[i] == true``, yield ``x_true[i]``, otherwise ``x_false[i]``." + }, + { + "name": "x_true", + "type": "nnabla.Variable", + "description": "N-d array with higher or equal rank to ``condition``." + }, + { + "name": "x_false", + "type": "nnabla.Variable", + "description": "N-d array with higher or equal rank to ``condition``." 
+ } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as condition" + } + ], + "category": "Logic" + }, + { + "name": "Constant", + "description": "Generate a constant-valued array.", + "attributes": [ + { + "name": "val", + "type": "float32", + "default": 0.0, + "description": "Constant value." + }, + { + "name": "shape", + "type": "shape", + "default": "[]", + "description": "Shape of the output array." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array where all values are the specified constant." + } + ] + }, + { + "name": "Arange", + "description": "Generate a range of values within the half-open interval\n``[start, stop)`` (the interval including start but excluding\nstop) with `step` increments.", + "attributes": [ + { + "name": "start", + "required": true, + "type": "float32", + "description": "Start value." + }, + { + "name": "stop", + "required": true, + "type": "float32", + "description": "End value." + }, + { + "name": "step", + "type": "float32", + "default": 1.0, + "description": "Step value." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "1-D array with the generated values." + } + ] + }, + { + "name": "Linspace", + "description": "Generate a one-dimensional vector/tensor of size `num` whose values are evenly spaced from `start` to `end`, inclusive.", + "attributes": [ + { + "name": "start", + "required": true, + "type": "float32", + "description": "Start value." + }, + { + "name": "stop", + "required": true, + "type": "float32", + "description": "End value." + }, + { + "name": "num", + "required": true, + "type": "int64", + "description": "Size of the constructed vector/tensor." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "1-D array with the generated values." + } + ] + }, + { + "name": "Abs", + "description": "Element-wise absolute value function.\n\n.. math::\n y_i = |x_i|", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Element-wise absolute variable" + } + ] + }, + { + "name": "Exp", + "description": "Element-wise natural exponential function.\n\n.. math::\n y_i = \\exp(x_i).", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Element-wise exp variable" + } + ] + }, + { + "name": "Log", + "description": "Element-wise natural logarithm function.\n\n.. math::\n y_i = \\ln(x_i).", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Element-wise log variable" + } + ] + }, + { + "name": "Identity", + "description": "Identity function.\n\n.. math::\n y = x", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." 
+ } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "BatchMatmul", + "description": "Batch matrix multiplication.\n\nTwo of batchs of matrices are multiplied for each sample in a batch.\nA batch of matrices is composed as [..., P, Q] where the last two dimensions compose matrix dimensions,\nand the first dimensions up to the third last dimension are considered as batch samples.\nThese batch dimensions are internally broadcasted when the size of a dimension is 1.\n\nExample:\n\n.. code-block:: python\n\n import nnabla as nn\n import nnabla.functions as F\n import numpy as np\n\n nn.set_auto_forward(True)\n\n # Same batch size\n a = nn.Variable.from_numpy_array(np.random.rand(2, 2, 3, 4))\n b = nn.Variable.from_numpy_array(np.random.rand(2, 2, 4, 3))\n c = F.batch_matmul(a, b)\n\n # Different batch size with the broadcast\n a = nn.Variable.from_numpy_array(np.random.rand(2, 1, 3, 4))\n b = nn.Variable.from_numpy_array(np.random.rand(1, 3, 4, 3))\n c = F.batch_matmul(a, b)\n\n.. WARNING::\n Since the version 1.13, the behavior of the batch dimensions changed, it supported the internal\n broadcast when the size of a dimension is 1. Accordingly, this function does not supports different\n batch dimensions between two inputs even if the total sample size for each input is same.", + "inputs": [ + { + "name": "a", + "type": "nnabla.Variable", + "description": "N-D array with >= 2-dim. The last two dimensions will be treated as a matrix." + }, + { + "name": "b", + "type": "nnabla.Variable", + "description": "N-D array with >= 2-dim. The last two dimensions will be treated as a matrix. The product of the size of 0-th dimension through the size of the third last dimension must be same as that of the input ``a``." + } + ], + "attributes": [ + { + "name": "transpose_a", + "type": "boolean", + "default": false, + "description": "Transpose the last two axes of ``a`` in matrix multiplication." + }, + { + "name": "transpose_b", + "type": "boolean", + "default": false, + "description": "Transpose the last two axes of ``b`` in matrix multiplication." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output of sample-wise matrix multiplication in a batch. When ``a`` is of a shape of [N, P, Q], ``b`` is of a shape of [N, Q, R], and transpose options are all False, the output will be a shape of [N, P, R]." + } + ] + }, + { + "name": "Round", + "description": "Element-wise round function.\n\nIn the forward pass, this function simply computes `round` to the nearest integer value.\n\n.. math::\n y_i = round(x_i).\n\nIn the backward pass, the simple Straight-Through Estimator (STE) is applied,\n\n.. math::\n \\frac{\\partial y_i}{\\partial x_i} = 1.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "Ceil", + "description": "Element-wise ceil function.\n\nIn the forward pass, this function simply returns the smallest integer which is not less than the input.\n\n.. math::\n y_i = ceil(x_i).\n\nIn the backward pass, the simple Straight-Through Estimator (STE) is applied,\n\n.. 
math::\n \\frac{\\partial y_i}{\\partial x_i} = 1.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "Floor", + "description": "Element-wise floor function.\n\nIn the forward pass, this function simply returns the largest integer which is not greater than the input.\n\n.. math::\n y_i = floor(x_i).\n\nIn the backward pass, the simple Straight-Through Estimator (STE) is applied,\n\n.. math::\n \\frac{\\partial y_i}{\\partial x_i} = 1.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "Sin", + "description": "Element-wise sine (sin) function.\n\n.. math::\n y_i = \\sin (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "Cos", + "description": "Element-wise cosine (cos) function.\n\n.. math::\n y_i = \\cos (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "Tan", + "description": "Element-wise tangent (tan) function.\n\n.. math::\n y_i = \\tan (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "Sinh", + "description": "Element-wise hyperbolic sine (sinh) function.\n\n.. math::\n y_i = \\sinh (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "Cosh", + "description": "Element-wise hyperbolic cosine (cosh) function.\n\n.. math::\n y_i = \\cosh (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "ASin", + "description": "Element-wise arcsine (asin) function.\n\n.. math::\n y_i = \\arcsin (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "ACos", + "description": "Element-wise arccosine (acos) function.\n\n.. math::\n y_i = \\arccos (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "ATan", + "description": "Element-wise arctangent (atan) function.\n\n.. 
math::\n y_i = \\arctan (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "ATan2", + "description": "Element-wise arctangent (atan) function with 2 input variables.\n\n.. math::\n y_i = \\arctan2 (x_{i1}, x_{i2})", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as input variables" + } + ] + }, + { + "name": "ASinh", + "description": "Element-wise hyperbolic arcsine (asinh) function.\n\n.. math::\n y_i = \\text{arcsinh} (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "ACosh", + "description": "Element-wise hyperbolic arccosine (acosh) function.\n\n.. math::\n y_i = \\text{arccosh} (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "ATanh", + "description": "Element-wise hyperbolic arctangent (atanh) function.\n\n.. math::\n y_i = \\text{arctanh} (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "Erf", + "description": "Element-wise Error function.\n\n.. math::\n y_i = \\text{erf} (x_i)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "Concatenate", + "description": "Concatenate a variable number of input arrays along the specified axis.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "list": true, + "description": "N-D arrays." 
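The Straight-Through Estimator used by ``Round``, ``Ceil``, and ``Floor`` above reduces to a few lines; this is a hedged NumPy sketch of the idea, not the library's implementation:

.. code-block:: python

    import numpy as np

    def round_ste_forward(x):
        # Forward: plain quantization to the nearest integer.
        return np.round(x)

    def round_ste_backward(grad_y):
        # Backward (STE): the gradient passes through unchanged, dy/dx = 1.
        return grad_y

    x = np.array([0.2, 0.8, 1.5])
    y = round_ste_forward(x)                   # [0. 1. 2.]
    gx = round_ste_backward(np.ones_like(y))   # [1. 1. 1.]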
+ } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": "len(x[0].shape) - 1", + "description": "Axis" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Concatenate variable" + } + ], + "category": "Shape" + }, + { + "name": "Split", + "description": "Split arrays at the specified axis.\n\nnote:\n This function should not be called directly when constructing models.\n Instead, use :meth:`nnabla.functions.split` which\n automatically sets `n_output` from the input's shape and axis.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": 0, + "description": "Axis" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "list": true, + "description": "list of N-D arrays" + } + ], + "category": "Shape" + }, + { + "name": "Stack", + "description": "Joins two or more arrays on a new axis.\n\nNote:\n Unlike :meth:`nnabla.functions.concatenate` , which joins arrays on an existing axis,\n Stack joins arrays on a new axis.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "list": true, + "description": "N-D arrays. The sizes of all the arrays to be stacked must be the same." + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": 0, + "description": "The axis on which to concatenate arrays. Axis indices take on values 0, 1, 2, and so on from the left. For example, to stack four (3,28,28) inputs on the second axis, specify 1. In this case, the output size will be (3,4,28,28)." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output" + } + ], + "category": "Shape" + }, + { + "name": "Slice", + "description": "Slice arrays along specified axis.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "start", + "type": "int64[]", + "default": "(0,) * len(x.shape)", + "description": "Start indices for each axis" + }, + { + "name": "stop", + "type": "int64[]", + "default": "tuple(x.shape)", + "description": "Stop indices for each axis" + }, + { + "name": "step", + "type": "int64[]", + "default": "(1,) * len(x.shape)", + "description": "Step indices for each axis" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Sliced N-D array" + } + ], + "category": "Shape" + }, + { + "name": "Pad", + "description": "Pad the input N-D array `x` over the number of dimensions given\nby half the length of the `pad_width` iterable, where every two\nvalues in `pad_width` determine the before and after pad size of\nan axis. The `pad_width` iterable must hold an even number of\npositive values which may cover all or fewer dimensions of the\ninput variable `x`. If `pad_width` covers fewer dimensions then\nit applies to the innermost dimensions of `x`.\n\n.. code-block:: python\n\n x = nn.Variable.from_numpy_array(np.ones((2, 3, 4)))\n assert F.pad(x, (1, 1, 2, 2)).shape == (2, 5, 8)\n\nPadding is performed according to the requested `mode`:\n\nconstant\n Pads with a value given by the keyword argument `constant_value`.\n\n .. 
code-block:: python\n\n x = nn.Variable.from_numpy_array(np.array([1, 2, 3, 4], dtype=int))\n y = F.pad(x, (3, 3), 'constant', constant_value = -1)\n y.forward()\n assert np.all(y.d == np.array([-1, -1, -1, 1, 2, 3, 4, -1, -1, -1]))\n\nreflect\n Pads with the reflection of the vector mirrored on the first\n and last values of the vector along each axis.\n\n .. code-block:: python\n\n x = nn.Variable.from_numpy_array(np.array([1, 2, 3, 4], dtype=int))\n y = F.pad(x, (3, 3), 'reflect')\n y.forward()\n assert np.all(y.d == np.array([4, 3, 2, 1, 2, 3, 4, 3, 2, 1]))\n\nrepeat\n Pads with the edge value of the vector along each axis.\n\n .. code-block:: python\n\n x = nn.Variable.from_numpy_array(np.array([1, 2, 3, 4], dtype=int))\n y = F.pad(x, (3, 3), 'repeat')\n y.forward()\n assert np.all(y.d == np.array([1, 1, 1, 1, 2, 3, 4, 4, 4, 4]))", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "pad_width", + "required": true, + "type": "int64[]", + "description": "Iterable of *before* and *after* pad values." + }, + { + "name": "mode", + "type": "string", + "default": "constant", + "description": "Padding mode string." + }, + { + "name": "constant_value", + "type": "float32", + "default": 0.0, + "description": "Fill value if mode is `constant`." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Padded N-D array with the same number of dimensions as the input.\n\n.. code-block:: python\n\n x = nn.Variable((3, 3, 4, 2)) # a shape like (B, C, H, W)\n # 1-D padding: last dim by 1 left and 2 on the right side\n assert F.pad(x, (1, 2)).shape == (3, 3, 4, 5)\n # 2-D padding: last dim by (1, 1) and 2nd to last by (2, 2)\n assert F.pad(x, (2, 2, 1, 1)).shape == (3, 3, 8, 4)\n # 3-D padding: dims C by (0, 1), H by (2, 1), and W by (3, 3)\n assert F.pad(x, (0, 1, 2, 1, 3, 3)).shape == (3, 4, 7, 8)" + } + ], + "category": "Shape" + }, + { + "name": "Transpose", + "description": "Transposes tensor dimensions.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "axes", + "required": true, + "type": "int64[]", + "description": "Source axis indices for each axis." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Transposed N-D array." + } + ], + "category": "Shape" + }, + { + "name": "Broadcast", + "description": "Broadcasting ND-array to the specified shape.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "shape", + "required": true, + "type": "shape", + "description": "Shape broadcasted to. The size must be the same in axis where ``x``'s shape is not 1." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Broadcasted N-D array" + } + ], + "category": "Shape" + }, + { + "name": "BroadcastTo", + "description": ".. WARNING::\n This function is experimental support, so please do not actively use it.\n\nBroadcasting ND-array to the specified buffer.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + }, + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": -1, + "description": "Target axis to start broadcasting. 
If this is not set, broadcast will try to fit y to x starting from the last dimension" + } + ], + "outputs": [ + { + "name": "z", + "type": "nnabla.Variable", + "description": "Broadcasted N-D array" + } + ], + "category": "Shape" + }, + { + "name": "Tile", + "description": "Forward input `x` repeated the number of times given by `reps`. If `reps`\nis a sequence, the output has dimension of ``d = max(len(reps), x.ndim)``\nand either `x` is promoted to be d-dimensional by prepending new axes or\n`reps` is promoted to x.ndim by prepending 1's.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "reps", + "required": true, + "type": "int64[]", + "description": "The number of repetitions of `x` along each axis." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Shape" + }, + { + "name": "OneHot", + "description": "This function creates a one-hot vector based on input indices.\nThe range [-shape[i], -1] of input indices is regarded as [0, shape[i]-1],\nand an input index outside [-shape[i], shape[i]-1] generates a vector \nfilled with zeros.\n\n Example:\n\n .. code-block:: python\n\n import nnabla as nn\n import nnabla.functions as F\n import numpy as np\n\n labels = nn.Variable.from_numpy_array(np.array([[9], [4], [5], [-9], [10]]))\n print(labels.shape) # (5, 1)\n\n num_class = 10\n\n y_train = F.one_hot(labels, shape=(num_class, ))\n y_train.forward()\n\n print(y_train.shape) # (5, 10)\n print(y_train.d)\n\n # [[0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]\n # [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]\n # [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]\n\n # Can also be used for ndarray.\n\n labels = nn.Variable.from_numpy_array(np.array([[1, 7], [4, 7], [8, 6], [5, 0], [2, 6]]))\n print(labels.shape) # (5, 2)\n\n num_class_1, num_class_2 = 10, 8\n\n y_train = F.one_hot(labels, shape=(num_class_1, num_class_2))\n y_train.forward()\n\n print(y_train.shape) # (5, 10, 8)\n print(y_train.d)\n\n # [[[0. 0. 0. 0. 0. 0. 0. 0.] [[0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] ... [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.]], [0. 0. 0. 0. 0. 0. 0. 0.]]]", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array representing label indices." + } + ], + "attributes": [ + { + "name": "shape", + "required": true, + "type": "shape", + "description": "Number of classes. When nd-labels are given, dimensions must match. See the example above." + } + ], + "outputs": [ + { + "name": "output", + "type": "nnabla.Variable", + "description": "N-D array one-hot vector/tensor." 
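The promotion rule stated for ``Tile`` above matches NumPy's ``np.tile``, which can serve as a quick sanity check (illustrative only):

.. code-block:: python

    import numpy as np

    x = np.arange(6).reshape(2, 3)
    # reps shorter than x.ndim: reps is left-padded with 1's -> (1, 2).
    assert np.tile(x, (2,)).shape == (2, 6)
    # reps longer than x.ndim: x is promoted by prepending new axes.
    assert np.tile(x, (2, 1, 1)).shape == (2, 2, 3)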
+ } + ], + "category": "Shape" + }, + { + "name": "Flip", + "description": "Reverses the order of elements of the specified dimension of an array.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "default": "[len(x.shape) - 1]", + "description": "The index of the dimension to reverse the order of the elements. Axis indices take on values 0, 1, 2, and so on from the left. For example, to flip a 32 (W) by 24 (H) 100 RGB image (100,3,24,32) vertically and horizontally, specify (2,3)." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Shape" + }, + { + "name": "Shift", + "description": "Shifts the array elements by the specified amount.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "shifts", + "type": "int64[]", + "default": "(0,) * len(x.shape)", + "description": "The amount to shift elements. For example, to shift image data to the right by 2 pixels and up 3 pixels, specify (-3,2)." + }, + { + "name": "border_mode", + "type": "string", + "default": "nearest", + "description": "Specify how to process the ends of arrays whose values will be undetermined as a result of shifting. nearest: The data at the ends of the original array is copied and used. reflect: Original data reflected at the ends of the original array is used." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "category": "Shape" + }, + { + "name": "Sort", + "description": "Sorts the elements of `x` along a given `axis` in ascending\norder by value. A negative `axis` counts from the last dimension\nof `x`, so the default of -1 sorts along the last dimension. If\n`reverse` is True, then the elements are sorted in descending\norder.\n\nIf `with_index` is True, result is a tuple ``(sorted, indices)``\nor only ``indices`` if `only_index` is True. Setting\n`only_index` to True implies that `with_index` is also True.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": -1, + "description": "Axis along which to sort." + }, + { + "name": "reverse", + "type": "boolean", + "default": false, + "description": "Sort in descending order." + }, + { + "name": "with_index", + "type": "boolean", + "default": false, + "description": "Return sorted values and index." + }, + { + "name": "only_index", + "type": "boolean", + "default": false, + "description": "Return only the sort index." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "list": true, + "description": "list of N-D arrays" + } + ], + "category": "Shape" + }, + { + "name": "Reshape", + "description": "Reshapes the input variable in-place. It does not create a copy of the variable.\nThe output variable (y) has a new shape but points to the same data as the input variable (x).\nThis means that if the data in the output variable (y) is modified, the data in the input\nvariable (x) also gets modified since the reshape was done in-place.\n\nNote:\n This function has the same behavior as the :meth:`nnabla.Variable.reshape` method.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." 
+ } + ], + "attributes": [ + { + "name": "shape", + "required": true, + "type": "shape", + "description": "Dimensions for each axis. ``-1`` can be specified only in one shape dimension. The value is calculated from the size of the array and remaining dimensions." + }, + { + "name": "inplace", + "type": "boolean", + "default": true, + "description": "The output array is shared with the input array if True." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Reshaped N-D array" + } + ], + "category": "Shape" + }, + { + "name": "Shape", + "description": "Get the shape of a tensor. Optional attributes start and end can be used to compute\na slice of the input tensor's shape. If start axis is omitted, the slice starts from\naxis 0.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "start", + "type": "int64", + "default": 0, + "description": "If start axis is omitted, the slice starts from axis 0." + }, + { + "name": "end", + "type": "int64", + "default": 0, + "description": "The end axis, if specified, is exclusive (and the returned value will not include." + } + ], + "outputs": [ + { + "name": "shape", + "type": "nnabla.Variable", + "description": "1-D array" + } + ], + "category": "Shape" + }, + { + "name": "MatrixDiag", + "description": "Returns an array where the last two dimensions consist of the diagonal matrix.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N`)." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N \\times M_N`)." + } + ], + "category": "Shape" + }, + { + "name": "MatrixDiagPart", + "description": "Returns an array in which the values of the last dimension consist of the diagonal\nelements of the last two dimensions of an input array.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N \\times M_N`)." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N`)." + } + ], + "category": "Shape" + }, + { + "name": "Trilu", + "description": "Returns an array in which the values of the last dimension consist of the triangular\nmatrix of the last two dimensions of an input array.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N`)." + } + ], + "attributes": [ + { + "name": "k", + "type": "int64", + "default": 0, + "description": "The number diagonals above or below the main diagonal to exclude or include." + }, + { + "name": "upper", + "type": "boolean", + "default": true, + "description": "Determine whether upper or lower part of matrix is retained." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N`)." + } + ], + "category": "Shape" + }, + { + "name": "Meshgrid", + "description": "Return coordinate matrices from coordinate vectors. 
Given N 1-D arrays, this function returns N-D coordinate arrays for vectorized evaluations on an N-D grid.\nExample: \n >>> x,y = F.meshgrid(F.arange(0,3), F.arange(0,2))\n >>> x.d\n array([[0., 1., 2.],\n [0., 1., 2.]], dtype=float32)\n >>> y.d \n array([[0., 0., 0.],\n [1., 1., 1.]], dtype=float32)\n\n >>> i,j = F.meshgrid(F.arange(0,3), F.arange(0,2), ij_indexing=True)\n >>> i.d \n array([[0., 0.],\n [1., 1.],\n [2., 2.]], dtype=float32)\n >>> j.d \n array([[0., 1.],\n [0., 1.],\n [0., 1.]], dtype=float32)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "list": true, + "description": "N-D arrays." + } + ], + "attributes": [ + { + "name": "ij_indexing", + "type": "boolean", + "default": false, + "description": "If set to true (matrix ('ij') indexing), the broadcasting dimensions are swapped. Default is False (Cartesian ('xy') indexing)." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "list": true, + "description": "N-D arrays" + } + ], + "category": "Shape" + }, + { + "name": "BatchDet", + "description": "Batch-wise determinant function.\n\n.. math::\n Y_b = \\det(X_b), \n\nwhere :math:`X_b` and :math:`Y_b` are the :math:`b`-th input and output, respectively.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "batched N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "batched N-D array of determinant" + } + ], + "category": "Shape" + }, + { + "name": "BatchInv", + "description": "Returns an array of inverted matrices.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "batched N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "batched N-D array of inverted matrix" + } + ], + "category": "Shape" + }, + { + "name": "BatchLogdet", + "description": "Batch-wise log absolute determinant function.\n\n.. math::\n Y_b = \\log(|\\det(X_b)|), \n\nwhere :math:`X_b` and :math:`Y_b` are the :math:`b`-th input and output, respectively.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "batched N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "batched N-D array of log absolute determinant" + } + ], + "category": "Shape" + }, + { + "name": "BatchCholesky", + "description": "Batch-wise Cholesky decomposition of a symmetric positive definite matrix.\nThe gradient of this function will be a symmetric matrix.\nThis function does not check whether the given matrix is symmetric positive definite or not.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "batched N-D array" + } + ], + "attributes": [ + { + "name": "upper", + "type": "boolean", + "default": false, + "description": "If true, will return an upper triangular matrix. Otherwise will return a lower triangular matrix." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "batched N-D array of lower/upper triangular matrix." + } + ], + "category": "Shape" + }, + { + "name": "Assign", + "description": "Assign source array to destination array just like `tf.assign`.\nThis is useful to synchronize or manually update parameters.\n\n.. 
code-block:: python\n\n dst = nn.Variable((2, 3, 4))\n src = nn.Variable((2, 3, 4))\n assign = F.assign(dst, src)\n\n assign.forward()\n assert np.allclose(dst.d, src.d) # dst and src have identical values.\n assert np.allclose(assign.d, dst.d) # returned Variable is also identical to dst.\n\nUnlike TensorFlow, the returned Variable has a backward path to `dst`:\n\n.. math::\n\n g_{dst} = g_{y}", + "inputs": [ + { + "name": "dst", + "type": "nnabla.Variable", + "description": "A destination N-D array" + }, + { + "name": "src", + "type": "nnabla.Variable", + "description": "A source N-D array" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "An assigned array" + } + ], + "category": "Shape" + }, + { + "name": "Gather", + "description": "Gather from the input data according to the index.\n\nGiven the input data :math:`X` of :math:`(D_{0}, \\ldots, D_{N-1})` shape and\nthe indices :math:`IDX` of :math:`(I_{0}, \\ldots, I_{M-1})` shape, in case of `batch_dims = 0`,\nthe gather outputs\n\n.. math::\n && Y[d_{0}, \\ldots, d_{axis - 1}, i_{0}, \\ldots, i_{M-1}, d_{axis + 1}, \\ldots, d_{N-1}] = \\\\\n && X[d_{0}, \\ldots, d_{axis - 1}, IDX[i_{0}, \\ldots, i_{M-1}], d_{axis + 1}, \\ldots, d_{N-1}].\n\nGenerally, the gather outputs\n\n.. math::\n && Y[d_{0}, \\ldots, d_{axis - 1}, i_{B}, \\ldots, i_{M-1}, d_{axis + 1}, \\ldots, d_{N-1}] = \\\\\n && X[d_{0}, \\ldots, d_{axis - 1}, IDX[i_{0}, \\ldots, i_{B - 1}, i_{B}, \\ldots, i_{M-1}], d_{axis + 1}, \\ldots, d_{N-1}].\n\nwhere :math:`B` = `batch_dims`.\n\n`x.shape[:batch_dims]` must be equal to `indices.shape[:batch_dims]`.\n\nOutput shape is `x.shape[:axis] + indices.shape[batch_dims:] + x.shape[axis + 1:]`.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Data from which to gather." + }, + { + "name": "Indices", + "type": "nnabla.Variable", + "description": "Index with which to gather." + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": 0, + "description": "Axis in `x` to gather from. `axis` must be greater than or equal to `batch_dims`." + }, + { + "name": "batch_dims", + "type": "int64", + "default": 0, + "description": "The number of batch dimensions." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Gathered output." + } + ], + "category": "Shape" + }, + { + "name": "GatherNd", + "description": "Gather elements or slices from `data` according to `indices`, which must\nbe at least two-dimensional with the first dimension :math:`M` being less\nthan or equal to the :math:`N` dimensions of `data`. Given `data` with shape\n:math:`(X_0, X_1, ..., X_{N-1})` and indices with shape\n:math:`(M, Y_0, ..., Y_{K-1})` output has shape\n:math:`(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})`. If :math:`M == N`, output\nshape is simply :math:`(Y_0, ..., Y_{K-1})`.\n\nThe forward of :func:`~nnabla.functions.gather_nd` is equivalent to:\n\n.. code-block:: python\n\n def gather_nd(data, index):\n import numpy as np\n tmp_index = index.reshape(index.shape[0], -1)\n tmp_index = (idx + (Ellipsis,) for idx in zip(*tmp_index))\n out_shape = index.shape[1:] + data.shape[index.shape[0]:]\n return np.vstack([data[idx] for idx in tmp_index]).reshape(*out_shape)\n\nExamples:\n\n>>> import numpy as np, nnabla as nn, nnabla.functions as F\n>>> nn.set_auto_forward(True)\n>>> data = F.arange(1, 11).reshape([2, 5])\n>>> print(data.d)\n[[ 1. 2. 3. 4. 5.]\n [ 6. 7. 8. 9. 
10.]]\n>>> F.gather_nd(data, [[1, 1, 0]]).shape\n(3, 5)\n>>> F.gather_nd(data, [[1, 1, 0], [0, 1, 0]]).shape\n(3,)\n>>> print(F.gather_nd(data, [[1, 1, 0], [0, 1, 0]]).d)\n[6. 7. 1.]\n>>> print(F.gather_nd(data, [[1, 1, 0]]).d)\n[[ 6. 7. 8. 9. 10.]\n [ 6. 7. 8. 9. 10.]\n [ 1. 2. 3. 4. 5.]]\n\nWhen `indices` is provided as a :obj:`~nnabla.Variable` it will be\npossible to change the actual index values after function creation.\nIt is important to note that out-of-bound indices raise error when\nrunning on CPU but are ignored when using an accelerated computation\ncontext.\n\n>>> indices = nn.Variable((2, 1))\n>>> indices.d = [[0], [0]]\n>>> y = F.gather_nd(data, indices)\n>>> print(y.d)\n[1.]\n>>> indices.d = [[1], [4]]\n>>> y.forward()\n>>> print(y.d)\n[10.]", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array input data" + }, + { + "name": "indices", + "type": "nnabla.Variable", + "description": "N-D array indices" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "category": "Shape" + }, + { + "name": "BoolGather", + "description": "Gather from the input data according to the mask. \n\nGiven an input of :math:`(B_1, \\ldots, B_N, D_1, \\ldots, D_M)` shape and mask of :math:`(B_1, \\ldots, B_N)` shape, the function returns an output of :math:`(nnz, D_1, \\ldots, D_M)` shape and :math:`nnz` is the number of non-zero elements in mask.\n\n.. code-block:: python\n\n import numpy as np\n import nnabla as nn\n import nnabla.functions as F\n\n nn.set_auto_forward(True)\n\n input = nn.Variable.from_numpy_array([[1, 2], [3, 4], [5, 6]])\n mask = nn.Variable.from_numpy_array([1, 0, 1])\n output = F.bool_gather(input, mask)\n \n print(output.d) # [[1, 2], [5, 6]]\n\n\nNote that this function is normally used with the dynamic graph \nsince this function outputs a variable-length output. If used with the static graph, \na network has to be reconstructed at every iteration.", + "inputs": [ + { + "name": "input", + "type": "nnabla.Variable", + "description": "Data from which to gather." + }, + { + "name": "mask", + "type": "nnabla.Variable", + "description": "Mask with which to gather. Non-zero/zero elements are supposed to be a binary mask as 1/0. No gradients are computed with respect to mask." + } + ], + "outputs": [ + { + "name": "output", + "type": "nnabla.Variable", + "description": "Gathered output." + } + ], + "category": "Shape" + }, + { + "name": "ScatterNd", + "description": "Scatter `data` into a new array of given `shape` according to `indices`.\nThis operation is the inverse of :func:`~nnabla.functions.gather_nd`.\n\nThe forward of :func:`~nnabla.functions.scatter_nd` is equivalent to:\n\n.. code-block:: python\n\n def scatter_nd(data, indices, shape):\n import numpy as np\n if isinstance(indices, np.ndarray):\n indices = indices.tolist()\n result = np.zeros(shape, dtype=data.dtype)\n result[indices] = data\n return result\n\nExamples:\n\n>>> import numpy as np, nnabla as nn, nnabla.functions as F\n>>> nn.set_auto_forward(True)\n>>> data = nn.Variable.from_numpy_array(np.array([9, 10, 11, 12]))\n>>> indices = nn.Variable.from_numpy_array(np.array([[4, 3, 1, 7]]))\n>>> scattered = F.scatter_nd(data, indices, shape=(8,))\n>>> print(scattered.d)\n[ 0. 11. 0. 10. 9. 0. 0. 12.]\n>>> print(F.gather_nd(scattered, indices).d)\n[ 9. 10. 11. 12.]", + "inputs": [ + { + "name": "data", + "type": "nnabla.Variable", + "description": "N-D array input data." 
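The output-shape rule stated for ``Gather`` above (with ``batch_dims = 0``) corresponds to NumPy's ``np.take``; a small illustrative check, with shapes invented for the example:

.. code-block:: python

    import numpy as np

    x = np.random.rand(2, 5, 3)
    indices = np.array([[0, 4], [1, 2]])   # shape (2, 2)
    y = np.take(x, indices, axis=1)
    # x.shape[:1] + indices.shape + x.shape[2:] == (2,) + (2, 2) + (3,)
    assert y.shape == (2, 2, 2, 3)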
+ }, + { + "name": "indices", + "type": "nnabla.Variable", + "description": "N-D array scatter indices." + }, + { + "name": "out", + "type": "nnabla.Variable", + "option": "optional", + "description": "existing output array" + } + ], + "attributes": [ + { + "name": "shape", + "type": "int64[]", + "default": "None", + "description": "Shape of output variable." + }, + { + "name": "add", + "type": "boolean", + "default": false, + "description": "Add the input data to the same destination specified by the indices." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array of given `shape`." + } + ], + "category": "Shape" + }, + { + "name": "ScatterAdd", + "description": "Add all values from `x1` into the `x0` according to index specified by `indices`.\nThis function adds `x1` into the copy of `x0` and outputs the copy.\nThe original `x0` will not be changed.\n`x0`, `indices` and `x1` must have same number of dimensions.\n\nThe forward of :func:`~nnabla.functions.scatter_add` is equivalent to:\n\n.. code-block:: python\n\n def scatter_add(x0, indices, x1, axis):\n # Assuming each input is 3 dimensional\n import numpy as np\n output = np.copy(x0)\n for i in range(indices.shape[0]):\n for j in range(indices.shape[1]):\n for k in range(indices.shape[2]):\n if axis == 0:\n output[indices[i][j][k]][j][k] += x1[i][j][k]\n elif axis == 1:\n output[i][indices[i][j][k]][k] += x1[i][j][k]\n elif axis == 2:\n output[i][j][indices[i][j][k]] += x1[i][j][k]\n return output", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array which the data is added to its copy." + }, + { + "name": "indices", + "type": "nnabla.Variable", + "description": "N-D array scatter indices. The size of each dimension must be equal or smaller than that of x0 except for the specified axis. The value of indices must be smaller than the size of specified axis' dimension of x0. The size of each dimension must be equal or smaller than that of x1. Indices must not be negative." + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array which is scattered and added to x0." + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": 0, + "description": "Axis along which to index. The axis must not exceed the inputs' dimension." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array which contains the result of scatter addition. The shape is same as x0." + } + ], + "category": "Shape" + }, + { + "name": "BoolScatter", + "description": "Scatter the `input` according to the `mask`.\n\nGiven an input of :math:`(nnz, D_1, \\ldots, D_M)` shape and mask of :math:`(B_1, \\ldots, B_N)` shape, the function returns an output :math:`(B_1, \\ldots, B_N, D_1, \\ldots, D_M)` and :math:`nnz` is the number of non-zero elements in the mask.\n\n.. 
code-block:: python\n\n import numpy as np\n import nnabla as nn\n import nnabla.functions as F\n\n nn.set_auto_forward(True)\n\n input0 = nn.Variable.from_numpy_array([[1, 2], [3, 4], [5, 6]])\n mask = nn.Variable.from_numpy_array([1, 0, 1])\n output0 = F.bool_gather(input0, mask)\n \n input1 = output0 + 10\n output1 = F.bool_scatter(input1, mask)\n \n print(output1.d) # [[11, 12], [0, 0], [15, 16]] \n\nNote that the higher-order gradients of this function rely on F.gather, thus \nthe higher-order gradients are normally used with the dynamic graph.", + "inputs": [ + { + "name": "input", + "type": "nnabla.Variable", + "description": "Data to be scattered." + }, + { + "name": "mask", + "type": "nnabla.Variable", + "description": "Mask with which to scatter. Non-zero/zero elements are supposed to be a binary mask as 1/0. No gradients are computed with respect to mask." + }, + { + "name": "output", + "type": "nnabla.Variable", + "option": "optional", + "description": "Destination of output. If specified, data are inplaced." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Scattered output." + } + ], + "category": "Shape" + }, + { + "name": "BoolFill", + "description": "Fill the data with the value according to the mask.\n\n.. code-block:: python\n\n import numpy as np\n import nnabla as nn\n import nnabla.functions as F\n\n nn.set_auto_forward(True)\n\n input = nn.Variable.from_numpy_array([[np.inf, 2], [3, np.nan]])\n mask = nn.Variable.from_numpy_array([[1, 0], [0, 1]])\n output = F.bool_fill(input, mask, -1)\n \n print(output.d) # [[-1, 2], [3, -1]]", + "inputs": [ + { + "name": "data", + "type": "nnabla.Variable", + "description": "Data to be filled." + }, + { + "name": "mask", + "type": "nnabla.Variable", + "description": "Mask with which to fill. Non-zero/zero elements are supposed to be a binary mask as 1/0. No gradients are computed with respect to mask." + } + ], + "attributes": [ + { + "name": "value", + "type": "float32", + "default": 0.0, + "description": "Value to fill." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Filled output." + } + ], + "category": "Shape" + }, + { + "name": "PackPaddedSequence", + "description": "Pack padded variable-length sequences.\n\nThis method packs padded variable-length sequences.\n\n:math:`T_i` is the length of the :math:`i`-th Variable in the sequences.\n:math:`B` is the batch size equal to the length of the sequences.\n:math:`T` is the max of :math:`T_i` for all :math:`i`.\n:math:`*` is the remaining dimensions including none.\n\n.. note::\n This function assumes the length-sorted padded sequence in the decreasing order\n and must be used by :func:`~nnabla.utils.rnn.pack_padded_sequence` in the dynamic computation mode.", + "inputs": [ + { + "name": "padded_sequence", + "type": "nnabla.Variable", + "description": "Padded sequence of (:math:`T \\times B \\times *`) or (:math:`B \\times T \\times *`) shape." + }, + { + "name": "lengths", + "type": "nnabla.Variable", + "description": "Sequence length for each batch and always resides in CPU." + } + ], + "attributes": [ + { + "name": "batch_first", + "type": "boolean", + "default": false, + "description": "`padded_sequence` is of (:math:`T`, :math:`B`, :math:`*`) shape if False,\notherwise (:math:`B`, :math:`T`, :math:`*`)." + } + ], + "outputs": [ + { + "name": "pack_sequence", + "type": "nnabla.Variable", + "description": "Packed sequence of (:math:`N`, :math:`*`) shape." 
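A hedged sketch of the packing scheme described for ``PackPaddedSequence`` above, assuming a ``(T, B, *)`` padded input sorted by decreasing length: at step ``t`` only the first ``batch_sizes[t]`` sequences are still active, and their rows are concatenated (``pack_padded`` is a hypothetical helper, not the library's implementation):

.. code-block:: python

    import numpy as np

    def pack_padded(padded, lengths):
        # batch_sizes[t] counts the sequences longer than t.
        batch_sizes = np.array([(lengths > t).sum() for t in range(int(lengths[0]))])
        packed = np.concatenate([padded[t, :batch_sizes[t]] for t in range(len(batch_sizes))])
        return packed, batch_sizes

    padded = np.arange(12, dtype=np.float32).reshape(3, 2, 2)  # (T=3, B=2, 2)
    lengths = np.array([3, 1])                                 # sorted, decreasing
    packed, batch_sizes = pack_padded(padded, lengths)
    assert list(batch_sizes) == [2, 1, 1] and packed.shape == (4, 2)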
+ }, + { + "name": "batch_sizes", + "type": "nnabla.Variable", + "description": "Batch size for each time and always resides in CPU." + } + ], + "category": "Shape" + }, + { + "name": "PadPackedSequence", + "description": "Pad packed sequence.\n\nThis method unpacks the packed sequqnce and pad it, the inverse operation of :func:`pack_padded_sequence`.\n\n:math:`T_i` is the length of the :math:`i`-th Variable in the sequences.\n:math:`B` is the batch size equal to the length of the sequences.\n:math:`T` is the max of :math:`T_i` for all :math:`i`.\n:math:`*` is the remaining dimensions including none.\n\n.. note::\n This function assumes the output of the length-sorted padded sequence in the decreasing order\n and must be used by :func:`~nnabla.utils.rnn.pad_packed_sequence` in the dynamic computation mode.", + "inputs": [ + { + "name": "packed_sequence", + "type": "nnabla.Variable", + "description": "Packed sequence of (:math:`N`, :math:`*`) shape." + }, + { + "name": "batch_sizes", + "type": "nnabla.Variable", + "description": "Batch size for each time and always resides in CPU." + } + ], + "attributes": [ + { + "name": "batch_first", + "type": "boolean", + "default": false, + "description": "`padded_sequence` is of (:math:`T`, :math:`B`, :math:`*`) shape if False,\notherwise (:math:`B`, :math:`T`, :math:`*`)." + }, + { + "name": "padding_value", + "type": "float32", + "default": 0.0, + "description": "Padding value." + }, + { + "name": "total_length", + "type": "int64", + "default": -1, + "description": "If not None, the outputs are padded up to the `total_length`.\nIf the `total_length` is less than the max length in the `sequences`,\nthe error is thrown." + } + ], + "outputs": [ + { + "name": "padded_sequence", + "type": "nnabla.Variable", + "description": "Padded sequence of (:math:`T \\times B \\times *`) or (:math:`B \\times T \\times *`) shape." + }, + { + "name": "lengths", + "type": "nnabla.Variable", + "description": "Sequence length for each batch and always resides in CPU." + } + ], + "category": "Shape" + }, + { + "name": "NonZero", + "description": "Find indices of non-zero elements.\n\nNonZero behaves similar to NonZero Operator in ONNX.\n\nExamples:\n\n>>> import numpy as np, nnabla as nn, nnabla.functions as F\n>>> nn.set_auto_forward(True)\n>>> x = F.arange(1, 10).reshape([3, 3])\n>>> x.d[0, 1] = x.d[1, 2] = x.d[2, 2] = 0\n>>> print(x.d)\n[[1. 0. 3.],\n [4. 5. 0.],\n [7. 8. 0.]]\n>>> y = F.nonzero(x)\n>>> print(y.shape)\n(2, 6)\n>>> print(y.d)\n[[0 0 1 1 2 2],\n [0 2 0 1 0 1]]\n\nNote that this function is normally used with the dynamic graph \nsince this function outputs a variable-length output. If used with \nthe static graph, a network has to be constructed all time in iteration.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D arrays." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array indices." + } + ], + "category": "Shape" + }, + { + "name": "Interpolate", + "description": "Resize an ND array with interpolation.\n\nThe last ``len(output_size)`` dimensions of the input ``x`` are considered as the spatial dimensions to be resized.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "output_size", + "required": true, + "type": "int64[]", + "description": "Output size." 
+ }, + { + "name": "mode", + "required": true, + "type": "string", + "description": "Interpolation mode chosen from ('nearest'|'linear')." + }, + { + "name": "align_corners", + "type": "boolean", + "default": true, + "description": "If true, the corner pixels of input and output arrays are aligned, such that the output corner pixels have the same values with the input corner pixels. The default is ``None``, and it becomes `True` if mode is 'linear', otherwise `False`." + }, + { + "name": "half_pixel", + "type": "boolean", + "default": false, + "description": "If true, in the coordinate transformation, 0.5 is added to the output coordinate and 0.5 is subtracted from the input coordinate after scaling." + }, + { + "name": "half_pixel_for_nn", + "type": "boolean", + "default": false, + "description": "This is a special argument to support the backward-compatibility of the nearest neighbor interpolation. Default is `False`. When in ``True``, the implementation of nearest neighbor interpolation is the old one." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ] + }, + { + "name": "ONNXResize", + "description": "Resize an ND array with interpolation. This function provides a \ncompatible interface to ONNX Resize.\n\nReferences:\n * `ONNX Operators documentation.\n `", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "roi", + "type": "float32[]", + "default": "()", + "description": "RoIs for tf_crop_and_resize." + }, + { + "name": "scales", + "type": "float32[]", + "default": "()", + "description": "Scale factors along axes." + }, + { + "name": "sizes", + "type": "int64[]", + "default": "()", + "description": "Output size." + }, + { + "name": "mode", + "type": "string", + "default": "nearest", + "description": "Interpolation mode chosen from ('nearest'|'linear'|'cubic')." + }, + { + "name": "coordinate_transformation_mode", + "type": "string", + "default": "half_pixel", + "description": "How to transform the coordinate in the resized tensor to the coordinate in the original tensor. This mode is chosen from ('half_pixel'|'pytorch_half_pixel'|'align_corners'|'asymmetric'|'tf_crop_and_resize')." + }, + { + "name": "cubic_coeff_a", + "type": "float32", + "default": -0.75, + "description": "The coefficient used in cubic interpolation." + }, + { + "name": "exclude_outside", + "type": "int64", + "default": 0, + "description": "Whether to set coefficients to zero when sampling locations is outside the input tensor." + }, + { + "name": "extrapolation_value", + "type": "float32", + "default": 0.0, + "description": "An extrapolation value used when a sampling location is outside the input tensor at tf_crop_and_resize mode." + }, + { + "name": "nearest_mode", + "type": "string", + "default": "round_prefer_floor", + "description": "Rounding mode for nearest-neighbor interpolation." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ] + }, + { + "name": "FFT", + "description": "Complex-to-complex Discrete Fourier Transform,\n\n.. math::\n\n X_{k_1, \\ldots, k_d} = \\sum_{n_1=0}^{N_1-1} \\dots \\sum_{n_d=0}^{N_d-1} x_{n_1, \\ldots, n_d} \\exp\\left(-2 \\pi j \\left( \\sum_{i=0}^{d} \\frac{k_i n_i}{N_i} \\right) \\right),\n\nwhere\n\n.. 
math::\n\n k_i = 0, \\ldots, N_i - 1.\n\nThis function now supports 1-D, 2-D, and 3-D DFT with or without the leading batch dimension(s).\n\nThe input is expected to be complex-valued with at least signal_ndim + 1 dimensions.\nThe last dimension has a shape of two where x[..., 0] is the real part and x[..., 1] the imaginary part.\n\nExample:\n\n.. code-block:: python\n\n import numpy as np\n import nnabla as nn\n import nnabla.functions as F\n from nnabla.ext_utils import get_extension_context\n\n ctx = get_extension_context(\"cudnn\")\n nn.set_default_context(ctx)\n\n # Example for a batched 2D-FFT and 2D-IFFT (batch-size: 2, data-size: 4x3)\n x_data = np.random.rand(2, 4, 3) + 1j * np.random.rand(2, 4, 3)\n x = nn.Variable.from_numpy_array(np.stack([np.real(x_data), np.imag(x_data)], axis=3))\n y = F.fft(x, signal_ndim=2, normalized=True)\n z = F.ifft(y, signal_ndim=2, normalized=True)\n z.forward()\n\n np.allclose(z.d[..., 0] + 1j*z.d[...,1], x_data)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input." + } + ], + "attributes": [ + { + "name": "signal_ndim", + "required": true, + "type": "int64", + "description": "The number of dimensions for each signal. It must be 1, 2, or 3." + }, + { + "name": "normalized", + "type": "boolean", + "default": false, + "description": "Use unitary normalization. If `True`, the normalization constant :math:`\\sqrt{\\frac{1}{\\prod_{i=1}^{d} N_i}}` is multiplied." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "FFT transformed signal." + } + ] + }, + { + "name": "IFFT", + "description": "Complex-to-complex inverse Discrete Fourier Transform,\n\n.. math::\n\n X_{k_1, \\ldots, k_d} = \\frac{1}{\\prod_{i=1}^{d} N_i} \\sum_{n_1=0}^{N_1-1} \\dots \\sum_{n_d=0}^{N_d-1} x_{n_1, \\ldots, n_d} \\exp\\left(2 \\pi j \\left( \\sum_{i=0}^{d} \\frac{k_i n_i}{N_i} \\right) \\right),\n\nwhere\n\n.. math::\n\n k_i = 0, \\ldots, N_i - 1.\n\nThis function now supports 1-D, 2-D, and 3-D DFT with or without the leading batch dimension(s).\n\nThe input is expected to be complex-valued with at least signal_ndim + 1 dimensions.\nThe last dimension has a shape of two where x[..., 0] is the real part and x[..., 1] the imaginary part.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input." + } + ], + "attributes": [ + { + "name": "signal_ndim", + "required": true, + "type": "int64", + "description": "The number of dimensions for each signal. It must be 1, 2, or 3." + }, + { + "name": "normalized", + "type": "boolean", + "default": false, + "description": "Use unitary normalization. If `True`, the normalization constant :math:`\\frac{1}{\\prod_{i=1}^{d} N_i}` becomes :math:`\\sqrt{\\frac{1}{\\prod_{i=1}^{d} N_i}}`." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "IFFT transformed signal." + } + ] + }, + { + "name": "STFT", + "description": "Short-time Fourier transform.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Time domain sequence of size `batch_size x sample_size`." + } + ], + "attributes": [ + { + "name": "window_size", + "required": true, + "type": "int64", + "description": "Size of STFT analysis window." + }, + { + "name": "stride", + "required": true, + "type": "int64", + "description": "Number of samples that we shift the window, also called `hop size`." 
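The unitary scaling described by the ``normalized`` option of FFT/IFFT above can be checked against NumPy's FFT; a minimal sketch (NumPy's inverse transform already divides by N, hence the compensating factor):

.. code-block:: python

    import numpy as np

    x = np.random.rand(4, 3) + 1j * np.random.rand(4, 3)
    # Unitary forward transform: multiply by sqrt(1 / prod(N_i)).
    y = np.fft.fft2(x) * np.sqrt(1.0 / x.size)
    # Unitary inverse: np.fft.ifft2 divides by N, so scale back up.
    z = np.fft.ifft2(y) * np.sqrt(x.size)
    assert np.allclose(z, x)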
+ }, + { + "name": "fft_size", + "required": true, + "type": "int64", + "description": "Size of the FFT, the output will have `fft_size // 2+ 1` frequency bins." + }, + { + "name": "window_type", + "type": "string", + "default": "hanning", + "description": "Analysis window, can be either `hanning`, `hamming` or `rectangular`." + }, + { + "name": "center", + "type": "boolean", + "default": true, + "description": "If `True`, then the signal `x` is padded by half the FFT size using reflection padding." + }, + { + "name": "pad_mode", + "type": "string", + "default": "reflect", + "description": "Padding mode, which can be `'constant'` or `'reflect'`. `'constant'` pads with `0`." + }, + { + "name": "as_istft_backward", + "type": "boolean", + "default": false, + "description": "If `True`, then forward execution behaves as backward execution of ISTFT, treating input `x` as output gradient of ISTFT and outputs `y_r` and `y_i` as inputs gradient of ISTFT. This option is only used in nn.grad operator." + } + ], + "outputs": [ + { + "name": "y_r", + "type": "nnabla.Variable", + "description": "Real part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`." + }, + { + "name": "y_i", + "type": "nnabla.Variable", + "description": "Imaginary part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`." + } + ] + }, + { + "name": "ISTFT", + "description": "Inverse short-time Fourier transform.\n\n.. note::\n We use a constant square inverse window for the reconstruction of the time-domain signal, therefore, the first and last `window_size - stride` are not perfectly reconstructed.", + "inputs": [ + { + "name": "y_r", + "type": "nnabla.Variable", + "description": "Real part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`." + }, + { + "name": "y_i", + "type": "nnabla.Variable", + "description": "Imaginary part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`." + } + ], + "attributes": [ + { + "name": "window_size", + "required": true, + "type": "int64", + "description": "Size of STFT analysis window." + }, + { + "name": "stride", + "required": true, + "type": "int64", + "description": "Number of samples that we shift the window, also called `hop size`." + }, + { + "name": "fft_size", + "required": true, + "type": "int64", + "description": "Size of the FFT, the output will have `fft_size // 2+ 1` frequency bins." + }, + { + "name": "window_type", + "type": "string", + "default": "hanning", + "description": "Analysis window, can be either `hanning`, `hamming` or `rectangular`." + }, + { + "name": "center", + "type": "boolean", + "default": true, + "description": "If `True`, then the signal `x` is padded by half the FFT size using reflection padding." + }, + { + "name": "pad_mode", + "type": "string", + "default": "reflect", + "description": "Padding mode corresponding to STFT `pad_mode`, which can be `'constant'` or `'reflect'`. `'constant'` pads with `0`. This option is ignored for the normal use of ISTFT. You need to set the same `pad_mode` only when `as_stft_backward == True`." + }, + { + "name": "as_stft_backward", + "type": "boolean", + "default": false, + "description": "If `True`, then forward execution behaves as backward execution of STFT, treating inputs `y_r` and `y_i` as outputs gradient of STFT and output `x` as input gradient of STFT. This option is only used in nn.grad operator." + } + ], + "outputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Time domain sequence of size `batch_size x sample_size`." 
+ } + ] + }, + { + "name": "Dropout", + "description": "Dropout.\nSamples a number :math:`u` from a uniform distribution in :math:`[0, 1]` ,\nand ignores the input if :math:`u \\leq p`.\n\n.. math::\n y = \\left\\{\n \\begin{array}{ll}\n \\frac{x}{1 - p} & (u > p) \\\\\n 0 & ({\\rm otherwise})\n \\end{array} \\right.\n\nNote:\n Usually dropout only applied during training as below\n (except `MC dropout`_). If you want to use dropout as an MC dropout, remove 'if train:'.\n\n .. code-block:: python\n\n h = PF.affine(x, num_hidden)\n if train:\n h = F.dropout(h, 0.5)\n\n.. _MC dropout: https://arxiv.org/abs/1506.02142", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "p", + "type": "float64", + "default": 0.5, + "description": ":math:`p` in definition." + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "TopKData", + "description": "Select the `k` largest values from each sample in `x` to\npropagate unmodified and set all other values to 0. If `abs` is\nTrue, the `k` largest values are selected by magnitude. If\n`reduce` is True (the default), all feature dimensions are\nreduced to a single dimension of size `k` that propagates only\nthe `k` largest values. Otherwise, if `reduce` is False, input\nand output dimensions are identical. Dimensions before\n`base_axis` are treated as number of sample dimensions and `k`\nvalues get selected from all elements of a sample (dimensions\nfrom `base_axis`) regardless of shape.\n\n>>> import nnabla as nn, nnabla.functions as F\n>>> x = nn.Variable((4, 5, 6))\n>>> F.top_k_data(x, 3, reduce=False).shape\n(4, 5, 6)\n>>> F.top_k_data(x, 3, reduce=True).shape\n(4, 3)\n>>> F.top_k_data(x, 3, reduce=True, base_axis=2).shape\n(4, 5, 3)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "k", + "required": true, + "type": "int64", + "description": "Number of largest data values to propagate." + }, + { + "name": "abs", + "type": "boolean", + "default": false, + "description": "Determine largest data values by magnitude." + }, + { + "name": "reduce", + "type": "boolean", + "default": true, + "description": "Reduce feature size to one dimension of size `k`." + }, + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "First dimension of the sample shape." + }, + { + "name": "largest", + "type": "boolean", + "default": true, + "description": "Whether to select the `k` largest or smallest values." + }, + { + "name": "with_index", + "type": "boolean", + "default": false, + "description": "Return top-k values and indices." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + }, + { + "name": "indices", + "type": "nnabla.Variable", + "description": "N-D array of top-k indices." + } + ] + }, + { + "name": "TopKGrad", + "description": "Select the `k` largest gradients for each sample in `x` to\nback-propagate unmodified and set all other gradients to 0. If\n`abs` is True, the `k` largest gradients are selected by\nmagnitude. 
Dimensions before `base_axis` are treated as number\nof sample dimensions and `k` gradients get selected from all\ngradients of a sample (dimensions from `base_axis`) regardless\nof shape.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "k", + "required": true, + "type": "int64", + "description": "Number of largest gradients to propagate." + }, + { + "name": "abs", + "type": "boolean", + "default": false, + "description": "Determine largest gradients by magnitude." + }, + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "First dimension of the sample shape." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with same shape and data as `x`." + } + ] + }, + { + "name": "Rand", + "description": "Samples numbers from a uniform distribution :math:`x \\sim U(low, high)`\ngiven lowest value :math:`low`, upper bound :math:`high`,\nand shape of the returned Variable.", + "attributes": [ + { + "name": "low", + "type": "float32", + "default": 0.0, + "description": ":math:`low` in definition." + }, + { + "name": "high", + "type": "float32", + "default": 1.0, + "description": ":math:`high` in definition." + }, + { + "name": "shape", + "type": "shape", + "default": "[]", + "description": "Shape of returned variable." + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Variable with the shape specified in the argument." + } + ] + }, + { + "name": "Randint", + "description": "Samples integer numbers from a uniform distribution :math:`x \\sim U(low, high)`\ngiven lowest value :math:`low`, upper bound :math:`high`, and the shape of the returned Variable. The lowest\nvalue :math:`low` is included in the range, while the upper bound :math:`high` is excluded, corresponding to the half-open\ninterval :math:`[low, high)`.", + "attributes": [ + { + "name": "low", + "type": "int64", + "default": 0, + "description": ":math:`low` in definition." + }, + { + "name": "high", + "type": "int64", + "default": 1, + "description": ":math:`high` in definition." + }, + { + "name": "shape", + "type": "shape", + "default": "[]", + "description": "Shape of returned variable." + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Variable with the shape specified in the argument. The dtype is int32." + } + ] + }, + { + "name": "Randn", + "description": "Samples numbers from a normal distribution :math:`x \\sim N(\\mu, \\sigma)`\ngiven mean :math:`\\mu`, standard deviation :math:`\\sigma`,\nand shape of the returned Variable.", + "attributes": [ + { + "name": "mu", + "type": "float32", + "default": 0.0, + "description": ":math:`\\mu` in definition." + }, + { + "name": "sigma", + "type": "float32", + "default": 1.0, + "description": ":math:`\\sigma` in definition." + }, + { + "name": "shape", + "type": "shape", + "default": "[]", + "description": "Shape of returned variable." + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." 
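The three random generators above share the same `shape`/`seed` interface; a short sketch, assuming the Python bindings expose them as `F.rand`, `F.randint`, and `F.randn`:

.. code-block:: python

    import nnabla as nn
    import nnabla.functions as F

    nn.set_auto_forward(True)

    u = F.rand(low=0.0, high=1.0, shape=(2, 3), seed=313)    # uniform over [0, 1)
    i = F.randint(low=0, high=10, shape=(2, 3), seed=313)    # int32 in [0, 10)
    g = F.randn(mu=0.0, sigma=1.0, shape=(2, 3), seed=313)   # N(0, 1)
    print(u.d, i.d, g.d, sep="\n")

Fixing `seed` makes the draw reproducible; with `seed=-1` (the default) the seed is taken from the global generator.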
+ } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Variable with the shape specified in the argument." + } + ] + }, + { + "name": "RandBinomial", + "description": "Samples numbers from a binomial distribution :math:`x \\sim B(n, p)`\ngiven the numbers of trials :math:`n`, probability :math:`p`,\nand shape of the returned Variable.\nWhen :math:`n = 1`, this behaves like the Bernoulli distriburion.", + "attributes": [ + { + "name": "n", + "type": "int64", + "default": 1, + "description": ":math:`n` in definition, the number of trials." + }, + { + "name": "p", + "type": "float32", + "default": 0.5, + "description": ":math:`p` in definition, probability of success." + }, + { + "name": "shape", + "type": "shape", + "default": "[]", + "description": "Shape of returned variable." + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Variable with the shape specified in the argument." + } + ] + }, + { + "name": "RandBeta", + "description": "Samples numbers from a beta distribution :math:`x \\sim \\beta(\\alpha, \\beta)`.", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "default": 0.5, + "description": ":math:`\\alpha`, scale parameter." + }, + { + "name": "beta", + "type": "float32", + "default": 0.5, + "description": ":math:`\\beta`, scale parameter." + }, + { + "name": "shape", + "type": "shape", + "default": "[]", + "description": "Shape of returned variable." + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Variable with the shape specified in the argument." + } + ] + }, + { + "name": "RandGamma", + "description": "Samples numbers from a gamma distribution :math:`x \\sim \\frac {\\gamma(k, \\frac {x}{\\theta})}{\\Gamma(k)}`.", + "attributes": [ + { + "name": "k", + "type": "float32", + "default": 0.5, + "description": "k, scale parameter." + }, + { + "name": "theta", + "type": "float32", + "default": 1.0, + "description": ":math:`\\theta`, scale parameter." + }, + { + "name": "shape", + "type": "shape", + "default": "[]", + "description": "Shape of returned variable." + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Variable with the shape specified in the argument." + } + ] + }, + { + "name": "RandomChoice", + "description": "Generate random samples from population `x` with selection probabilities\ndetermined by the relative weights `w`. The number of samples to draw is\ngiven by the product of `shape`\\s dimensions, and the samples are returned\nwith the given `shape`. By default, samples are drawn with replacement,\ni.e. selection of a specific population member is solely determined by\nits associated weight. Sampling without replacement, where any population\nmember may be drawn only once, is used if `replace` is set to False.\n\nFor both `x` and `w` the innermost dimension corresponds to the individual\npopulations and their weights from which samples are returned with the\nrequested `shape` following all outermost dimensions of the input.\n\n.. 
code-block:: python\n\n import nnabla as nn\n import nnabla.functions as F\n import numpy as np\n nn.set_auto_forward(True)\n\n # x holds two populations\n x = nn.Variable.from_numpy_array(np.array([[11, 22, 33], [110, 220, 330]]))\n # w holds the weights for each population\n w = nn.Variable.from_numpy_array(np.array([[10, 20, 70], [70, 20, 10]]))\n\n # draw one sample from each population\n y = F.random_choice(x, w) # y.shape => (2, 1)\n\n # draw 12 samples with shape (3, 4) from each population\n y = F.random_choice(x, w, shape=(3, 4)) # y.shape => (2, 3, 4)\n\nNote that weights must not be less than zero and for each population the\nsum of weights must be greater than zero. Additionally, sampling without\nreplacement requires that the number of non-zero weights is not less than\nthe number of samples to be drawn. These conditions are verified in \"cpu\"\ncomputation context but not when using \"cuda\" or \"cudnn\" acceleration\n(this would require additional device synchronization steps penalizing\nperformance).\n\nRandom sampling from an implicit array of index values (like categorical\nor multinomial) can be realized with input `x` constructed as indices.\n\n.. code-block:: python\n\n w = nn.Variable.from_numpy_array(np.array([1, 2, 3, 2, 1]))\n y = F.random_choice(F.arange(0, 5), w)", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array from which a random sample is generated." + }, + { + "name": "w", + "type": "nnabla.Variable", + "description": "N-D array of associated weights of elements in `x`." + } + ], + "attributes": [ + { + "name": "shape", + "type": "shape", + "default": "[]", + "description": "Number and shape of generated samples." + }, + { + "name": "replace", + "type": "boolean", + "default": true, + "description": "Whether sampling is with or without replacement." + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "RandomCrop", + "description": "RandomCrop randomly extracts a portion of an array.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "shape", + "type": "shape", + "default": "x.shape", + "description": "The data size to extract. For example, to randomly extract a portion of the image (3,48,48) from a 3,64,64 image, specify (3,48,48)." + }, + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "No Description" + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "RandomFlip", + "description": "Reverses the order of elements of the specified dimension of an array at 50% probability.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "default": "[len(x.shape) - 1]", + "description": "The index of the axis to reverse the order of the elements. Axis indices take on values 0, 1, 2, and so on from the left. For example, to flip a 32 (W) by 24 (H) 100 RGB images (100, 3,24,32) vertically and horizontally at random, specify (2,3)." 
+ }, + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "No Description" + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "RandomShift", + "description": "Randomly shifts the array elements within the specified range.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "shifts", + "type": "int64[]", + "default": "(0,) * len(x.shape)", + "description": "Max absolute amount to shift elements. For example, to shift image data horizontally by :math:`\\pm 2` pixels and vertically by :math:`\\pm 3` pixels, specify (3,2)." + }, + { + "name": "border_mode", + "type": "string", + "default": "nearest", + "description": "Specify how to process the ends of arrays whose values will be undetermined as a result of shifting. nearest: The data at the ends of the original array is copied and used. reflect: Original data reflected at the ends of the original array is used. constant: Constant value is used." + }, + { + "name": "constant_value", + "type": "float32", + "default": 0.0, + "description": "Value used for outside of the original array if border_mode='constant'." + }, + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "No Description" + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ] + }, + { + "name": "RandomErase", + "description": "Randomly erase patches of the inputs and replace with random values.\n\nErasing is applied for each sample and for each `n` with the given probability, the randomly\nselected area ratio and aspect ratio if `share` is `True`;\notherwise (`share`=`False`), for each feature additionally.\n\nRandom patch are selected by random coordinates as the following,\n\n.. math::\n\n S_e &&= Uniform(s_l, s_h) \\times S \\\\\n r_e &&= Uniform(r_l, r_h) \\\\\n H_e &&= \\sqrt{S_e \\times r_e} \\\\\n W_e &&= \\sqrt{S_e / r_e} \\\\\n y_e &&= Uniform(0, H - H_e) \\\\\n x_e &&= Uniform(0, W - W_e),\n\nwhere :math:`S` is the area, :math:`s_l` and :math:`s_h` are the low and high values of\nthe area ratio range, :math:`r_l` and :math:`r_h` are the low and high values\nof the aspect ratio range, :math:`H_e` and :math:`W_e` are height and width of a patch,\nand :math:`y_e` and :math:`x_e` are the start coordinates of a patch. If a pixel of the inputs\nfalls in this patch, the value of that pixel is replaced with a random value in `replacements`\nrange.\n\nBackward is implemented as passing gradients if `ste_fine_grained` is False; otherwise,\nthe backward only occurs in regions not erased.\n\nReferences:\n\n * `Zhun Zhong, Liang Zheng, Guoliang Kang, Shaozi Li, Yi Yang,\n Random Erasing Data Augmentation,\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "prob", + "type": "float32", + "default": 0.5, + "description": "Probability to erase." + }, + { + "name": "area_ratios", + "type": "float32[]", + "default": "(0.02, 0.4)", + "description": "Low and high of the area ratio range." 
+ }, + { + "name": "aspect_ratios", + "type": "float32[]", + "default": "(0.3, 3.3333)", + "description": "Low and high of the aspect ratios range." + }, + { + "name": "replacements", + "type": "float32[]", + "default": "(0.0, 255.0)", + "description": "Low and high of the replacement value range." + }, + { + "name": "n", + "type": "int64", + "default": 1, + "description": "Max number of patches to be erased." + }, + { + "name": "share", + "type": "boolean", + "default": true, + "description": "Use a same bounding box randomly picked over the feature dimension when being True. Default is True." + }, + { + "name": "inplace", + "type": "boolean", + "default": false, + "description": "This option is obsolete and ignored. Output is never in-placed with input." + }, + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Dimensions up to base_axis is treated as sample dimension." + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order." + }, + { + "name": "ste_fine_grained", + "type": "boolean", + "default": true, + "description": "Straight Through Estimator is fine-grained or not. Default is True." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ] + }, + { + "name": "ImageAugmentation", + "description": "ImageAugmentation randomly alters the input image.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "shape", + "type": "shape", + "default": "x.shape", + "description": "The output image data size." + }, + { + "name": "pad", + "type": "shape", + "default": "(0, 0)", + "description": "Border padding values for each spatial axis. Padding will be added both sides of the dimension." + }, + { + "name": "min_scale", + "type": "float32", + "default": 1.0, + "description": "The minimum scale ratio when randomly scaling the image. For example, to scale down to 0.8 times the size of the original image, specify \"0.8\". To not apply random scaling, set both min_scale and max_scale to \"1.0\"." + }, + { + "name": "max_scale", + "type": "float32", + "default": 1.0, + "description": "The maximum scale ratio when randomly scaling the image. For example, to scale down to 2 times the size of the original image, specify \"2.0\"." + }, + { + "name": "angle", + "type": "float32", + "default": 0.0, + "description": "The rotation angle range in radians when randomly rotating the image. The image is randomly rotated in the -Angle to +Angle range. For example, to rotate in a +-15 degree range, specify \"0.26\" (15 degrees/360 degrees * 2PI). To not apply random rotation, specify \"0.0\"." + }, + { + "name": "aspect_ratio", + "type": "float32", + "default": 1.0, + "description": "The aspect ratio range when randomly deforming the image. For example, to deform aspect ratio of image from 1:1.3 to 1.3:1, specify \"1.3\". To not apply random deforming, specify \"1.0\"." + }, + { + "name": "distortion", + "type": "float32", + "default": 0.0, + "description": "The distortion range when randomly distorting the image. To not apply distortion, specify \"0.0\"." 
+ }, + { + "name": "flip_lr", + "type": "boolean", + "default": false, + "description": "Whether to randomly flip the image horizontally at 50% probability." + }, + { + "name": "flip_ud", + "type": "boolean", + "default": false, + "description": "Whether to randomly flip the image vertically at 50% probability." + }, + { + "name": "brightness", + "type": "float32", + "default": 0.0, + "description": "The absolute range of values to randomly add to the brightness. A random value in the -Brightness to +Brightness range is added to the brightness. For example, to vary the brightness in the -0.05 to +0.05 range, specify \"0.05\". To not apply random addition to brightness, specify \"0.0\"." + }, + { + "name": "brightness_each", + "type": "boolean", + "default": false, + "description": "Whether to apply the random addition to brightness (as specified by brightness) to each color channel. True: brightness is added based on a different random number for each channel. False: brightness is added based on a random number common to all channels." + }, + { + "name": "contrast", + "type": "float32", + "default": 1.0, + "description": "The range in which to randomly vary the image contrast. The contrast is varied in the 1/Contrast times to Contrast times range. The output brightness is equal to (input - contrast_center) * contrast + contrast_center. For example, to vary the contrast in the 0.91 times to 1.1 times range, specify \"1.1\". To not apply random contrast variation, specify \"1.0\"." + }, + { + "name": "contrast_center", + "type": "float32", + "default": 0.0, + "description": "Intensity center used for applying contrast." + }, + { + "name": "contrast_each", + "type": "boolean", + "default": false, + "description": "Whether to apply the random contrast variation (as specified by contrast) to each color channel. True: contrast is varied based on a different random number for each channel. False: contrast is varied based on a random number common to all channels." + }, + { + "name": "noise", + "type": "float32", + "default": 0.0, + "description": "Sigma of normal random number to be added." + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ] + }, + { + "name": "SigmoidCrossEntropy", + "description": "Element-wise cross entropy between `x` and the target variables, passed to a sigmoid function.\n\n.. math::\n y_i = - \\left(x^{(1)}_i \\ln \\left(\\sigma \\left(x^{(0)}_i \\right)\\right) + \\\n \\left(1 - x^{(1)}_i\\right) \\ln \\left(1 - \\sigma \\left(x^{(0)}_i \\\n \\right)\\right)\\right)\n\nwhere :math:`\\sigma(s)=\\frac{1}{1+\\exp(-s)}`.\n\nNote:\n SigmoidCrossEntropy is equivalent to Sigmoid+BinaryCrossEntropy, but computing them at once has the effect of reducing computational error.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array. Typically indicates a score. The value lies in :math:`[-\\infty, \\infty]`" + }, + { + "name": "target", + "type": "nnabla.Variable", + "description": "N-D array of labels. Only 0 or 1 value is allowed." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array of element-wise losses." + } + ] + }, + { + "name": "BinaryCrossEntropy", + "description": "Element-wise cross entropy between `x` and the target variables.\n\n.. 
math::\n y_i = - \\left(x^{(1)}_i * \\ln \\left(x^{(0)}_i\\right) + \\left(1 - \\\n x^{(1)}_i\\right) * \\ln \\left(1 - x^{(0)}_i\\right)\\right).", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Probabilities N-D array. Each element should lie in :math:`(0, 1)`." + }, + { + "name": "target", + "type": "nnabla.Variable", + "description": "N-D array of labels. Usually set as 0 or 1, but, unlike SigmoidCrossEntropy, it allows probabilities (0 to 1) as inputs, through which backpropagation can be done." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array of element-wise losses." + } + ] + }, + { + "name": "SoftmaxCrossEntropy", + "description": "Element-wise cross entropy between the variables and the variables of a label given by a category index with Softmax normalization.\n\n.. math::\n y_{j} = -\\ln \\left(\\frac{\\exp(x_{j,t_j})}{\\sum_{i'} \\exp(x_{j,i'})}\\right)\n\nalong dimension specified by axis (:math:`i` is the axis on which normalization is performed).\n\nNote:\n SoftmaxCrossEntropy is equivalent to Softmax+CategoricalCrossEntropy, but computing them at once has the effect of reducing computational error.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array. Typically indicates a score. :math:`(D_1 \\times ... \\times D_i \\times ... \\times D_N)`" + }, + { + "name": "target", + "type": "nnabla.Variable", + "description": "N-D array of labels. :math:`(D_1 \\times ... \\times 1 \\times ... \\times D_N)`, where each label should be an index from 0 to the number of classes - 1, or -1 if it does not belong to any class." + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": "len(x.shape) - 1", + "description": "The axis on which normalization is taken." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array of element-wise losses. :math:`(D_1 \\times ... \\times 1 \\times ... \\times D_N)`" + } + ] + }, + { + "name": "CategoricalCrossEntropy", + "description": "Element-wise cross entropy between `x` and the target `t` where targets are given by a category index.\n\n.. math::\n y_{j} = -\\ln \\left( x_{j, t_j} \\right)\n\nalong dimension specified by axis (:math:`i` is the axis on which normalization is performed).", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array. Typically indicates a score. :math:`(D_1 \\times ... \\times D_i \\times ... \\times D_N)`" + }, + { + "name": "target", + "type": "nnabla.Variable", + "description": "N-D array of labels. :math:`(D_1 \\times ... \\times 1 \\times ... \\times D_N)`, where each label should be an index from 0 to the number of classes - 1, or -1 if it does not belong to any class." + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": "len(x.shape) - 1", + "description": "The axis on which normalization is taken." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array of element-wise losses. :math:`(D_1 \\times ... \\times 1 \\times ... \\times D_N)`" + } + ] + }, + { + "name": "SquaredError", + "description": "Element-wise squared error\n\n.. math::\n y_i = \\left(x^{(0)}_i - x^{(1)}_i\\right)^2.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array." + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array."
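For the two index-based cross-entropy entries above, the label variable carries one class index per sample with a singleton dimension along `axis`; a sketch, assuming `F.softmax_cross_entropy` from the Python bindings:

.. code-block:: python

    import numpy as np
    import nnabla as nn
    import nnabla.functions as F

    nn.set_auto_forward(True)

    logits = nn.Variable.from_numpy_array(
        np.random.randn(8, 10).astype(np.float32))       # (batch, classes)
    labels = nn.Variable.from_numpy_array(
        np.random.randint(0, 10, size=(8, 1)))           # class indices, (batch, 1)

    loss = F.mean(F.softmax_cross_entropy(logits, labels))
    # Numerically preferable to the two-step
    # F.categorical_cross_entropy(F.softmax(logits), labels), per the note above.
    print(loss.d)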
+ } + ] + }, + { + "name": "AbsoluteError", + "description": "Element-wise absolute error\n\n.. math::\n y_i = | x^{(0)}_i - x^{(1)}_i |.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array." + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ] + }, + { + "name": "HuberLoss", + "description": "Element-wise Huber loss\n\n.. math::\n y_i= \\left\\{\n \\begin{array}{ll}\n d^2 & (|d| < \\delta)\\\\\n \\delta (2 |d| - \\delta) & ({\\rm otherwise})\n \\end{array} \\right.\n\nwhere :math:`d = x^{(0)}_i - x^{(1)}_i`", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array." + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "delta", + "type": "float32", + "default": 1.0, + "description": "Delta" + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array of element-wise losses." + } + ] + }, + { + "name": "EpsilonInsensitiveLoss", + "description": "Element-wise Epsilon Insensitive Loss\n\n.. math::\n y_i= \\left\\{\n \\begin{array}{ll}\n | x^{(0)}_i - x^{(1)}_i | - \\epsilon & if \\ \\ | x^{(0)}_i - x^{(1)}_i | > \\epsilon \\\\\n\t\t\t0 & otherwise\n \\end{array} \\right.", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "N-D array." + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "attributes": [ + { + "name": "epsilon", + "required": true, + "type": "float32", + "description": "Insensitive parameter." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array of element-wise losses." + } + ] + }, + { + "name": "KLMultinomial", + "description": "The Kullback Leibler Divergence for multinomial distributions.\n\n.. math::\n D = \\sum_i p_i \\log \\left( \\frac{p_i}{q_i} \\right)", + "inputs": [ + { + "name": "p", + "type": "nnabla.Variable", + "description": "N-D array of the source categorical probabilities" + }, + { + "name": "q", + "type": "nnabla.Variable", + "description": "N-D array of the target categorical probabilities" + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Dimensions up to base_axis is treated as sample dimension." + } + ], + "outputs": [ + { + "name": "D", + "type": "nnabla.Variable", + "description": "Kullback Leibler divergence :math:`KL(p \\parallel q)`." + } + ] + }, + { + "name": "AffineGrid", + "description": "Generate the source grid based on the normalized target grid with `size`.\nThe target grid is first normalized in [-1, 1], then\ntranformed by the affine transformation :math:`\\theta` to generate\nthe source grid. 2D and 3D grid are supported now.\n\nThis function is normally used with the `warp_by_grid` function for\nconstructing the spatial transformer.", + "inputs": [ + { + "name": "theta", + "type": "nnabla.Variable", + "description": "N-D array with the shape (:math:`B \\times 2 \\times 3`), the sample-wise affine transformation matrix." + } + ], + "attributes": [ + { + "name": "size", + "required": true, + "type": "int64[]", + "description": "The grid size of (:math:`H \\times W`) for 2D and (:math:`D \\times H \\times W`) for 3D." 
+ }, + { + "name": "align_corners", + "type": "boolean", + "default": false, + "description": "If `True`, the top-left and bottom-right pixels correspond to (-1, -1) and (1, 1) respectively since a pixel is located on the corner of a grid, and the target grid is normalized in [-1, 1].\nIf `False`, the normalized target grid in [-1, 1] is scaled by `size - 1 / size` according to the respective spatial size (e.g., :math:`H` and :math:`W`) before the transformation since a pixel is located on a center of a cell in a grid." + } + ], + "outputs": [ + { + "name": "grid", + "type": "nnabla.Variable", + "description": "N-D array with the shape (:math:`B \\times H \\times W \\times 2`) for 2D and (:math:`B \\times D \\times H \\times W \\times 3`) for 3D. The last dimension of 2 is for (x, y) and of 3 for (x, y, z). The `gird` is used as the source grid for the warping." + } + ] + }, + { + "name": "WarpByGrid", + "description": "Warp the input data by the grid.\nThis function is normally used with the generated normalized grid by\nthe `affine_grid` function for constructing the spatial transformer.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input data to be warped with the shape (:math:`B \\times C \\times H_{in} \\times W_{in}`) for 2D and (:math:`B \\times C \\times D_{in} \\times H_{in} \\times W_{in}`) for 3D." + }, + { + "name": "grid", + "type": "nnabla.Variable", + "description": "Grid warping the input data with the shape (:math:`B \\times H_{out} \\times W_{out} \\times 2`) for 2D and (:math:`B \\times D_{out} \\times H_{out} \\times W_{out} \\times 3`) for 3D. The last dimension of 2 is for (x, y) or 3 for (x, y, z)." + } + ], + "attributes": [ + { + "name": "mode", + "type": "string", + "default": "linear", + "description": "Interpolation mode, linear or nearest." + }, + { + "name": "padding_mode", + "type": "string", + "default": "zero", + "description": "Padding mode when the grid value is outside [-1, 1]. If this is \"zero\", 0 is used for padding. \"reflect\" uses the values reflected at the ends of the original input data like the mirror. \"repeat\" used the values at the ends of the original input data." + }, + { + "name": "align_corners", + "type": "boolean", + "default": false, + "description": "The target grid normalized in [-1, 1] is scaled by `size - 1 / size` according to the respective spatial size (e.g., :math:`H` and :math:`W`) before the transformation if this is `False`. If this is `True`, the top-left and bottom-right pixels correspond to (-1, -1) and (1, 1) respectively." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output data warped by the grid." + } + ] + }, + { + "name": "WarpByFlow", + "description": "Transform the image(s) *data* by *flow* field(s) of offset vectors such\nthat each output pixel corresponds to the input image pixel at the\nrelative offset location given by horizontal and vertical flow values\n(in other words, the flow field describes the coordinate displacements\nfor each output pixel to the corresponding input pixel). Both *data* and\n*flow* are 4-D variables (in \"NCHW\" layout) with identical shape except\nthe *flow* channel dimension (which is always 2).\n\n.. math::\n output_{n,c,y,x} = data_{n,c,y',x'},\n\nwhere\n\n.. 
math::\n y' &=& y + flow_{n,1,y,x}, \\\\\n x' &=& x + flow_{n,0,y,x}.\n\nThe output pixel values at :math:`y'` and :math:`x'` locations are\nobtained by bilinear interpolating between the 4 closest pixels of the\ninput image. Pixel values outside of the input image are implicitly\npadded with the value of the closest boundary pixel.", + "inputs": [ + { + "name": "data", + "type": "nnabla.Variable", + "description": "Input image data with shape `(N, Channels, Height, Width)`." + }, + { + "name": "flow", + "type": "nnabla.Variable", + "description": "Flow field vectors with shape `(N, 2, Height, Width)`." + } + ], + "outputs": [ + { + "name": "warped_image", + "type": "nnabla.Variable", + "description": "Transformed image data with shape `(N, Channels, Height, Width)`." + } + ] + }, + { + "name": "BinarySigmoid", + "description": "Element-wise binary sigmoid function. In the forward pass, it computes\n\n.. math::\n f(x) = \\begin{cases}\n 1 & (x > 0) \\\\\n 0 & ({\\rm otherwise})\\end{cases},\n\nbut in the backward pass, a straight-through approximation of the gradient\nis used, i.e.,\n\n.. math::\n \\frac{\\partial f(x)}{\\partial x} =\n \\begin{cases}\n 0 & (|x| \\geq 1) \\\\\n \\frac{1}{2} & ({\\rm otherwise})\n \\end{cases}.\n\nReferences:\n\n * `Courbariaux, Matthieu, and Yoshua Bengio. Binarynet: Training deep\n neural networks with weights and activations constrained to+ 1 or-1.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input ." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output." + } + ] + }, + { + "name": "BinaryTanh", + "description": "Element-wise binary tanh function. In the forward pass, it computes\n\n.. math::\n f(x) = \\begin{cases}\n 1 & (x > 0) \\\\\n -1 & ({\\rm otherwise})\n \\end{cases},\n\nbut in the backward pass, a straight-through approximation of the gradient\nis used, i.e.,\n\n.. math::\n \\frac{\\partial f(x)}{\\partial x} =\n \\begin{cases}\n 0 & (|x| \\geq 1) \\\\\n 1 & ({\\rm otherwise}) \\end{cases}.\n\nReferences:\n\n * `Courbariaux, Matthieu, and Yoshua Bengio. Binarynet: Training deep\n neural networks with weights and activations constrained to+ 1 or-1.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input ." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output." + } + ] + }, + { + "name": "BinaryConnectAffine", + "description": "This function provides a BinaryConnect affine layer. It computes in\nthe forward pass\n\n.. math::\n\n y_j = \\sum_{i} sign(w_{j,i}) x_i,\n\ni.e., the weights :math:`w_{j,i}` are binarized to :math:`sign(w_{j,i})` and,\nhence, each weight is in :math:`\\{-1,\\,1\\}`. By this weight binarization, the\ninner product computations do not require any multiplications anymore as\nthey turn into additions/subtractions.\n\nThis function should be used together with\n:meth:`~nnabla.functions.batch_normalization`.\n\n.. note::\n\n 1) If you would like to share the binary weights between other\n layers, please use the standard, floating value weights (`weight`)\n and not the binary weights (`binary_weight`).\n\n 2) The weights and the binary weights become in sync only after a call to\n :meth:`~nnabla.Variable.forward`, and not after a call to\n :meth:`~nnabla.Variable.backward`. 
If you wish to store the parameters of\n the network, remember to call :meth:`~nnabla.Variable.forward`, once before\n doing so, otherwise the weights and the binary weights will not be in sync.\n\n 3) CPU and GPU implementations now use floating values for `binary_weight`,\n since this function is for simulation purposes.\n\nReferences:\n\n * `M. Courbariaux, Y. Bengio, and J.-P. David. BinaryConnect:\n Training Deep Neural Networks with binary weights during propagations.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input ." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": "Weight ." + }, + { + "name": "binary_weight", + "type": "nnabla.Variable", + "description": "Binarized weight ." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Dimensions up to base_axis is treated as sample dimension." + }, + { + "name": "quantize_zero_to", + "type": "float32", + "default": 1.0, + "description": "Input value at zero is quantized to this value." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output." + } + ] + }, + { + "name": "BinaryConnectConvolution", + "description": "This function provides a BinaryConnect convolution layer. It computes in\nthe forward pass\n\n.. math::\n\n y_{n, a, b} = \\sum_{m} \\sum_{i} \\sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j},\n\ni.e., the weights :math:`w_{n, m, i, j}` are binarized to\n:math:`sign(w_{n, m, i, j})` and, hence,\neach weight is in :math:`\\{-1,\\,1\\}`. By this weight binarization, the\ninner product computations do not require any multiplications anymore as\nthey turn into additions/subtractions.\n\nThis function should be used together with :meth:`~nnabla.functions.batch_normalization`.\n\nReference\n\n * `M. Courbariaux, Y. Bengio, and J.-P. David. BinaryConnect:\n Training Deep Neural Networks with binary weights during propagations.\n `_\n\n\n.. note::\n\n 1) If you would like to share the binary weights between other\n layers, please use the standard, floating value weights (`weight`)\n and not the binary weights (`binary_weight`).\n\n 2) The weights and the binary weights become in sync only after a call to\n :meth:`~nnabla.Variable.forward`, and not after a call to\n :meth:`~nnabla.Variable.backward`. If you wish to store the parameters of\n the network, remember to call :meth:`~nnabla.Variable.forward`, once before\n doing so, otherwise the weights and the binary weights will not be in sync.\n\n 3) CPU and GPU implementations now use floating values for `binary_weight`,\n since this function is for simulation purposes.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": "Weight." + }, + { + "name": "binary_weight", + "type": "nnabla.Variable", + "description": "Binarized weight." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Dimensions up to base_axis is treated as sample dimension." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * (len(x.shape) - (base_axis+1))", + "description": "Padding sizes for dimensions." 
+ }, + { + "name": "stride", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Stride sizes for dimensions." + }, + { + "name": "dilation", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Dilation sizes for dimensions." + }, + { + "name": "group", + "type": "int64", + "default": 1, + "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction." + }, + { + "name": "quantize_zero_to", + "type": "float32", + "default": 1.0, + "description": "Input value at zero is quantized to this value." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output" + } + ] + }, + { + "name": "BinaryWeightAffine", + "description": "This function provides a Binary Weight Network affine layer. It computes in\nthe forward pass\n\n.. math::\n\n y_j = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}} \\sum_{i} sign(w_{j,i}) x_i\n\ni.e., the weights :math:`w_{j,i}` are binarized to :math:`sign(w_{j,i})` and,\nhence, each weight is in :math:`\\{-1,\\,1\\}`. By this weight binarization, the\ninner product computations turn into additions/subtractions which are followed\nby multiplication with the scaling factor\n:math:`\\alpha_j = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}}`.\n\nReference\n\n * `Rastegari, Mohammad, et al. XNOR-Net: ImageNet Classification Using\n Binary Convolutional Neural Networks.\n `_\n\n.. note::\n\n 1) If you would like to share the binary weights with other layers, please\n use the standard, floating value weights (`weight`) and not the binary\n weights (`binary_weight`).\n\n 2) The weights and the binary weights become in sync only after a call to\n :meth:`~nnabla.Variable.forward`, and not after a call to\n :meth:`~nnabla.Variable.backward`. If you wish to store the parameters of\n the network, remember to call :meth:`~nnabla.Variable.forward`, once before\n doing so, otherwise the weights and the binary weights will not be in sync.\n\n 3) CPU and GPU implementations now use floating values for `binary_weight`,\n since this function is for simulation purposes.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input ." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": "Weight." + }, + { + "name": "binary_weight", + "type": "nnabla.Variable", + "description": "Binarized weight." + }, + { + "name": "alpha", + "type": "nnabla.Variable", + "description": "Alpha." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Dimensions up to base_axis is treated as sample dimension." + }, + { + "name": "quantize_zero_to", + "type": "float32", + "default": 1.0, + "description": "Input value at zero is quantized to this value." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output." + } + ] + }, + { + "name": "BinaryWeightConvolution", + "description": "This function provides a Binary Weight Network convolution layer. It computes in\nthe forward pass\n\n.. 
math::\n\n y_{n, a, b} = \\frac{1}{\\|\\mathbf{w}_n\\|_{\\ell_1}} \\sum_{m} \\sum_{i} \\sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j},\n\ni.e., the weights :math:`w_{n, m, i, j}` are binarized to\n:math:`sign(w_{n, m, i, j})` and, hence, each weight is in :math:`\\{-1,\\,1\\}`.\nBy this weight binarization, the inner product computations turn into\nadditions/subtractions which are followed by multiplication with the scaling\nfactor :math:`\\alpha_n = \\frac{1}{\\|\\mathbf{w}_n\\|_{\\ell_1}}`.\n\nReference\n\n * `Rastegari, Mohammad, et al. XNOR-Net: ImageNet Classification Using\n Binary Convolutional Neural Networks.\n `_\n\n.. note::\n\n 1) If you would like to share the binary weights between other standard layers, please\n use the standard, floating value weights (`weight`)\n and not the binary weights (`binary_weight`).\n\n 2) The weights and the binary weights become in sync only after a call to\n :meth:`~nnabla.Variable.forward`, and not after a call to\n :meth:`~nnabla.Variable.backward`. If you wish to store the parameters of\n the network, remember to call :meth:`~nnabla.Variable.forward`, once\n before doing so, otherwise the weights and the binary weights will not be\n in sync.\n\n 3) CPU and GPU implementations now use floating values for `binary_weight`,\n since this function is for simulation purposes.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": "Weight." + }, + { + "name": "binary_weight", + "type": "nnabla.Variable", + "description": "Binarized weight." + }, + { + "name": "alpha", + "type": "nnabla.Variable", + "description": "Alpha." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Dimensions up to base_axis is treated as sample dimension." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * (len(x.shape) - (base_axis+1))", + "description": "Padding sizes for dimensions." + }, + { + "name": "stride", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Stride sizes for dimensions." + }, + { + "name": "dilation", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Dilation sizes for dimensions." + }, + { + "name": "group", + "type": "int64", + "default": 1, + "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction." + }, + { + "name": "quantize_zero_to", + "type": "float32", + "default": 1.0, + "description": "Input value at zero is quantized to this value." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output" + } + ] + }, + { + "name": "INQAffine", + "description": "This function provides an INQ affine layer. It computes in\nthe forward pass\n\n.. math::\n\n y_j = \\sum_{i} w_{j,i} x_i,\n\nwhere the weights :math:`w_{j,i}` are quantized sequentially during\ntraining to power-of-two numbers. In the backward pass, only the non-fixed\n(i.e., learnable) weights are updated.\n\nReferences:\n\n * `Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:\n Towards lossless CNNs with low-precision weights.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input."
+ }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": "Weight ." + }, + { + "name": "indicator_fixedweights", + "type": "nnabla.Variable", + "description": "Indicates which weights are already fixed (0 = not fixed, 1 = fixed) ." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Dimensions up to base_axis is treated as sample dimension." + }, + { + "name": "num_bits", + "type": "int64", + "default": 4, + "description": "Number of bits per weight. Needs to be >= 2 as two bits are used to code `zero` and sign of weight." + }, + { + "name": "inq_iterations", + "type": "int64[]", + "default": "()", + "description": "List which specifies after how many forward passes we fix 50% of the learnable weights. If we have done as many iterations as specified in the last element of `inq_iterations`, then all weights are fixed." + }, + { + "name": "selection_algorithm", + "type": "string", + "default": "largest_abs", + "description": "Chooses algorithm that we use for selecting the weights to fix (\"largest_abs\" ... fix weights with largest absolute value, \"random\" ... fix weights randomly)" + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output." + } + ] + }, + { + "name": "INQConvolution", + "description": "This function provides a INQ convolution layer. It computes in\nthe forward pass\n\n.. math::\n\n y_{n, a, b} = \\sum_{m} \\sum_{i} \\sum_{j} w_{n, m, i, j} x_{m, a + i, b + j},\n\nwhere the weights :math:`w_{j,i}` are quantized sequentially during\ntraining to power-of-two numbers. In the backward pass, only the non-fixed\n(i.e., learnable) weights are updated.\n\nReference\n\n * `Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:\n Towards lossless CNNs with low-precision weights.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input." + }, + { + "name": "weight", + "type": "nnabla.Variable", + "description": "Weight." + }, + { + "name": "indicator_fixedweights", + "type": "nnabla.Variable", + "description": "Indicates which weights are already fixed (0 = not fixed, 1 = fixed) ." + }, + { + "name": "bias", + "type": "nnabla.Variable", + "option": "optional", + "description": "Bias." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Dimensions up to base_axis is treated as sample dimension." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * (len(x.shape) - (base_axis+1))", + "description": "Padding sizes for dimensions." + }, + { + "name": "stride", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Stride sizes for dimensions." + }, + { + "name": "dilation", + "type": "shape", + "default": "(1,) * (len(x.shape) - (base_axis+1))", + "description": "Dilation sizes for dimensions." + }, + { + "name": "group", + "type": "int64", + "default": 1, + "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction." + }, + { + "name": "num_bits", + "type": "int64", + "default": 4, + "description": "Number of bits per weight. 
Needs to be >= 2 as two bits are used to code `zero` and sign of weight." + }, + { + "name": "inq_iterations", + "type": "int64[]", + "default": "()", + "description": "List which specifies after how many forward passes we fix 50% of the learnable weights. If we have done as many iterations as specified in the last element of `inq_iterations`, then all weights are fixed." + }, + { + "name": "selection_algorithm", + "type": "string", + "default": "largest_abs", + "description": "Chooses algorithm that we use for selecting the weights to fix (\"largest_abs\" ... fix weights with largest absolute value, \"random\" ... fix weights randomly)" + }, + { + "name": "seed", + "type": "int64", + "default": -1, + "description": "Random seed. When -1, seed is sampled from global random number generator." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output" + } + ] + }, + { + "name": "FixedPointQuantize", + "description": "This function simulates to uniformly quantize values in fixed-point number representation.\n\nIn the forward pass,\n\n.. math::\n\n q_i= \\left\\{\n\t \\begin{array}{ll}\n\t\t\tmax & if \\ \\ \\ x_i > max \\\\\n\t\t sign(x_i) \\times floor(|x_i| \\delta^{-1} + 2^{-1}) \\times \\delta & if \\ \\ min \\le x_i \\le max \\\\\n\t \tmin & if \\ \\ x_i < min \\\\\n\t \\end{array} \\right.,\n\nwhere :math:`\\delta` is the step size,\n:math:`(min, max) :=(- (2^{n-1} - 1)\\delta, (2^{n-1} - 1)\\delta)` if :math:`sign` is true,\n:math:`(min, max) := (0, (2^n - 1) \\delta)` otherwise, and\n:math:`n` is the total bit-width used.\n\nIn the backward pass when using `ste_fine_grained` as false,\n\n.. math::\n\n \\frac{\\partial q_i}{\\partial x_i} = 1.\n\nIn the backward pass when using `ste_fine_grained` as true,\n\n.. math::\n\n \\frac{\\partial q_i}{\\partial x_i}= \\left\\{\n\t \\begin{array}{ll}\n\t\t\t0 & if \\ \\ \\ x_i > max \\\\\n\t\t 1 & if \\ \\ min \\le x_i \\le max \\\\\n\t \t0 & if \\ \\ x_i < min \\\\\n\t \\end{array} \\right..\n\n.. note::\n\n\n\tQuantized values are stored as floating point number, since this function is for simulation purposes.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "sign", + "type": "boolean", + "default": true, + "description": "Indicate the signed number or the unsigned number. Default is true." + }, + { + "name": "n", + "type": "int64", + "default": 8, + "description": "Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case." + }, + { + "name": "delta", + "type": "float32", + "default": 0.0625, + "description": "Step size." + }, + { + "name": "ste_fine_grained", + "type": "boolean", + "default": true, + "description": "Straight Through Estimator is fine-grained or not." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ] + }, + { + "name": "MinMaxQuantize", + "description": "This function simulates to uniformly quantize values in the range of min and max quantization levels.\n\nMin-max quantization is defined as the following equation\n\n.. math::\n\n y = round \\left(\\frac{\\min(\\max(x, m), M) - m}{scale} \\right) \\times scale + m,\n\nwhere the :math:`scale` is defined as\n\n.. math::\n\n scale = \\frac{M - m}{M_q - m_q},\n\nand\n\n.. math::\n\n m_q = ql_{min}, \\\\\n M_q = ql_{max}, \\\\\n m = qr_{min}, \\\\\n M = qr_{max}.\n\nIn the backward pass when using `ste_fine_grained` as false,\n\n .. 
math::\n\n \\frac{\\partial q_i}{\\partial x_i} = 1.\n\n\nIn the backward pass when using `ste_fine_grained` as true,\n\n .. math::\n\n \\frac{\\partial q_i}{\\partial x_i}= \\left\\{\n \\begin{array}{ll}\n 0 & if \\ \\ \\ x_i > M \\\\\n 1 & if \\ \\ m \\le x_i \\le M \\\\\n 0 & if \\ \\ x_i < m \\\\\n \\end{array} \\right..\n\n:math:`qr_{min}` and :math:`qr_{max}` are treated as follows.\n\n * `x_min_max` is `True` and `ema` is `True`:\n Exponential moving averages are computed for each :math:`min(x)` and :math:`max(x)`\n then stored in :math:`qr_{min}` and :math:`qr_{max}`.\n * `x_min_max` is `True` and `ema` is `False`:\n :math:`min(x)` and :math:`max(x)` are computed then stored in :math:`qr_{min}` and :math:`qr_{max}`.\n * `x_min_max` is `False` and `ema` is `True`:\n Exponential moving averages stored in :math:`qr_{min}` and :math:`qr_{max}` are used.\n * `x_min_max` is `False` and `ema` is `False`:\n Gradients of :math:`qr_{min}` and :math:`qr_{max}` are computed in the backward pass.\n\nMore precisely, in inference of the min-max quantization, one has to consider *zero-point (zp)*\nwhich corresponds\nto the real value 0, and its data type is an integer. *zero-point* is defined as\n\n .. math::\n\n && zp_f = ql_{min} -\\frac{qr_{min}}{scale}, \\\\\n && zp = \\left\\{\n \\begin{array}{ll}\n ql_{max} & if \\ \\ \\ zp_f >= ql_{max} \\\\\n round(zp_f) & if \\ \\ otherwise \\\\\n ql_{min} & if \\ \\ zp_f <= ql_{min} \\\\\n \\end{array} \\right..\n\nAccordingly, in order to simulate quantization effect of *zero-point*,\nduring both forward and backward pass, :math:`qr_{min}` and :math:`qr_{max}` are adjusted as follows,\n\n .. math::\n\n qr_{min}^{adj} = ql_{min} - zp * scale, \\\\\n qr_{max}^{adj} = ql_{max} - zp * scale.\n\nThese operations are often called *nudge*.\n\nFinally, in the formulas of the min-max quantization, :math:`m` and :math:`M` are replaced by\n:math:`qr_{min}^{adj}` and :math:`qr_{max}^{adj}` respectively.\n\n.. note::\n\n\tQuantized values are stored as floating point number, since this function is for simulation purposes.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array input." + }, + { + "name": "qr_min", + "type": "nnabla.Variable", + "description": "Minimum value for the quantization range, modified during forward execution when x_min_max is True." + }, + { + "name": "qr_max", + "type": "nnabla.Variable", + "description": "Maximum value for the quantization range, modified during forward execution when x_min_max is True." + }, + { + "name": "ql_min", + "type": "nnabla.Variable", + "description": "Minimum value for the quantization level, typically 0." + }, + { + "name": "ql_max", + "type": "nnabla.Variable", + "description": "Maximum value for the quantization level, typically 255." + } + ], + "attributes": [ + { + "name": "decay", + "type": "float32", + "default": 0.999, + "description": "Decay rate for the exponential moving average." + }, + { + "name": "x_min_max", + "type": "boolean", + "default": false, + "description": "Use the min and max of x to compute quantization ranges." + }, + { + "name": "ema", + "type": "boolean", + "default": false, + "description": "Use the exponential moving average for the min and max quantization ranges." + }, + { + "name": "ste_fine_grained", + "type": "boolean", + "default": true, + "description": "Straight Through Estimator is fine-grained or not."
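Working through the FixedPointQuantize formula above with signed 8-bit values and step size :math:`\delta = 2^{-4}`: the representable range is :math:`\pm(2^7 - 1)\,\delta = \pm 7.9375`, and values inside it round to the nearest multiple of :math:`\delta`. A sketch, assuming `F.fixed_point_quantize` from the Python bindings:

.. code-block:: python

    import numpy as np
    import nnabla as nn
    import nnabla.functions as F

    nn.set_auto_forward(True)

    x = nn.Variable.from_numpy_array(
        np.array([-10.0, -0.11, 0.07, 3.14], dtype=np.float32))
    y = F.fixed_point_quantize(x, sign=True, n=8, delta=0.0625)
    print(y.d)   # [-7.9375 -0.125   0.0625  3.125 ] (first value saturates)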
+ }, + { + "name": "eps", + "type": "float32", + "default": 0.01, + "description": "Epsilon, a small value; :math:`qr_{max} - qr_{min}` must be greater than this epsilon." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ] + }, + { + "name": "Pow2Quantize", + "description": "This function simulates quantization of values in a power-of-2 number representation;\nin other words, it is linear (uniform) quantization in the :math:`log_2` domain.\n\nIn the forward pass of the `signed` case,\n\n.. math::\n\n q_i= \\left\\{\n\t \\begin{array}{ll}\n\t\t\tmax_{+} & if \\ \\ \\overline{q_i} > max_{+} \\\\\n\t\t\t\\overline{q_i} & if \\ \\ min_{+} \\le \\overline{q_i} \\le max_{+} \\\\\n\t\t min_{+} & if \\ \\ 0 \\le \\overline{q_i} < min_{+} \\\\\n\t\t min_{-} & if \\ \\ min_{-} < \\overline{q_i} < 0 \\\\\n\t\t \\overline{q_i} & if \\ \\ max_{-} \\le \\overline{q_i} \\le min_{-}\\\\\n\t \tmax_{-} & if \\ \\ \\overline{q_i} < max_{-} \\\\\n\t \\end{array} \\right.,\n\nwhere\n\n.. math::\n\n && max_{+} = 2^{m}, min_{+} = 2^{m - (2^{n-1} - 1)},\\\\\n && max_{-} = -2^{m}, min_{-} = -2^{m - (2^{n-1} - 1)},\\\\\n && \\overline{q_i} = sign(x_i) \\times 2^{round(\\log_2 |x_i|)}.\n\nThis quantization uses the geometric mean between two power-of-two numbers\nas the quantization threshold.\n\nIn the forward pass of the `unsigned` case,\n\n.. math::\n\n q_i= \\left\\{\n\t \\begin{array}{ll}\n\t\t\tmax & if \\ \\ \\overline{q_i} > max \\\\\n\t\t\t\\overline{q_i} & if \\ \\ min \\le \\overline{q_i} \\le max \\\\\n\t\t min & if \\ \\ 0 < \\overline{q_i} < min \\\\\n\t \\end{array} \\right.,\n\nwhere\n\n.. math::\n\n && max = 2^{m}, min = 2^{m - (2^{n} - 1)},\\\\\n && \\overline{q_i} = 2^{int(\\log_2 |x_i|)}.\n\n\nWhen using `with_zero` as true, a pruning threshold is used to round an input to\n0 or :math:`min`. The pruning threshold is defined in this function as the following,\n\n.. math::\n\n pruning\\ threshold = min \\times 2^{-\\frac{1}{2}}.\n\nIf the absolute value of the input is less than this value, the input is rounded to 0; otherwise, to :math:`min`.\n\nIn the backward pass when using ste_fine_grained as false,\n\n.. math::\n\n \\frac{\\partial q_i}{\\partial x_i} = 1.\n\nIn the backward pass when using ste_fine_grained as true,\n\n.. math::\n\n \\frac{\\partial q_i}{\\partial x_i}= \\left\\{\n\t \\begin{array}{ll}\n\t\t\t0 & if \\ \\ \\overline{q_i} > max_{+} \\\\\n\t\t\t1 & if \\ \\ otherwise \\\\\n\t \t0 & if \\ \\ \\overline{q_i} < max_{-} \\\\\n\t \\end{array} \\right..\n\n\nSeveral works use pow2 quantization in their proposed methods.\n\nReferences:\n\n * `Miyashita Daisuke, Lee H. Edward, Murmann Boris.\n Convolutional Neural Networks using Logarithmic Data Representation.\n `_\n\n * `Aojun Zhou, Anbang Yao, Yiwen Guo, Lin Xu, Yurong Chen.\n Incremental Network Quantization: Towards Lossless CNNs with Low-precision Weights.\n `_\n\n.. note::\n\n\n\tQuantized values are stored as floating point numbers, since this function is for simulation purposes.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "sign", + "type": "boolean", + "default": true, + "description": "Indicates whether the number representation is signed or unsigned. Default is true." + }, + { + "name": "with_zero", + "type": "boolean", + "default": true, + "description": "Indicates whether zero is used as a quantized value. Default is true. Note that `zero` consumes one bit." 
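[Editor's note] A NumPy sketch of the `signed` forward pass above may help; the helper name and the epsilon guard for x = 0 are ours, and we follow the signed-case formula, whose round in the log2 domain switches between neighboring powers of two at their geometric mean. A sketch only, not the library implementation.

import numpy as np

def pow2_quantize_signed(x, n=8, m=1, with_zero=True):
    # Bounds from the formulas above: max = 2^m, min = 2^(m - (2^(n-1) - 1)).
    q_max = 2.0 ** m
    q_min = 2.0 ** (m - (2 ** (n - 1) - 1))
    # round(log2|x|) snaps to the nearer power of two; the switch-over point
    # between 2^k and 2^(k+1) is their geometric mean 2^(k + 1/2).
    mag = 2.0 ** np.round(np.log2(np.maximum(np.abs(x), 1e-38)))
    q = np.sign(x) * np.clip(mag, q_min, q_max)
    if with_zero:
        # Prune to zero below the threshold min * 2^(-1/2) defined above.
        q = np.where(np.abs(x) < q_min * 2.0 ** -0.5, 0.0, q)
    return q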
+ }, + { + "name": "n", + "type": "int64", + "default": 8, + "description": "Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in the `signed` case. Default is 8." + }, + { + "name": "m", + "type": "int64", + "default": 1, + "description": ":math:`2^m` is the upper bound of the dynamic range and :math:`-2^m` is the lower bound, :math:`m \\in \\mathcal{Z}`. Default is 1." + }, + { + "name": "ste_fine_grained", + "type": "boolean", + "default": true, + "description": "Whether the Straight Through Estimator is fine-grained or not." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ] + }, + { + "name": "Prune", + "description": "Prune the input according to the following equation,\n\n.. math::\n\n q_i = \\left \\{\n \\begin{array}{ll}\n 0 & abs(x_i) < threshold \\\\\n x_i & otherwise\n \\end{array}\n \\right.\n\nwhere :math:`threshold` is determined by `threshold = np.sort(np.abs(x))[int((x.size - 1) * rate)]`.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array" + } + ], + "attributes": [ + { + "name": "rate", + "type": "float32", + "default": 0.9, + "description": "Sparse rate, or pruning rate." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with the same shape as x" + } + ] + }, + { + "name": "QuantizeLinear", + "description": "Linearly quantize inputs with the scale and zero point.\n\n.. math::\n\n y = saturate(round(x / scale) + zero_point).\n\nThe :math:`saturate` range is determined by `dtype`, and the :math:`round` mode is selected\nby `round_mode`. :math:`zero_point` is constrained by the `dtype` range and its values are\nrounded by `round_mode`.\n\nThis function aligns with ONNX.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input N-D array." + }, + { + "name": "scale", + "type": "nnabla.Variable", + "description": "Scale N-D array. The values must be positive numbers." + }, + { + "name": "zero_point", + "type": "nnabla.Variable", + "description": "Zero point N-D array." + } + ], + "attributes": [ + { + "name": "round_mode", + "type": "string", + "default": "HALF_AWAY_FROM_ZERO", + "description": "Rounding mode. HALF_AWAY_FROM_ZERO or HALF_TO_EVEN." + }, + { + "name": "narrow_range", + "type": "boolean", + "default": false, + "description": "If true, this function does not use the minimum quantized value. For example, if `dtype` is int8 (the range is in [-128, 127]), the output range is corrected to [-127, 127]." + }, + { + "name": "dtype", + "type": "int64", + "default": 1, + "description": "Data type for the output. The int value is compatible with the enum type for the data type defined in `the numpy `_." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output N-D array." + } + ] + }, + { + "name": "DequantizeLinear", + "description": "Linearly dequantize inputs with the scale and zero point.\n\n.. math::\n\n y = (x - zero_point) * scale.\n\n:math:`zero_point` is constrained by the `dtype` range.\n\nThis function aligns with ONNX.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input N-D array." + }, + { + "name": "scale", + "type": "nnabla.Variable", + "description": "Scale N-D array. The values must be positive numbers. This should be the same as the one used in QuantizeLinear." + }, + { + "name": "zero_point", + "type": "nnabla.Variable", + "description": "Zero point N-D array. 
This should be the same as the one used in QuantizeLinear." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Output N-D array." + } + ] + }, + { + "name": "TopNError", + "description": "Top N error along the dimension specified by the axis. Each element of the output is\n\n.. math::\n\n y_i = \\left \\{\n \\begin{array}{l}\n 1 \\ (x_i \\ is \\ not \\ within \\ N-th \\ place) \\\\\n 0 \\ (x_i \\ is \\ within \\ N-th \\ place)\n \\end{array}\n \\right.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Probabilities N-D array. :math:`D_1 \\times ... \\times D_i \\times ... \\times D_N`" + }, + { + "name": "target", + "type": "nnabla.Variable", + "description": "N-D array of labels. :math:`D_1 \\times ... \\times 1 \\times ... \\times D_N`" + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": "len(x.shape) - 1", + "description": "Axis on which the top N error is calculated." + }, + { + "name": "n", + "type": "int64", + "default": 1, + "description": "top N" + } + ], + "outputs": [ + { + "name": "output", + "type": "nnabla.Variable", + "description": "Element-wise error N-D array. (:math:`D_1 \\times ... \\times 1 \\times ... \\times D_N`)" + } + ] + }, + { + "name": "BinaryError", + "description": "Elementwise binary error.\n\n.. math::\n y_i = \\left \\{\n \\begin{array}{l}\n 0 ((x^{(0)} \\geq 0.5) = (x^{(1)} \\geq 0.5)) \\\\\n 1 ((x^{(0)} \\geq 0.5) \\neq (x^{(1)} \\geq 0.5))\n \\end{array}\n \\right.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Probabilities N-D array. :math:`-\\infty` to :math:`\\infty`." + }, + { + "name": "target", + "type": "nnabla.Variable", + "description": "Labels N-D array. Usually set as 0 or 1, but it also allows probabilities (0 to 1) as inputs." + } + ], + "outputs": [ + { + "name": "output", + "type": "nnabla.Variable", + "description": "Element-wise errors N-D array." + } + ] + }, + { + "name": "ConfusionMatrix", + "description": "Confusion matrix.\nThe return value is already summed over samples.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "Probabilities N-D array. (:math:`D_1 \\times ... \\times D_i \\times ... \\times D_N`)" + }, + { + "name": "target", + "type": "nnabla.Variable", + "description": "Labels N-D array. (:math:`D_1 \\times ... \\times 1 \\times ... \\times D_N`)" + } + ], + "attributes": [ + { + "name": "axis", + "type": "int64", + "default": "len(x.shape) - 1", + "description": "Axis on which the confusion matrix is calculated." + } + ], + "outputs": [ + { + "name": "output", + "type": "nnabla.Variable", + "description": "Confusion matrix 2-D array. The column index is the estimated class. The row index is the label class." + } + ] + }, + { + "name": "VATNoise", + "description": "Noise for virtual adversarial training.\n\nThis layer is a special layer for GUI network design, specialized for getting\nthe noise of virtual adversarial training.\n\nIn the backward process, the weight parameter will be replaced with the gradient.\n\nForward\n\n.. math::\n y_i = \\frac{\\epsilon x_i}{\\sqrt{\\sum_k x_k^2 + c}}\n\nBackward\n\n.. math::\n \\delta x_i = 0\n\n.. math::\n w_i = \\epsilon \\delta y_i\n\nNote:\n This layer is a special layer for GUI network design.\n\nReferences:\n * `Miyato et al., Distributional Smoothing with Virtual Adversarial Training.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array of noise input. 
Initially the noise is standard Gaussian noise; in subsequent steps, the fed-back gradient variable is used." + }, + { + "name": "w", + "type": "nnabla.Variable", + "description": "N-D array for keeping gradient values." + } + ], + "attributes": [ + { + "name": "base_axis", + "type": "int64", + "default": 1, + "description": "Dimensions up to base_axis are treated as sample dimensions." + }, + { + "name": "eps", + "type": "float32", + "default": 1.0, + "description": "Noise norm (l2) factor." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array" + } + ] + }, + { + "name": "Unlink", + "description": "This function behaves as an identity function on the forward pass,\nand deletes the gradient on the backward pass.\n\nThis layer is a special layer for GUI network design, used for getting\na zero backward operation by adding this layer.\n\nForward\n\n.. math::\n y_i = x_i\n\nBackward\n\n.. math::\n \\delta x_i = 0\n\nNote:\n This layer is a special layer for GUI network design.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "N-D array." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array." + } + ] + }, + { + "name": "Sink", + "description": "Creates a dummy variable used to call the forward or backward functions\nof multiple variables in one place.\n\nThis takes any number of input variables with any shape,\nand creates a single 0-shape output.\nThe forward pass does nothing. The backward pass sets ones\nto the input grads if one_input_grad is set as true.\n\nNote:\n ``sink`` can only be called at the very end of the graph, and\n ``grad`` of input variables is cleared\n when ``y.backward(clear_buffer=True)`` is called.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "list": true, + "description": "Any number of inputs with any shape." + } + ], + "attributes": [ + { + "name": "one_input_grad", + "type": "boolean", + "default": true, + "description": "Set grads of inputs as one during backward. It is useful to set this to false if you want to set external gradients to the input variables." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "Dummy variable." + } + ] + }, + { + "name": "NmsDetection2d", + "description": "Non-Maximum Suppression (NMS) applied to 2D object detector output.\nThe input is a 3-dimensional tensor with shape of ``(B, N, 5 + C)``\nwhere ``B`` denotes batch size, ``N`` denotes the number of detection box\ncandidates, and ``C`` denotes the number of classes of object detection.\n``5 + C`` consists of the box coordinates ``x, y, w, h`` in normalized\ncoordinates (the size of each of x and y is 1.0), objectness\n(learned to predict the IoU value to the ground truth box), and the class probabilities of ``C`` classes.\nIt outputs a tensor with the same dimensions as the input, where all\nvalues are copied from the input to the output, except the class\nprobabilities are multiplied by objectness, and possibly suppressed to 0\nby NMS.\nDuring NMS, all combinations of pairs of bounding boxes are compared.\nFor each pair, the bounding box with the lower detection score\n(described below) is suppressed if the overlap ratio (the IoU)\nis greater than the value of ``nms``.\n\nThere are two suppression modes for NMS.\n\n1. 
Suppress by class probability (``nms_per_class`` is ``True``):\nFor each bounding box, the detection score is calculated by\n``objectness * probability[class_id]`` for each class.\nThe suppression is done for each class independently.\n\n2. Suppress by objectness (``nms_per_class`` is ``False``):\nThe suppression is done for each bounding box using ``objectness``\nas a detection score. All class probabilities become 0 for\nevery suppressed box.\n\nReferences:\n * `Joseph Redmon, Ali Farhadi, YOLO9000: Better, Faster, Stronger.\n `_", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "A 3-dimensional array." + } + ], + "attributes": [ + { + "name": "thresh", + "type": "float32", + "default": 0.5, + "description": "Detection score threshold." + }, + { + "name": "nms", + "type": "float32", + "default": 0.45, + "description": "IoU threshold for Non-maximum suppression (NMS)." + }, + { + "name": "nms_per_class", + "type": "boolean", + "default": true, + "description": "If true, NMS is applied for each class." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "A 3-dim array with the same dimensions as the input." + } + ] + }, + { + "name": "ONNXNonMaxSuppression", + "description": "Non-Maximum Suppression (NMS) applied to 2D object detector output. This function\nprovides an ONNX-compatible interface to Non-Maximum Suppression.\nThe first input is a 3-dimensional bounding box tensor with shape of\n``(B, N, 4)`` where ``B`` denotes batch size and ``N`` denotes the \nnumber of detection box candidates.\n``4`` consists of the box coordinates ``y1, x1, y2, x2`` in normalized\ncoordinates (the size of each of x and y is 1.0).\nThe second input is a 3-dimensional score tensor with shape of\n``(B, C, N)`` where ``C`` denotes the number of classes of object\ndetection.\nIt outputs the indices of the selected boxes as a tensor with shape of\n``(M, 3)`` where ``M`` denotes the number of the selected boxes.\n``3`` consists of the 3-dimensional indices\n``batch_index, class_index, box_index``.\n\nReferences:\n * `Joseph Redmon, Ali Farhadi, YOLO9000: Better, Faster, Stronger.\n `_\n * `ONNX Operators documentation.\n `", + "inputs": [ + { + "name": "boxes", + "type": "nnabla.Variable", + "description": "A 3-dimensional array." + }, + { + "name": "scores", + "type": "nnabla.Variable", + "description": "A 3-dimensional array." + } + ], + "attributes": [ + { + "name": "center_point_box", + "type": "int64", + "default": 0, + "description": "Bounding box format (0 or 1)." + }, + { + "name": "max_output_boxes_per_class", + "type": "int64", + "default": 0, + "description": "The maximum number of boxes selected per batch per class." + }, + { + "name": "iou_threshold", + "type": "float32", + "default": 0.0, + "description": "IoU threshold for Non-maximum suppression (NMS)." + }, + { + "name": "score_threshold", + "type": "float32", + "default": 0.0, + "description": "Detection score threshold." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "A 2-dimensional array." + } + ] + }, + { + "name": "MaxPoolingBackward", + "description": "Max pooling backward. This aims to support the n-th order gradients of \nthe max pooling. The documentation of this function must not be shown, and \nthe function must not be called on the end-user side.", + "inputs": [ + { + "name": "dy", + "type": "nnabla.Variable", + "description": "Input variable." + }, + { + "name": "x", + "type": "nnabla.Variable", + "description": "Input variable." 
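[Editor's note] The suppression rule spelled out above reduces to pairwise IoU tests taken in detection-score order. Below is a compact NumPy sketch of that inner loop for a single class; the helper names are ours, and the real operators additionally handle batching, the score threshold, and the two scoring modes.

import numpy as np

def iou(a, b):
    # Boxes are (x, y, w, h) with normalized center coordinates, as in the input layout.
    ax1, ay1 = a[0] - a[2] / 2, a[1] - a[3] / 2
    ax2, ay2 = a[0] + a[2] / 2, a[1] + a[3] / 2
    bx1, by1 = b[0] - b[2] / 2, b[1] - b[3] / 2
    bx2, by2 = b[0] + b[2] / 2, b[1] + b[3] / 2
    iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    ih = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = iw * ih
    return inter / (a[2] * a[3] + b[2] * b[3] - inter + 1e-12)

def nms_single_class(boxes, scores, nms_thresh=0.45):
    # Greedy pairwise suppression: the lower-scored box of any pair whose
    # IoU exceeds nms_thresh is suppressed, as described above.
    order = np.argsort(-scores)
    keep = np.ones(len(boxes), dtype=bool)
    for i, p in enumerate(order):
        if keep[p]:
            for q in order[i + 1:]:
                if keep[q] and iou(boxes[p], boxes[q]) > nms_thresh:
                    keep[q] = False
    return keep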
+ } + ], + "attributes": [ + { + "name": "kernel", + "required": true, + "type": "shape", + "description": "Kernel sizes for each spatial axis." + }, + { + "name": "stride", + "type": "shape", + "default": "kernel", + "description": "Subsampling factors for each spatial axis." + }, + { + "name": "ignore_border", + "type": "boolean", + "default": true, + "description": "If false, kernels covering borders are also considered for the output." + }, + { + "name": "pad", + "type": "shape", + "default": "(0,) * len(kernel)", + "description": "Border padding values for each spatial axis. Padding will be added both sides of the dimension." + }, + { + "name": "channel_last", + "type": "boolean", + "default": false, + "description": "If True, the last dimension is considered as channel dimension, a.k.a. NHWC order." + } + ], + "outputs": [ + { + "name": "dx", + "type": "nnabla.Variable", + "description": "Output" + } + ], + "category": "Pool" + }, + { + "name": "PatchCorrelation", + "description": "Multiplicative patch-wise comparision between inputs `x1` and `x2`, which\n must both be 4-dimensional NCHW (with `channel_last=False`) or NHWC (with\n `channel_last=True`) arrays (where *N* is the number of samples, *H* and\n *W* are the sample height and width and *C* is the number of channels).\n The function returns a 5-D array with shape :math:`(N, C_y, C_x, H_o, W_o)`\n where :math:`H_o, W_o` are determined by the possible patch locations within\n the, optionally padded, input image size and :math:`C_y, C_x` are determined\n by the optionally shifted patch positions.\n\n Mathmatically, the patch correlation is formulated as\n\n .. math::\n\n O(s_y, s_x, h_0, w_0) =\n \\sum_{c} \\sum_{k_h} \\sum_{k_w} I_1(c, h + k_h, w + k_w) \\times I_2(c, h + k_h + s_h, w + k_w + s_w),\n\n where :math:`I_1(c, h, w)` and :math:`I_2(c, h, w)` are the inputs at :math:`c`-th channel,\n :math:`h`-th height, and :math:`w`-th width, :math:`k_h, k_w` indices for the patch size\n and :math:`s_h, s_w` indices for the shifts.\n\n A single correlation value (per sample) is produced if the patch extends\n to the image dimensions and all other parameters use the default values.\n\n >>> import numpy as np, nnabla as nn, nnabla.functions as F\n >>> nn.set_auto_forward(True)\n >>> N, C, H, W = (1, 2, 3, 4)\n >>> x = nn.Variable.from_numpy_array(np.ones([N, C, H, W]))\n >>> F.patch_correlation(x, x, patch=(H, W)).d\n array([[[[[24.]]]]], dtype=float32)\n\n A patch that is smaller than the image size moves horizontal and vertical\n producing a value per position. The `patch_step` argument may be used to\n control the position increments.\n\n >>> F.patch_correlation(x, x, patch=(H-1, W-1)).d\n array([[[[[12., 12.],\n [12., 12.]]]]], dtype=float32)\n >>> F.patch_correlation(x, x, patch=(H-1, W-1), patch_step=(2, 1)).d\n array([[[[[12., 12.]]]]], dtype=float32)\n\n Multiple correlations may be performed at each position between the patch\n from `x1` and patches from `x2` at relative offsets striding the maximum\n vertical and horizontal distance given by the `shift` values at increments\n of `shift_step`. 
The shifted correlation values can be obtained\n from the second and third output dimensions for the vertical and horizontal\n shifts.\n\n >>> F.patch_correlation(x, x, (H, 1), shift=(0, 1)).shape\n (1, 1, 3, 1, 4)\n >>> F.patch_correlation(x, x, (H, 1), shift=(0, 1)).d\n array([[[[[0., 6., 6., 6.]],\n [[6., 6., 6., 6.]],\n [[6., 6., 6., 0.]]]]], dtype=float32)\n >>> F.patch_correlation(x, x, (H, 1), shift=(0, 1), shift_step=(1, 2)).d\n array([[[[[0., 6., 6., 6.]],\n [[6., 6., 6., 0.]]]]], dtype=float32)\n\n Padding with zero values may be applied individually to the top, bottom,\n left and right side of the input image.\n\n >>> F.patch_correlation(x, x, patch=(H, W), padding=(0, 1, W, W)).d\n array([[[[[ 0., 6., 12., 18., 24., 18., 12., 6., 0.],\n [ 0., 4., 8., 12., 16., 12., 8., 4., 0.]]]]], dtype=float32)\n\n This function may be used to implement the FlowNetC correlation layer.\n\n >>> N, C, H, W = (1, 256, 44, 60)\n >>> x1, x2 = nn.Variable((N, C, H, W)), nn.Variable((N, C, H, W))\n >>> F.patch_correlation(x1, x2, shift=20, shift_step=2).shape\n (1, 21, 21, 44, 60)\n\n References:\n\n * `Fischer et al., FlowNet: Learning Optical Flow with Convolutional\n Networks. `_", + "inputs": [ + { + "name": "x1", + "type": "nnabla.Variable", + "description": "Input N-D array with shape :math:`(N, H, W, C)`." + }, + { + "name": "x2", + "type": "nnabla.Variable", + "description": "Input N-D array with shape :math:`(N, H, W, C)`." + } + ], + "attributes": [ + { + "name": "patch", + "type": "shape", + "default": "(1, 1)", + "description": "A tuple with height and width of the correlation patch. A single integer expands to identical height and width." + }, + { + "name": "shift", + "type": "shape", + "default": "(0, 0)", + "description": "A tuple of maximum vertical and horizontal displacement of patches from `x2` that are correlated with a single patch from `x1`. A single integer expands to identical vertical and horizontal displacement." + }, + { + "name": "patch_step", + "type": "shape", + "default": "(1, 1)", + "description": "A tuple of vertical and horizontal increments for advancing the position of the correlation patch within the input image shape. A single integer expands to identical vertical and horizontal increments." + }, + { + "name": "shift_step", + "type": "shape", + "default": "(1, 1)", + "description": "A tuple of vertical and horizontal increments for advancing the relative offset position within the shift range. A single integer expands to identical vertical and horizontal increments." + }, + { + "name": "padding", + "type": "shape", + "default": "(0, 0, 0, 0)", + "description": "A tuple of top, bottom, left and right padding extent. A tuple of two values yields identical top/bottom and left/right padding from the first and second tuple value. A single integer expands to identical padding extent for all sides." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "N-D array with shape :math:`(N, C_y, C_x, H_o, W_o)`.\n\nThe spatial size of the output is calculated as\n\n.. math::\n\n H_o = \\frac{H + (top\\_pad + bottom\\_pad) - patch_v }{patch\\_step_v} + 1.\n\nThe channel size of the output is calculated as\n\n.. math::\n\n C_y = \\frac{2 \\times shift_v}{shift\\_step_v} + 1.\n\n:math:`W_o` and :math:`C_x` are calculated in the same way with the corresponding components." 
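[Editor's note] The output-shape formulas in this entry can be cross-checked against the FlowNetC example from the description; a short sketch (the helper name is ours):

# Shape arithmetic for PatchCorrelation, following the formulas above.
def patch_correlation_shape(H, W, patch=(1, 1), shift=(0, 0),
                            patch_step=(1, 1), shift_step=(1, 1),
                            padding=(0, 0, 0, 0)):
    top, bottom, left, right = padding
    H_o = (H + top + bottom - patch[0]) // patch_step[0] + 1
    W_o = (W + left + right - patch[1]) // patch_step[1] + 1
    C_y = 2 * shift[0] // shift_step[0] + 1
    C_x = 2 * shift[1] // shift_step[1] + 1
    return C_y, C_x, H_o, W_o

# Matches the FlowNetC doctest above: inputs of shape (1, 256, 44, 60)
# with shift=20 and shift_step=2 give an output of (1, 21, 21, 44, 60).
assert patch_correlation_shape(44, 60, shift=(20, 20), shift_step=(2, 2)) == (21, 21, 44, 60)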
+ } + ] + }, + { + "name": "Unique", + "description": "Find the unique elements of the input array.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "A N-D array." + } + ], + "attributes": [ + { + "name": "flatten", + "type": "boolean", + "default": true, + "description": "If True, unique values of the flattened input array are returned." + }, + { + "name": "axis", + "type": "int64", + "default": "None", + "description": "If flatten is False and axis is specified, unique slices along axis are returned." + }, + { + "name": "sorted", + "type": "boolean", + "default": true, + "description": "If True, unique values/slices sorted in ascending order are returned." + }, + { + "name": "with_index", + "type": "boolean", + "default": false, + "description": "If True, `indices` is returned." + }, + { + "name": "with_inverse", + "type": "boolean", + "default": false, + "description": "If True, `inverse_indices` is returned." + }, + { + "name": "with_counts", + "type": "boolean", + "default": false, + "description": "If True, `counts` is returned." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "A N-D array." + }, + { + "name": "indices", + "type": "nnabla.Variable", + "description": "A 1-D array. It contains the indices of the first occurrence in `x` of each element of `y`. If `flatten` is True, it contains indices into the flattened input array `x`. If `flatten` is False and `axis` is specified, it contains indices into the input array `x` along `axis`." + }, + { + "name": "inverse_indices", + "type": "nnabla.Variable", + "description": "A 1-D array. It contains, for each element of `x`, the index of the corresponding element in `y`. If `flatten` is True, it contains indices into the output array `y`. If `flatten` is False and `axis` is specified, it contains indices into the output array `y` along `axis`." + }, + { + "name": "counts", + "type": "nnabla.Variable", + "description": "A 1-D array. It contains the count of each element of `y` in the input array `x`." + } + ] + }, + { + "name": "EyeLike", + "description": "Generate a 2-D array with ones on the diagonal, specified by `k`, and zeros elsewhere.\nThe shape of the output array is the same as the input array.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "A 2-D array." + } + ], + "attributes": [ + { + "name": "k", + "type": "int64", + "default": 0, + "description": "Index of the diagonal. The default value 0 means the main diagonal, a positive value means an upper diagonal, and a negative value means a lower diagonal." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "A 2-D array." + } + ] + }, + { + "name": "Mod2", + "description": "Element-wise remainder function.\nThe behavior of this operator is determined by x0's dtype and the `fmod` argument:\n\n.. math::\n y_i = \\left\\{\n \\begin{array}{ll}\n \\text{numpy.fmod}(x_{0,i}, x_{1,i})\n & (x_{0} \\text{has a floating-point type or fmod is True})\\\\\n \\text{numpy.mod}(x_{0,i}, x_{1,i})\n & (\\text{otherwise})\n \\end{array} \\right..", + "inputs": [ + { + "name": "x0", + "type": "nnabla.Variable", + "description": "A N-D array." + }, + { + "name": "x1", + "type": "nnabla.Variable", + "description": "A N-D array." + } + ], + "attributes": [ + { + "name": "fmod", + "type": "boolean", + "default": false, + "description": "If True, this operator behaves like numpy.fmod, otherwise it behaves like numpy.mod." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "A N-D array." 
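[Editor's note] The four outputs of Unique above line up one-to-one with NumPy's unique in the flattened case; a quick check of the correspondence:

import numpy as np

x = np.array([2, 1, 1, 3, 2])
y, indices, inverse_indices, counts = np.unique(
    x, return_index=True, return_inverse=True, return_counts=True)
# y               -> [1 2 3]       sorted unique values
# indices         -> [1 0 3]       first occurrence of each y element in x
# inverse_indices -> [1 0 0 2 1]   x can be rebuilt as y[inverse_indices]
# counts          -> [2 2 1]       occurrences of each y element in x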
+ } + ] + }, + { + "name": "BitShift", + "description": "Element-wise bit shift function.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "description": "A N-D array. Its dtype must be one of the unsigned integer types." + }, + { + "name": "shift", + "type": "nnabla.Variable", + "description": "A N-D array. Its dtype is cast to x's dtype at run-time." + } + ], + "attributes": [ + { + "name": "direction", + "type": "string", + "default": "LEFT", + "description": "Direction of bit shift." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "A N-D array." + } + ] + }, + { + "name": "Einsum", + "description": "Evaluates the Einstein summation convention on the inputs.\nSee the numpy.einsum documentation for more information about the equation.", + "inputs": [ + { + "name": "x", + "type": "nnabla.Variable", + "list": true, + "description": "List of N-D arrays." + } + ], + "attributes": [ + { + "name": "equation", + "type": "string", + "default": "", + "description": "A string that follows the Einstein summation convention." + } + ], + "outputs": [ + { + "name": "y", + "type": "nnabla.Variable", + "description": "A N-D array." + } + ] + } +] \ No newline at end of file diff --git a/nnabla-proto.js b/nnabla-proto.js new file mode 100644 index 00000000000..7d2878ada6b --- /dev/null +++ b/nnabla-proto.js @@ -0,0 +1,12888 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('nnabla'); + +$root.nnabla = {}; + +$root.nnabla.Shape = class Shape { + + constructor() { + this.dim = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.Shape(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dim = reader.array(message.dim, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Shape(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dim": + reader.array(message.dim, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Communicator = class Communicator { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.Communicator(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Communicator(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Context = class Context { + + constructor() { + this.backends = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.Context(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backends.push(reader.string()); + break; + case 2: + message.array_class = reader.string(); + break; + case 3: + message.device_id = reader.string(); + break; + case 4: + message.backend = reader.string(); + break; + case 5: + message.compute_backend = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Context(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "backends": + reader.array(message.backends, () => reader.string()); + break; + case "array_class": + message.array_class = reader.string(); + break; + case "device_id": + message.device_id = reader.string(); + break; + case "backend": + message.backend = reader.string(); + break; + case "compute_backend": + message.compute_backend = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Context.prototype.array_class = ""; +$root.nnabla.Context.prototype.device_id = ""; +$root.nnabla.Context.prototype.backend = ""; +$root.nnabla.Context.prototype.compute_backend = ""; + +$root.nnabla.NNablaProtoBuf = class NNablaProtoBuf { + + constructor() { + this.network = []; + this.parameter = []; + this.dataset = []; + this.optimizer = []; + this.monitor = []; + this.executor = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.NNablaProtoBuf(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.string(); + break; + case 2: + message.global_config = $root.nnabla.GlobalConfig.decode(reader, reader.uint32()); + break; + case 10: + message.training_config = $root.nnabla.TrainingConfig.decode(reader, reader.uint32()); + break; + case 100: + message.network.push($root.nnabla.Network.decode(reader, reader.uint32())); + break; + case 200: + message.parameter.push($root.nnabla.Parameter.decode(reader, reader.uint32())); + break; + case 300: + message.dataset.push($root.nnabla.Dataset.decode(reader, reader.uint32())); + break; + case 400: + message.optimizer.push($root.nnabla.Optimizer.decode(reader, reader.uint32())); + break; + case 500: + message.monitor.push($root.nnabla.Monitor.decode(reader, reader.uint32())); + break; + case 600: + message.executor.push($root.nnabla.Executor.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.NNablaProtoBuf(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "version": + message.version = reader.string(); + break; + case "global_config": + message.global_config = $root.nnabla.GlobalConfig.decodeText(reader); + break; + case "training_config": + message.training_config = $root.nnabla.TrainingConfig.decodeText(reader); + break; + case "network": + message.network.push($root.nnabla.Network.decodeText(reader)); + break; + case "parameter": + message.parameter.push($root.nnabla.Parameter.decodeText(reader)); + break; + case "dataset": + message.dataset.push($root.nnabla.Dataset.decodeText(reader)); + break; + case "optimizer": + 
message.optimizer.push($root.nnabla.Optimizer.decodeText(reader)); + break; + case "monitor": + message.monitor.push($root.nnabla.Monitor.decodeText(reader)); + break; + case "executor": + message.executor.push($root.nnabla.Executor.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.NNablaProtoBuf.prototype.version = ""; +$root.nnabla.NNablaProtoBuf.prototype.global_config = null; +$root.nnabla.NNablaProtoBuf.prototype.training_config = null; + +$root.nnabla.GlobalConfig = class GlobalConfig { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.GlobalConfig(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.default_context = $root.nnabla.Context.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.GlobalConfig(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "default_context": + message.default_context = $root.nnabla.Context.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.GlobalConfig.prototype.default_context = null; + +$root.nnabla.TrainingConfig = class TrainingConfig { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.TrainingConfig(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.max_epoch = reader.int64(); + break; + case 2: + message.iter_per_epoch = reader.int64(); + break; + case 100: + message.save_best = reader.bool(); + break; + case 200: + message.monitor_interval = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.TrainingConfig(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "max_epoch": + message.max_epoch = reader.int64(); + break; + case "iter_per_epoch": + message.iter_per_epoch = reader.int64(); + break; + case "save_best": + message.save_best = reader.bool(); + break; + case "monitor_interval": + message.monitor_interval = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.TrainingConfig.prototype.max_epoch = protobuf.Int64.create(0); +$root.nnabla.TrainingConfig.prototype.iter_per_epoch = protobuf.Int64.create(0); +$root.nnabla.TrainingConfig.prototype.save_best = false; +$root.nnabla.TrainingConfig.prototype.monitor_interval = protobuf.Int64.create(0); + +$root.nnabla.Network = class Network { + + constructor() { + this.repeat_info = []; + this.variable = []; + this["function"] = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.Network(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 10: + message.batch_size = reader.int64(); + break; + case 11: + message.repeat_info.push($root.nnabla.RepeatInfo.decode(reader, reader.uint32())); + break; + case 100: + message.variable.push($root.nnabla.Variable.decode(reader, reader.uint32())); + break; + case 200: + message["function"].push($root.nnabla.Function.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Network(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "batch_size": + message.batch_size = reader.int64(); + break; + case "repeat_info": + message.repeat_info.push($root.nnabla.RepeatInfo.decodeText(reader)); + break; + case "variable": + message.variable.push($root.nnabla.Variable.decodeText(reader)); + break; + case "function": + message["function"].push($root.nnabla.Function.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Network.prototype.name = ""; +$root.nnabla.Network.prototype.batch_size = protobuf.Int64.create(0); + +$root.nnabla.RepeatInfo = class RepeatInfo { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RepeatInfo(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.times = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RepeatInfo(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "id": + message.id = reader.string(); + break; + case "times": + message.times = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RepeatInfo.prototype.id = ""; +$root.nnabla.RepeatInfo.prototype.times = protobuf.Int64.create(0); + +$root.nnabla.RepeatParameter = class RepeatParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RepeatParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.repeat_id = reader.string(); + break; + case 2: + message.times = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RepeatParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "repeat_id": + message.repeat_id = reader.string(); + break; + case "times": + message.times = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RepeatParameter.prototype.repeat_id = ""; +$root.nnabla.RepeatParameter.prototype.times = protobuf.Int64.create(0); + +$root.nnabla.RecurrentParameter = class RecurrentParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RecurrentParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.repeat_id = reader.string(); + break; + case 2: + message.length = reader.int64(); + break; + case 3: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RecurrentParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "repeat_id": + message.repeat_id = reader.string(); + break; + case "length": + message.length = reader.int64(); + break; + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RecurrentParameter.prototype.repeat_id = ""; +$root.nnabla.RecurrentParameter.prototype.length = protobuf.Int64.create(0); +$root.nnabla.RecurrentParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.Variable = class Variable { + + constructor() { + this.repeat_id = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.Variable(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.repeat_id.push(reader.string()); + break; + case 20: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 100: + message.initializer = $root.nnabla.Initializer.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Variable(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "repeat_id": + reader.array(message.repeat_id, () => reader.string()); + break; + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "initializer": + message.initializer = $root.nnabla.Initializer.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Variable.prototype.name = ""; +$root.nnabla.Variable.prototype.type = ""; +$root.nnabla.Variable.prototype.shape = null; +$root.nnabla.Variable.prototype.initializer = null; + +$root.nnabla.Initializer = class Initializer { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.Initializer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.type = reader.string(); + break; + case 10: + message.multiplier = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Initializer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.string(); + break; + case "multiplier": + message.multiplier = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Initializer.prototype.type = ""; +$root.nnabla.Initializer.prototype.multiplier = 0; + +$root.nnabla.Parameter = class Parameter { + + constructor() { + this.data = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.Parameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.variable_name = reader.string(); + break; + case 20: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 100: + message.data = reader.floats(message.data, tag); + break; + case 101: + message.need_grad = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Parameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "variable_name": + message.variable_name = reader.string(); + break; + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "data": + reader.array(message.data, () => reader.float()); + break; + case "need_grad": + message.need_grad = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Parameter.prototype.variable_name = ""; +$root.nnabla.Parameter.prototype.shape = null; +$root.nnabla.Parameter.prototype.need_grad = false; + +$root.nnabla.Dataset = class Dataset { + + constructor() { + this.variable = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.Dataset(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 10: + message.uri = reader.string(); + break; + case 20: + message.batch_size = reader.int64(); + break; + case 30: + message.cache_dir = reader.string(); + break; + case 31: + message.overwrite_cache = reader.bool(); + break; + case 32: + message.create_cache_explicitly = reader.bool(); + break; + case 100: + message.shuffle = reader.bool(); + break; + case 101: + message.no_image_normalization = reader.bool(); + break; + case 200: + message.variable.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Dataset(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "uri": + message.uri = reader.string(); + break; + case "batch_size": + message.batch_size = reader.int64(); + break; + case "cache_dir": + message.cache_dir = reader.string(); + break; + case "overwrite_cache": + message.overwrite_cache = reader.bool(); + break; + case "create_cache_explicitly": + message.create_cache_explicitly = reader.bool(); + break; + case "shuffle": + message.shuffle = reader.bool(); + break; + case "no_image_normalization": + message.no_image_normalization = reader.bool(); + break; + case "variable": + reader.array(message.variable, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Dataset.prototype.name = ""; +$root.nnabla.Dataset.prototype.type = ""; +$root.nnabla.Dataset.prototype.uri = ""; +$root.nnabla.Dataset.prototype.batch_size = protobuf.Int64.create(0); +$root.nnabla.Dataset.prototype.cache_dir = ""; +$root.nnabla.Dataset.prototype.overwrite_cache = false; +$root.nnabla.Dataset.prototype.create_cache_explicitly = 
false; +$root.nnabla.Dataset.prototype.shuffle = false; +$root.nnabla.Dataset.prototype.no_image_normalization = false; + +$root.nnabla.Optimizer = class Optimizer { + + constructor() { + this.dataset_name = []; + this.data_variable = []; + this.generator_variable = []; + this.loss_variable = []; + this.parameter_variable = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.Optimizer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 3: + message.order = reader.int64(); + break; + case 10: + message.network_name = reader.string(); + break; + case 20: + message.dataset_name.push(reader.string()); + break; + case 30: + message.solver = $root.nnabla.Solver.decode(reader, reader.uint32()); + break; + case 40: + message.update_interval = reader.int64(); + break; + case 50: + message.data_variable.push($root.nnabla.DataVariable.decode(reader, reader.uint32())); + break; + case 60: + message.generator_variable.push($root.nnabla.GeneratorVariable.decode(reader, reader.uint32())); + break; + case 70: + message.loss_variable.push($root.nnabla.LossVariable.decode(reader, reader.uint32())); + break; + case 80: + message.parameter_variable.push($root.nnabla.ParameterVariable.decode(reader, reader.uint32())); + break; + case 100: + message.start_iter = reader.int64(); + break; + case 101: + message.end_iter = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Optimizer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "order": + message.order = reader.int64(); + break; + case "network_name": + message.network_name = reader.string(); + break; + case "dataset_name": + reader.array(message.dataset_name, () => reader.string()); + break; + case "solver": + message.solver = $root.nnabla.Solver.decodeText(reader); + break; + case "update_interval": + message.update_interval = reader.int64(); + break; + case "data_variable": + message.data_variable.push($root.nnabla.DataVariable.decodeText(reader)); + break; + case "generator_variable": + message.generator_variable.push($root.nnabla.GeneratorVariable.decodeText(reader)); + break; + case "loss_variable": + message.loss_variable.push($root.nnabla.LossVariable.decodeText(reader)); + break; + case "parameter_variable": + message.parameter_variable.push($root.nnabla.ParameterVariable.decodeText(reader)); + break; + case "start_iter": + message.start_iter = reader.int64(); + break; + case "end_iter": + message.end_iter = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Optimizer.prototype.name = ""; +$root.nnabla.Optimizer.prototype.order = protobuf.Int64.create(0); +$root.nnabla.Optimizer.prototype.network_name = ""; +$root.nnabla.Optimizer.prototype.solver = null; +$root.nnabla.Optimizer.prototype.update_interval = protobuf.Int64.create(0); +$root.nnabla.Optimizer.prototype.start_iter = protobuf.Int64.create(0); +$root.nnabla.Optimizer.prototype.end_iter = protobuf.Int64.create(0); + +$root.nnabla.SolverStateParameter = class SolverStateParameter { + + constructor() { + this.data = []; + } + + static decode(reader, length) { + const message = new 
$root.nnabla.SolverStateParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 20: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 100: + message.data = reader.floats(message.data, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SolverStateParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "data": + reader.array(message.data, () => reader.float()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SolverStateParameter.prototype.shape = null; + +$root.nnabla.SolverState = class SolverState { + + constructor() { + this.state_parameter = {}; + } + + static decode(reader, length) { + const message = new $root.nnabla.SolverState(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.t = reader.uint32(); + break; + case 2: + reader.entry(message.state_parameter, () => reader.string(), () => $root.nnabla.SolverStateParameter.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SolverState(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "t": + message.t = reader.uint32(); + break; + case "state_parameter": + reader.entry(message.state_parameter, () => reader.string(), () => $root.nnabla.SolverStateParameter.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SolverState.prototype.t = 0; + +$root.nnabla.Solver = class Solver { + + constructor() { + this.states = {}; + } + + get parameter() { + $root.nnabla.Solver.parameterSet = $root.nnabla.Solver.parameterSet || new Set([ "sgd_param", "sgdw_param", "momentum_param", "lars_param", "nesterov_param", "adadelta_param", "adagrad_param", "adabelief_param", "rmsprop_param", "rmsprop_graves_param", "adam_param", "adamw_param", "adabound_param", "adamax_param", "amsgrad_param", "amsbound_param", "lamb_param", "lion_param"]); + return Object.keys(this).find((key) => $root.nnabla.Solver.parameterSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.nnabla.Solver(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.type = reader.string(); + break; + case 10: + message.context = $root.nnabla.Context.decode(reader, reader.uint32()); + break; + case 20: + message.weight_decay = reader.float(); + break; + case 40: + reader.entry(message.states, () => reader.string(), () => $root.nnabla.SolverState.decode(reader, reader.uint32())); + break; + case 100: + message.sgd_param = $root.nnabla.SgdParameter.decode(reader, reader.uint32()); + break; + case 101: + message.sgdw_param = $root.nnabla.SgdWParameter.decode(reader, reader.uint32()); + break; + case 102: + message.momentum_param = $root.nnabla.MomentumParameter.decode(reader, reader.uint32()); + break; + case 103: + message.lars_param = $root.nnabla.LarsParameter.decode(reader, reader.uint32()); + break; + case 104: + message.nesterov_param = $root.nnabla.NesterovParameter.decode(reader, reader.uint32()); + break; + case 105: + message.adadelta_param = $root.nnabla.AdadeltaParameter.decode(reader, reader.uint32()); + break; + case 106: + message.adagrad_param = $root.nnabla.AdagradParameter.decode(reader, reader.uint32()); + break; + case 107: + message.adabelief_param = $root.nnabla.AdaBeliefParameter.decode(reader, reader.uint32()); + break; + case 108: + message.rmsprop_param = $root.nnabla.RMSpropParameter.decode(reader, reader.uint32()); + break; + case 109: + message.rmsprop_graves_param = $root.nnabla.RMSpropGravesParameter.decode(reader, reader.uint32()); + break; + case 110: + message.adam_param = $root.nnabla.AdamParameter.decode(reader, reader.uint32()); + break; + case 111: + message.adamw_param = $root.nnabla.AdamWParameter.decode(reader, reader.uint32()); + break; + case 112: + message.adabound_param = $root.nnabla.AdaBoundParameter.decode(reader, reader.uint32()); + break; + case 113: + message.adamax_param = $root.nnabla.AdamaxParameter.decode(reader, reader.uint32()); + break; + case 114: + message.amsgrad_param = $root.nnabla.AMSGRADParameter.decode(reader, reader.uint32()); + break; + case 115: + message.amsbound_param = $root.nnabla.AMSBoundParameter.decode(reader, reader.uint32()); + break; + case 116: + message.lamb_param = $root.nnabla.LambParameter.decode(reader, reader.uint32()); + break; + case 117: + message.lion_param = $root.nnabla.LionParameter.decode(reader, reader.uint32()); + break; + case 200: + message.lr_scheduler_type = reader.string(); + break; + case 210: + message.polynomial_scheduler_param = $root.nnabla.PolynomialSchedulerParameter.decode(reader, reader.uint32()); + break; + case 211: + message.cosine_scheduler_param = $root.nnabla.CosineSchedulerParameter.decode(reader, reader.uint32()); + break; + case 212: + message.exponential_scheduler_param = $root.nnabla.ExponentialSchedulerParameter.decode(reader, reader.uint32()); + break; + case 213: + message.step_scheduler_param = $root.nnabla.StepSchedulerParameter.decode(reader, reader.uint32()); + break; + case 299: + message.custom_scheduler_param = $root.nnabla.CustomSchedulerParameter.decode(reader, reader.uint32()); + break; + case 300: + message.lr_warmup_scheduler_type = reader.string(); + break; + case 310: + message.linear_warmup_scheduler_param = $root.nnabla.LinearWarmupSchedulerParameter.decode(reader, reader.uint32()); + break; + case 30: + message.lr_decay = reader.float(); + break; + case 31: + message.lr_decay_interval = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + 
return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Solver(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.string(); + break; + case "context": + message.context = $root.nnabla.Context.decodeText(reader); + break; + case "weight_decay": + message.weight_decay = reader.float(); + break; + case "states": + reader.entry(message.states, () => reader.string(), () => $root.nnabla.SolverState.decodeText(reader)); + break; + case "sgd_param": + message.sgd_param = $root.nnabla.SgdParameter.decodeText(reader); + break; + case "sgdw_param": + message.sgdw_param = $root.nnabla.SgdWParameter.decodeText(reader); + break; + case "momentum_param": + message.momentum_param = $root.nnabla.MomentumParameter.decodeText(reader); + break; + case "lars_param": + message.lars_param = $root.nnabla.LarsParameter.decodeText(reader); + break; + case "nesterov_param": + message.nesterov_param = $root.nnabla.NesterovParameter.decodeText(reader); + break; + case "adadelta_param": + message.adadelta_param = $root.nnabla.AdadeltaParameter.decodeText(reader); + break; + case "adagrad_param": + message.adagrad_param = $root.nnabla.AdagradParameter.decodeText(reader); + break; + case "adabelief_param": + message.adabelief_param = $root.nnabla.AdaBeliefParameter.decodeText(reader); + break; + case "rmsprop_param": + message.rmsprop_param = $root.nnabla.RMSpropParameter.decodeText(reader); + break; + case "rmsprop_graves_param": + message.rmsprop_graves_param = $root.nnabla.RMSpropGravesParameter.decodeText(reader); + break; + case "adam_param": + message.adam_param = $root.nnabla.AdamParameter.decodeText(reader); + break; + case "adamw_param": + message.adamw_param = $root.nnabla.AdamWParameter.decodeText(reader); + break; + case "adabound_param": + message.adabound_param = $root.nnabla.AdaBoundParameter.decodeText(reader); + break; + case "adamax_param": + message.adamax_param = $root.nnabla.AdamaxParameter.decodeText(reader); + break; + case "amsgrad_param": + message.amsgrad_param = $root.nnabla.AMSGRADParameter.decodeText(reader); + break; + case "amsbound_param": + message.amsbound_param = $root.nnabla.AMSBoundParameter.decodeText(reader); + break; + case "lamb_param": + message.lamb_param = $root.nnabla.LambParameter.decodeText(reader); + break; + case "lion_param": + message.lion_param = $root.nnabla.LionParameter.decodeText(reader); + break; + case "lr_scheduler_type": + message.lr_scheduler_type = reader.string(); + break; + case "polynomial_scheduler_param": + message.polynomial_scheduler_param = $root.nnabla.PolynomialSchedulerParameter.decodeText(reader); + break; + case "cosine_scheduler_param": + message.cosine_scheduler_param = $root.nnabla.CosineSchedulerParameter.decodeText(reader); + break; + case "exponential_scheduler_param": + message.exponential_scheduler_param = $root.nnabla.ExponentialSchedulerParameter.decodeText(reader); + break; + case "step_scheduler_param": + message.step_scheduler_param = $root.nnabla.StepSchedulerParameter.decodeText(reader); + break; + case "custom_scheduler_param": + message.custom_scheduler_param = $root.nnabla.CustomSchedulerParameter.decodeText(reader); + break; + case "lr_warmup_scheduler_type": + message.lr_warmup_scheduler_type = reader.string(); + break; + case "linear_warmup_scheduler_param": + message.linear_warmup_scheduler_param = $root.nnabla.LinearWarmupSchedulerParameter.decodeText(reader); + break; + case "lr_decay": + message.lr_decay = 
reader.float(); + break; + case "lr_decay_interval": + message.lr_decay_interval = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Solver.prototype.type = ""; +$root.nnabla.Solver.prototype.context = null; +$root.nnabla.Solver.prototype.weight_decay = 0; +$root.nnabla.Solver.prototype.lr_scheduler_type = ""; +$root.nnabla.Solver.prototype.polynomial_scheduler_param = null; +$root.nnabla.Solver.prototype.cosine_scheduler_param = null; +$root.nnabla.Solver.prototype.exponential_scheduler_param = null; +$root.nnabla.Solver.prototype.step_scheduler_param = null; +$root.nnabla.Solver.prototype.custom_scheduler_param = null; +$root.nnabla.Solver.prototype.lr_warmup_scheduler_type = ""; +$root.nnabla.Solver.prototype.linear_warmup_scheduler_param = null; +$root.nnabla.Solver.prototype.lr_decay = 0; +$root.nnabla.Solver.prototype.lr_decay_interval = protobuf.Int64.create(0); + +$root.nnabla.SgdParameter = class SgdParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SgdParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lr = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SgdParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lr": + message.lr = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SgdParameter.prototype.lr = 0; + +$root.nnabla.SgdWParameter = class SgdWParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SgdWParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lr = reader.float(); + break; + case 2: + message.momentum = reader.float(); + break; + case 3: + message.wd = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SgdWParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lr": + message.lr = reader.float(); + break; + case "momentum": + message.momentum = reader.float(); + break; + case "wd": + message.wd = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SgdWParameter.prototype.lr = 0; +$root.nnabla.SgdWParameter.prototype.momentum = 0; +$root.nnabla.SgdWParameter.prototype.wd = 0; + +$root.nnabla.MomentumParameter = class MomentumParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.MomentumParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lr = reader.float(); + break; + case 2: + message.momentum = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MomentumParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lr": + message.lr = reader.float(); + break; + case "momentum": + message.momentum = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MomentumParameter.prototype.lr = 0; +$root.nnabla.MomentumParameter.prototype.momentum = 0; + +$root.nnabla.LarsParameter = class LarsParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LarsParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lr = reader.float(); + break; + case 2: + message.momentum = reader.float(); + break; + case 3: + message.coefficient = reader.float(); + break; + case 4: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LarsParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lr": + message.lr = reader.float(); + break; + case "momentum": + message.momentum = reader.float(); + break; + case "coefficient": + message.coefficient = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LarsParameter.prototype.lr = 0; +$root.nnabla.LarsParameter.prototype.momentum = 0; +$root.nnabla.LarsParameter.prototype.coefficient = 0; +$root.nnabla.LarsParameter.prototype.eps = 0; + +$root.nnabla.NesterovParameter = class NesterovParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.NesterovParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lr = reader.float(); + break; + case 2: + message.momentum = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.NesterovParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lr": + message.lr = reader.float(); + break; + case "momentum": + message.momentum = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.NesterovParameter.prototype.lr = 0; +$root.nnabla.NesterovParameter.prototype.momentum = 0; + +$root.nnabla.AdadeltaParameter = class AdadeltaParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AdadeltaParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lr = reader.float(); + break; + case 2: + message.decay = reader.float(); + break; + case 3: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AdadeltaParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lr": + message.lr = reader.float(); + break; + case "decay": + message.decay = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AdadeltaParameter.prototype.lr = 0; +$root.nnabla.AdadeltaParameter.prototype.decay = 0; +$root.nnabla.AdadeltaParameter.prototype.eps = 0; + +$root.nnabla.AdagradParameter = class AdagradParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AdagradParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lr = reader.float(); + break; + case 2: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AdagradParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lr": + message.lr = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AdagradParameter.prototype.lr = 0; +$root.nnabla.AdagradParameter.prototype.eps = 0; + +$root.nnabla.AdaBeliefParameter = class AdaBeliefParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AdaBeliefParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta1 = reader.float(); + break; + case 3: + message.beta2 = reader.float(); + break; + case 4: + message.eps = reader.float(); + break; + case 5: + message.wd = reader.float(); + break; + case 6: + message.amsgrad = reader.bool(); + break; + case 7: + message.weight_decouple = reader.bool(); + break; + case 8: + message.fixed_decay = reader.bool(); + break; + case 9: + message.rectify = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AdaBeliefParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta1": + message.beta1 = reader.float(); + break; + case "beta2": + message.beta2 = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + case "wd": + message.wd = reader.float(); + break; + case "amsgrad": + message.amsgrad = reader.bool(); + break; + case "weight_decouple": + message.weight_decouple = reader.bool(); + break; + case "fixed_decay": + message.fixed_decay = reader.bool(); + break; + case "rectify": + message.rectify = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AdaBeliefParameter.prototype.alpha = 0; +$root.nnabla.AdaBeliefParameter.prototype.beta1 = 0; +$root.nnabla.AdaBeliefParameter.prototype.beta2 = 0; +$root.nnabla.AdaBeliefParameter.prototype.eps = 0; +$root.nnabla.AdaBeliefParameter.prototype.wd = 0; +$root.nnabla.AdaBeliefParameter.prototype.amsgrad = false; +$root.nnabla.AdaBeliefParameter.prototype.weight_decouple = false; +$root.nnabla.AdaBeliefParameter.prototype.fixed_decay = false; +$root.nnabla.AdaBeliefParameter.prototype.rectify = false; + +$root.nnabla.RMSpropParameter = class RMSpropParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RMSpropParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lr = reader.float(); + break; + case 2: + message.decay = reader.float(); + break; + case 3: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RMSpropParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lr": + message.lr = reader.float(); + break; + case "decay": + message.decay = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RMSpropParameter.prototype.lr = 0; +$root.nnabla.RMSpropParameter.prototype.decay = 0; +$root.nnabla.RMSpropParameter.prototype.eps = 0; + +$root.nnabla.RMSpropGravesParameter = class RMSpropGravesParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RMSpropGravesParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lr = reader.float(); + break; + case 2: + message.decay = reader.float(); + break; + case 3: + message.momentum = reader.float(); + break; + case 4: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RMSpropGravesParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lr": + message.lr = reader.float(); + break; + case "decay": + message.decay = reader.float(); + break; + case "momentum": + message.momentum = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RMSpropGravesParameter.prototype.lr = 0; +$root.nnabla.RMSpropGravesParameter.prototype.decay = 0; +$root.nnabla.RMSpropGravesParameter.prototype.momentum = 0; +$root.nnabla.RMSpropGravesParameter.prototype.eps = 0; + +$root.nnabla.AdamParameter = class AdamParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AdamParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta1 = reader.float(); + break; + case 3: + message.beta2 = reader.float(); + break; + case 4: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AdamParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta1": + message.beta1 = reader.float(); + break; + case "beta2": + message.beta2 = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AdamParameter.prototype.alpha = 0; +$root.nnabla.AdamParameter.prototype.beta1 = 0; +$root.nnabla.AdamParameter.prototype.beta2 = 0; +$root.nnabla.AdamParameter.prototype.eps = 0; + +$root.nnabla.AdamWParameter = class AdamWParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AdamWParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta1 = reader.float(); + break; + case 3: + message.beta2 = reader.float(); + break; + case 4: + message.eps = reader.float(); + break; + case 5: + message.wd = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AdamWParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta1": + message.beta1 = reader.float(); + break; + case "beta2": + message.beta2 = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + case "wd": + message.wd = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AdamWParameter.prototype.alpha = 0; +$root.nnabla.AdamWParameter.prototype.beta1 = 0; +$root.nnabla.AdamWParameter.prototype.beta2 = 0; +$root.nnabla.AdamWParameter.prototype.eps = 0; +$root.nnabla.AdamWParameter.prototype.wd = 0; + +$root.nnabla.AdaBoundParameter = class AdaBoundParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AdaBoundParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta1 = reader.float(); + break; + case 3: + message.beta2 = reader.float(); + break; + case 4: + message.eps = reader.float(); + break; + case 5: + message.final_lr = reader.float(); + break; + case 6: + message.gamma = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AdaBoundParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta1": + message.beta1 = reader.float(); + break; + case "beta2": + message.beta2 = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + case "final_lr": + message.final_lr = reader.float(); + break; + case "gamma": + message.gamma = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AdaBoundParameter.prototype.alpha = 0; +$root.nnabla.AdaBoundParameter.prototype.beta1 = 0; +$root.nnabla.AdaBoundParameter.prototype.beta2 = 0; +$root.nnabla.AdaBoundParameter.prototype.eps = 0; +$root.nnabla.AdaBoundParameter.prototype.final_lr = 0; +$root.nnabla.AdaBoundParameter.prototype.gamma = 0; + +$root.nnabla.AdamaxParameter = class AdamaxParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AdamaxParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta1 = reader.float(); + break; + case 3: + message.beta2 = reader.float(); + break; + case 4: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AdamaxParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta1": + message.beta1 = reader.float(); + break; + case "beta2": + message.beta2 = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AdamaxParameter.prototype.alpha = 0; +$root.nnabla.AdamaxParameter.prototype.beta1 = 0; +$root.nnabla.AdamaxParameter.prototype.beta2 = 0; +$root.nnabla.AdamaxParameter.prototype.eps = 0; + +$root.nnabla.AMSGRADParameter = class AMSGRADParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AMSGRADParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta1 = reader.float(); + break; + case 3: + message.beta2 = reader.float(); + break; + case 4: + message.eps = reader.float(); + break; + case 5: + message.bias_correction = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AMSGRADParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta1": + message.beta1 = reader.float(); + break; + case "beta2": + message.beta2 = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + case "bias_correction": + message.bias_correction = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AMSGRADParameter.prototype.alpha = 0; +$root.nnabla.AMSGRADParameter.prototype.beta1 = 0; +$root.nnabla.AMSGRADParameter.prototype.beta2 = 0; +$root.nnabla.AMSGRADParameter.prototype.eps = 0; +$root.nnabla.AMSGRADParameter.prototype.bias_correction = false; + +$root.nnabla.AMSBoundParameter = class AMSBoundParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AMSBoundParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta1 = reader.float(); + break; + case 3: + message.beta2 = reader.float(); + break; + case 4: + message.eps = reader.float(); + break; + case 5: + message.final_lr = reader.float(); + break; + case 6: + message.gamma = reader.float(); + break; + case 7: + message.bias_correction = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AMSBoundParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta1": + message.beta1 = reader.float(); + break; + case "beta2": + message.beta2 = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + case "final_lr": + message.final_lr = reader.float(); + break; + case "gamma": + message.gamma = reader.float(); + break; + case "bias_correction": + message.bias_correction = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AMSBoundParameter.prototype.alpha = 0; +$root.nnabla.AMSBoundParameter.prototype.beta1 = 0; +$root.nnabla.AMSBoundParameter.prototype.beta2 = 0; +$root.nnabla.AMSBoundParameter.prototype.eps = 0; +$root.nnabla.AMSBoundParameter.prototype.final_lr = 0; +$root.nnabla.AMSBoundParameter.prototype.gamma = 0; +$root.nnabla.AMSBoundParameter.prototype.bias_correction = false; + +$root.nnabla.LambParameter = class LambParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LambParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.eta = reader.float(); + break; + case 2: + message.beta1 = reader.float(); + break; + case 3: + message.beta2 = reader.float(); + break; + case 4: + message.gamma_l = reader.float(); + break; + case 5: + message.gamma_u = reader.float(); + break; + case 6: + message.eps = reader.float(); + break; + case 7: + message.bias_correction = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LambParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "eta": + message.eta = reader.float(); + break; + case "beta1": + message.beta1 = reader.float(); + break; + case "beta2": + message.beta2 = reader.float(); + break; + case "gamma_l": + message.gamma_l = reader.float(); + break; + case "gamma_u": + message.gamma_u = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + case "bias_correction": + message.bias_correction = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LambParameter.prototype.eta = 0; +$root.nnabla.LambParameter.prototype.beta1 = 0; +$root.nnabla.LambParameter.prototype.beta2 = 0; +$root.nnabla.LambParameter.prototype.gamma_l = 0; +$root.nnabla.LambParameter.prototype.gamma_u = 0; +$root.nnabla.LambParameter.prototype.eps = 0; +$root.nnabla.LambParameter.prototype.bias_correction = false; + +$root.nnabla.LionParameter = class LionParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LionParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lr = reader.float(); + break; + case 2: + message.beta1 = reader.float(); + break; + case 3: + message.beta2 = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lr": + message.lr = reader.float(); + break; + case "beta1": + message.beta1 = reader.float(); + break; + case "beta2": + message.beta2 = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LionParameter.prototype.lr = 0; +$root.nnabla.LionParameter.prototype.beta1 = 0; +$root.nnabla.LionParameter.prototype.beta2 = 0; + +$root.nnabla.PolynomialSchedulerParameter = class PolynomialSchedulerParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.PolynomialSchedulerParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.max_iter = reader.float(); + break; + case 2: + message.power = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.PolynomialSchedulerParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "max_iter": + message.max_iter = reader.float(); + break; + case "power": + message.power = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.PolynomialSchedulerParameter.prototype.max_iter = 0; +$root.nnabla.PolynomialSchedulerParameter.prototype.power = 0; + +$root.nnabla.CosineSchedulerParameter = class CosineSchedulerParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.CosineSchedulerParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.max_iter = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.CosineSchedulerParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "max_iter": + message.max_iter = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.CosineSchedulerParameter.prototype.max_iter = 0; + +$root.nnabla.ExponentialSchedulerParameter = class ExponentialSchedulerParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ExponentialSchedulerParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gamma = reader.float(); + break; + case 2: + message.iter_interval = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ExponentialSchedulerParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "gamma": + message.gamma = reader.float(); + break; + case "iter_interval": + message.iter_interval = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ExponentialSchedulerParameter.prototype.gamma = 0; +$root.nnabla.ExponentialSchedulerParameter.prototype.iter_interval = protobuf.Int64.create(0); + +$root.nnabla.StepSchedulerParameter = class StepSchedulerParameter { + + constructor() { + this.iter_steps = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.StepSchedulerParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gamma = reader.float(); + break; + case 2: + message.iter_steps = reader.array(message.iter_steps, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.StepSchedulerParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "gamma": + message.gamma = reader.float(); + break; + case "iter_steps": + reader.array(message.iter_steps, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.StepSchedulerParameter.prototype.gamma = 0; + +$root.nnabla.CustomSchedulerParameter = class CustomSchedulerParameter { + + constructor() { + this.data_variable = []; + this.output_variable = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.CustomSchedulerParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.max_iter = reader.float(); + break; + case 10: + message.network_name = reader.string(); + break; + case 50: + message.data_variable.push($root.nnabla.DataVariable.decode(reader, reader.uint32())); + break; + case 80: + message.output_variable.push($root.nnabla.OutputVariable.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.CustomSchedulerParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "max_iter": + message.max_iter = reader.float(); + break; + case "network_name": + message.network_name = reader.string(); + break; + case "data_variable": + message.data_variable.push($root.nnabla.DataVariable.decodeText(reader)); + break; + case "output_variable": + message.output_variable.push($root.nnabla.OutputVariable.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.CustomSchedulerParameter.prototype.max_iter = 0; +$root.nnabla.CustomSchedulerParameter.prototype.network_name = ""; + +$root.nnabla.LinearWarmupSchedulerParameter = class LinearWarmupSchedulerParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LinearWarmupSchedulerParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.warmup_iter = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LinearWarmupSchedulerParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "warmup_iter": + message.warmup_iter = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LinearWarmupSchedulerParameter.prototype.warmup_iter = protobuf.Int64.create(0); + +$root.nnabla.DataVariable = class DataVariable { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.DataVariable(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.variable_name = reader.string(); + break; + case 3: + message.data_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.DataVariable(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "variable_name": + message.variable_name = reader.string(); + break; + case "data_name": + message.data_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.DataVariable.prototype.variable_name = ""; +$root.nnabla.DataVariable.prototype.data_name = ""; + +$root.nnabla.GeneratorVariable = class GeneratorVariable { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.GeneratorVariable(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.variable_name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.multiplier = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.GeneratorVariable(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "variable_name": + message.variable_name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "multiplier": + message.multiplier = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.GeneratorVariable.prototype.variable_name = ""; +$root.nnabla.GeneratorVariable.prototype.type = ""; +$root.nnabla.GeneratorVariable.prototype.multiplier = 0; + +$root.nnabla.LossVariable = class LossVariable { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LossVariable(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.variable_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LossVariable(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "variable_name": + message.variable_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LossVariable.prototype.variable_name = ""; + +$root.nnabla.ParameterVariable = class ParameterVariable { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ParameterVariable(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.variable_name = reader.string(); + break; + case 2: + message.learning_rate_multiplier = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ParameterVariable(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "variable_name": + message.variable_name = reader.string(); + break; + case "learning_rate_multiplier": + message.learning_rate_multiplier = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ParameterVariable.prototype.variable_name = ""; +$root.nnabla.ParameterVariable.prototype.learning_rate_multiplier = 0; + +$root.nnabla.Monitor = class Monitor { + + constructor() { + this.dataset_name = []; + this.data_variable = []; + this.generator_variable = []; + this.monitor_variable = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.Monitor(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 10: + message.network_name = reader.string(); + break; + case 20: + message.dataset_name.push(reader.string()); + break; + case 50: + message.data_variable.push($root.nnabla.DataVariable.decode(reader, reader.uint32())); + break; + case 60: + message.generator_variable.push($root.nnabla.GeneratorVariable.decode(reader, reader.uint32())); + break; + case 70: + message.monitor_variable.push($root.nnabla.MonitorVariable.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Monitor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "network_name": + message.network_name = reader.string(); + break; + case "dataset_name": + reader.array(message.dataset_name, () => reader.string()); + break; + case "data_variable": + message.data_variable.push($root.nnabla.DataVariable.decodeText(reader)); + break; + case "generator_variable": + message.generator_variable.push($root.nnabla.GeneratorVariable.decodeText(reader)); + break; + case "monitor_variable": + message.monitor_variable.push($root.nnabla.MonitorVariable.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Monitor.prototype.name = ""; +$root.nnabla.Monitor.prototype.network_name = ""; + +$root.nnabla.MonitorVariable = class MonitorVariable { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.MonitorVariable(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.variable_name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.data_name = reader.string(); + break; + case 100: + message.multiplier = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MonitorVariable(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "variable_name": + message.variable_name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "data_name": + message.data_name = reader.string(); + break; + case "multiplier": + message.multiplier = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MonitorVariable.prototype.variable_name = ""; +$root.nnabla.MonitorVariable.prototype.type = ""; +$root.nnabla.MonitorVariable.prototype.data_name = ""; +$root.nnabla.MonitorVariable.prototype.multiplier = 0; + +$root.nnabla.Executor = class Executor { + + constructor() { + this.data_variable = []; + this.generator_variable = []; + this.loss_variable = []; + this.output_variable = []; + this.parameter_variable = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.Executor(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 10: + message.network_name = reader.string(); + break; + case 20: + message.num_evaluations = reader.int64(); + break; + case 21: + message.repeat_evaluation_type = reader.string(); + break; + case 30: + message.need_back_propagation = reader.bool(); + break; + case 50: + message.data_variable.push($root.nnabla.DataVariable.decode(reader, reader.uint32())); + break; + case 60: + message.generator_variable.push($root.nnabla.GeneratorVariable.decode(reader, reader.uint32())); + break; + case 70: + message.loss_variable.push($root.nnabla.LossVariable.decode(reader, reader.uint32())); + break; + case 80: + message.output_variable.push($root.nnabla.OutputVariable.decode(reader, reader.uint32())); + break; + case 90: + message.parameter_variable.push($root.nnabla.ParameterVariable.decode(reader, reader.uint32())); + break; + case 101: + message.no_image_normalization = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Executor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "network_name": + message.network_name = reader.string(); + break; + case "num_evaluations": + message.num_evaluations = reader.int64(); + break; + case "repeat_evaluation_type": + message.repeat_evaluation_type = reader.string(); + break; + case "need_back_propagation": + message.need_back_propagation = reader.bool(); + break; + case "data_variable": + message.data_variable.push($root.nnabla.DataVariable.decodeText(reader)); + break; + case "generator_variable": + message.generator_variable.push($root.nnabla.GeneratorVariable.decodeText(reader)); + break; + case "loss_variable": + message.loss_variable.push($root.nnabla.LossVariable.decodeText(reader)); + break; + case "output_variable": + message.output_variable.push($root.nnabla.OutputVariable.decodeText(reader)); + break; + case "parameter_variable": + message.parameter_variable.push($root.nnabla.ParameterVariable.decodeText(reader)); + break; + case "no_image_normalization": + message.no_image_normalization = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Executor.prototype.name = ""; +$root.nnabla.Executor.prototype.network_name = ""; +$root.nnabla.Executor.prototype.num_evaluations = protobuf.Int64.create(0); +$root.nnabla.Executor.prototype.repeat_evaluation_type = ""; +$root.nnabla.Executor.prototype.need_back_propagation = false; +$root.nnabla.Executor.prototype.no_image_normalization = false; + +$root.nnabla.OutputVariable = class OutputVariable { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.OutputVariable(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.variable_name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.data_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.OutputVariable(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "variable_name": + message.variable_name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "data_name": + message.data_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.OutputVariable.prototype.variable_name = ""; +$root.nnabla.OutputVariable.prototype.type = ""; +$root.nnabla.OutputVariable.prototype.data_name = ""; + +$root.nnabla.Function = class Function { + + constructor() { + this.repeat_id = []; + this.input = []; + this.output = []; + } + + get parameter() { + $root.nnabla.Function.parameterSet = $root.nnabla.Function.parameterSet || new Set([ "affine_param", "rnn_param", "lstm_param", "gru_param", "convolution_param", "fused_convolution_param", "depthwise_convolution_param", "deconvolution_param", "depthwise_deconvolution_param", "deformable_convolution_param", "max_pooling_param", "average_pooling_param", "sum_pooling_param", "unpooling_param", "roi_align_param", "relu_param", "leaky_relu_param", "softmax_param", "log_softmax_param", "elu_param", "selu_param", "crelu_param", "celu_param", "prelu_param", "softplus_param", "fused_batch_normalization_param", "batch_normalization_param", "group_normalization_param", "instance_normalization_param", "layer_normalization_param", "norm_normalization_param", "sync_batch_normalization_param", "tensor_normalization_param", "weight_normalization_param", "weight_standardization_param", "spectral_norm_param", "mean_subtraction_param", "clip_grad_by_norm_param", "sum_param", "cumsum_param", "mean_param", "max_param", "min_param", "norm_param", "prod_param", "cumprod_param", "add2_param", "bc_add2_param", "sub2_param", "mul2_param", "div2_param", "pow2_param", "add_scalar_param", "mul_scalar_param", "pow_scalar_param", "r_sub_scalar_param", "r_div_scalar_param", "r_pow_scalar_param", "sign_param", "minimum_scalar_param", "maximum_scalar_param", "searchsorted_param", "logical_and_scalar_param", "logical_or_scalar_param", "logical_xor_scalar_param", "equal_scalar_param", "not_equal_scalar_param", "greater_equal_scalar_param", "greater_scalar_param", "less_equal_scalar_param", "less_scalar_param", "reset_nan_param", "reset_inf_param", "constant_param", "arange_param", "linspace_param", "batch_matmul_param", "round_param", "ceil_param", "floor_param", "concatenate_param", "split_param", "stack_param", "slice_param", "pad_param", "transpose_param", "broadcast_param", "broadcast_to_param", "tile_param", "one_hot_param", "flip_param", "shift_param", "sort_param", "reshape_param", "shape_param", "trilu_param", "meshgrid_param", "batch_cholesky_param", "gather_param", "scatter_nd_param", "scatter_add_param", "bool_fill_param", "pack_padded_sequence_param", "pad_packed_sequence_param", "interpolate_param", "onnx_resize_param", "fft_param", "ifft_param", "stft_param", "istft_param", "dropout_param", "top_k_data_param", "top_k_grad_param", "rand_param", "randint_param", 
"randn_param", "rand_binomial_param", "rand_beta_param", "rand_gamma_param", "random_choice_param", "random_crop_param", "random_flip_param", "random_shift_param", "random_erase_param", "image_augmentation_param", "softmax_cross_entropy_param", "categorical_cross_entropy_param", "huber_loss_param", "epsilon_insensitive_loss_param", "kl_multinomial_param", "affine_grid_param", "warp_by_grid_param", "binary_connect_affine_param", "binary_connect_convolution_param", "binary_weight_affine_param", "binary_weight_convolution_param", "inq_affine_param", "inq_convolution_param", "fixed_point_quantize_param", "min_max_quantize_param", "pow2_quantize_param", "prune_param", "quantize_linear_param", "top_n_error_param", "confusion_matrix_param", "vat_noise_param", "sink_param", "nms_detection2d_param", "onnx_non_max_suppression_param", "max_pooling_backward_param", "patch_correlation_param", "unique_param", "eye_like_param", "mod2_param", "bit_shift_param", "einsum_param"]); + return Object.keys(this).find((key) => $root.nnabla.Function.parameterSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.nnabla.Function(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.repeat_id.push(reader.string()); + break; + case 10: + message.context = $root.nnabla.Context.decode(reader, reader.uint32()); + break; + case 20: + message.input.push(reader.string()); + break; + case 30: + message.output.push(reader.string()); + break; + case 1001: + message.affine_param = $root.nnabla.AffineParameter.decode(reader, reader.uint32()); + break; + case 1002: + message.rnn_param = $root.nnabla.RNNParameter.decode(reader, reader.uint32()); + break; + case 1003: + message.lstm_param = $root.nnabla.LSTMParameter.decode(reader, reader.uint32()); + break; + case 1004: + message.gru_param = $root.nnabla.GRUParameter.decode(reader, reader.uint32()); + break; + case 1005: + message.convolution_param = $root.nnabla.ConvolutionParameter.decode(reader, reader.uint32()); + break; + case 1006: + message.fused_convolution_param = $root.nnabla.FusedConvolutionParameter.decode(reader, reader.uint32()); + break; + case 1007: + message.depthwise_convolution_param = $root.nnabla.DepthwiseConvolutionParameter.decode(reader, reader.uint32()); + break; + case 1008: + message.deconvolution_param = $root.nnabla.DeconvolutionParameter.decode(reader, reader.uint32()); + break; + case 1009: + message.depthwise_deconvolution_param = $root.nnabla.DepthwiseDeconvolutionParameter.decode(reader, reader.uint32()); + break; + case 1010: + message.deformable_convolution_param = $root.nnabla.DeformableConvolutionParameter.decode(reader, reader.uint32()); + break; + case 1012: + message.max_pooling_param = $root.nnabla.MaxPoolingParameter.decode(reader, reader.uint32()); + break; + case 1013: + message.average_pooling_param = $root.nnabla.AveragePoolingParameter.decode(reader, reader.uint32()); + break; + case 1015: + message.sum_pooling_param = $root.nnabla.SumPoolingParameter.decode(reader, reader.uint32()); + break; + case 1016: + message.unpooling_param = $root.nnabla.UnpoolingParameter.decode(reader, reader.uint32()); + break; + case 1018: + message.roi_align_param = $root.nnabla.RoiAlignParameter.decode(reader, reader.uint32()); + break; + case 1022: + 
message.relu_param = $root.nnabla.ReLUParameter.decode(reader, reader.uint32()); + break; + case 1023: + message.leaky_relu_param = $root.nnabla.LeakyReLUParameter.decode(reader, reader.uint32()); + break; + case 1024: + message.softmax_param = $root.nnabla.SoftmaxParameter.decode(reader, reader.uint32()); + break; + case 1025: + message.log_softmax_param = $root.nnabla.LogSoftmaxParameter.decode(reader, reader.uint32()); + break; + case 1026: + message.elu_param = $root.nnabla.ELUParameter.decode(reader, reader.uint32()); + break; + case 1027: + message.selu_param = $root.nnabla.SELUParameter.decode(reader, reader.uint32()); + break; + case 1028: + message.crelu_param = $root.nnabla.CReLUParameter.decode(reader, reader.uint32()); + break; + case 1029: + message.celu_param = $root.nnabla.CELUParameter.decode(reader, reader.uint32()); + break; + case 1030: + message.prelu_param = $root.nnabla.PReLUParameter.decode(reader, reader.uint32()); + break; + case 1037: + message.softplus_param = $root.nnabla.SoftPlusParameter.decode(reader, reader.uint32()); + break; + case 1041: + message.fused_batch_normalization_param = $root.nnabla.FusedBatchNormalizationParameter.decode(reader, reader.uint32()); + break; + case 1042: + message.batch_normalization_param = $root.nnabla.BatchNormalizationParameter.decode(reader, reader.uint32()); + break; + case 1043: + message.group_normalization_param = $root.nnabla.GroupNormalizationParameter.decode(reader, reader.uint32()); + break; + case 1044: + message.instance_normalization_param = $root.nnabla.InstanceNormalizationParameter.decode(reader, reader.uint32()); + break; + case 1045: + message.layer_normalization_param = $root.nnabla.LayerNormalizationParameter.decode(reader, reader.uint32()); + break; + case 1046: + message.norm_normalization_param = $root.nnabla.NormNormalizationParameter.decode(reader, reader.uint32()); + break; + case 1047: + message.sync_batch_normalization_param = $root.nnabla.SyncBatchNormalizationParameter.decode(reader, reader.uint32()); + break; + case 1048: + message.tensor_normalization_param = $root.nnabla.TensorNormalizationParameter.decode(reader, reader.uint32()); + break; + case 1049: + message.weight_normalization_param = $root.nnabla.WeightNormalizationParameter.decode(reader, reader.uint32()); + break; + case 1050: + message.weight_standardization_param = $root.nnabla.WeightStandardizationParameter.decode(reader, reader.uint32()); + break; + case 1051: + message.spectral_norm_param = $root.nnabla.SpectralNormParameter.decode(reader, reader.uint32()); + break; + case 1052: + message.mean_subtraction_param = $root.nnabla.MeanSubtractionParameter.decode(reader, reader.uint32()); + break; + case 1054: + message.clip_grad_by_norm_param = $root.nnabla.ClipGradByNormParameter.decode(reader, reader.uint32()); + break; + case 1055: + message.sum_param = $root.nnabla.SumParameter.decode(reader, reader.uint32()); + break; + case 1056: + message.cumsum_param = $root.nnabla.CumSumParameter.decode(reader, reader.uint32()); + break; + case 1057: + message.mean_param = $root.nnabla.MeanParameter.decode(reader, reader.uint32()); + break; + case 1058: + message.max_param = $root.nnabla.MaxParameter.decode(reader, reader.uint32()); + break; + case 1059: + message.min_param = $root.nnabla.MinParameter.decode(reader, reader.uint32()); + break; + case 1060: + message.norm_param = $root.nnabla.NormParameter.decode(reader, reader.uint32()); + break; + case 1061: + message.prod_param = $root.nnabla.ProdParameter.decode(reader, reader.uint32()); + 
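+                    // (Same length-delimited pattern for every parameter case: reader.uint32()
+                    // supplies the nested message length, and the inner decode() consumes
+                    // exactly that many bytes, stopping at reader.position + length.)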
break; + case 1062: + message.cumprod_param = $root.nnabla.CumProdParameter.decode(reader, reader.uint32()); + break; + case 1065: + message.add2_param = $root.nnabla.Add2Parameter.decode(reader, reader.uint32()); + break; + case 1067: + message.bc_add2_param = $root.nnabla.BcAdd2Parameter.decode(reader, reader.uint32()); + break; + case 1068: + message.sub2_param = $root.nnabla.Sub2Parameter.decode(reader, reader.uint32()); + break; + case 1069: + message.mul2_param = $root.nnabla.Mul2Parameter.decode(reader, reader.uint32()); + break; + case 1071: + message.div2_param = $root.nnabla.Div2Parameter.decode(reader, reader.uint32()); + break; + case 1072: + message.pow2_param = $root.nnabla.Pow2Parameter.decode(reader, reader.uint32()); + break; + case 1073: + message.add_scalar_param = $root.nnabla.AddScalarParameter.decode(reader, reader.uint32()); + break; + case 1074: + message.mul_scalar_param = $root.nnabla.MulScalarParameter.decode(reader, reader.uint32()); + break; + case 1075: + message.pow_scalar_param = $root.nnabla.PowScalarParameter.decode(reader, reader.uint32()); + break; + case 1076: + message.r_sub_scalar_param = $root.nnabla.RSubScalarParameter.decode(reader, reader.uint32()); + break; + case 1077: + message.r_div_scalar_param = $root.nnabla.RDivScalarParameter.decode(reader, reader.uint32()); + break; + case 1078: + message.r_pow_scalar_param = $root.nnabla.RPowScalarParameter.decode(reader, reader.uint32()); + break; + case 1079: + message.sign_param = $root.nnabla.SignParameter.decode(reader, reader.uint32()); + break; + case 1082: + message.minimum_scalar_param = $root.nnabla.MinimumScalarParameter.decode(reader, reader.uint32()); + break; + case 1083: + message.maximum_scalar_param = $root.nnabla.MaximumScalarParameter.decode(reader, reader.uint32()); + break; + case 1093: + message.searchsorted_param = $root.nnabla.SearchSortedParameter.decode(reader, reader.uint32()); + break; + case 1094: + message.logical_and_scalar_param = $root.nnabla.LogicalAndScalarParameter.decode(reader, reader.uint32()); + break; + case 1095: + message.logical_or_scalar_param = $root.nnabla.LogicalOrScalarParameter.decode(reader, reader.uint32()); + break; + case 1096: + message.logical_xor_scalar_param = $root.nnabla.LogicalXorScalarParameter.decode(reader, reader.uint32()); + break; + case 1097: + message.equal_scalar_param = $root.nnabla.EqualScalarParameter.decode(reader, reader.uint32()); + break; + case 1098: + message.not_equal_scalar_param = $root.nnabla.NotEqualScalarParameter.decode(reader, reader.uint32()); + break; + case 1099: + message.greater_equal_scalar_param = $root.nnabla.GreaterEqualScalarParameter.decode(reader, reader.uint32()); + break; + case 1100: + message.greater_scalar_param = $root.nnabla.GreaterScalarParameter.decode(reader, reader.uint32()); + break; + case 1101: + message.less_equal_scalar_param = $root.nnabla.LessEqualScalarParameter.decode(reader, reader.uint32()); + break; + case 1102: + message.less_scalar_param = $root.nnabla.LessScalarParameter.decode(reader, reader.uint32()); + break; + case 1106: + message.reset_nan_param = $root.nnabla.ResetNaNParameter.decode(reader, reader.uint32()); + break; + case 1107: + message.reset_inf_param = $root.nnabla.ResetInfParameter.decode(reader, reader.uint32()); + break; + case 1109: + message.constant_param = $root.nnabla.ConstantParameter.decode(reader, reader.uint32()); + break; + case 1110: + message.arange_param = $root.nnabla.ArangeParameter.decode(reader, reader.uint32()); + break; + case 1111: + 
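+                    // Embedded messages are length-delimited: the inner reader.uint32()
+                    // in each case reads the length prefix, which the callee's
+                    // decode(reader, length) turns into its own `end` position.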
message.linspace_param = $root.nnabla.LinspaceParameter.decode(reader, reader.uint32()); + break; + case 1116: + message.batch_matmul_param = $root.nnabla.BatchMatmulParameter.decode(reader, reader.uint32()); + break; + case 1117: + message.round_param = $root.nnabla.RoundParameter.decode(reader, reader.uint32()); + break; + case 1118: + message.ceil_param = $root.nnabla.CeilParameter.decode(reader, reader.uint32()); + break; + case 1119: + message.floor_param = $root.nnabla.FloorParameter.decode(reader, reader.uint32()); + break; + case 1133: + message.concatenate_param = $root.nnabla.ConcatenateParameter.decode(reader, reader.uint32()); + break; + case 1134: + message.split_param = $root.nnabla.SplitParameter.decode(reader, reader.uint32()); + break; + case 1135: + message.stack_param = $root.nnabla.StackParameter.decode(reader, reader.uint32()); + break; + case 1136: + message.slice_param = $root.nnabla.SliceParameter.decode(reader, reader.uint32()); + break; + case 1137: + message.pad_param = $root.nnabla.PadParameter.decode(reader, reader.uint32()); + break; + case 1138: + message.transpose_param = $root.nnabla.TransposeParameter.decode(reader, reader.uint32()); + break; + case 1139: + message.broadcast_param = $root.nnabla.BroadcastParameter.decode(reader, reader.uint32()); + break; + case 1140: + message.broadcast_to_param = $root.nnabla.BroadcastToParameter.decode(reader, reader.uint32()); + break; + case 1141: + message.tile_param = $root.nnabla.TileParameter.decode(reader, reader.uint32()); + break; + case 1142: + message.one_hot_param = $root.nnabla.OneHotParameter.decode(reader, reader.uint32()); + break; + case 1143: + message.flip_param = $root.nnabla.FlipParameter.decode(reader, reader.uint32()); + break; + case 1144: + message.shift_param = $root.nnabla.ShiftParameter.decode(reader, reader.uint32()); + break; + case 1145: + message.sort_param = $root.nnabla.SortParameter.decode(reader, reader.uint32()); + break; + case 1146: + message.reshape_param = $root.nnabla.ReshapeParameter.decode(reader, reader.uint32()); + break; + case 1147: + message.shape_param = $root.nnabla.ShapeParameter.decode(reader, reader.uint32()); + break; + case 1150: + message.trilu_param = $root.nnabla.TriluParameter.decode(reader, reader.uint32()); + break; + case 1151: + message.meshgrid_param = $root.nnabla.MeshgridParameter.decode(reader, reader.uint32()); + break; + case 1155: + message.batch_cholesky_param = $root.nnabla.BatchCholeskyParameter.decode(reader, reader.uint32()); + break; + case 1157: + message.gather_param = $root.nnabla.GatherParameter.decode(reader, reader.uint32()); + break; + case 1160: + message.scatter_nd_param = $root.nnabla.ScatterNdParameter.decode(reader, reader.uint32()); + break; + case 1161: + message.scatter_add_param = $root.nnabla.ScatterAddParameter.decode(reader, reader.uint32()); + break; + case 1163: + message.bool_fill_param = $root.nnabla.BoolFillParameter.decode(reader, reader.uint32()); + break; + case 1164: + message.pack_padded_sequence_param = $root.nnabla.PackPaddedSequenceParameter.decode(reader, reader.uint32()); + break; + case 1165: + message.pad_packed_sequence_param = $root.nnabla.PadPackedSequenceParameter.decode(reader, reader.uint32()); + break; + case 1167: + message.interpolate_param = $root.nnabla.InterpolateParameter.decode(reader, reader.uint32()); + break; + case 1168: + message.onnx_resize_param = $root.nnabla.ONNXResizeParameter.decode(reader, reader.uint32()); + break; + case 1169: + message.fft_param = 
$root.nnabla.FFTParameter.decode(reader, reader.uint32()); + break; + case 1170: + message.ifft_param = $root.nnabla.IFFTParameter.decode(reader, reader.uint32()); + break; + case 1171: + message.stft_param = $root.nnabla.STFTParameter.decode(reader, reader.uint32()); + break; + case 1172: + message.istft_param = $root.nnabla.ISTFTParameter.decode(reader, reader.uint32()); + break; + case 1173: + message.dropout_param = $root.nnabla.DropoutParameter.decode(reader, reader.uint32()); + break; + case 1174: + message.top_k_data_param = $root.nnabla.TopKDataParameter.decode(reader, reader.uint32()); + break; + case 1175: + message.top_k_grad_param = $root.nnabla.TopKGradParameter.decode(reader, reader.uint32()); + break; + case 1176: + message.rand_param = $root.nnabla.RandParameter.decode(reader, reader.uint32()); + break; + case 1177: + message.randint_param = $root.nnabla.RandintParameter.decode(reader, reader.uint32()); + break; + case 1178: + message.randn_param = $root.nnabla.RandnParameter.decode(reader, reader.uint32()); + break; + case 1179: + message.rand_binomial_param = $root.nnabla.RandBinomialParameter.decode(reader, reader.uint32()); + break; + case 1180: + message.rand_beta_param = $root.nnabla.RandBetaParameter.decode(reader, reader.uint32()); + break; + case 1181: + message.rand_gamma_param = $root.nnabla.RandGammaParameter.decode(reader, reader.uint32()); + break; + case 1182: + message.random_choice_param = $root.nnabla.RandomChoiceParameter.decode(reader, reader.uint32()); + break; + case 1183: + message.random_crop_param = $root.nnabla.RandomCropParameter.decode(reader, reader.uint32()); + break; + case 1184: + message.random_flip_param = $root.nnabla.RandomFlipParameter.decode(reader, reader.uint32()); + break; + case 1185: + message.random_shift_param = $root.nnabla.RandomShiftParameter.decode(reader, reader.uint32()); + break; + case 1186: + message.random_erase_param = $root.nnabla.RandomEraseParameter.decode(reader, reader.uint32()); + break; + case 1187: + message.image_augmentation_param = $root.nnabla.ImageAugmentationParameter.decode(reader, reader.uint32()); + break; + case 1190: + message.softmax_cross_entropy_param = $root.nnabla.SoftmaxCrossEntropyParameter.decode(reader, reader.uint32()); + break; + case 1191: + message.categorical_cross_entropy_param = $root.nnabla.CategoricalCrossEntropyParameter.decode(reader, reader.uint32()); + break; + case 1194: + message.huber_loss_param = $root.nnabla.HuberLossParameter.decode(reader, reader.uint32()); + break; + case 1195: + message.epsilon_insensitive_loss_param = $root.nnabla.EpsilonInsensitiveLossParameter.decode(reader, reader.uint32()); + break; + case 1196: + message.kl_multinomial_param = $root.nnabla.KLMultinomialParameter.decode(reader, reader.uint32()); + break; + case 1197: + message.affine_grid_param = $root.nnabla.AffineGridParameter.decode(reader, reader.uint32()); + break; + case 1198: + message.warp_by_grid_param = $root.nnabla.WarpByGridParameter.decode(reader, reader.uint32()); + break; + case 1202: + message.binary_connect_affine_param = $root.nnabla.BinaryConnectAffineParameter.decode(reader, reader.uint32()); + break; + case 1203: + message.binary_connect_convolution_param = $root.nnabla.BinaryConnectConvolutionParameter.decode(reader, reader.uint32()); + break; + case 1204: + message.binary_weight_affine_param = $root.nnabla.BinaryWeightAffineParameter.decode(reader, reader.uint32()); + break; + case 1205: + message.binary_weight_convolution_param = 
$root.nnabla.BinaryWeightConvolutionParameter.decode(reader, reader.uint32()); + break; + case 1206: + message.inq_affine_param = $root.nnabla.INQAffineParameter.decode(reader, reader.uint32()); + break; + case 1207: + message.inq_convolution_param = $root.nnabla.INQConvolutionParameter.decode(reader, reader.uint32()); + break; + case 1208: + message.fixed_point_quantize_param = $root.nnabla.FixedPointQuantizeParameter.decode(reader, reader.uint32()); + break; + case 1209: + message.min_max_quantize_param = $root.nnabla.MinMaxQuantizeParameter.decode(reader, reader.uint32()); + break; + case 1210: + message.pow2_quantize_param = $root.nnabla.Pow2QuantizeParameter.decode(reader, reader.uint32()); + break; + case 1211: + message.prune_param = $root.nnabla.PruneParameter.decode(reader, reader.uint32()); + break; + case 1212: + message.quantize_linear_param = $root.nnabla.QuantizeLinearParameter.decode(reader, reader.uint32()); + break; + case 1214: + message.top_n_error_param = $root.nnabla.TopNErrorParameter.decode(reader, reader.uint32()); + break; + case 1216: + message.confusion_matrix_param = $root.nnabla.ConfusionMatrixParameter.decode(reader, reader.uint32()); + break; + case 1217: + message.vat_noise_param = $root.nnabla.VATNoiseParameter.decode(reader, reader.uint32()); + break; + case 1219: + message.sink_param = $root.nnabla.SinkParameter.decode(reader, reader.uint32()); + break; + case 1220: + message.nms_detection2d_param = $root.nnabla.NmsDetection2dParameter.decode(reader, reader.uint32()); + break; + case 1221: + message.onnx_non_max_suppression_param = $root.nnabla.ONNXNonMaxSuppressionParameter.decode(reader, reader.uint32()); + break; + case 1222: + message.max_pooling_backward_param = $root.nnabla.MaxPoolingBackwardParameter.decode(reader, reader.uint32()); + break; + case 1223: + message.patch_correlation_param = $root.nnabla.PatchCorrelationParameter.decode(reader, reader.uint32()); + break; + case 1224: + message.unique_param = $root.nnabla.UniqueParameter.decode(reader, reader.uint32()); + break; + case 1225: + message.eye_like_param = $root.nnabla.EyeLikeParameter.decode(reader, reader.uint32()); + break; + case 1226: + message.mod2_param = $root.nnabla.Mod2Parameter.decode(reader, reader.uint32()); + break; + case 1227: + message.bit_shift_param = $root.nnabla.BitShiftParameter.decode(reader, reader.uint32()); + break; + case 1228: + message.einsum_param = $root.nnabla.EinsumParameter.decode(reader, reader.uint32()); + break; + case 100: + message.repeat_param = $root.nnabla.RepeatParameter.decode(reader, reader.uint32()); + break; + case 101: + message.recurrent_param = $root.nnabla.RecurrentParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Function(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "repeat_id": + reader.array(message.repeat_id, () => reader.string()); + break; + case "context": + message.context = $root.nnabla.Context.decodeText(reader); + break; + case "input": + reader.array(message.input, () => reader.string()); + break; + case "output": + reader.array(message.output, () => reader.string()); + break; + case "affine_param": + message.affine_param = $root.nnabla.AffineParameter.decodeText(reader); + break; + case "rnn_param": + 
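+                // decodeText() mirrors decode() for the protobuf text format,
+                // dispatching on field names instead of field numbers; unrecognized
+                // fields are handed to reader.field() rather than skipType().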
message.rnn_param = $root.nnabla.RNNParameter.decodeText(reader); + break; + case "lstm_param": + message.lstm_param = $root.nnabla.LSTMParameter.decodeText(reader); + break; + case "gru_param": + message.gru_param = $root.nnabla.GRUParameter.decodeText(reader); + break; + case "convolution_param": + message.convolution_param = $root.nnabla.ConvolutionParameter.decodeText(reader); + break; + case "fused_convolution_param": + message.fused_convolution_param = $root.nnabla.FusedConvolutionParameter.decodeText(reader); + break; + case "depthwise_convolution_param": + message.depthwise_convolution_param = $root.nnabla.DepthwiseConvolutionParameter.decodeText(reader); + break; + case "deconvolution_param": + message.deconvolution_param = $root.nnabla.DeconvolutionParameter.decodeText(reader); + break; + case "depthwise_deconvolution_param": + message.depthwise_deconvolution_param = $root.nnabla.DepthwiseDeconvolutionParameter.decodeText(reader); + break; + case "deformable_convolution_param": + message.deformable_convolution_param = $root.nnabla.DeformableConvolutionParameter.decodeText(reader); + break; + case "max_pooling_param": + message.max_pooling_param = $root.nnabla.MaxPoolingParameter.decodeText(reader); + break; + case "average_pooling_param": + message.average_pooling_param = $root.nnabla.AveragePoolingParameter.decodeText(reader); + break; + case "sum_pooling_param": + message.sum_pooling_param = $root.nnabla.SumPoolingParameter.decodeText(reader); + break; + case "unpooling_param": + message.unpooling_param = $root.nnabla.UnpoolingParameter.decodeText(reader); + break; + case "roi_align_param": + message.roi_align_param = $root.nnabla.RoiAlignParameter.decodeText(reader); + break; + case "relu_param": + message.relu_param = $root.nnabla.ReLUParameter.decodeText(reader); + break; + case "leaky_relu_param": + message.leaky_relu_param = $root.nnabla.LeakyReLUParameter.decodeText(reader); + break; + case "softmax_param": + message.softmax_param = $root.nnabla.SoftmaxParameter.decodeText(reader); + break; + case "log_softmax_param": + message.log_softmax_param = $root.nnabla.LogSoftmaxParameter.decodeText(reader); + break; + case "elu_param": + message.elu_param = $root.nnabla.ELUParameter.decodeText(reader); + break; + case "selu_param": + message.selu_param = $root.nnabla.SELUParameter.decodeText(reader); + break; + case "crelu_param": + message.crelu_param = $root.nnabla.CReLUParameter.decodeText(reader); + break; + case "celu_param": + message.celu_param = $root.nnabla.CELUParameter.decodeText(reader); + break; + case "prelu_param": + message.prelu_param = $root.nnabla.PReLUParameter.decodeText(reader); + break; + case "softplus_param": + message.softplus_param = $root.nnabla.SoftPlusParameter.decodeText(reader); + break; + case "fused_batch_normalization_param": + message.fused_batch_normalization_param = $root.nnabla.FusedBatchNormalizationParameter.decodeText(reader); + break; + case "batch_normalization_param": + message.batch_normalization_param = $root.nnabla.BatchNormalizationParameter.decodeText(reader); + break; + case "group_normalization_param": + message.group_normalization_param = $root.nnabla.GroupNormalizationParameter.decodeText(reader); + break; + case "instance_normalization_param": + message.instance_normalization_param = $root.nnabla.InstanceNormalizationParameter.decodeText(reader); + break; + case "layer_normalization_param": + message.layer_normalization_param = $root.nnabla.LayerNormalizationParameter.decodeText(reader); + break; + case 
"norm_normalization_param": + message.norm_normalization_param = $root.nnabla.NormNormalizationParameter.decodeText(reader); + break; + case "sync_batch_normalization_param": + message.sync_batch_normalization_param = $root.nnabla.SyncBatchNormalizationParameter.decodeText(reader); + break; + case "tensor_normalization_param": + message.tensor_normalization_param = $root.nnabla.TensorNormalizationParameter.decodeText(reader); + break; + case "weight_normalization_param": + message.weight_normalization_param = $root.nnabla.WeightNormalizationParameter.decodeText(reader); + break; + case "weight_standardization_param": + message.weight_standardization_param = $root.nnabla.WeightStandardizationParameter.decodeText(reader); + break; + case "spectral_norm_param": + message.spectral_norm_param = $root.nnabla.SpectralNormParameter.decodeText(reader); + break; + case "mean_subtraction_param": + message.mean_subtraction_param = $root.nnabla.MeanSubtractionParameter.decodeText(reader); + break; + case "clip_grad_by_norm_param": + message.clip_grad_by_norm_param = $root.nnabla.ClipGradByNormParameter.decodeText(reader); + break; + case "sum_param": + message.sum_param = $root.nnabla.SumParameter.decodeText(reader); + break; + case "cumsum_param": + message.cumsum_param = $root.nnabla.CumSumParameter.decodeText(reader); + break; + case "mean_param": + message.mean_param = $root.nnabla.MeanParameter.decodeText(reader); + break; + case "max_param": + message.max_param = $root.nnabla.MaxParameter.decodeText(reader); + break; + case "min_param": + message.min_param = $root.nnabla.MinParameter.decodeText(reader); + break; + case "norm_param": + message.norm_param = $root.nnabla.NormParameter.decodeText(reader); + break; + case "prod_param": + message.prod_param = $root.nnabla.ProdParameter.decodeText(reader); + break; + case "cumprod_param": + message.cumprod_param = $root.nnabla.CumProdParameter.decodeText(reader); + break; + case "add2_param": + message.add2_param = $root.nnabla.Add2Parameter.decodeText(reader); + break; + case "bc_add2_param": + message.bc_add2_param = $root.nnabla.BcAdd2Parameter.decodeText(reader); + break; + case "sub2_param": + message.sub2_param = $root.nnabla.Sub2Parameter.decodeText(reader); + break; + case "mul2_param": + message.mul2_param = $root.nnabla.Mul2Parameter.decodeText(reader); + break; + case "div2_param": + message.div2_param = $root.nnabla.Div2Parameter.decodeText(reader); + break; + case "pow2_param": + message.pow2_param = $root.nnabla.Pow2Parameter.decodeText(reader); + break; + case "add_scalar_param": + message.add_scalar_param = $root.nnabla.AddScalarParameter.decodeText(reader); + break; + case "mul_scalar_param": + message.mul_scalar_param = $root.nnabla.MulScalarParameter.decodeText(reader); + break; + case "pow_scalar_param": + message.pow_scalar_param = $root.nnabla.PowScalarParameter.decodeText(reader); + break; + case "r_sub_scalar_param": + message.r_sub_scalar_param = $root.nnabla.RSubScalarParameter.decodeText(reader); + break; + case "r_div_scalar_param": + message.r_div_scalar_param = $root.nnabla.RDivScalarParameter.decodeText(reader); + break; + case "r_pow_scalar_param": + message.r_pow_scalar_param = $root.nnabla.RPowScalarParameter.decodeText(reader); + break; + case "sign_param": + message.sign_param = $root.nnabla.SignParameter.decodeText(reader); + break; + case "minimum_scalar_param": + message.minimum_scalar_param = $root.nnabla.MinimumScalarParameter.decodeText(reader); + break; + case "maximum_scalar_param": + 
message.maximum_scalar_param = $root.nnabla.MaximumScalarParameter.decodeText(reader); + break; + case "searchsorted_param": + message.searchsorted_param = $root.nnabla.SearchSortedParameter.decodeText(reader); + break; + case "logical_and_scalar_param": + message.logical_and_scalar_param = $root.nnabla.LogicalAndScalarParameter.decodeText(reader); + break; + case "logical_or_scalar_param": + message.logical_or_scalar_param = $root.nnabla.LogicalOrScalarParameter.decodeText(reader); + break; + case "logical_xor_scalar_param": + message.logical_xor_scalar_param = $root.nnabla.LogicalXorScalarParameter.decodeText(reader); + break; + case "equal_scalar_param": + message.equal_scalar_param = $root.nnabla.EqualScalarParameter.decodeText(reader); + break; + case "not_equal_scalar_param": + message.not_equal_scalar_param = $root.nnabla.NotEqualScalarParameter.decodeText(reader); + break; + case "greater_equal_scalar_param": + message.greater_equal_scalar_param = $root.nnabla.GreaterEqualScalarParameter.decodeText(reader); + break; + case "greater_scalar_param": + message.greater_scalar_param = $root.nnabla.GreaterScalarParameter.decodeText(reader); + break; + case "less_equal_scalar_param": + message.less_equal_scalar_param = $root.nnabla.LessEqualScalarParameter.decodeText(reader); + break; + case "less_scalar_param": + message.less_scalar_param = $root.nnabla.LessScalarParameter.decodeText(reader); + break; + case "reset_nan_param": + message.reset_nan_param = $root.nnabla.ResetNaNParameter.decodeText(reader); + break; + case "reset_inf_param": + message.reset_inf_param = $root.nnabla.ResetInfParameter.decodeText(reader); + break; + case "constant_param": + message.constant_param = $root.nnabla.ConstantParameter.decodeText(reader); + break; + case "arange_param": + message.arange_param = $root.nnabla.ArangeParameter.decodeText(reader); + break; + case "linspace_param": + message.linspace_param = $root.nnabla.LinspaceParameter.decodeText(reader); + break; + case "batch_matmul_param": + message.batch_matmul_param = $root.nnabla.BatchMatmulParameter.decodeText(reader); + break; + case "round_param": + message.round_param = $root.nnabla.RoundParameter.decodeText(reader); + break; + case "ceil_param": + message.ceil_param = $root.nnabla.CeilParameter.decodeText(reader); + break; + case "floor_param": + message.floor_param = $root.nnabla.FloorParameter.decodeText(reader); + break; + case "concatenate_param": + message.concatenate_param = $root.nnabla.ConcatenateParameter.decodeText(reader); + break; + case "split_param": + message.split_param = $root.nnabla.SplitParameter.decodeText(reader); + break; + case "stack_param": + message.stack_param = $root.nnabla.StackParameter.decodeText(reader); + break; + case "slice_param": + message.slice_param = $root.nnabla.SliceParameter.decodeText(reader); + break; + case "pad_param": + message.pad_param = $root.nnabla.PadParameter.decodeText(reader); + break; + case "transpose_param": + message.transpose_param = $root.nnabla.TransposeParameter.decodeText(reader); + break; + case "broadcast_param": + message.broadcast_param = $root.nnabla.BroadcastParameter.decodeText(reader); + break; + case "broadcast_to_param": + message.broadcast_to_param = $root.nnabla.BroadcastToParameter.decodeText(reader); + break; + case "tile_param": + message.tile_param = $root.nnabla.TileParameter.decodeText(reader); + break; + case "one_hot_param": + message.one_hot_param = $root.nnabla.OneHotParameter.decodeText(reader); + break; + case "flip_param": + message.flip_param = 
$root.nnabla.FlipParameter.decodeText(reader); + break; + case "shift_param": + message.shift_param = $root.nnabla.ShiftParameter.decodeText(reader); + break; + case "sort_param": + message.sort_param = $root.nnabla.SortParameter.decodeText(reader); + break; + case "reshape_param": + message.reshape_param = $root.nnabla.ReshapeParameter.decodeText(reader); + break; + case "shape_param": + message.shape_param = $root.nnabla.ShapeParameter.decodeText(reader); + break; + case "trilu_param": + message.trilu_param = $root.nnabla.TriluParameter.decodeText(reader); + break; + case "meshgrid_param": + message.meshgrid_param = $root.nnabla.MeshgridParameter.decodeText(reader); + break; + case "batch_cholesky_param": + message.batch_cholesky_param = $root.nnabla.BatchCholeskyParameter.decodeText(reader); + break; + case "gather_param": + message.gather_param = $root.nnabla.GatherParameter.decodeText(reader); + break; + case "scatter_nd_param": + message.scatter_nd_param = $root.nnabla.ScatterNdParameter.decodeText(reader); + break; + case "scatter_add_param": + message.scatter_add_param = $root.nnabla.ScatterAddParameter.decodeText(reader); + break; + case "bool_fill_param": + message.bool_fill_param = $root.nnabla.BoolFillParameter.decodeText(reader); + break; + case "pack_padded_sequence_param": + message.pack_padded_sequence_param = $root.nnabla.PackPaddedSequenceParameter.decodeText(reader); + break; + case "pad_packed_sequence_param": + message.pad_packed_sequence_param = $root.nnabla.PadPackedSequenceParameter.decodeText(reader); + break; + case "interpolate_param": + message.interpolate_param = $root.nnabla.InterpolateParameter.decodeText(reader); + break; + case "onnx_resize_param": + message.onnx_resize_param = $root.nnabla.ONNXResizeParameter.decodeText(reader); + break; + case "fft_param": + message.fft_param = $root.nnabla.FFTParameter.decodeText(reader); + break; + case "ifft_param": + message.ifft_param = $root.nnabla.IFFTParameter.decodeText(reader); + break; + case "stft_param": + message.stft_param = $root.nnabla.STFTParameter.decodeText(reader); + break; + case "istft_param": + message.istft_param = $root.nnabla.ISTFTParameter.decodeText(reader); + break; + case "dropout_param": + message.dropout_param = $root.nnabla.DropoutParameter.decodeText(reader); + break; + case "top_k_data_param": + message.top_k_data_param = $root.nnabla.TopKDataParameter.decodeText(reader); + break; + case "top_k_grad_param": + message.top_k_grad_param = $root.nnabla.TopKGradParameter.decodeText(reader); + break; + case "rand_param": + message.rand_param = $root.nnabla.RandParameter.decodeText(reader); + break; + case "randint_param": + message.randint_param = $root.nnabla.RandintParameter.decodeText(reader); + break; + case "randn_param": + message.randn_param = $root.nnabla.RandnParameter.decodeText(reader); + break; + case "rand_binomial_param": + message.rand_binomial_param = $root.nnabla.RandBinomialParameter.decodeText(reader); + break; + case "rand_beta_param": + message.rand_beta_param = $root.nnabla.RandBetaParameter.decodeText(reader); + break; + case "rand_gamma_param": + message.rand_gamma_param = $root.nnabla.RandGammaParameter.decodeText(reader); + break; + case "random_choice_param": + message.random_choice_param = $root.nnabla.RandomChoiceParameter.decodeText(reader); + break; + case "random_crop_param": + message.random_crop_param = $root.nnabla.RandomCropParameter.decodeText(reader); + break; + case "random_flip_param": + message.random_flip_param = 
$root.nnabla.RandomFlipParameter.decodeText(reader); + break; + case "random_shift_param": + message.random_shift_param = $root.nnabla.RandomShiftParameter.decodeText(reader); + break; + case "random_erase_param": + message.random_erase_param = $root.nnabla.RandomEraseParameter.decodeText(reader); + break; + case "image_augmentation_param": + message.image_augmentation_param = $root.nnabla.ImageAugmentationParameter.decodeText(reader); + break; + case "softmax_cross_entropy_param": + message.softmax_cross_entropy_param = $root.nnabla.SoftmaxCrossEntropyParameter.decodeText(reader); + break; + case "categorical_cross_entropy_param": + message.categorical_cross_entropy_param = $root.nnabla.CategoricalCrossEntropyParameter.decodeText(reader); + break; + case "huber_loss_param": + message.huber_loss_param = $root.nnabla.HuberLossParameter.decodeText(reader); + break; + case "epsilon_insensitive_loss_param": + message.epsilon_insensitive_loss_param = $root.nnabla.EpsilonInsensitiveLossParameter.decodeText(reader); + break; + case "kl_multinomial_param": + message.kl_multinomial_param = $root.nnabla.KLMultinomialParameter.decodeText(reader); + break; + case "affine_grid_param": + message.affine_grid_param = $root.nnabla.AffineGridParameter.decodeText(reader); + break; + case "warp_by_grid_param": + message.warp_by_grid_param = $root.nnabla.WarpByGridParameter.decodeText(reader); + break; + case "binary_connect_affine_param": + message.binary_connect_affine_param = $root.nnabla.BinaryConnectAffineParameter.decodeText(reader); + break; + case "binary_connect_convolution_param": + message.binary_connect_convolution_param = $root.nnabla.BinaryConnectConvolutionParameter.decodeText(reader); + break; + case "binary_weight_affine_param": + message.binary_weight_affine_param = $root.nnabla.BinaryWeightAffineParameter.decodeText(reader); + break; + case "binary_weight_convolution_param": + message.binary_weight_convolution_param = $root.nnabla.BinaryWeightConvolutionParameter.decodeText(reader); + break; + case "inq_affine_param": + message.inq_affine_param = $root.nnabla.INQAffineParameter.decodeText(reader); + break; + case "inq_convolution_param": + message.inq_convolution_param = $root.nnabla.INQConvolutionParameter.decodeText(reader); + break; + case "fixed_point_quantize_param": + message.fixed_point_quantize_param = $root.nnabla.FixedPointQuantizeParameter.decodeText(reader); + break; + case "min_max_quantize_param": + message.min_max_quantize_param = $root.nnabla.MinMaxQuantizeParameter.decodeText(reader); + break; + case "pow2_quantize_param": + message.pow2_quantize_param = $root.nnabla.Pow2QuantizeParameter.decodeText(reader); + break; + case "prune_param": + message.prune_param = $root.nnabla.PruneParameter.decodeText(reader); + break; + case "quantize_linear_param": + message.quantize_linear_param = $root.nnabla.QuantizeLinearParameter.decodeText(reader); + break; + case "top_n_error_param": + message.top_n_error_param = $root.nnabla.TopNErrorParameter.decodeText(reader); + break; + case "confusion_matrix_param": + message.confusion_matrix_param = $root.nnabla.ConfusionMatrixParameter.decodeText(reader); + break; + case "vat_noise_param": + message.vat_noise_param = $root.nnabla.VATNoiseParameter.decodeText(reader); + break; + case "sink_param": + message.sink_param = $root.nnabla.SinkParameter.decodeText(reader); + break; + case "nms_detection2d_param": + message.nms_detection2d_param = $root.nnabla.NmsDetection2dParameter.decodeText(reader); + break; + case 
"onnx_non_max_suppression_param": + message.onnx_non_max_suppression_param = $root.nnabla.ONNXNonMaxSuppressionParameter.decodeText(reader); + break; + case "max_pooling_backward_param": + message.max_pooling_backward_param = $root.nnabla.MaxPoolingBackwardParameter.decodeText(reader); + break; + case "patch_correlation_param": + message.patch_correlation_param = $root.nnabla.PatchCorrelationParameter.decodeText(reader); + break; + case "unique_param": + message.unique_param = $root.nnabla.UniqueParameter.decodeText(reader); + break; + case "eye_like_param": + message.eye_like_param = $root.nnabla.EyeLikeParameter.decodeText(reader); + break; + case "mod2_param": + message.mod2_param = $root.nnabla.Mod2Parameter.decodeText(reader); + break; + case "bit_shift_param": + message.bit_shift_param = $root.nnabla.BitShiftParameter.decodeText(reader); + break; + case "einsum_param": + message.einsum_param = $root.nnabla.EinsumParameter.decodeText(reader); + break; + case "repeat_param": + message.repeat_param = $root.nnabla.RepeatParameter.decodeText(reader); + break; + case "recurrent_param": + message.recurrent_param = $root.nnabla.RecurrentParameter.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Function.prototype.name = ""; +$root.nnabla.Function.prototype.type = ""; +$root.nnabla.Function.prototype.context = null; +$root.nnabla.Function.prototype.repeat_param = null; +$root.nnabla.Function.prototype.recurrent_param = null; + +$root.nnabla.AffineParameter = class AffineParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AffineParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AffineParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AffineParameter.prototype.base_axis = protobuf.Int64.create(0); + +$root.nnabla.RNNParameter = class RNNParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RNNParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_layers = reader.int64(); + break; + case 2: + message.nonlinearity = reader.string(); + break; + case 3: + message.dropout = reader.float(); + break; + case 4: + message.bidirectional = reader.bool(); + break; + case 5: + message.training = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RNNParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "num_layers": + message.num_layers = reader.int64(); + break; + case "nonlinearity": + message.nonlinearity = reader.string(); + break; + case "dropout": + message.dropout = reader.float(); + break; + case "bidirectional": + message.bidirectional = reader.bool(); + break; + case "training": + message.training = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RNNParameter.prototype.num_layers = protobuf.Int64.create(0); +$root.nnabla.RNNParameter.prototype.nonlinearity = ""; +$root.nnabla.RNNParameter.prototype.dropout = 0; +$root.nnabla.RNNParameter.prototype.bidirectional = false; +$root.nnabla.RNNParameter.prototype.training = false; + +$root.nnabla.LSTMParameter = class LSTMParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LSTMParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_layers = reader.int64(); + break; + case 2: + message.dropout = reader.float(); + break; + case 3: + message.bidirectional = reader.bool(); + break; + case 4: + message.training = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LSTMParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "num_layers": + message.num_layers = reader.int64(); + break; + case "dropout": + message.dropout = reader.float(); + break; + case "bidirectional": + message.bidirectional = reader.bool(); + break; + case "training": + message.training = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LSTMParameter.prototype.num_layers = protobuf.Int64.create(0); +$root.nnabla.LSTMParameter.prototype.dropout = 0; +$root.nnabla.LSTMParameter.prototype.bidirectional = false; +$root.nnabla.LSTMParameter.prototype.training = false; + +$root.nnabla.GRUParameter = class GRUParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.GRUParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_layers = reader.int64(); + break; + case 2: + message.dropout = reader.float(); + break; + case 3: + message.bidirectional = reader.bool(); + break; + case 4: + message.training = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.GRUParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "num_layers": + message.num_layers = reader.int64(); + break; + case "dropout": + message.dropout = reader.float(); + break; + case "bidirectional": + message.bidirectional = reader.bool(); + break; + case "training": + message.training = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.GRUParameter.prototype.num_layers = protobuf.Int64.create(0); +$root.nnabla.GRUParameter.prototype.dropout = 0; +$root.nnabla.GRUParameter.prototype.bidirectional = false; +$root.nnabla.GRUParameter.prototype.training = false; + +$root.nnabla.ConvolutionParameter = class ConvolutionParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ConvolutionParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.dilation = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.group = reader.int64(); + break; + case 6: + message.channel_last = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ConvolutionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "dilation": + message.dilation = $root.nnabla.Shape.decodeText(reader); + break; + case "group": + message.group = reader.int64(); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ConvolutionParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.ConvolutionParameter.prototype.pad = null; +$root.nnabla.ConvolutionParameter.prototype.stride = null; +$root.nnabla.ConvolutionParameter.prototype.dilation = null; +$root.nnabla.ConvolutionParameter.prototype.group = protobuf.Int64.create(0); +$root.nnabla.ConvolutionParameter.prototype.channel_last = false; + +$root.nnabla.FusedConvolutionParameter = class FusedConvolutionParameter { + + constructor() { + this.nonlinearity_args = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.FusedConvolutionParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.dilation = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.group = reader.int64(); + break; + case 6: + message.channel_last = reader.bool(); + break; + case 7: + message.decay_rate = reader.float(); + break; + case 8: + message.eps = reader.float(); + break; + case 9: + message.batch_stat = reader.bool(); + break; + case 10: + message.nonlinearity = reader.string(); + break; + case 11: + message.nonlinearity_args = reader.floats(message.nonlinearity_args, tag); + break; + case 12: + message.pad_mode = reader.string(); + break; + case 13: + message.constant_value = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.FusedConvolutionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "dilation": + message.dilation = $root.nnabla.Shape.decodeText(reader); + break; + case "group": + message.group = reader.int64(); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + case "decay_rate": + message.decay_rate = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + case "batch_stat": + message.batch_stat = reader.bool(); + break; + case "nonlinearity": + message.nonlinearity = reader.string(); + break; + case "nonlinearity_args": + reader.array(message.nonlinearity_args, () => reader.float()); + break; + case "pad_mode": + message.pad_mode = reader.string(); + break; + case "constant_value": + message.constant_value = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.FusedConvolutionParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.FusedConvolutionParameter.prototype.pad = null; +$root.nnabla.FusedConvolutionParameter.prototype.stride = null; +$root.nnabla.FusedConvolutionParameter.prototype.dilation = null; +$root.nnabla.FusedConvolutionParameter.prototype.group = protobuf.Int64.create(0); +$root.nnabla.FusedConvolutionParameter.prototype.channel_last = false; +$root.nnabla.FusedConvolutionParameter.prototype.decay_rate = 0; +$root.nnabla.FusedConvolutionParameter.prototype.eps = 0; +$root.nnabla.FusedConvolutionParameter.prototype.batch_stat = false; +$root.nnabla.FusedConvolutionParameter.prototype.nonlinearity = ""; +$root.nnabla.FusedConvolutionParameter.prototype.pad_mode = ""; +$root.nnabla.FusedConvolutionParameter.prototype.constant_value = 0; + +$root.nnabla.DepthwiseConvolutionParameter = class DepthwiseConvolutionParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.DepthwiseConvolutionParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.dilation = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.multiplier = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.DepthwiseConvolutionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "dilation": + message.dilation = $root.nnabla.Shape.decodeText(reader); + break; + case "multiplier": + message.multiplier = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.DepthwiseConvolutionParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.DepthwiseConvolutionParameter.prototype.pad = null; +$root.nnabla.DepthwiseConvolutionParameter.prototype.stride = null; +$root.nnabla.DepthwiseConvolutionParameter.prototype.dilation = null; +$root.nnabla.DepthwiseConvolutionParameter.prototype.multiplier = protobuf.Int64.create(0); + +$root.nnabla.DeconvolutionParameter = class DeconvolutionParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.DeconvolutionParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.dilation = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.group = reader.int64(); + break; + case 6: + message.channel_last = reader.bool(); + break; + case 7: + message.output_padding = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.DeconvolutionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "dilation": + message.dilation = $root.nnabla.Shape.decodeText(reader); + break; + case "group": + message.group = reader.int64(); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + case "output_padding": + message.output_padding = $root.nnabla.Shape.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.DeconvolutionParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.DeconvolutionParameter.prototype.pad = null; +$root.nnabla.DeconvolutionParameter.prototype.stride = null; +$root.nnabla.DeconvolutionParameter.prototype.dilation = null; +$root.nnabla.DeconvolutionParameter.prototype.group = protobuf.Int64.create(0); +$root.nnabla.DeconvolutionParameter.prototype.channel_last = false; +$root.nnabla.DeconvolutionParameter.prototype.output_padding = null; + +$root.nnabla.DepthwiseDeconvolutionParameter = class DepthwiseDeconvolutionParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.DepthwiseDeconvolutionParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.dilation = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.divisor = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.DepthwiseDeconvolutionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "dilation": + message.dilation = $root.nnabla.Shape.decodeText(reader); + break; + case "divisor": + message.divisor = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.DepthwiseDeconvolutionParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.DepthwiseDeconvolutionParameter.prototype.pad = null; +$root.nnabla.DepthwiseDeconvolutionParameter.prototype.stride = null; +$root.nnabla.DepthwiseDeconvolutionParameter.prototype.dilation = null; +$root.nnabla.DepthwiseDeconvolutionParameter.prototype.divisor = protobuf.Int64.create(0); + +$root.nnabla.DeformableConvolutionParameter = class DeformableConvolutionParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.DeformableConvolutionParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.dilation = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.group = reader.int64(); + break; + case 6: + message.deformable_group = reader.int64(); + break; + case 7: + message.channel_last = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.DeformableConvolutionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "dilation": + message.dilation = $root.nnabla.Shape.decodeText(reader); + break; + case "group": + message.group = reader.int64(); + break; + case "deformable_group": + message.deformable_group = reader.int64(); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.DeformableConvolutionParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.DeformableConvolutionParameter.prototype.pad = null; +$root.nnabla.DeformableConvolutionParameter.prototype.stride = null; +$root.nnabla.DeformableConvolutionParameter.prototype.dilation = null; +$root.nnabla.DeformableConvolutionParameter.prototype.group = protobuf.Int64.create(0); +$root.nnabla.DeformableConvolutionParameter.prototype.deformable_group = protobuf.Int64.create(0); +$root.nnabla.DeformableConvolutionParameter.prototype.channel_last = false; + +$root.nnabla.MaxPoolingParameter = class MaxPoolingParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.MaxPoolingParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.kernel = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 2: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.ignore_border = reader.bool(); + break; + case 4: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.channel_last = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MaxPoolingParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "kernel": + message.kernel = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "ignore_border": + message.ignore_border = reader.bool(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MaxPoolingParameter.prototype.kernel = null; +$root.nnabla.MaxPoolingParameter.prototype.stride = null; +$root.nnabla.MaxPoolingParameter.prototype.ignore_border = false; +$root.nnabla.MaxPoolingParameter.prototype.pad = null; +$root.nnabla.MaxPoolingParameter.prototype.channel_last = false; + +$root.nnabla.AveragePoolingParameter = class AveragePoolingParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AveragePoolingParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.kernel = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 2: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.ignore_border = reader.bool(); + break; + case 4: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.channel_last = reader.bool(); + break; + case 6: + message.including_pad = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AveragePoolingParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "kernel": + message.kernel = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "ignore_border": + message.ignore_border = reader.bool(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + case "including_pad": + message.including_pad = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AveragePoolingParameter.prototype.kernel = null; +$root.nnabla.AveragePoolingParameter.prototype.stride = null; +$root.nnabla.AveragePoolingParameter.prototype.ignore_border = false; +$root.nnabla.AveragePoolingParameter.prototype.pad = null; +$root.nnabla.AveragePoolingParameter.prototype.channel_last = false; +$root.nnabla.AveragePoolingParameter.prototype.including_pad = false; + +$root.nnabla.SumPoolingParameter = class SumPoolingParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SumPoolingParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.kernel = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 2: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.ignore_border = reader.bool(); + break; + case 4: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.channel_last = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SumPoolingParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "kernel": + message.kernel = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "ignore_border": + message.ignore_border = reader.bool(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SumPoolingParameter.prototype.kernel = null; +$root.nnabla.SumPoolingParameter.prototype.stride = null; +$root.nnabla.SumPoolingParameter.prototype.ignore_border = false; +$root.nnabla.SumPoolingParameter.prototype.pad = null; +$root.nnabla.SumPoolingParameter.prototype.channel_last = false; + +$root.nnabla.UnpoolingParameter = class UnpoolingParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.UnpoolingParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.kernel = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 2: + message.channel_last = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.UnpoolingParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "kernel": + message.kernel = $root.nnabla.Shape.decodeText(reader); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.UnpoolingParameter.prototype.kernel = null; +$root.nnabla.UnpoolingParameter.prototype.channel_last = false; + +$root.nnabla.RoiAlignParameter = class RoiAlignParameter { + + constructor() { + this.spatial_scale = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.RoiAlignParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.output_size = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 2: + message.spatial_scale = reader.floats(message.spatial_scale, tag); + break; + case 3: + message.sampling_ratio = reader.int64(); + break; + case 4: + message.channel_last = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RoiAlignParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "output_size": + message.output_size = $root.nnabla.Shape.decodeText(reader); + break; + case "spatial_scale": + reader.array(message.spatial_scale, () => reader.float()); + break; + case "sampling_ratio": + message.sampling_ratio = reader.int64(); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RoiAlignParameter.prototype.output_size = null; +$root.nnabla.RoiAlignParameter.prototype.sampling_ratio = protobuf.Int64.create(0); +$root.nnabla.RoiAlignParameter.prototype.channel_last = false; + +$root.nnabla.ReLUParameter = class ReLUParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ReLUParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ReLUParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ReLUParameter.prototype.inplace = false; + +$root.nnabla.LeakyReLUParameter = class LeakyReLUParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LeakyReLUParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LeakyReLUParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LeakyReLUParameter.prototype.alpha = 0; +$root.nnabla.LeakyReLUParameter.prototype.inplace = false; + +$root.nnabla.SoftmaxParameter = class SoftmaxParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SoftmaxParameter(); + const end = length !== undefined ? 
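To see one of these decoders end to end, here is a hedged sketch (again, an aside rather than patch content) that feeds `$root.nnabla.ReLUParameter.decode` a hand-made stub. The stub is not the project's reader class; it only implements the small interface this decode() touches (`position`, `length`, `uint32()`, `bool()`), and its `uint32()` handles single-byte varints only:

    // Bytes: key 0x08 = field 1 ("inplace"), wire type 0; value 0x01 = true.
    const stub = {
        buffer: [0x08, 0x01],
        position: 0,
        length: 2,
        uint32() { return this.buffer[this.position++]; }, // toy: one-byte varints only
        bool() { return this.buffer[this.position++] !== 0; },
        skipType() { throw new Error("unexpected field"); },
    };
    const relu = $root.nnabla.ReLUParameter.decode(stub); // length undefined -> read to reader.length
    console.log(relu.inplace); // true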
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SoftmaxParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SoftmaxParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.LogSoftmaxParameter = class LogSoftmaxParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LogSoftmaxParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LogSoftmaxParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LogSoftmaxParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.ELUParameter = class ELUParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ELUParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ELUParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ELUParameter.prototype.alpha = 0; + +$root.nnabla.SELUParameter = class SELUParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SELUParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.scale = reader.double(); + break; + case 2: + message.alpha = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SELUParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "scale": + message.scale = reader.double(); + break; + case "alpha": + message.alpha = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SELUParameter.prototype.scale = 0; +$root.nnabla.SELUParameter.prototype.alpha = 0; + +$root.nnabla.CReLUParameter = class CReLUParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.CReLUParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.CReLUParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.CReLUParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.CELUParameter = class CELUParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.CELUParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.double(); + break; + case 2: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.CELUParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.double(); + break; + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.CELUParameter.prototype.alpha = 0; +$root.nnabla.CELUParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.PReLUParameter = class PReLUParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.PReLUParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.PReLUParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.PReLUParameter.prototype.base_axis = protobuf.Int64.create(0); + +$root.nnabla.SoftPlusParameter = class SoftPlusParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SoftPlusParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.beta = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SoftPlusParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "beta": + message.beta = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SoftPlusParameter.prototype.beta = 0; + +$root.nnabla.FusedBatchNormalizationParameter = class FusedBatchNormalizationParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.FusedBatchNormalizationParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.decay_rate = reader.float(); + break; + case 3: + message.eps = reader.float(); + break; + case 4: + message.batch_stat = reader.bool(); + break; + case 5: + message.nonlinearity = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.FusedBatchNormalizationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "decay_rate": + message.decay_rate = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + case "batch_stat": + message.batch_stat = reader.bool(); + break; + case "nonlinearity": + message.nonlinearity = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.FusedBatchNormalizationParameter.prototype.decay_rate = 0; +$root.nnabla.FusedBatchNormalizationParameter.prototype.eps = 0; +$root.nnabla.FusedBatchNormalizationParameter.prototype.batch_stat = false; +$root.nnabla.FusedBatchNormalizationParameter.prototype.nonlinearity = ""; + +$root.nnabla.BatchNormalizationParameter = class BatchNormalizationParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.BatchNormalizationParameter(); + const end = length !== undefined ? 
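The repeated fields above (for example `axes`) are initialized once in the constructor and then extended by `reader.array(...)` each time the field appears on the wire. An illustrative toy helper (not the project's `reader.array`) shows why appending, rather than assigning, is the right move:

    // A packed occurrence carries several values, and the same field may occur
    // again later in the stream; both cases accumulate into the existing array.
    function appendValues(target, readValue, count) {
        for (let i = 0; i < count; i++) { target.push(readValue()); }
        return target;
    }
    const axes = [];                // as in `this.axes = []` in the constructors above
    appendValues(axes, () => 1, 2); // first packed run       -> [1, 1]
    appendValues(axes, () => 3, 1); // field occurs once more -> [1, 1, 3]
    console.log(axes);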
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.decay_rate = reader.float(); + break; + case 3: + message.eps = reader.float(); + break; + case 4: + message.batch_stat = reader.bool(); + break; + case 5: + message.no_scale = reader.bool(); + break; + case 6: + message.no_bias = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BatchNormalizationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "decay_rate": + message.decay_rate = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + case "batch_stat": + message.batch_stat = reader.bool(); + break; + case "no_scale": + message.no_scale = reader.bool(); + break; + case "no_bias": + message.no_bias = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BatchNormalizationParameter.prototype.decay_rate = 0; +$root.nnabla.BatchNormalizationParameter.prototype.eps = 0; +$root.nnabla.BatchNormalizationParameter.prototype.batch_stat = false; +$root.nnabla.BatchNormalizationParameter.prototype.no_scale = false; +$root.nnabla.BatchNormalizationParameter.prototype.no_bias = false; + +$root.nnabla.GroupNormalizationParameter = class GroupNormalizationParameter { + + constructor() { + this.batch_axis = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.GroupNormalizationParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_groups = reader.int64(); + break; + case 2: + message.channel_axis = reader.int64(); + break; + case 3: + message.batch_axis = reader.array(message.batch_axis, () => reader.int64(), tag); + break; + case 4: + message.eps = reader.float(); + break; + case 5: + message.no_scale = reader.bool(); + break; + case 6: + message.no_bias = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.GroupNormalizationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "num_groups": + message.num_groups = reader.int64(); + break; + case "channel_axis": + message.channel_axis = reader.int64(); + break; + case "batch_axis": + reader.array(message.batch_axis, () => reader.int64()); + break; + case "eps": + message.eps = reader.float(); + break; + case "no_scale": + message.no_scale = reader.bool(); + break; + case "no_bias": + message.no_bias = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.GroupNormalizationParameter.prototype.num_groups = protobuf.Int64.create(0); +$root.nnabla.GroupNormalizationParameter.prototype.channel_axis = protobuf.Int64.create(0); +$root.nnabla.GroupNormalizationParameter.prototype.eps = 0; +$root.nnabla.GroupNormalizationParameter.prototype.no_scale = false; +$root.nnabla.GroupNormalizationParameter.prototype.no_bias = false; + +$root.nnabla.InstanceNormalizationParameter = class InstanceNormalizationParameter { + + constructor() { + this.batch_axis = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.InstanceNormalizationParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.channel_axis = reader.int64(); + break; + case 2: + message.batch_axis = reader.array(message.batch_axis, () => reader.int64(), tag); + break; + case 3: + message.eps = reader.float(); + break; + case 4: + message.no_scale = reader.bool(); + break; + case 5: + message.no_bias = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.InstanceNormalizationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "channel_axis": + message.channel_axis = reader.int64(); + break; + case "batch_axis": + reader.array(message.batch_axis, () => reader.int64()); + break; + case "eps": + message.eps = reader.float(); + break; + case "no_scale": + message.no_scale = reader.bool(); + break; + case "no_bias": + message.no_bias = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.InstanceNormalizationParameter.prototype.channel_axis = protobuf.Int64.create(0); +$root.nnabla.InstanceNormalizationParameter.prototype.eps = 0; +$root.nnabla.InstanceNormalizationParameter.prototype.no_scale = false; +$root.nnabla.InstanceNormalizationParameter.prototype.no_bias = false; + +$root.nnabla.LayerNormalizationParameter = class LayerNormalizationParameter { + + constructor() { + this.batch_axis = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.LayerNormalizationParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.batch_axis = reader.array(message.batch_axis, () => reader.int64(), tag); + break; + case 2: + message.eps = reader.float(); + break; + case 3: + message.no_scale = reader.bool(); + break; + case 4: + message.no_bias = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LayerNormalizationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "batch_axis": + reader.array(message.batch_axis, () => reader.int64()); + break; + case "eps": + message.eps = reader.float(); + break; + case "no_scale": + message.no_scale = reader.bool(); + break; + case "no_bias": + message.no_bias = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LayerNormalizationParameter.prototype.eps = 0; +$root.nnabla.LayerNormalizationParameter.prototype.no_scale = false; +$root.nnabla.LayerNormalizationParameter.prototype.no_bias = false; + +$root.nnabla.NormNormalizationParameter = class NormNormalizationParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.NormNormalizationParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.p = reader.float(); + break; + case 2: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 3: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.NormNormalizationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "p": + message.p = reader.float(); + break; + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.NormNormalizationParameter.prototype.p = 0; +$root.nnabla.NormNormalizationParameter.prototype.eps = 0; + +$root.nnabla.SyncBatchNormalizationParameter = class SyncBatchNormalizationParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.SyncBatchNormalizationParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.comm = $root.nnabla.Communicator.decode(reader, reader.uint32()); + break; + case 2: + message.group = reader.string(); + break; + case 3: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 4: + message.decay_rate = reader.float(); + break; + case 5: + message.eps = reader.float(); + break; + case 6: + message.batch_stat = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SyncBatchNormalizationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "comm": + message.comm = $root.nnabla.Communicator.decodeText(reader); + break; + case "group": + message.group = reader.string(); + break; + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "decay_rate": + message.decay_rate = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + case "batch_stat": + message.batch_stat = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SyncBatchNormalizationParameter.prototype.comm = null; +$root.nnabla.SyncBatchNormalizationParameter.prototype.group = ""; +$root.nnabla.SyncBatchNormalizationParameter.prototype.decay_rate = 0; +$root.nnabla.SyncBatchNormalizationParameter.prototype.eps = 0; +$root.nnabla.SyncBatchNormalizationParameter.prototype.batch_stat = false; + +$root.nnabla.TensorNormalizationParameter = class TensorNormalizationParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.TensorNormalizationParameter(); + const end = length !== undefined ? 
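Nested messages such as `SyncBatchNormalizationParameter.comm` are decoded with `$root.nnabla.Communicator.decode(reader, reader.uint32())`: the inner `reader.uint32()` reads the length prefix, and the callee bounds itself with `end = reader.position + length`. A self-contained toy version of that framing:

    // [len=2][2 submessage bytes][first byte of the next outer field]
    const buffer = [0x02, 0x08, 0x01, 0x10];
    let position = 0;
    const length = buffer[position++];     // what reader.uint32() hands to decode()
    const end = position + length;         // mirrors `end = reader.position + length`
    while (position < end) { position++; } // the submessage consumes bytes 1..2 only
    console.log(position);                 // 3 -> the outer decoder resumes at 0x10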
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.eps = reader.float(); + break; + case 3: + message.no_scale = reader.bool(); + break; + case 4: + message.no_bias = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.TensorNormalizationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "eps": + message.eps = reader.float(); + break; + case "no_scale": + message.no_scale = reader.bool(); + break; + case "no_bias": + message.no_bias = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.TensorNormalizationParameter.prototype.eps = 0; +$root.nnabla.TensorNormalizationParameter.prototype.no_scale = false; +$root.nnabla.TensorNormalizationParameter.prototype.no_bias = false; + +$root.nnabla.WeightNormalizationParameter = class WeightNormalizationParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.WeightNormalizationParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dim = reader.int64(); + break; + case 2: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.WeightNormalizationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dim": + message.dim = reader.int64(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.WeightNormalizationParameter.prototype.dim = protobuf.Int64.create(0); +$root.nnabla.WeightNormalizationParameter.prototype.eps = 0; + +$root.nnabla.WeightStandardizationParameter = class WeightStandardizationParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.WeightStandardizationParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.channel_axis = reader.int64(); + break; + case 2: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.WeightStandardizationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "channel_axis": + message.channel_axis = reader.int64(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.WeightStandardizationParameter.prototype.channel_axis = protobuf.Int64.create(0); +$root.nnabla.WeightStandardizationParameter.prototype.eps = 0; + +$root.nnabla.SpectralNormParameter = class SpectralNormParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SpectralNormParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dim = reader.int64(); + break; + case 2: + message.itr = reader.int64(); + break; + case 3: + message.eps = reader.float(); + break; + case 4: + message.test = reader.bool(); + break; + case 5: + message.output_u = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SpectralNormParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dim": + message.dim = reader.int64(); + break; + case "itr": + message.itr = reader.int64(); + break; + case "eps": + message.eps = reader.float(); + break; + case "test": + message.test = reader.bool(); + break; + case "output_u": + message.output_u = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SpectralNormParameter.prototype.dim = protobuf.Int64.create(0); +$root.nnabla.SpectralNormParameter.prototype.itr = protobuf.Int64.create(0); +$root.nnabla.SpectralNormParameter.prototype.eps = 0; +$root.nnabla.SpectralNormParameter.prototype.test = false; +$root.nnabla.SpectralNormParameter.prototype.output_u = false; + +$root.nnabla.MeanSubtractionParameter = class MeanSubtractionParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.MeanSubtractionParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.update_running_mean = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MeanSubtractionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "update_running_mean": + message.update_running_mean = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MeanSubtractionParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.MeanSubtractionParameter.prototype.update_running_mean = false; + +$root.nnabla.ClipGradByNormParameter = class ClipGradByNormParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.ClipGradByNormParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clip_norm = reader.float(); + break; + case 2: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ClipGradByNormParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "clip_norm": + message.clip_norm = reader.float(); + break; + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ClipGradByNormParameter.prototype.clip_norm = 0; + +$root.nnabla.SumParameter = class SumParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.SumParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keep_dims = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SumParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keep_dims": + message.keep_dims = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SumParameter.prototype.keep_dims = false; + +$root.nnabla.CumSumParameter = class CumSumParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.CumSumParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.exclusive = reader.bool(); + break; + case 3: + message.reverse = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.CumSumParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "exclusive": + message.exclusive = reader.bool(); + break; + case "reverse": + message.reverse = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.CumSumParameter.prototype.axis = protobuf.Int64.create(0); +$root.nnabla.CumSumParameter.prototype.exclusive = false; +$root.nnabla.CumSumParameter.prototype.reverse = false; + +$root.nnabla.MeanParameter = class MeanParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.MeanParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keep_dims = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MeanParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keep_dims": + message.keep_dims = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MeanParameter.prototype.keep_dims = false; + +$root.nnabla.MaxParameter = class MaxParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.MaxParameter(); + const end = length !== undefined ? 
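Note that the defaults (`keep_dims`, `axis`, ...) are assigned to the class prototypes rather than in the constructors. A small sketch with a hypothetical `Example` class shows what that buys: a freshly decoded message stores nothing until a field is actually read from the stream, yet property access still yields the protobuf default:

    class Example {}
    Example.prototype.keep_dims = false;             // default lives on the prototype
    const m = new Example();
    console.log(m.keep_dims, Object.keys(m).length); // false 0 -- nothing on the instance
    m.keep_dims = true;                              // decoding a field shadows the default
    console.log(m.keep_dims, Object.keys(m).length); // true 1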
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keep_dims = reader.bool(); + break; + case 3: + message.with_index = reader.bool(); + break; + case 4: + message.only_index = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MaxParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keep_dims": + message.keep_dims = reader.bool(); + break; + case "with_index": + message.with_index = reader.bool(); + break; + case "only_index": + message.only_index = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MaxParameter.prototype.keep_dims = false; +$root.nnabla.MaxParameter.prototype.with_index = false; +$root.nnabla.MaxParameter.prototype.only_index = false; + +$root.nnabla.MinParameter = class MinParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.MinParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keep_dims = reader.bool(); + break; + case 3: + message.with_index = reader.bool(); + break; + case 4: + message.only_index = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MinParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keep_dims": + message.keep_dims = reader.bool(); + break; + case "with_index": + message.with_index = reader.bool(); + break; + case "only_index": + message.only_index = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MinParameter.prototype.keep_dims = false; +$root.nnabla.MinParameter.prototype.with_index = false; +$root.nnabla.MinParameter.prototype.only_index = false; + +$root.nnabla.NormParameter = class NormParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.NormParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.p = reader.float(); + break; + case 2: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 3: + message.keep_dims = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.NormParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "p": + message.p = reader.float(); + break; + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keep_dims": + message.keep_dims = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.NormParameter.prototype.p = 0; +$root.nnabla.NormParameter.prototype.keep_dims = false; + +$root.nnabla.ProdParameter = class ProdParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.ProdParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.keep_dims = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ProdParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "keep_dims": + message.keep_dims = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ProdParameter.prototype.keep_dims = false; + +$root.nnabla.CumProdParameter = class CumProdParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.CumProdParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.exclusive = reader.bool(); + break; + case 3: + message.reverse = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.CumProdParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "exclusive": + message.exclusive = reader.bool(); + break; + case "reverse": + message.reverse = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.CumProdParameter.prototype.axis = protobuf.Int64.create(0); +$root.nnabla.CumProdParameter.prototype.exclusive = false; +$root.nnabla.CumProdParameter.prototype.reverse = false; + +$root.nnabla.Add2Parameter = class Add2Parameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.Add2Parameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Add2Parameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Add2Parameter.prototype.inplace = false; + +$root.nnabla.BcAdd2Parameter = class BcAdd2Parameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.BcAdd2Parameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BcAdd2Parameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BcAdd2Parameter.prototype.inplace = false; + +$root.nnabla.Sub2Parameter = class Sub2Parameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.Sub2Parameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Sub2Parameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Sub2Parameter.prototype.inplace = false; + +$root.nnabla.Mul2Parameter = class Mul2Parameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.Mul2Parameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Mul2Parameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Mul2Parameter.prototype.inplace = false; + +$root.nnabla.Div2Parameter = class Div2Parameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.Div2Parameter(); + const end = length !== undefined ? 
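Every switch ends with `reader.skipType(tag & 7)`, which keeps old decoders forward-compatible: a field added to the .proto later is skipped rather than fatal. A simplified sketch of what such a skip must compute (real varints are multi-byte, and `readLength` is a hypothetical stand-in for reading the length prefix):

    function bytesToSkip(wireType, readLength) {
        switch (wireType) {
            case 0: return 1;            // varint (one byte in this toy version)
            case 1: return 8;            // fixed64, e.g. the double-typed `val` fields
            case 2: return readLength(); // length-delimited: the prefix says how many
            case 5: return 4;            // fixed32, e.g. float fields like `alpha`
            default: throw new Error(`unsupported wire type ${wireType}`);
        }
    }
    console.log(bytesToSkip(1)); // 8 -> an unknown double field is skipped wholesale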
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Div2Parameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Div2Parameter.prototype.inplace = false; + +$root.nnabla.Pow2Parameter = class Pow2Parameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.Pow2Parameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Pow2Parameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Pow2Parameter.prototype.inplace = false; + +$root.nnabla.AddScalarParameter = class AddScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.AddScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + case 2: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AddScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AddScalarParameter.prototype.val = 0; +$root.nnabla.AddScalarParameter.prototype.inplace = false; + +$root.nnabla.MulScalarParameter = class MulScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.MulScalarParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + case 2: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MulScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MulScalarParameter.prototype.val = 0; +$root.nnabla.MulScalarParameter.prototype.inplace = false; + +$root.nnabla.PowScalarParameter = class PowScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.PowScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + case 2: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.PowScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.PowScalarParameter.prototype.val = 0; +$root.nnabla.PowScalarParameter.prototype.inplace = false; + +$root.nnabla.RSubScalarParameter = class RSubScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RSubScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RSubScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RSubScalarParameter.prototype.val = 0; + +$root.nnabla.RDivScalarParameter = class RDivScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RDivScalarParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RDivScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RDivScalarParameter.prototype.val = 0; + +$root.nnabla.RPowScalarParameter = class RPowScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RPowScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RPowScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RPowScalarParameter.prototype.val = 0; + +$root.nnabla.SignParameter = class SignParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SignParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SignParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SignParameter.prototype.alpha = 0; + +$root.nnabla.MinimumScalarParameter = class MinimumScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.MinimumScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MinimumScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MinimumScalarParameter.prototype.val = 0; + +$root.nnabla.MaximumScalarParameter = class MaximumScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.MaximumScalarParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MaximumScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MaximumScalarParameter.prototype.val = 0; + +$root.nnabla.SearchSortedParameter = class SearchSortedParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SearchSortedParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.right = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SearchSortedParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "right": + message.right = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SearchSortedParameter.prototype.right = false; + +$root.nnabla.LogicalAndScalarParameter = class LogicalAndScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LogicalAndScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LogicalAndScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LogicalAndScalarParameter.prototype.val = false; + +$root.nnabla.LogicalOrScalarParameter = class LogicalOrScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LogicalOrScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LogicalOrScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LogicalOrScalarParameter.prototype.val = false; + +$root.nnabla.LogicalXorScalarParameter = class LogicalXorScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LogicalXorScalarParameter(); + const end = length !== undefined ? 
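The decodeText() methods mirror the binary decoders but dispatch on field names from the text protobuf format, deferring unknown names to `reader.field(tag, message)`. A toy name-based dispatcher in the same shape (the `readers` argument is a stand-in for the project's text reader, not its actual API):

    function decodeTextField(message, name, readers) {
        switch (name) {
            case "val": message.val = readers.bool(); break;
            default: break; // the generated code hands unknown names to reader.field()
        }
        return message;
    }
    console.log(decodeTextField({}, "val", { bool: () => true })); // { val: true }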
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LogicalXorScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LogicalXorScalarParameter.prototype.val = false; + +$root.nnabla.EqualScalarParameter = class EqualScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.EqualScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.EqualScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.EqualScalarParameter.prototype.val = 0; + +$root.nnabla.NotEqualScalarParameter = class NotEqualScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.NotEqualScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.NotEqualScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.NotEqualScalarParameter.prototype.val = 0; + +$root.nnabla.GreaterEqualScalarParameter = class GreaterEqualScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.GreaterEqualScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.GreaterEqualScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.GreaterEqualScalarParameter.prototype.val = 0; + +$root.nnabla.GreaterScalarParameter = class GreaterScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.GreaterScalarParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.GreaterScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.GreaterScalarParameter.prototype.val = 0; + +$root.nnabla.LessEqualScalarParameter = class LessEqualScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LessEqualScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LessEqualScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LessEqualScalarParameter.prototype.val = 0; + +$root.nnabla.LessScalarParameter = class LessScalarParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LessScalarParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LessScalarParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LessScalarParameter.prototype.val = 0; + +$root.nnabla.ResetNaNParameter = class ResetNaNParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ResetNaNParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ResetNaNParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ResetNaNParameter.prototype.val = 0; + +$root.nnabla.ResetInfParameter = class ResetInfParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ResetInfParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ResetInfParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ResetInfParameter.prototype.val = 0; + +$root.nnabla.ConstantParameter = class ConstantParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ConstantParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.float(); + break; + case 2: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ConstantParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val = reader.float(); + break; + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ConstantParameter.prototype.val = 0; +$root.nnabla.ConstantParameter.prototype.shape = null; + +$root.nnabla.ArangeParameter = class ArangeParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ArangeParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.start = reader.float(); + break; + case 2: + message.stop = reader.float(); + break; + case 3: + message.step = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ArangeParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "start": + message.start = reader.float(); + break; + case "stop": + message.stop = reader.float(); + break; + case "step": + message.step = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ArangeParameter.prototype.start = 0; +$root.nnabla.ArangeParameter.prototype.stop = 0; +$root.nnabla.ArangeParameter.prototype.step = 0; + +$root.nnabla.LinspaceParameter = class LinspaceParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.LinspaceParameter(); + const end = length !== undefined ? 
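+ // Nested messages (e.g. ConstantParameter.shape above) are decoded recursively via
+ // `$root.nnabla.Shape.decode(reader, reader.uint32())`: the leading uint32 varint is
+ // the submessage byte length that bounds the inner decode loop.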
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.start = reader.float(); + break; + case 2: + message.stop = reader.float(); + break; + case 3: + message.num = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.LinspaceParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "start": + message.start = reader.float(); + break; + case "stop": + message.stop = reader.float(); + break; + case "num": + message.num = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.LinspaceParameter.prototype.start = 0; +$root.nnabla.LinspaceParameter.prototype.stop = 0; +$root.nnabla.LinspaceParameter.prototype.num = protobuf.Int64.create(0); + +$root.nnabla.BatchMatmulParameter = class BatchMatmulParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.BatchMatmulParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.transpose_a = reader.bool(); + break; + case 2: + message.transpose_b = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BatchMatmulParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "transpose_a": + message.transpose_a = reader.bool(); + break; + case "transpose_b": + message.transpose_b = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BatchMatmulParameter.prototype.transpose_a = false; +$root.nnabla.BatchMatmulParameter.prototype.transpose_b = false; + +$root.nnabla.RoundParameter = class RoundParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RoundParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RoundParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.CeilParameter = class CeilParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.CeilParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.CeilParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.FloorParameter = class FloorParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.FloorParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.FloorParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ConcatenateParameter = class ConcatenateParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ConcatenateParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ConcatenateParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ConcatenateParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.SplitParameter = class SplitParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SplitParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SplitParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SplitParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.StackParameter = class StackParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.StackParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.StackParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.StackParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.SliceParameter = class SliceParameter { + + constructor() { + this.start = []; + this.stop = []; + this.step = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.SliceParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.start = reader.array(message.start, () => reader.int64(), tag); + break; + case 2: + message.stop = reader.array(message.stop, () => reader.int64(), tag); + break; + case 3: + message.step = reader.array(message.step, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SliceParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "start": + reader.array(message.start, () => reader.int64()); + break; + case "stop": + reader.array(message.stop, () => reader.int64()); + break; + case "step": + reader.array(message.step, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.PadParameter = class PadParameter { + + constructor() { + this.pad_width = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.PadParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pad_width = reader.array(message.pad_width, () => reader.int64(), tag); + break; + case 2: + message.mode = reader.string(); + break; + case 3: + message.constant_value = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.PadParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "pad_width": + reader.array(message.pad_width, () => reader.int64()); + break; + case "mode": + message.mode = reader.string(); + break; + case "constant_value": + message.constant_value = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.PadParameter.prototype.mode = ""; +$root.nnabla.PadParameter.prototype.constant_value = 0; + +$root.nnabla.TransposeParameter = class TransposeParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.TransposeParameter(); + const end = length !== undefined ? 
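+ // Repeated int64 fields (SliceParameter.start/stop/step, PadParameter.pad_width, and
+ // `axes` here) go through `reader.array(target, fn, tag)`, which appears to accept
+ // both packed and non-packed wire encodings of the same field.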
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.TransposeParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BroadcastParameter = class BroadcastParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.BroadcastParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BroadcastParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BroadcastParameter.prototype.shape = null; + +$root.nnabla.BroadcastToParameter = class BroadcastToParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.BroadcastToParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BroadcastToParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BroadcastToParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.TileParameter = class TileParameter { + + constructor() { + this.reps = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.TileParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.reps = reader.array(message.reps, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.TileParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "reps": + reader.array(message.reps, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.OneHotParameter = class OneHotParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.OneHotParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.OneHotParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.OneHotParameter.prototype.shape = null; + +$root.nnabla.FlipParameter = class FlipParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.FlipParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.FlipParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ShiftParameter = class ShiftParameter { + + constructor() { + this.shifts = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.ShiftParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shifts = reader.array(message.shifts, () => reader.int64(), tag); + break; + case 2: + message.border_mode = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ShiftParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shifts": + reader.array(message.shifts, () => reader.int64()); + break; + case "border_mode": + message.border_mode = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ShiftParameter.prototype.border_mode = ""; + +$root.nnabla.SortParameter = class SortParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SortParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.reverse = reader.bool(); + break; + case 3: + message.with_index = reader.bool(); + break; + case 4: + message.only_index = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SortParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "reverse": + message.reverse = reader.bool(); + break; + case "with_index": + message.with_index = reader.bool(); + break; + case "only_index": + message.only_index = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SortParameter.prototype.axis = protobuf.Int64.create(0); +$root.nnabla.SortParameter.prototype.reverse = false; +$root.nnabla.SortParameter.prototype.with_index = false; +$root.nnabla.SortParameter.prototype.only_index = false; + +$root.nnabla.ReshapeParameter = class ReshapeParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ReshapeParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 2: + message.inplace = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ReshapeParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "inplace": + message.inplace = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ReshapeParameter.prototype.shape = null; +$root.nnabla.ReshapeParameter.prototype.inplace = false; + +$root.nnabla.ShapeParameter = class ShapeParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ShapeParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.start = reader.int64(); + break; + case 2: + message.end = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ShapeParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "start": + message.start = reader.int64(); + break; + case "end": + message.end = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ShapeParameter.prototype.start = protobuf.Int64.create(0); +$root.nnabla.ShapeParameter.prototype.end = protobuf.Int64.create(0); + +$root.nnabla.TriluParameter = class TriluParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.TriluParameter(); + const end = length !== undefined ? 
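+ // Scalar defaults sit on the class prototype (e.g. SortParameter.prototype.axis
+ // above), so absent proto3 fields add no per-instance cost; 64-bit fields default to
+ // protobuf.Int64.create(0) to keep integer precision beyond Number.MAX_SAFE_INTEGER.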
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.k = reader.int64(); + break; + case 2: + message.upper = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.TriluParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "k": + message.k = reader.int64(); + break; + case "upper": + message.upper = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.TriluParameter.prototype.k = protobuf.Int64.create(0); +$root.nnabla.TriluParameter.prototype.upper = false; + +$root.nnabla.MeshgridParameter = class MeshgridParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.MeshgridParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.ij_indexing = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MeshgridParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "ij_indexing": + message.ij_indexing = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MeshgridParameter.prototype.ij_indexing = false; + +$root.nnabla.BatchCholeskyParameter = class BatchCholeskyParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.BatchCholeskyParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.upper = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BatchCholeskyParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "upper": + message.upper = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BatchCholeskyParameter.prototype.upper = false; + +$root.nnabla.GatherParameter = class GatherParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.GatherParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.batch_dims = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.GatherParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "batch_dims": + message.batch_dims = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.GatherParameter.prototype.axis = protobuf.Int64.create(0); +$root.nnabla.GatherParameter.prototype.batch_dims = protobuf.Int64.create(0); + +$root.nnabla.ScatterNdParameter = class ScatterNdParameter { + + constructor() { + this.shape = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.ScatterNdParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = reader.array(message.shape, () => reader.int64(), tag); + break; + case 2: + message.add = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ScatterNdParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + reader.array(message.shape, () => reader.int64()); + break; + case "add": + message.add = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ScatterNdParameter.prototype.add = false; + +$root.nnabla.ScatterAddParameter = class ScatterAddParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ScatterAddParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ScatterAddParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ScatterAddParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.BoolFillParameter = class BoolFillParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.BoolFillParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BoolFillParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + message.value = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BoolFillParameter.prototype.value = 0; + +$root.nnabla.PackPaddedSequenceParameter = class PackPaddedSequenceParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.PackPaddedSequenceParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.batch_first = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.PackPaddedSequenceParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "batch_first": + message.batch_first = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.PackPaddedSequenceParameter.prototype.batch_first = false; + +$root.nnabla.PadPackedSequenceParameter = class PadPackedSequenceParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.PadPackedSequenceParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.batch_first = reader.bool(); + break; + case 2: + message.padding_value = reader.float(); + break; + case 3: + message.total_length = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.PadPackedSequenceParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "batch_first": + message.batch_first = reader.bool(); + break; + case "padding_value": + message.padding_value = reader.float(); + break; + case "total_length": + message.total_length = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.PadPackedSequenceParameter.prototype.batch_first = false; +$root.nnabla.PadPackedSequenceParameter.prototype.padding_value = 0; +$root.nnabla.PadPackedSequenceParameter.prototype.total_length = protobuf.Int64.create(0); + +$root.nnabla.InterpolateParameter = class InterpolateParameter { + + constructor() { + this.output_size = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.InterpolateParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.output_size = reader.array(message.output_size, () => reader.int64(), tag); + break; + case 2: + message.mode = reader.string(); + break; + case 3: + message.align_corners = reader.bool(); + break; + case 4: + message.half_pixel = reader.bool(); + break; + case 5: + message.half_pixel_for_nn = reader.bool(); + break; + case 6: + message.channel_last = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.InterpolateParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "output_size": + reader.array(message.output_size, () => reader.int64()); + break; + case "mode": + message.mode = reader.string(); + break; + case "align_corners": + message.align_corners = reader.bool(); + break; + case "half_pixel": + message.half_pixel = reader.bool(); + break; + case "half_pixel_for_nn": + message.half_pixel_for_nn = reader.bool(); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.InterpolateParameter.prototype.mode = ""; +$root.nnabla.InterpolateParameter.prototype.align_corners = false; +$root.nnabla.InterpolateParameter.prototype.half_pixel = false; +$root.nnabla.InterpolateParameter.prototype.half_pixel_for_nn = false; +$root.nnabla.InterpolateParameter.prototype.channel_last = false; + +$root.nnabla.ONNXResizeParameter = class ONNXResizeParameter { + + constructor() { + this.roi = []; + this.scales = []; + this.sizes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.ONNXResizeParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.roi = reader.floats(message.roi, tag); + break; + case 2: + message.scales = reader.floats(message.scales, tag); + break; + case 3: + message.sizes = reader.array(message.sizes, () => reader.int64(), tag); + break; + case 4: + message.mode = reader.string(); + break; + case 5: + message.coordinate_transformation_mode = reader.string(); + break; + case 6: + message.cubic_coeff_a = reader.float(); + break; + case 7: + message.exclude_outside = reader.int64(); + break; + case 8: + message.extrapolation_value = reader.float(); + break; + case 9: + message.nearest_mode = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ONNXResizeParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "roi": + reader.array(message.roi, () => reader.float()); + break; + case "scales": + reader.array(message.scales, () => reader.float()); + break; + case "sizes": + reader.array(message.sizes, () => reader.int64()); + break; + case "mode": + message.mode = reader.string(); + break; + case "coordinate_transformation_mode": + message.coordinate_transformation_mode = reader.string(); + break; + case "cubic_coeff_a": + message.cubic_coeff_a = reader.float(); + break; + case "exclude_outside": + message.exclude_outside = reader.int64(); + break; + case "extrapolation_value": + message.extrapolation_value = reader.float(); + break; + case "nearest_mode": + message.nearest_mode = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ONNXResizeParameter.prototype.mode = ""; +$root.nnabla.ONNXResizeParameter.prototype.coordinate_transformation_mode = ""; +$root.nnabla.ONNXResizeParameter.prototype.cubic_coeff_a = 0; +$root.nnabla.ONNXResizeParameter.prototype.exclude_outside = protobuf.Int64.create(0); +$root.nnabla.ONNXResizeParameter.prototype.extrapolation_value = 0; +$root.nnabla.ONNXResizeParameter.prototype.nearest_mode = ""; + +$root.nnabla.FFTParameter = class FFTParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.FFTParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.signal_ndim = reader.int64(); + break; + case 2: + message.normalized = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.FFTParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "signal_ndim": + message.signal_ndim = reader.int64(); + break; + case "normalized": + message.normalized = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.FFTParameter.prototype.signal_ndim = protobuf.Int64.create(0); +$root.nnabla.FFTParameter.prototype.normalized = false; + +$root.nnabla.IFFTParameter = class IFFTParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.IFFTParameter(); + const end = length !== undefined ? 
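+ // Packed float arrays (ONNXResizeParameter.roi/scales above) use the specialized
+ // `reader.floats(target, tag)` helper; the text decoder rebuilds the same lists
+ // element by element with `reader.array(..., () => reader.float())`.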
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.signal_ndim = reader.int64(); + break; + case 2: + message.normalized = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.IFFTParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "signal_ndim": + message.signal_ndim = reader.int64(); + break; + case "normalized": + message.normalized = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.IFFTParameter.prototype.signal_ndim = protobuf.Int64.create(0); +$root.nnabla.IFFTParameter.prototype.normalized = false; + +$root.nnabla.STFTParameter = class STFTParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.STFTParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.window_size = reader.int64(); + break; + case 2: + message.stride = reader.int64(); + break; + case 3: + message.fft_size = reader.int64(); + break; + case 4: + message.window_type = reader.string(); + break; + case 5: + message.center = reader.bool(); + break; + case 6: + message.pad_mode = reader.string(); + break; + case 7: + message.as_istft_backward = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.STFTParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "window_size": + message.window_size = reader.int64(); + break; + case "stride": + message.stride = reader.int64(); + break; + case "fft_size": + message.fft_size = reader.int64(); + break; + case "window_type": + message.window_type = reader.string(); + break; + case "center": + message.center = reader.bool(); + break; + case "pad_mode": + message.pad_mode = reader.string(); + break; + case "as_istft_backward": + message.as_istft_backward = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.STFTParameter.prototype.window_size = protobuf.Int64.create(0); +$root.nnabla.STFTParameter.prototype.stride = protobuf.Int64.create(0); +$root.nnabla.STFTParameter.prototype.fft_size = protobuf.Int64.create(0); +$root.nnabla.STFTParameter.prototype.window_type = ""; +$root.nnabla.STFTParameter.prototype.center = false; +$root.nnabla.STFTParameter.prototype.pad_mode = ""; +$root.nnabla.STFTParameter.prototype.as_istft_backward = false; + +$root.nnabla.ISTFTParameter = class ISTFTParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ISTFTParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.window_size = reader.int64(); + break; + case 2: + message.stride = reader.int64(); + break; + case 3: + message.fft_size = reader.int64(); + break; + case 4: + message.window_type = reader.string(); + break; + case 5: + message.center = reader.bool(); + break; + case 6: + message.pad_mode = reader.string(); + break; + case 7: + message.as_stft_backward = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ISTFTParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "window_size": + message.window_size = reader.int64(); + break; + case "stride": + message.stride = reader.int64(); + break; + case "fft_size": + message.fft_size = reader.int64(); + break; + case "window_type": + message.window_type = reader.string(); + break; + case "center": + message.center = reader.bool(); + break; + case "pad_mode": + message.pad_mode = reader.string(); + break; + case "as_stft_backward": + message.as_stft_backward = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ISTFTParameter.prototype.window_size = protobuf.Int64.create(0); +$root.nnabla.ISTFTParameter.prototype.stride = protobuf.Int64.create(0); +$root.nnabla.ISTFTParameter.prototype.fft_size = protobuf.Int64.create(0); +$root.nnabla.ISTFTParameter.prototype.window_type = ""; +$root.nnabla.ISTFTParameter.prototype.center = false; +$root.nnabla.ISTFTParameter.prototype.pad_mode = ""; +$root.nnabla.ISTFTParameter.prototype.as_stft_backward = false; + +$root.nnabla.DropoutParameter = class DropoutParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.DropoutParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.p = reader.double(); + break; + case 2: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.DropoutParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "p": + message.p = reader.double(); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.DropoutParameter.prototype.p = 0; +$root.nnabla.DropoutParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.TopKDataParameter = class TopKDataParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.TopKDataParameter(); + const end = length !== undefined ? 
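+ // Each class also carries a decodeText twin for the protobuf text format: it
+ // dispatches on string field names from `reader.tag()` and hands unknown entries to
+ // `reader.field(tag, message)` rather than skipping by wire type.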
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.k = reader.int64(); + break; + case 2: + message.abs = reader.bool(); + break; + case 3: + message.reduce = reader.bool(); + break; + case 4: + message.base_axis = reader.int64(); + break; + case 5: + message.largest = reader.bool(); + break; + case 6: + message.with_index = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.TopKDataParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "k": + message.k = reader.int64(); + break; + case "abs": + message.abs = reader.bool(); + break; + case "reduce": + message.reduce = reader.bool(); + break; + case "base_axis": + message.base_axis = reader.int64(); + break; + case "largest": + message.largest = reader.bool(); + break; + case "with_index": + message.with_index = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.TopKDataParameter.prototype.k = protobuf.Int64.create(0); +$root.nnabla.TopKDataParameter.prototype.abs = false; +$root.nnabla.TopKDataParameter.prototype.reduce = false; +$root.nnabla.TopKDataParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.TopKDataParameter.prototype.largest = false; +$root.nnabla.TopKDataParameter.prototype.with_index = false; + +$root.nnabla.TopKGradParameter = class TopKGradParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.TopKGradParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.k = reader.int64(); + break; + case 2: + message.abs = reader.bool(); + break; + case 3: + message.base_axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.TopKGradParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "k": + message.k = reader.int64(); + break; + case "abs": + message.abs = reader.bool(); + break; + case "base_axis": + message.base_axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.TopKGradParameter.prototype.k = protobuf.Int64.create(0); +$root.nnabla.TopKGradParameter.prototype.abs = false; +$root.nnabla.TopKGradParameter.prototype.base_axis = protobuf.Int64.create(0); + +$root.nnabla.RandParameter = class RandParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RandParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.low = reader.float(); + break; + case 2: + message.high = reader.float(); + break; + case 3: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RandParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "low": + message.low = reader.float(); + break; + case "high": + message.high = reader.float(); + break; + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RandParameter.prototype.low = 0; +$root.nnabla.RandParameter.prototype.high = 0; +$root.nnabla.RandParameter.prototype.shape = null; +$root.nnabla.RandParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.RandintParameter = class RandintParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RandintParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.low = reader.int64(); + break; + case 2: + message.high = reader.int64(); + break; + case 3: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RandintParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "low": + message.low = reader.int64(); + break; + case "high": + message.high = reader.int64(); + break; + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RandintParameter.prototype.low = protobuf.Int64.create(0); +$root.nnabla.RandintParameter.prototype.high = protobuf.Int64.create(0); +$root.nnabla.RandintParameter.prototype.shape = null; +$root.nnabla.RandintParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.RandnParameter = class RandnParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RandnParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mu = reader.float(); + break; + case 2: + message.sigma = reader.float(); + break; + case 3: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RandnParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "mu": + message.mu = reader.float(); + break; + case "sigma": + message.sigma = reader.float(); + break; + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RandnParameter.prototype.mu = 0; +$root.nnabla.RandnParameter.prototype.sigma = 0; +$root.nnabla.RandnParameter.prototype.shape = null; +$root.nnabla.RandnParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.RandBinomialParameter = class RandBinomialParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RandBinomialParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.n = reader.int64(); + break; + case 2: + message.p = reader.float(); + break; + case 3: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RandBinomialParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "n": + message.n = reader.int64(); + break; + case "p": + message.p = reader.float(); + break; + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RandBinomialParameter.prototype.n = protobuf.Int64.create(0); +$root.nnabla.RandBinomialParameter.prototype.p = 0; +$root.nnabla.RandBinomialParameter.prototype.shape = null; +$root.nnabla.RandBinomialParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.RandBetaParameter = class RandBetaParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RandBetaParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta = reader.float(); + break; + case 3: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RandBetaParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + case "beta": + message.beta = reader.float(); + break; + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RandBetaParameter.prototype.alpha = 0; +$root.nnabla.RandBetaParameter.prototype.beta = 0; +$root.nnabla.RandBetaParameter.prototype.shape = null; +$root.nnabla.RandBetaParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.RandGammaParameter = class RandGammaParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RandGammaParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.k = reader.float(); + break; + case 2: + message.theta = reader.float(); + break; + case 3: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RandGammaParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "k": + message.k = reader.float(); + break; + case "theta": + message.theta = reader.float(); + break; + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RandGammaParameter.prototype.k = 0; +$root.nnabla.RandGammaParameter.prototype.theta = 0; +$root.nnabla.RandGammaParameter.prototype.shape = null; +$root.nnabla.RandGammaParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.RandomChoiceParameter = class RandomChoiceParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RandomChoiceParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 2: + message.replace = reader.bool(); + break; + case 3: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RandomChoiceParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "replace": + message.replace = reader.bool(); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RandomChoiceParameter.prototype.shape = null; +$root.nnabla.RandomChoiceParameter.prototype.replace = false; +$root.nnabla.RandomChoiceParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.RandomCropParameter = class RandomCropParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.RandomCropParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 2: + message.base_axis = reader.int64(); + break; + case 3: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RandomCropParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "base_axis": + message.base_axis = reader.int64(); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RandomCropParameter.prototype.shape = null; +$root.nnabla.RandomCropParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.RandomCropParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.RandomFlipParameter = class RandomFlipParameter { + + constructor() { + this.axes = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.RandomFlipParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axes = reader.array(message.axes, () => reader.int64(), tag); + break; + case 2: + message.base_axis = reader.int64(); + break; + case 3: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RandomFlipParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axes": + reader.array(message.axes, () => reader.int64()); + break; + case "base_axis": + message.base_axis = reader.int64(); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RandomFlipParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.RandomFlipParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.RandomShiftParameter = class RandomShiftParameter { + + constructor() { + this.shifts = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.RandomShiftParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shifts = reader.array(message.shifts, () => reader.int64(), tag); + break; + case 2: + message.border_mode = reader.string(); + break; + case 3: + message.constant_value = reader.float(); + break; + case 4: + message.base_axis = reader.int64(); + break; + case 5: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RandomShiftParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shifts": + reader.array(message.shifts, () => reader.int64()); + break; + case "border_mode": + message.border_mode = reader.string(); + break; + case "constant_value": + message.constant_value = reader.float(); + break; + case "base_axis": + message.base_axis = reader.int64(); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RandomShiftParameter.prototype.border_mode = ""; +$root.nnabla.RandomShiftParameter.prototype.constant_value = 0; +$root.nnabla.RandomShiftParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.RandomShiftParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.RandomEraseParameter = class RandomEraseParameter { + + constructor() { + this.area_ratios = []; + this.aspect_ratios = []; + this.replacements = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.RandomEraseParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.prob = reader.float(); + break; + case 2: + message.area_ratios = reader.floats(message.area_ratios, tag); + break; + case 3: + message.aspect_ratios = reader.floats(message.aspect_ratios, tag); + break; + case 4: + message.replacements = reader.floats(message.replacements, tag); + break; + case 5: + message.n = reader.int64(); + break; + case 6: + message.share = reader.bool(); + break; + case 7: + message.inplace = reader.bool(); + break; + case 8: + message.base_axis = reader.int64(); + break; + case 9: + message.seed = reader.int64(); + break; + case 10: + message.channel_last = reader.bool(); + break; + case 11: + message.ste_fine_grained = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.RandomEraseParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "prob": + message.prob = reader.float(); + break; + case "area_ratios": + reader.array(message.area_ratios, () => reader.float()); + break; + case "aspect_ratios": + reader.array(message.aspect_ratios, () => reader.float()); + break; + case "replacements": + reader.array(message.replacements, () => reader.float()); + break; + case "n": + message.n = reader.int64(); + break; + case "share": + message.share = reader.bool(); + break; + case "inplace": + message.inplace = reader.bool(); + break; + case "base_axis": + message.base_axis = reader.int64(); + break; + case "seed": + message.seed = reader.int64(); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + case "ste_fine_grained": + message.ste_fine_grained = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.RandomEraseParameter.prototype.prob = 0; +$root.nnabla.RandomEraseParameter.prototype.n = protobuf.Int64.create(0); +$root.nnabla.RandomEraseParameter.prototype.share = false; +$root.nnabla.RandomEraseParameter.prototype.inplace = false; +$root.nnabla.RandomEraseParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.RandomEraseParameter.prototype.seed = protobuf.Int64.create(0); +$root.nnabla.RandomEraseParameter.prototype.channel_last = false; +$root.nnabla.RandomEraseParameter.prototype.ste_fine_grained = false; + +$root.nnabla.ImageAugmentationParameter = class ImageAugmentationParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ImageAugmentationParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 2: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.min_scale = reader.float(); + break; + case 4: + message.max_scale = reader.float(); + break; + case 5: + message.angle = reader.float(); + break; + case 6: + message.aspect_ratio = reader.float(); + break; + case 7: + message.distortion = reader.float(); + break; + case 8: + message.flip_lr = reader.bool(); + break; + case 9: + message.flip_ud = reader.bool(); + break; + case 10: + message.brightness = reader.float(); + break; + case 11: + message.brightness_each = reader.bool(); + break; + case 12: + message.contrast = reader.float(); + break; + case 13: + message.contrast_center = reader.float(); + break; + case 14: + message.contrast_each = reader.bool(); + break; + case 15: + message.noise = reader.float(); + break; + case 16: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ImageAugmentationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.nnabla.Shape.decodeText(reader); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "min_scale": + message.min_scale = reader.float(); + break; + case "max_scale": + message.max_scale = reader.float(); + break; + case "angle": + message.angle = reader.float(); + break; + case "aspect_ratio": + message.aspect_ratio = reader.float(); + break; + case "distortion": + message.distortion = reader.float(); + break; + case "flip_lr": + message.flip_lr = reader.bool(); + break; + case "flip_ud": + message.flip_ud = reader.bool(); + break; + case "brightness": + message.brightness = reader.float(); + break; + case "brightness_each": + message.brightness_each = reader.bool(); + break; + case "contrast": + message.contrast = reader.float(); + break; + case "contrast_center": + message.contrast_center = reader.float(); + break; + case "contrast_each": + message.contrast_each = reader.bool(); + break; + case "noise": + message.noise = reader.float(); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ImageAugmentationParameter.prototype.shape = null; +$root.nnabla.ImageAugmentationParameter.prototype.pad = null; +$root.nnabla.ImageAugmentationParameter.prototype.min_scale = 0; +$root.nnabla.ImageAugmentationParameter.prototype.max_scale = 0; +$root.nnabla.ImageAugmentationParameter.prototype.angle = 0; +$root.nnabla.ImageAugmentationParameter.prototype.aspect_ratio = 0; +$root.nnabla.ImageAugmentationParameter.prototype.distortion = 0; +$root.nnabla.ImageAugmentationParameter.prototype.flip_lr = false; +$root.nnabla.ImageAugmentationParameter.prototype.flip_ud = false; +$root.nnabla.ImageAugmentationParameter.prototype.brightness = 0; +$root.nnabla.ImageAugmentationParameter.prototype.brightness_each = false; +$root.nnabla.ImageAugmentationParameter.prototype.contrast = 0; +$root.nnabla.ImageAugmentationParameter.prototype.contrast_center = 0; +$root.nnabla.ImageAugmentationParameter.prototype.contrast_each = false; 
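+ +// Editorial note (not generator output): every generated class follows one pattern. Scalar and message defaults are declared once on the prototype (0, false, "", null, or a shared protobuf.Int64 zero), while repeated fields are created per instance in the constructor so decoded messages never share mutable arrays. In decode(), the varint tag packs the field number in its upper bits (tag >>> 3) and the wire type in its lower three bits (tag & 7); unmatched fields fall through to reader.skipType(tag & 7), so fields added by newer schemas are skipped instead of failing. +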
+$root.nnabla.ImageAugmentationParameter.prototype.noise = 0; +$root.nnabla.ImageAugmentationParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.SoftmaxCrossEntropyParameter = class SoftmaxCrossEntropyParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SoftmaxCrossEntropyParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SoftmaxCrossEntropyParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SoftmaxCrossEntropyParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.CategoricalCrossEntropyParameter = class CategoricalCrossEntropyParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.CategoricalCrossEntropyParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.CategoricalCrossEntropyParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.CategoricalCrossEntropyParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.HuberLossParameter = class HuberLossParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.HuberLossParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.delta = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.HuberLossParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "delta": + message.delta = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.HuberLossParameter.prototype.delta = 0; + +$root.nnabla.EpsilonInsensitiveLossParameter = class EpsilonInsensitiveLossParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.EpsilonInsensitiveLossParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.epsilon = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.EpsilonInsensitiveLossParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "epsilon": + message.epsilon = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.EpsilonInsensitiveLossParameter.prototype.epsilon = 0; + +$root.nnabla.KLMultinomialParameter = class KLMultinomialParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.KLMultinomialParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.KLMultinomialParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.KLMultinomialParameter.prototype.base_axis = protobuf.Int64.create(0); + +$root.nnabla.AffineGridParameter = class AffineGridParameter { + + constructor() { + this.size = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.AffineGridParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = reader.array(message.size, () => reader.int64(), tag); + break; + case 2: + message.align_corners = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.AffineGridParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "size": + reader.array(message.size, () => reader.int64()); + break; + case "align_corners": + message.align_corners = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.AffineGridParameter.prototype.align_corners = false; + +$root.nnabla.WarpByGridParameter = class WarpByGridParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.WarpByGridParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.string(); + break; + case 2: + message.padding_mode = reader.string(); + break; + case 3: + message.align_corners = reader.bool(); + break; + case 4: + message.channel_last = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.WarpByGridParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "mode": + message.mode = reader.string(); + break; + case "padding_mode": + message.padding_mode = reader.string(); + break; + case "align_corners": + message.align_corners = reader.bool(); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.WarpByGridParameter.prototype.mode = ""; +$root.nnabla.WarpByGridParameter.prototype.padding_mode = ""; +$root.nnabla.WarpByGridParameter.prototype.align_corners = false; +$root.nnabla.WarpByGridParameter.prototype.channel_last = false; + +$root.nnabla.BinaryConnectAffineParameter = class BinaryConnectAffineParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.BinaryConnectAffineParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.quantize_zero_to = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BinaryConnectAffineParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "quantize_zero_to": + message.quantize_zero_to = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BinaryConnectAffineParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.BinaryConnectAffineParameter.prototype.quantize_zero_to = 0; + +$root.nnabla.BinaryConnectConvolutionParameter = class BinaryConnectConvolutionParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.BinaryConnectConvolutionParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.dilation = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.group = reader.int64(); + break; + case 6: + message.quantize_zero_to = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BinaryConnectConvolutionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "dilation": + message.dilation = $root.nnabla.Shape.decodeText(reader); + break; + case "group": + message.group = reader.int64(); + break; + case "quantize_zero_to": + message.quantize_zero_to = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BinaryConnectConvolutionParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.BinaryConnectConvolutionParameter.prototype.pad = null; +$root.nnabla.BinaryConnectConvolutionParameter.prototype.stride = null; +$root.nnabla.BinaryConnectConvolutionParameter.prototype.dilation = null; +$root.nnabla.BinaryConnectConvolutionParameter.prototype.group = protobuf.Int64.create(0); +$root.nnabla.BinaryConnectConvolutionParameter.prototype.quantize_zero_to = 0; + +$root.nnabla.BinaryWeightAffineParameter = class BinaryWeightAffineParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.BinaryWeightAffineParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.quantize_zero_to = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BinaryWeightAffineParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "quantize_zero_to": + message.quantize_zero_to = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BinaryWeightAffineParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.BinaryWeightAffineParameter.prototype.quantize_zero_to = 0; + +$root.nnabla.BinaryWeightConvolutionParameter = class BinaryWeightConvolutionParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.BinaryWeightConvolutionParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.dilation = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.group = reader.int64(); + break; + case 6: + message.quantize_zero_to = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BinaryWeightConvolutionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "dilation": + message.dilation = $root.nnabla.Shape.decodeText(reader); + break; + case "group": + message.group = reader.int64(); + break; + case "quantize_zero_to": + message.quantize_zero_to = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BinaryWeightConvolutionParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.BinaryWeightConvolutionParameter.prototype.pad = null; +$root.nnabla.BinaryWeightConvolutionParameter.prototype.stride = null; +$root.nnabla.BinaryWeightConvolutionParameter.prototype.dilation = null; +$root.nnabla.BinaryWeightConvolutionParameter.prototype.group = protobuf.Int64.create(0); +$root.nnabla.BinaryWeightConvolutionParameter.prototype.quantize_zero_to = 0; + +$root.nnabla.INQAffineParameter = class INQAffineParameter { + + constructor() { + this.inq_iterations = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.INQAffineParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.num_bits = reader.int64(); + break; + case 3: + message.inq_iterations = reader.array(message.inq_iterations, () => reader.int64(), tag); + break; + case 4: + message.selection_algorithm = reader.string(); + break; + case 5: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.INQAffineParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "num_bits": + message.num_bits = reader.int64(); + break; + case "inq_iterations": + reader.array(message.inq_iterations, () => reader.int64()); + break; + case "selection_algorithm": + message.selection_algorithm = reader.string(); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.INQAffineParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.INQAffineParameter.prototype.num_bits = protobuf.Int64.create(0); +$root.nnabla.INQAffineParameter.prototype.selection_algorithm = ""; +$root.nnabla.INQAffineParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.INQConvolutionParameter = class INQConvolutionParameter { + + constructor() { + this.inq_iterations = []; + } + + static decode(reader, length) { + const message = new $root.nnabla.INQConvolutionParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.dilation = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.group = reader.int64(); + break; + case 6: + message.num_bits = reader.int64(); + break; + case 7: + message.inq_iterations = reader.array(message.inq_iterations, () => reader.int64(), tag); + break; + case 8: + message.selection_algorithm = reader.string(); + break; + case 9: + message.seed = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.INQConvolutionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "dilation": + message.dilation = $root.nnabla.Shape.decodeText(reader); + break; + case "group": + message.group = reader.int64(); + break; + case "num_bits": + message.num_bits = reader.int64(); + break; + case "inq_iterations": + reader.array(message.inq_iterations, () => reader.int64()); + break; + case "selection_algorithm": + message.selection_algorithm = reader.string(); + break; + case "seed": + message.seed = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + 
return message; + } +}; + +$root.nnabla.INQConvolutionParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.INQConvolutionParameter.prototype.pad = null; +$root.nnabla.INQConvolutionParameter.prototype.stride = null; +$root.nnabla.INQConvolutionParameter.prototype.dilation = null; +$root.nnabla.INQConvolutionParameter.prototype.group = protobuf.Int64.create(0); +$root.nnabla.INQConvolutionParameter.prototype.num_bits = protobuf.Int64.create(0); +$root.nnabla.INQConvolutionParameter.prototype.selection_algorithm = ""; +$root.nnabla.INQConvolutionParameter.prototype.seed = protobuf.Int64.create(0); + +$root.nnabla.FixedPointQuantizeParameter = class FixedPointQuantizeParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.FixedPointQuantizeParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sign = reader.bool(); + break; + case 2: + message.n = reader.int64(); + break; + case 3: + message.delta = reader.float(); + break; + case 4: + message.ste_fine_grained = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.FixedPointQuantizeParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "sign": + message.sign = reader.bool(); + break; + case "n": + message.n = reader.int64(); + break; + case "delta": + message.delta = reader.float(); + break; + case "ste_fine_grained": + message.ste_fine_grained = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.FixedPointQuantizeParameter.prototype.sign = false; +$root.nnabla.FixedPointQuantizeParameter.prototype.n = protobuf.Int64.create(0); +$root.nnabla.FixedPointQuantizeParameter.prototype.delta = 0; +$root.nnabla.FixedPointQuantizeParameter.prototype.ste_fine_grained = false; + +$root.nnabla.MinMaxQuantizeParameter = class MinMaxQuantizeParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.MinMaxQuantizeParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.decay = reader.float(); + break; + case 2: + message.x_min_max = reader.bool(); + break; + case 3: + message.ema = reader.bool(); + break; + case 4: + message.ste_fine_grained = reader.bool(); + break; + case 5: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MinMaxQuantizeParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "decay": + message.decay = reader.float(); + break; + case "x_min_max": + message.x_min_max = reader.bool(); + break; + case "ema": + message.ema = reader.bool(); + break; + case "ste_fine_grained": + message.ste_fine_grained = reader.bool(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MinMaxQuantizeParameter.prototype.decay = 0; +$root.nnabla.MinMaxQuantizeParameter.prototype.x_min_max = false; +$root.nnabla.MinMaxQuantizeParameter.prototype.ema = false; +$root.nnabla.MinMaxQuantizeParameter.prototype.ste_fine_grained = false; +$root.nnabla.MinMaxQuantizeParameter.prototype.eps = 0; + +$root.nnabla.Pow2QuantizeParameter = class Pow2QuantizeParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.Pow2QuantizeParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sign = reader.bool(); + break; + case 2: + message.with_zero = reader.bool(); + break; + case 3: + message.n = reader.int64(); + break; + case 4: + message.m = reader.int64(); + break; + case 5: + message.ste_fine_grained = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Pow2QuantizeParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "sign": + message.sign = reader.bool(); + break; + case "with_zero": + message.with_zero = reader.bool(); + break; + case "n": + message.n = reader.int64(); + break; + case "m": + message.m = reader.int64(); + break; + case "ste_fine_grained": + message.ste_fine_grained = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Pow2QuantizeParameter.prototype.sign = false; +$root.nnabla.Pow2QuantizeParameter.prototype.with_zero = false; +$root.nnabla.Pow2QuantizeParameter.prototype.n = protobuf.Int64.create(0); +$root.nnabla.Pow2QuantizeParameter.prototype.m = protobuf.Int64.create(0); +$root.nnabla.Pow2QuantizeParameter.prototype.ste_fine_grained = false; + +$root.nnabla.PruneParameter = class PruneParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.PruneParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.rate = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.PruneParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "rate": + message.rate = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.PruneParameter.prototype.rate = 0; + +$root.nnabla.QuantizeLinearParameter = class QuantizeLinearParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.QuantizeLinearParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.round_mode = reader.string(); + break; + case 2: + message.narrow_range = reader.bool(); + break; + case 3: + message.dtype = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.QuantizeLinearParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "round_mode": + message.round_mode = reader.string(); + break; + case "narrow_range": + message.narrow_range = reader.bool(); + break; + case "dtype": + message.dtype = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.QuantizeLinearParameter.prototype.round_mode = ""; +$root.nnabla.QuantizeLinearParameter.prototype.narrow_range = false; +$root.nnabla.QuantizeLinearParameter.prototype.dtype = protobuf.Int64.create(0); + +$root.nnabla.TopNErrorParameter = class TopNErrorParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.TopNErrorParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.n = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.TopNErrorParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + case "n": + message.n = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.TopNErrorParameter.prototype.axis = protobuf.Int64.create(0); +$root.nnabla.TopNErrorParameter.prototype.n = protobuf.Int64.create(0); + +$root.nnabla.ConfusionMatrixParameter = class ConfusionMatrixParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ConfusionMatrixParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ConfusionMatrixParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ConfusionMatrixParameter.prototype.axis = protobuf.Int64.create(0); + +$root.nnabla.VATNoiseParameter = class VATNoiseParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.VATNoiseParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base_axis = reader.int64(); + break; + case 2: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.VATNoiseParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "base_axis": + message.base_axis = reader.int64(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.VATNoiseParameter.prototype.base_axis = protobuf.Int64.create(0); +$root.nnabla.VATNoiseParameter.prototype.eps = 0; + +$root.nnabla.SinkParameter = class SinkParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.SinkParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.one_input_grad = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.SinkParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "one_input_grad": + message.one_input_grad = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.SinkParameter.prototype.one_input_grad = false; + +$root.nnabla.NmsDetection2dParameter = class NmsDetection2dParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.NmsDetection2dParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.thresh = reader.float(); + break; + case 2: + message.nms = reader.float(); + break; + case 3: + message.nms_per_class = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.NmsDetection2dParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "thresh": + message.thresh = reader.float(); + break; + case "nms": + message.nms = reader.float(); + break; + case "nms_per_class": + message.nms_per_class = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.NmsDetection2dParameter.prototype.thresh = 0; +$root.nnabla.NmsDetection2dParameter.prototype.nms = 0; +$root.nnabla.NmsDetection2dParameter.prototype.nms_per_class = false; + +$root.nnabla.ONNXNonMaxSuppressionParameter = class ONNXNonMaxSuppressionParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.ONNXNonMaxSuppressionParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.center_point_box = reader.int64(); + break; + case 2: + message.max_output_boxes_per_class = reader.int64(); + break; + case 3: + message.iou_threshold = reader.float(); + break; + case 4: + message.score_threshold = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.ONNXNonMaxSuppressionParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "center_point_box": + message.center_point_box = reader.int64(); + break; + case "max_output_boxes_per_class": + message.max_output_boxes_per_class = reader.int64(); + break; + case "iou_threshold": + message.iou_threshold = reader.float(); + break; + case "score_threshold": + message.score_threshold = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.ONNXNonMaxSuppressionParameter.prototype.center_point_box = protobuf.Int64.create(0); +$root.nnabla.ONNXNonMaxSuppressionParameter.prototype.max_output_boxes_per_class = protobuf.Int64.create(0); +$root.nnabla.ONNXNonMaxSuppressionParameter.prototype.iou_threshold = 0; +$root.nnabla.ONNXNonMaxSuppressionParameter.prototype.score_threshold = 0; + +$root.nnabla.MaxPoolingBackwardParameter = class MaxPoolingBackwardParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.MaxPoolingBackwardParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.kernel = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 2: + message.stride = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.ignore_border = reader.bool(); + break; + case 4: + message.pad = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.channel_last = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.MaxPoolingBackwardParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "kernel": + message.kernel = $root.nnabla.Shape.decodeText(reader); + break; + case "stride": + message.stride = $root.nnabla.Shape.decodeText(reader); + break; + case "ignore_border": + message.ignore_border = reader.bool(); + break; + case "pad": + message.pad = $root.nnabla.Shape.decodeText(reader); + break; + case "channel_last": + message.channel_last = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.MaxPoolingBackwardParameter.prototype.kernel = null; +$root.nnabla.MaxPoolingBackwardParameter.prototype.stride = null; +$root.nnabla.MaxPoolingBackwardParameter.prototype.ignore_border = false; +$root.nnabla.MaxPoolingBackwardParameter.prototype.pad = null; +$root.nnabla.MaxPoolingBackwardParameter.prototype.channel_last = false; + +$root.nnabla.PatchCorrelationParameter = class PatchCorrelationParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.PatchCorrelationParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.patch = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 2: + message.shift = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 3: + message.patch_step = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 4: + message.shift_step = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + case 5: + message.padding = $root.nnabla.Shape.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.PatchCorrelationParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "patch": + message.patch = $root.nnabla.Shape.decodeText(reader); + break; + case "shift": + message.shift = $root.nnabla.Shape.decodeText(reader); + break; + case "patch_step": + message.patch_step = $root.nnabla.Shape.decodeText(reader); + break; + case "shift_step": + message.shift_step = $root.nnabla.Shape.decodeText(reader); + break; + case "padding": + message.padding = $root.nnabla.Shape.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.PatchCorrelationParameter.prototype.patch = null; +$root.nnabla.PatchCorrelationParameter.prototype.shift = null; +$root.nnabla.PatchCorrelationParameter.prototype.patch_step = null; +$root.nnabla.PatchCorrelationParameter.prototype.shift_step = null; +$root.nnabla.PatchCorrelationParameter.prototype.padding = null; + +$root.nnabla.UniqueParameter = class UniqueParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.UniqueParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.flatten = reader.bool(); + break; + case 2: + message.axis = reader.int64(); + break; + case 3: + message.sorted = reader.bool(); + break; + case 4: + message.with_index = reader.bool(); + break; + case 5: + message.with_inverse = reader.bool(); + break; + case 6: + message.with_counts = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.UniqueParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "flatten": + message.flatten = reader.bool(); + break; + case "axis": + message.axis = reader.int64(); + break; + case "sorted": + message.sorted = reader.bool(); + break; + case "with_index": + message.with_index = reader.bool(); + break; + case "with_inverse": + message.with_inverse = reader.bool(); + break; + case "with_counts": + message.with_counts = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.UniqueParameter.prototype.flatten = false; +$root.nnabla.UniqueParameter.prototype.axis = protobuf.Int64.create(0); +$root.nnabla.UniqueParameter.prototype.sorted = false; +$root.nnabla.UniqueParameter.prototype.with_index = false; +$root.nnabla.UniqueParameter.prototype.with_inverse = false; +$root.nnabla.UniqueParameter.prototype.with_counts = false; + +$root.nnabla.EyeLikeParameter = class EyeLikeParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.EyeLikeParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.k = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.EyeLikeParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "k": + message.k = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.EyeLikeParameter.prototype.k = protobuf.Int64.create(0); + +$root.nnabla.Mod2Parameter = class Mod2Parameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.Mod2Parameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.fmod = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.Mod2Parameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "fmod": + message.fmod = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.Mod2Parameter.prototype.fmod = false; + +$root.nnabla.BitShiftParameter = class BitShiftParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.BitShiftParameter(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.direction = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.BitShiftParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "direction": + message.direction = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.BitShiftParameter.prototype.direction = ""; + +$root.nnabla.EinsumParameter = class EinsumParameter { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.nnabla.EinsumParameter(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.equation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.nnabla.EinsumParameter(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "equation": + message.equation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.nnabla.EinsumParameter.prototype.equation = ""; diff --git a/nnabla.js b/nnabla.js new file mode 100644 index 00000000000..ebbd26921a6 --- /dev/null +++ b/nnabla.js @@ -0,0 +1,302 @@ + +import * as protobuf from './protobuf.js'; +import * as text from './text.js'; + +const nnabla = {}; + +nnabla.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + if (identifier.endsWith('.nntxt')) { + const tags = context.tags('pbtxt'); + if (tags.has('network')) { + return 'nnabla.pbtxt'; + } + } + return undefined; + } + + async open(context, target) { + await context.require('./nnabla-proto'); + nnabla.proto = protobuf.get('nnabla').nnabla; + switch (target) { + case 'nnabla.pbtxt': { + const stream = context.stream; + const reader = protobuf.TextReader.open(stream); + const model = nnabla.proto.NNablaProtoBuf.decodeText(reader); + const open = async (model, version) => { + const metadata = await context.metadata('nnabla-metadata.json'); + return new nnabla.Model(metadata, model, `NNabla${version ? 
` v${version}` : ''}`); + }; + try { + const contexts = await Promise.all([ + context.fetch('nnp_version.txt'), + context.fetch('parameter.protobuf') + ]); + const version = text.Reader.open(contexts[0].stream).read(); + const reader = protobuf.BinaryReader.open(contexts[1].stream); + const params = nnabla.proto.NNablaProtoBuf.decode(reader); + model.parameter = params.parameter; + return await open(model, version); + } catch (error) { + return await open(model); + } + } + default: { + throw new nnabla.Error(`Unsupported nnabla format '${target}'.`); + } + } + } +}; + +nnabla.Model = class { + + constructor(metadata, model, format) { + this.format = format; + this.graphs = []; + const tensors = new Map(model.parameter.map((parameter) => { + const name = parameter.variable_name; + const shape = new nnabla.TensorShape(parameter.shape.dim); + const type = new nnabla.TensorType(shape); + return [ name, new nnabla.Tensor(name, type, parameter.data) ]; + })); + const networks = new Map(model.network.map((network) => [ network.name, network ])); + for (const executor of model.executor) { + const network = networks.get(executor.network_name); + const graph = new nnabla.Graph(metadata, network, executor.data_variable, executor.output_variable, tensors); + this.graphs.push(graph); + } + for (const optimizer of model.optimizer) { + const network = networks.get(optimizer.network_name); + const graph = new nnabla.Graph(metadata, network, optimizer.data_variable, optimizer.loss_variable, tensors); + this.graphs.push(graph); + } + for (const monitor of model.monitor) { + const network = networks.get(monitor.network_name); + const graph = new nnabla.Graph(metadata, network, monitor.data_variable, monitor.monitor_variable, tensors); + this.graphs.push(graph); + } + } +}; + +nnabla.Graph = class { + + constructor (metadata, network, inputs, outputs, tensors) { + this.name = network.name; + const values = new Map(network.variable.map((variable) => { + const name = variable.name; + const shape = new nnabla.TensorShape(variable.shape.dim); + const type = new nnabla.TensorType(shape); + return [ name, new nnabla.Value(name, type, tensors.get(name)) ]; + })); + values.map = (name) => { + if (!values.has(name)) { + values.set(name, new nnabla.Value(name, null, tensors.get(name))); + } + return values.get(name); + }; + this.inputs = inputs.map((item) => { + const name = item.variable_name; + return new nnabla.Argument(name, [ values.map(name) ]); + }); + this.outputs = outputs.map((output) => { + const name = output.variable_name; + return new nnabla.Argument(name, [ values.map(name) ]); + }); + const get_parameters = (func) => { + for (const [key, value] of Object.entries(func)) { + if (key.endsWith("_param")) { + return value; + } + } + return undefined; + }; + this.nodes = network.function.map((func) => { + const parameters = get_parameters(func) || []; + const attributes = Object.entries(parameters).map(([name, value]) => { + return new nnabla.Attribute(metadata, func.type, name, value); + }); + const func_type = metadata.type(func.type); + const inputs = []; + for (let index = 0; index < func.input.length;) { + const input = func_type.inputs && index < func_type.inputs.length ? func_type.inputs[index] : { name: index.toString() }; + const count = input.list ? 
func.input.length - index : 1;
+                const args = func.input.slice(index, index + count).map((input) => values.map(input));
+                const argument = new nnabla.Argument(input.name, args);
+                inputs.push(argument);
+                index += count;
+            }
+            const outputs = [];
+            for (let index = 0; index < func.output.length;) {
+                const output = func_type.outputs && index < func_type.outputs.length ? func_type.outputs[index] : { name: index.toString() };
+                const count = output.list ? func.output.length - index : 1;
+                const args = func.output.slice(index, index + count).map((output) => values.map(output));
+                const argument = new nnabla.Argument(output.name, args);
+                outputs.push(argument);
+                index += count;
+            }
+            return new nnabla.Node(metadata, func, attributes, inputs, outputs);
+        });
+    }
+};
+
+nnabla.Argument = class {
+
+    constructor(name, value) {
+        this.name = name;
+        this.value = value;
+    }
+};
+
+nnabla.Value = class {
+
+    constructor(name, type, initializer) {
+        this._name = name;
+        this._type = type || null;
+        this._initializer = initializer || null;
+    }
+
+    get name() {
+        return this._name;
+    }
+
+    get type() {
+        if (this._type) {
+            return this._type;
+        }
+        if (this._initializer) {
+            return this._initializer.type;
+        }
+        return null;
+    }
+
+    get initializer() {
+        return this._initializer;
+    }
+};
+
+nnabla.Node = class {
+
+    constructor(metadata, func, attributes, inputs, outputs) {
+        this.name = func.name;
+        this.type = metadata.type(func.type) || { name: func.type, type: func.type };
+        this.attributes = attributes || [];
+        this.outputs = outputs || [];
+        this.chain = [];
+        // TODO: "nonlinearity" does not match metadata type
+        const get_nonlinearity = (name) => {
+            switch (name) {
+                case "identity": return "Identity";
+                case "relu": return "ReLU";
+                case "sigmoid": return "Sigmoid";
+                case "tanh": return "Tanh";
+                case "leaky_relu": return "LeakyReLU";
+                case "elu": return "ELU";
+                case "relu6": return "ReLU6";
+                default: return name;
+            }
+        };
+        switch (func.type) {
+            case "FusedConvolution": {
+                this.inputs = inputs.slice(0, 3) || [];
+                if (inputs.length > 3) {
+                    this.chain.push(new nnabla.Node(metadata, { name: `${func.name}/bn`, type: "BatchNormalization" }, [], inputs.slice(3, 7)));
+                }
+                if (inputs.length > 7) {
+                    this.chain.push(new nnabla.Node(metadata, { name: `${func.name}/add`, type: "Add2" }, [], inputs.slice(7)));
+                }
+                const type_a = attributes.find((item) => item.name === "nonlinearity").value;
+                this.chain.push(new nnabla.Node(metadata, { name: `${func.name}/act`, type: get_nonlinearity(type_a) }));
+                break;
+            }
+            case "FusedBatchNormalization": {
+                this.inputs = inputs.slice(0, 5) || [];
+                if (inputs.length > 5) {
+                    this.chain.push(new nnabla.Node(metadata, { name: `${func.name}/add`, type: "Add2" }, [], inputs.slice(5)));
+                }
+                const type_b = attributes.find((item) => item.name === "nonlinearity").value;
+                this.chain.push(new nnabla.Node(metadata, { name: `${func.name}/act`, type: get_nonlinearity(type_b) }));
+                break;
+            }
+            default: {
+                this.inputs = inputs || [];
+                break;
+            }
+        }
+    }
+};
+
+nnabla.Attribute = class {
+
+    constructor(metadata, type, name, value) {
+        this.name = name;
+        const attribute = metadata.attribute(type, name);
+        this.description = attribute.description;
+        switch (attribute.type) {
+            case "shape":
+                this.type = "int64[]";
+                this.value = value.dim;
+                break;
+            default:
+                this.type = attribute.type;
+                this.value = value;
+                break;
+        }
+        if (Object.prototype.hasOwnProperty.call(attribute, 'default') && this.value == attribute.default) {
+            this.visible = false;
+        }
+    }
+};
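+
+// Illustrative sketch only (comments, not executed): how the argument-grouping
+// loops in nnabla.Graph above behave for a list-typed input. Assuming
+// hypothetical metadata for "Concat" that declares a single input
+// { name: "x", list: true }, a function consuming three variables collapses
+// them into one argument:
+//
+//   const func = { type: 'Concat', input: [ 'x0', 'x1', 'x2' ] };
+//   // at index 0: count = func.input.length - 0 = 3
+//   // => new nnabla.Argument('x', [ values.map('x0'), values.map('x1'), values.map('x2') ])
+//
+// Fixed-arity inputs take the count = 1 branch and map one variable to one
+// argument each.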
+
+nnabla.Tensor = class {
+
+    constructor(name, type, values) {
+        this.name = name;
+        this.type = type;
+        this.encoding = '|';
+        this._values = values;
+    }
+
+    get values() {
+        const dataType = this.type.dataType;
+        switch (dataType) {
+            case 'float32': return new Float32Array(this._values);
+            default: throw new nnabla.Error(`Unsupported data type '${dataType}'.`);
+        }
+    }
+};
+
+nnabla.TensorType = class {
+
+    constructor(shape) {
+        this.dataType = "float32";
+        this.shape = shape;
+        this.denotation = null; // TODO
+    }
+
+    toString() {
+        return this.dataType + this.shape.toString();
+    }
+};
+
+nnabla.TensorShape = class {
+
+    constructor(dimensions) {
+        this.dimensions = dimensions;
+    }
+
+    toString() {
+        return (this.dimensions && this.dimensions.length) ? (`[${this.dimensions.join(',')}]`) : '';
+    }
+};
+
+nnabla.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'Error loading Neural Network Libraries model.';
+    }
+};
+
+export const ModelFactory = nnabla.ModelFactory;
diff --git a/nnc.js b/nnc.js
new file mode 100644
index 00000000000..457257a44fa
--- /dev/null
+++ b/nnc.js
@@ -0,0 +1,31 @@
+
+const nnc = {};
+
+nnc.ModelFactory = class {
+
+    match(context) {
+        const stream = context.stream;
+        const signature = [ 0xC0, 0x0F, 0x00, 0x00, 0x45, 0x4E, 0x4E, 0x43 ]; // trailing bytes are ASCII 'ENNC'
+        if (stream && signature.length <= stream.length && stream.peek(signature.length).every((value, index) => value === signature[index])) {
+            return 'nnc';
+        }
+        return '';
+    }
+
+    async open(/* context, target */) {
+        throw new nnc.Error('File contains undocumented NNC data.');
+    }
+};
+
+nnc.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'Error loading NNC model.';
+    }
+};
+
+export const ModelFactory = nnc.ModelFactory;
diff --git a/nnef.js b/nnef.js
new file mode 100644
index 00000000000..71ad697cf74
--- /dev/null
+++ b/nnef.js
@@ -0,0 +1,83 @@
+
+import * as text from './text.js';
+
+const nnef = {};
+
+nnef.ModelFactory = class {
+
+    match(context) {
+        const identifier = context.identifier;
+        const extension = identifier.split('.').pop().toLowerCase();
+        if (extension === 'nnef') {
+            const stream = context.stream;
+            if (nnef.TextReader.open(stream)) {
+                return 'nnef.graph';
+            }
+        }
+        if (extension === 'dat') {
+            const stream = context.stream;
+            if (stream && stream.length > 2) {
+                const buffer = stream.peek(2);
+                if (buffer[0] === 0x4E && buffer[1] === 0xEF) { // 0x4E 0xEF magic bytes
+                    return 'nnef.dat';
+                }
+            }
+        }
+        return null;
+    }
+
+    async open(context, target) {
+        switch (target) {
+            case 'nnef.graph': {
+                const stream = context.stream;
+                const reader = nnef.TextReader.open(stream);
+                throw new nnef.Error(`NNEF v${reader.version} support not implemented.`);
+            }
+            case 'nnef.dat': {
+                throw new nnef.Error('NNEF dat format support not implemented.');
+            }
+            default: {
+                throw new nnef.Error(`Unsupported NNEF format '${target}'.`);
+            }
+        }
+    }
+};
+
+nnef.TextReader = class {
+
+    static open(stream) {
+        const reader = text.Reader.open(stream);
+        for (let i = 0; i < 32; i++) {
+            const line = reader.read();
+            const match = /version\s*(\d+\.\d+);/.exec(line);
+            if (match) {
+                return new nnef.TextReader(stream, match[1]);
+            }
+            if (line === undefined) {
+                break;
+            }
+        }
+        return null;
+    }
+
+    constructor(stream, version) {
+        this._stream = stream;
+        this._version = version;
+    }
+
+    get version() {
+        return this._version;
+    }
+};
+
+nnef.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'Error loading NNEF model.';
+    }
+};
+
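+// Illustrative sketch only (comments, not executed): nnef.TextReader.open scans
+// up to the first 32 lines for the NNEF version header. For a hypothetical
+// graph file beginning
+//
+//   version 1.0;
+//   graph main( input ) -> ( output ) { ... }
+//
+// the /version\s*(\d+\.\d+);/ pattern matches the first line, so open() returns
+// a reader whose version getter yields '1.0'. Files without such a header yield
+// null and match() falls through.
+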
+export const ModelFactory = nnef.ModelFactory; diff --git a/numpy.js b/numpy.js new file mode 100644 index 00000000000..3cf61118fec --- /dev/null +++ b/numpy.js @@ -0,0 +1,357 @@ + +// Experimental + +import * as python from './python.js'; + +const numpy = {}; + +numpy.ModelFactory = class { + + match(context) { + const stream = context.stream; + const signature = [ 0x93, 0x4E, 0x55, 0x4D, 0x50, 0x59 ]; + if (stream && signature.length <= stream.length && stream.peek(signature.length).every((value, index) => value === signature[index])) { + return { name: 'npy' }; + } + const entries = context.peek('npz'); + if (entries && entries.size > 0) { + return { name: 'npz', value: entries }; + } + const obj = context.peek('pkl'); + if (obj) { + if (numpy.Utility.isTensor(obj)) { + return { name: 'numpy.ndarray', value: obj }; + } + if (Array.isArray(obj) && obj.length > 0 && obj.every((obj) => obj && obj.__class__ && obj.__class__.__name__ === 'Network' && (obj.__class__.__module__ === 'dnnlib.tflib.network' || obj.__class__.__module__ === 'tfutil'))) { + return { name: 'dnnlib.tflib.network', value: obj }; + } + const weights = numpy.Utility.weights(obj); + if (weights && weights.size > 0) { + return { name: 'pickle', value: weights }; + } + } + return undefined; + } + + async open(context, target) { + let format = ''; + const graphs = []; + switch (target.name) { + case 'npy': { + format = 'NumPy Array'; + const execution = new python.Execution(); + const stream = context.stream; + const buffer = stream.peek(); + const bytes = execution.invoke('io.BytesIO', [ buffer ]); + const array = execution.invoke('numpy.load', [ bytes ]); + const layer = { type: 'numpy.ndarray', parameters: [ { name: 'value', tensor: { name: '', array: array } } ] }; + graphs.push({ layers: [ layer ] }); + break; + } + case 'npz': { + format = 'NumPy Zip'; + const layers = new Map(); + for (const [key, array] of target.value) { + const name = key.replace(/\.npy$/, ''); + const parts = name.split('/'); + const parameterName = parts.pop(); + const groupName = parts.join('/'); + if (!layers.has(groupName)) { + layers.set(groupName, { name: groupName, parameters: [] }); + } + const layer = layers.get(groupName); + layer.parameters.push({ + name: parameterName, + tensor: { name: name, array: array } + }); + } + graphs.push({ layers: Array.from(layers.values()) }); + break; + } + case 'pickle': { + format = 'NumPy Weights'; + const layers = new Map(); + const layer = (name) => { + if (!layers.has(name)) { + layers.set(name, { name: name, parameters: [] }); + } + return layers.get(name); + }; + const weights = target.value; + let separator = undefined; + if (Array.from(weights.keys()).every((key) => key.indexOf('.') !== -1)) { + separator = '.'; + } + if (Array.from(weights.keys()).every((key) => key.indexOf('_') > key.indexOf('.'))) { + separator = '_'; + } + for (const [name, value] of weights) { + if (name.endsWith('.__class__')) { + layer(name.substring(0, name.length - 10)).type = value; + continue; + } + const parts = separator ? name.split(separator) : null; + const parameterName = separator ? parts.pop() : name; + const layerName = separator ? 
parts.join(separator) : ''; + if (!layers.has(layerName)) { + layers.set(layerName, { name: layerName, parameters: [] }); + } + layer(layerName).parameters.push({ + name: parameterName, + tensor: { name: name, array: value } + }); + } + graphs.push({ layers: Array.from(layers.values()) }); + break; + } + case 'numpy.ndarray': { + format = 'NumPy NDArray'; + const layer = { + type: 'numpy.ndarray', + parameters: [ { name: 'value', tensor: { name: '', array: target.value } } ] + }; + graphs.push({ layers: [ layer ] }); + break; + } + case 'dnnlib.tflib.network': { + format = 'dnnlib'; + for (const obj of target.value) { + const layers = new Map(); + for (const [name, value] of obj.variables) { + if (numpy.Utility.isTensor(value)) { + const parts = name.split('/'); + const parameterName = parts.length > 1 ? parts.pop() : '?'; + const layerName = parts.join('/'); + if (!layers.has(layerName)) { + layers.set(layerName, { name: layerName, parameters: [] }); + } + const layer = layers.get(layerName); + layer.parameters.push({ + name: parameterName, + tensor: { name: name, array: value } + }); + } + } + graphs.push({ name: obj.name, layers: Array.from(layers.values()) }); + } + break; + } + default: { + throw new numpy.Error(`Unsupported NumPy format '${target.name}'.`); + } + } + return new numpy.Model(format, graphs); + } +}; + +numpy.Model = class { + + constructor(format, graphs) { + this.format = format; + this.graphs = graphs.map((graph) => new numpy.Graph(graph)); + } +}; + +numpy.Graph = class { + + constructor(graph) { + this.name = graph.name || ''; + this.nodes = graph.layers.map((layer) => new numpy.Node(layer)); + this.inputs = []; + this.outputs = []; + } +}; + +numpy.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +numpy.Value = class { + + constructor(name, initializer) { + if (typeof name !== 'string') { + throw new numpy.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = initializer.type; + this.initializer = initializer || null; + } +}; + +numpy.Node = class { + + constructor(layer) { + this._name = layer.name || ''; + this._type = { name: layer.type || 'Object' }; + this._inputs = []; + for (const parameter of layer.parameters) { + const initializer = new numpy.Tensor(parameter.tensor.array); + const value = new numpy.Value(parameter.tensor.name || '', initializer); + const argument = new numpy.Argument(parameter.name, [ value ]); + this._inputs.push(argument); + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return []; + } + + get attributes() { + return []; + } +}; + +numpy.Tensor = class { + + constructor(array) { + this.type = new numpy.TensorType(array.dtype.__name__, new numpy.TensorShape(array.shape)); + this.stride = array.strides.map((stride) => stride / array.itemsize); + this.values = this.type.dataType == 'string' || this.type.dataType == 'object' ? array.flatten().tolist() : array.tobytes(); + this.encoding = this.type.dataType == 'string' || this.type.dataType == 'object' ? 
'|' : array.dtype.byteorder;
+    }
+};
+
+numpy.TensorType = class {
+
+    constructor(dataType, shape) {
+        this.dataType = dataType || '?';
+        this.shape = shape;
+    }
+
+    toString() {
+        return this.dataType + this.shape.toString();
+    }
+};
+
+numpy.TensorShape = class {
+
+    constructor(dimensions) {
+        this.dimensions = dimensions;
+    }
+
+    toString() {
+        return this.dimensions && this.dimensions.length > 0 ? `[${this.dimensions.join(',')}]` : '';
+    }
+};
+
+numpy.Utility = class {
+
+    static isTensor(obj) {
+        return obj && obj.__class__ &&
+            ((obj.__class__.__module__ === 'numpy' && obj.__class__.__name__ === 'ndarray') ||
+             (obj.__class__.__module__ === 'numpy.core.memmap' && obj.__class__.__name__ === 'memmap'));
+    }
+
+    static weights(obj) {
+        const dict = (obj, key) => {
+            const dict = key === '' ? obj : obj[key];
+            if (dict) {
+                const weights = new Map();
+                if (dict instanceof Map) {
+                    for (const [key, obj] of dict) {
+                        if (numpy.Utility.isTensor(obj)) {
+                            weights.set(key, obj);
+                            continue;
+                        } else if (obj instanceof Map && Array.from(obj).every(([, value]) => numpy.Utility.isTensor(value))) {
+                            for (const [name, value] of obj) {
+                                weights.set(`${key}.${name}`, value);
+                            }
+                            continue;
+                        } else if (key === '_metadata') {
+                            continue;
+                        }
+                        return null;
+                    }
+                    return weights;
+                } else if (!Array.isArray(dict)) {
+                    const set = new Set([ 'weight_order', 'lr', 'model_iter', '__class__' ]);
+                    for (const [name, value] of Object.entries(dict)) {
+                        if (numpy.Utility.isTensor(value)) {
+                            weights.set(name, value);
+                            continue;
+                        }
+                        if (set.has(name)) {
+                            continue;
+                        }
+                        if (value && !Array.isArray(value) && Object.entries(value).every(([, value]) => numpy.Utility.isTensor(value))) {
+                            if (value && value.__class__ && value.__class__.__module__ && value.__class__.__name__) {
+                                weights.set(`${name}.__class__`, `${value.__class__.__module__}.${value.__class__.__name__}`);
+                            }
+                            for (const [key, obj] of Object.entries(value)) {
+                                weights.set(`${name}.${key}`, obj);
+                            }
+                            continue;
+                        }
+                        return null;
+                    }
+                    return weights;
+                }
+            }
+            return null;
+        };
+        const list = (obj, key) => {
+            let list = key === '' ? obj : obj[key];
+            if (list && Array.isArray(list) && list.every((obj) => Object.values(obj).every((value) => numpy.Utility.isTensor(value)))) {
+                list = list.map((obj) => obj instanceof Map ? obj : new Map(Object.entries(obj)));
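+                // Descriptive note: plain objects whose values are all tensors are
+                // wrapped into Maps here so the loop below iterates entries uniformly;
+                // e.g. a hypothetical [ { w: ndarray, b: ndarray } ] becomes a single
+                // Map and is flattened to the keys '0.w' and '0.b'.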
+            }
+            if (list && Array.isArray(list)) {
+                const weights = new Map();
+                for (let i = 0; i < list.length; i++) {
+                    const obj = list[i];
+                    if (numpy.Utility.isTensor(obj)) {
+                        weights.set(i.toString(), obj);
+                        continue;
+                    } else if (obj instanceof Map && Array.from(obj).every(([, value]) => numpy.Utility.isTensor(value))) {
+                        for (const [name, value] of obj) {
+                            weights.set(`${i}.${name}`, value);
+                        }
+                        continue;
+                    }
+                    return null;
+                }
+                return weights;
+            }
+            return null;
+        };
+        const keys = [ '', 'blobs', 'model', 'experiment_state' ];
+        for (const key of keys) {
+            const weights = dict(obj, key);
+            if (weights && weights.size > 0) {
+                return weights;
+            }
+        }
+        for (const key of keys) {
+            const weights = list(obj, key);
+            if (weights) {
+                return weights;
+            }
+        }
+        return null;
+    }
+};
+
+numpy.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'Error loading NumPy model.';
+    }
+};
+
+export const ModelFactory = numpy.ModelFactory;
diff --git a/om-metadata.json b/om-metadata.json
new file mode 100644
index 00000000000..9533e86a1a0
--- /dev/null
+++ b/om-metadata.json
@@ -0,0 +1,3043 @@
+[
+  {
+    "name": "Acos",
+    "inputs": [
+      { "name": "x" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ]
+  },
+  {
+    "name": "Acosh",
+    "category": "Activation",
+    "inputs": [
+      { "name": "x" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ]
+  },
+  {
+    "name": "Activation",
+    "category": "Activation",
+    "inputs": [
+      { "name": "x" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ],
+    "attributes": [
+      { "name": "mode", "type": "Enum", "enum": [ "Sigmoid", "ReLU", "Tanh", "Clipped ReLU", "ELU", "PReLU", "Abs", "Relu1", "Softsign", "Softplus", "Hardsigmoid", "Threshold ReLU", "Selu", "Linear", "Relu6", "GeLU" ] },
+      { "name": "coef" },
+      { "name": "negative_slope" }
+    ]
+  },
+  {
+    "name": "ReLU",
+    "category": "Activation"
+  },
+  {
+    "name": "Relu",
+    "category": "Activation"
+  },
+  {
+    "name": "Add",
+    "inputs": [
+      { "name": "x1" },
+      { "name": "x2" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ]
+  },
+  {
+    "name": "ArgMax",
+    "inputs": [
+      { "name": "x1" },
+      { "name": "x2" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ],
+    "attributes": [
+      { "name": "keep_dims" },
+      { "name": "axis" },
+      { "name": "output_type" },
+      { "name": "outmaxval" },
+      { "name": "topk" }
+    ]
+  },
+  {
+    "name": "ArgMaxExt2",
+    "inputs": [
+      { "name": "x" },
+      { "name": "axis" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ],
+    "attributes": [
+      { "name": "output_type" },
+      { "name": "keep_dims" },
+      { "name": "outmaxval" },
+      { "name": "topk" }
+    ]
+  },
+  {
+    "name": "ArgMin",
+    "inputs": [
+      { "name": "x" },
+      { "name": "axis" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ]
+  },
+  {
+    "name": "Asin",
+    "inputs": [
+      { "name": "x" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ]
+  },
+  {
+    "name": "Asinh",
+    "category": "Activation",
+    "inputs": [
+      { "name": "x" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ]
+  },
+  {
+    "name": "Atan",
+    "inputs": [
+      { "name": "x" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ]
+  },
+  {
+    "name": "Atanh",
+    "inputs": [
+      { "name": "x" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ]
+  },
+  {
+    "name": "AxisAlignedBboxTransform",
+    "inputs": [
+      { "name": "roi" },
+      { "name": "bbox_deltas" },
+      { "name": "batch_split" },
+      { "name": "im_info" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ]
+  },
+  {
+    "name": "BatchMatMul",
+    "inputs": [
+      { "name": "x1" },
+      { "name": "x2" }
+    ],
+    "outputs": [
+      { "name": "y" }
+    ],
+    "attributes": [
+      { "name": "adj_x1" },
+      { "name": "adj_x2" }
+
] + }, + { + "name": "BatchNorm", + "category": "Normalization", + "inputs": [ + { "name": "x" }, + { "name": "scale" }, + { "name": "b" }, + { "name": "mean" }, + { "name": "variance" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "momentum" }, + { "name": "epsilon" }, + { "name": "mode" }, + { "name": "use_global_stats" } + ] + }, + { + "name": "BatchNormExt2", + "inputs": [ + { "name": "x" }, + { "name": "scale" }, + { "name": "offset" }, + { "name": "mean" }, + { "name": "variance" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "momentum" }, + { "name": "epsilon" }, + { "name": "mode" }, + { "name": "use_global_stats" } + ] + }, + { + "name": "BatchReindex", + "inputs": [ + { "name": "x" }, + { "name": "reindex" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "BatchToSpaceND", + "inputs": [ + { "name": "x" }, + { "name": "block_shape" }, + { "name": "crops" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Bias", + "inputs": [ + { "name": "x" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" } + ] + }, + { + "name": "BiasAdd", + "category": "Layer", + "inputs": [ + { "name": "x" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "data_format" } + ] + }, + { + "name": "BidirectionLSTM", + "inputs": [ + { "name": "x" }, + { "name": "seq_len" }, + { "name": "w_fw" }, + { "name": "w_bw" }, + { "name": "c_0_fw" }, + { "name": "h_0_fw" }, + { "name": "c_0_bw" }, + { "name": "h_0_bw" } + ], + "outputs": [ + { "name": "y_fw" }, + { "name": "y_bw" }, + { "name": "h_t_fw" }, + { "name": "c_t_fw" }, + { "name": "h_t_bw" }, + { "name": "c_t_bw" } + ], + "attributes": [ + { "name": "forget_bias" }, + { "name": "num_layers" }, + { "name": "activation" }, + { "name": "cell_type" }, + { "name": "state_is_tuple" } + ] + }, + { + "name": "BNInference", + "inputs": [ + { "name": "x" }, + { "name": "mean" }, + { "name": "variance" }, + { "name": "scale" }, + { "name": "offset" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "momentum" }, + { "name": "epsilon" }, + { "name": "mode" }, + { "name": "use_global_stats" } + ] + }, + { + "name": "BNLL", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "BroadcastTo", + "inputs": [ + { "name": "x" }, + { "name": "shape" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Cast", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "SrcT" }, + { "name": "DstT" } + ] + }, + { + "name": "CastT", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "src_dtype" }, + { "name": "dst_dtype" } + ] + }, + { + "name": "Ceil", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "ChannelAxpy", + "inputs": [ + { "name": "a" }, + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + }, + { + "name": "ChannelShuffle", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" }, + { "name": "num_group" } + ] + }, + { + "name": "Clip", + "inputs": [ + { "name": "x" }, + { "name": "clip_value_min" }, + { "name": "clip_value_max" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Clipboxes", + "inputs": [ + { "name": "x" }, + { "name": "im_info" } + ], + "outputs": [ + { "name": "y" } + ] 
+ }, + { + "name": "ClipByValue", + "inputs": [ + { "name": "x" }, + { "name": "clip_value_min" }, + { "name": "clip_value_max" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Concat", + "category": "Tensor", + "inputs": [ + { "name": "x", "type": "Tensor[]" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "ConcatD", + "category": "Tensor", + "inputs": [ + { "name": "x", "type": "Tensor[]" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "ConcatV2D", + "category": "Tensor", + "inputs": [ + { "name": "x", "type": "Tensor[]" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Const", + "category": "Constant", + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "value" } + ] + }, + { + "name": "Conv2D", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "DepthwiseConv2D", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Convolution", + "category": "Layer", + "inputs": [ + { "name": "x" }, + { "name": "filter" }, + { "name": "bias" }, + { "name": "offset_w" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "strides" }, + { "name": "dilations" }, + { "name": "pads" }, + { "name": "pad_mode", "type": "Padding" }, + { "name": "groups" }, + { "name": "data_format" }, + { "name": "offset_x" }, + { "name": "mode", "type": "Enum", "enum": [ "Convolution", "Cross Correlation", "Deconvolution", "Depthwise" ] }, + { "name": "algo", "type": "Enum", "enum": [ "GEMM", "Winograd", "GEMM_ACCU_Float32" ] } + ] + }, + { + "name": "ConvolutionDepthwise", + "category": "Layer", + "inputs": [ + { "name": "x" }, + { "name": "filter" }, + { "name": "bias" }, + { "name": "offset_w" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "strides" }, + { "name": "dilations" }, + { "name": "pads" }, + { "name": "pad_mode", "type": "Padding" }, + { "name": "data_format" }, + { "name": "offset_x" } + ] + }, + { + "name": "ConvTranspose", + "category": "Layer", + "inputs": [ + { "name": "output_shape" }, + { "name": "filter" }, + { "name": "x" }, + { "name": "bias" }, + { "name": "offset_w" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "strides" }, + { "name": "pads" }, + { "name": "pad_mode", "type": "Padding" }, + { "name": "dilations" }, + { "name": "groups" }, + { "name": "data_format" }, + { "name": "offset_x" } + ] + }, + { + "name": "Copy", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Cos", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Cosh", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Crop", + "inputs": [ + { "name": "x" }, + { "name": "size" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" }, + { "name": "offsets" } + ] + }, + { + "name": "CropAndResize", + "inputs": [ + { "name": "x" }, + { "name": "boxes" }, + { "name": "box_index" }, + { "name": "crop_size" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "extrapolation_value" }, + { "name": "method" } + ] + }, + { + "name": "Cumprod", + "inputs": [ + { "name": "x" }, + { "name": "axis" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "exclusive" }, 
+ { "name": "reverse" } + ] + }, + { + "name": "Cumsum", + "inputs": [ + { "name": "x" }, + { "name": "axis" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "exclusive" }, + { "name": "reverse" } + ] + }, + { + "name": "Data", + "category": "Data", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "index" } + ] + }, + { + "name": "DecodeBBox", + "inputs": [ + { "name": "box_predictions" }, + { "name": "anchors" } + ], + "outputs": [ + { "name": "decoded_boxes" } + ], + "attributes": [ + { "name": "decode_clip" } + ] + }, + { + "name": "Deconvolution", + "category": "Layer", + "inputs": [ + { "name": "input_sizes" }, + { "name": "filter" }, + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "group" }, + { "name": "num_output" }, + { "name": "pad" }, + { "name": "stride" }, + { "name": "dilation" }, + { "name": "pad_mode", "type": "Padding" }, + { "name": "Padding" }, + { "name": "bias_term" }, + { "name": "kernel" } + ] + }, + { + "name": "DepthToSpace", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "block_size" }, + { "name": "mode" }, + { "name": "data_format" } + ] + }, + { + "name": "Dequantize", + "category": "Tensor", + "inputs": [ + { "name": "x" }, + { "name": "min_range" }, + { "name": "max_range" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "mode" } + ] + }, + { + "name": "DetectionPostprocessing", + "inputs": [ + { "name": "score" }, + { "name": "bbox_delta" }, + { "name": "anchors" } + ], + "outputs": [ + { "name": "detect_scores" }, + { "name": "rois" }, + { "name": "detect_class" }, + { "name": "actual_rois_num" } + ] + }, + { + "name": "DynamicImageData", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "max_src_image_size" }, + { "name": "image_type" } + ] + }, + { + "name": "Eltwise", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "N" }, + { "name": "mode", "type": "Enum", "enum": [ "Product", "Sum", "Max" ] }, + { "name": "coeff" } + ] + }, + { + "name": "Elu", + "category": "Activation" + }, + { + "name": "Equal", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Erf", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Exp", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "base" }, + { "name": "scale" }, + { "name": "shift" } + ] + }, + { + "name": "ExpandDims", + "inputs": [ + { "name": "x" }, + { "name": "axis" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Expm1", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "ExtractImagePatches", + "inputs": [ + { "name": "images" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "ksizes" }, + { "name": "strides" }, + { "name": "rates" }, + { "name": "padding" } + ] + }, + { + "name": "FakeQuantWithMinMaxVars", + "inputs": [ + { "name": "x" }, + { "name": "min" }, + { "name": "max" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "num_bits" }, + { "name": "narrow_range" } + ] + }, + { + "name": "FakeQuantWithMinMaxVarsPerChannel", + "inputs": [ + { "name": "x" }, + { "name": "min" }, + { "name": "max" } + ], + "outputs": [ + { "name": "y" } 
+ ], + "attributes": [ + { "name": "num_bits" }, + { "name": "narrow_range" } + ] + }, + { + "name": "Fc", + "category": "Layer", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Fill", + "inputs": [ + { "name": "dims" }, + { "name": "value" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Flatten", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "FlattenV2", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" }, + { "name": "end_axis" } + ] + }, + { + "name": "Floor", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "FloorDiv", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "FloorMod", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "FractionalPooling", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" }, + { "name": "row_pooling_sequence" }, + { "name": "col_pooling_sequence" } + ], + "attributes": [ + { "name": "mode" }, + { "name": "pooling_ratio" }, + { "name": "pseudo_random" }, + { "name": "overlapping" }, + { "name": "deterministic" }, + { "name": "seed" }, + { "name": "seed2" } + ] + }, + { + "name": "FSRDetectionOutput", + "inputs": [ + { "name": "rois" }, + { "name": "bbox_delta" }, + { "name": "score" }, + { "name": "im_info" }, + { "name": "actual_rois_num" } + ], + "outputs": [ + { "name": "actual_bbox_num" }, + { "name": "box" } + ], + "attributes": [ + { "name": "num_classes" }, + { "name": "score_threshold" }, + { "name": "iou_threshold" }, + { "name": "batch_rois" } + ] + }, + { + "name": "FullConnection", + "category": "Layer", + "inputs": [ + { "name": "x" }, + { "name": "w" }, + { "name": "b" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "num_output" } + ] + }, + { + "name": "FullyConnection", + "category": "Layer", + "inputs": [ + { "name": "X" }, + { "name": "W" }, + { "name": "B" }, + { "name": "offset_w" } + ], + "outputs": [ + { "name": "Y" } + ] + }, + { + "name": "Gather", + "inputs": [ + { "name": "params" }, + { "name": "indices" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" } + ] + }, + { + "name": "GatherNd", + "inputs": [ + { "name": "x" }, + { "name": "indices" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "GatherV2D", + "inputs": [ + { "name": "x" }, + { "name": "indices" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" } + ] + }, + { + "name": "GemmD", + "inputs": [ + { "name": "a" }, + { "name": "b" }, + { "name": "c" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "alpha" }, + { "name": "beta" }, + { "name": "transpose_a" }, + { "name": "transpose_b" } + ] + }, + { + "name": "GraphOp", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Greater", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "GreaterEqual", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "HardSwish", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "HeatmapMaxKeypoint", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y1" }, + { 
"name": "y2" } + ] + }, + { + "name": "ImageChannelSwap", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "rbuv_swap_switch" }, + { "name": "ax_swap_switch" } + ] + }, + { + "name": "ImageColorSpaceConvertion", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "target_format" } + ] + }, + { + "name": "ImageCrop", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "load_start_pos_h" }, + { "name": "load_start_pos_w" }, + { "name": "crop_size_h" }, + { "name": "crop_size_w" } + ] + }, + { + "name": "ImageData", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "input_format" }, + { "name": "src_image_size_w" }, + { "name": "src_image_size_h" }, + { "name": "image_type" } + ] + }, + { + "name": "ImageDataTypeConversion", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "mean_chn_0" }, + { "name": "mean_chn_1" }, + { "name": "mean_chn_2" }, + { "name": "mean_chn_3" }, + { "name": "min_chn_0" }, + { "name": "min_chn_1" }, + { "name": "min_chn_2" }, + { "name": "min_chn_3" }, + { "name": "var_reci_chn_0" }, + { "name": "var_reci_chn_1" }, + { "name": "var_reci_chn_2" }, + { "name": "var_reci_chn_3" } + ] + }, + { + "name": "ImagePadding", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "left_padding_size" }, + { "name": "right_padding_size" }, + { "name": "top_padding_size" }, + { "name": "bottom_padding_size" } + ] + }, + { + "name": "ImageResize", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "resize_output_h" }, + { "name": "resize_output_w" } + ] + }, + { + "name": "ImageRotation", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "rotation_angle" } + ] + }, + { + "name": "InstanceNorm", + "category": "Normalization", + "inputs": [ + { "name": "x" }, + { "name": "gamma" }, + { "name": "beta" } + ], + "outputs": [ + { "name": "y" }, + { "name": "saved_mean" }, + { "name": "saved_inv_stdev" } + ], + "attributes": [ + { "name": "data_format" }, + { "name": "epsilon" } + ] + }, + { + "name": "Interp", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "InvertPermutation", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "L2Normalize", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" }, + { "name": "eps" } + ] + }, + { + "name": "LayerNorm", + "category": "Normalization", + "inputs": [ + { "name": "x" }, + { "name": "gamma" }, + { "name": "beta" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "begin_norm_axis" }, + { "name": "begin_params_axis" }, + { "name": "epsilon" } + ] + }, + { + "name": "Less", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "LessEqual", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Log", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Log1p", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "LogicalAnd", + "inputs": [ + { "name": "x1" }, + 
{ "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "LogicalNot", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "LogicalOr", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "LogicalXor", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "LogSoftmax", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" } + ] + }, + { + "name": "LRN", + "category": "Normalization", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "depth_radius" }, + { "name": "bias" }, + { "name": "alpha" }, + { "name": "beta" }, + { "name": "norm_region" } + ] + }, + { + "name": "LSTM", + "inputs": [ + { "name": "x" }, + { "name": "cont" }, + { "name": "w_x" }, + { "name": "bias" }, + { "name": "w_h" }, + { "name": "x_static" }, + { "name": "h_0" }, + { "name": "c_0" }, + { "name": "w_x_static" } + ], + "outputs": [ + { "name": "h" }, + { "name": "h_t" }, + { "name": "c_t" } + ], + "attributes": [ + { "name": "expose_hidden" } + ] + }, + { + "name": "MatMul", + "inputs": [ + { "name": "x1" }, + { "name": "x2" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "transpose_x1" }, + { "name": "transpose_x2" } + ] + }, + { + "name": "Maximum", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "MaxPool", + "category": "Pool", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "MaxPoolV3", + "category": "Pool", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "AvgPool", + "category": "Pool", + "inputs": [ + { "name": "input" }, + { "name": "weights" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Minimum", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "MirrorPad", + "inputs": [ + { "name": "x" }, + { "name": "paddings" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "mode" } + ] + }, + { + "name": "Mish", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "MsrGenerateRpnProposals", + "inputs": [ + { "name": "scores" }, + { "name": "boxes" }, + { "name": "img_shape" } + ], + "outputs": [ + { "name": "proposal_scores" }, + { "name": "proposal_boxes" }, + { "name": "proposal_num" } + ], + "attributes": [ + { "name": "pre_nms_topk" }, + { "name": "post_nums_topk" }, + { "name": "rpn_mini_size" }, + { "name": "rpn_proposal_nms_thresh" } + ] + }, + { + "name": "Mul", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Multinomial", + "inputs": [ + { "name": "x" }, + { "name": "num_samples" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "seed" }, + { "name": "seed2" } + ] + }, + { + "name": "MVN", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "normalizeVariance" }, + { "name": "acrossChannel" } + ] + }, + { + "name": "Neg", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "NetOutput", + "category": "Data", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { 
"name": "output" } + ] + }, + { + "name": "NonMaxSuppression", + "inputs": [ + { "name": "boxes" }, + { "name": "scores" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "max_output_size" }, + { "name": "iou_threshold" }, + { "name": "score_threshold" } + ] + }, + { + "name": "NonMaxSuppressionV3D", + "inputs": [ + { "name": "boxes" }, + { "name": "scores" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "max_output_size" }, + { "name": "iou_threshold" }, + { "name": "score_threshold" } + ] + }, + { + "name": "NonMaxSuppressionV6", + "inputs": [ + { "name": "boxes" }, + { "name": "scores" }, + { "name": "max_output_boxes_per_class" }, + { "name": "iou_threshold" }, + { "name": "score_threshold" } + ], + "outputs": [ + { "name": "selected_indices" } + ], + "attributes": [ + { "name": "center_point_box" } + ] + }, + { + "name": "Normalize", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "across_spatial" }, + { "name": "channel_shared" }, + { "name": "eps" } + ] + }, + { + "name": "NotEqual", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "OneHot", + "inputs": [ + { "name": "x" }, + { "name": "depth" }, + { "name": "on_value" }, + { "name": "off_value" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" } + ] + }, + { + "name": "Pack", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" }, + { "name": "N" } + ] + }, + { + "name": "Pad", + "category": "Tensor", + "inputs": [ + { "name": "x" }, + { "name": "paddings" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "PadV2", + "inputs": [ + { "name": "x" }, + { "name": "paddings" }, + { "name": "constant_values" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Permute", + "category": "Shape", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Pooling", + "category": "Pool", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "mode" }, + { "name": "pad_mode", "type": "Padding" }, + { "name": "global_pooling" }, + { "name": "window" }, + { "name": "pad" }, + { "name": "stride" }, + { "name": "ceil_mode" }, + { "name": "data_mode" } + ] + }, + { + "name": "PoolingAve", + "category": "Pool", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "PoolingD", + "category": "Pool", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "mode" }, + { "name": "pad_mode", "type": "Padding" }, + { "name": "global_pooling" }, + { "name": "window" }, + { "name": "pad" }, + { "name": "stride" }, + { "name": "ceil_mode" }, + { "name": "data_mode" } + ] + }, + { + "name": "PoolingMax", + "category": "Pool", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Pow", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Power", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "scale" }, + { "name": "shift" }, + { "name": "power" } + ] + }, + { + "name": "PRelu", + "category": "Activation", + "inputs": [ + { "name": "x" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "PReLU", + 
"category": "Activation", + "inputs": [ + { "name": "x" }, + { "name": "param" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "PriorBox", + "inputs": [ + { "name": "x" }, + { "name": "img" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Proposal", + "inputs": [ + { "name": "cls_prob" }, + { "name": "bbox_pred" }, + { "name": "im_info" } + ], + "outputs": [ + { "name": "rois" } + ], + "attributes": [ + { "name": "feat_stride" }, + { "name": "base_size" }, + { "name": "min_size" }, + { "name": "ratio" }, + { "name": "scale" }, + { "name": "pre_nms_topn" }, + { "name": "post_nms_topn" }, + { "name": "nms_thresh" } + ] + }, + { + "name": "PSROIPooling", + "inputs": [ + { "name": "x" }, + { "name": "rois" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "spatial_scale" }, + { "name": "output_dim" }, + { "name": "group_size" } + ] + }, + { + "name": "Quantize", + "inputs": [ + { "name": "x" }, + { "name": "min_range" }, + { "name": "max_range" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "mode" } + ] + }, + { + "name": "QuantizedConvolution", + "inputs": [ + { "name": "x" }, + { "name": "filter" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "strides" }, + { "name": "dilations" }, + { "name": "pads" }, + { "name": "pad_mode", "type": "Padding" }, + { "name": "groups" }, + { "name": "data_format" }, + { "name": "x_quant_type" }, + { "name": "filter_quant_type" }, + { "name": "x_quant_scale" }, + { "name": "x_quant_offset" }, + { "name": "filter_quant_scales" } + ] + }, + { + "name": "QuantizedConvolutionDepthwise", + "category": "Layer", + "inputs": [ + { "name": "x" }, + { "name": "filter" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "strides" }, + { "name": "dilations" }, + { "name": "pads" }, + { "name": "pad_mode", "type": "Padding" }, + { "name": "data_format" }, + { "name": "x_quant_type" }, + { "name": "filter_quant_type" }, + { "name": "x_quant_scale" }, + { "name": "x_quant_offset" }, + { "name": "filter_quant_scales" } + ] + }, + { + "name": "QuantizedFullConnection", + "inputs": [ + { "name": "x" }, + { "name": "filter" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "num_output" }, + { "name": "x_quant_type" }, + { "name": "filter_quant_type" }, + { "name": "x_quant_scale" }, + { "name": "x_quant_offset" }, + { "name": "filter_quant_scales" } + ] + }, + { + "name": "QuantizedFullyConnection", + "inputs": [ + { "name": "x" }, + { "name": "w" }, + { "name": "b" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "num_output" }, + { "name": "transpose" }, + { "name": "axis" }, + { "name": "x_quant_type" }, + { "name": "w_quant_type" }, + { "name": "x_quant_scale" }, + { "name": "x_quant_offset" }, + { "name": "w_quant_scales" } + ] + }, + { + "name": "QuantizedMatMul", + "inputs": [ + { "name": "x1" }, + { "name": "x2" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "transpose_x1" }, + { "name": "transpose_x2" }, + { "name": "x1_quant_type" }, + { "name": "x2_quant_type" }, + { "name": "x1_quant_scale" }, + { "name": "x1_quant_offset" }, + { "name": "x2_quant_scales" } + ] + }, + { + "name": "RandomNormal", + "inputs": [ + { "name": "shape" }, + { "name": "mean" }, + { "name": "stddev" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "RandomNormalNoSeed", + "inputs": [ 
+ { "name": "shape" }, + { "name": "mean" }, + { "name": "stddev" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "RandomShuffle", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "RandomShuffleNoSeed", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "RandomUniform", + "inputs": [ + { "name": "shape" }, + { "name": "minval" }, + { "name": "maxval" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "RandomUniformInt", + "inputs": [ + { "name": "shape" }, + { "name": "minval" }, + { "name": "maxval" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "RandomUniformNoSeed", + "inputs": [ + { "name": "shape" }, + { "name": "minval" }, + { "name": "maxval" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Range", + "inputs": [ + { "name": "start" }, + { "name": "limit" }, + { "name": "delta" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Rank", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "RealDiv", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Reciprocal", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "ReduceAll", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "axes" }, + { "name": "keep_dims" } + ] + }, + { + "name": "ReduceAllD", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axes" }, + { "name": "keep_dims" } + ] + }, + { + "name": "ReduceAny", + "inputs": [ + { "name": "x" }, + { "name": "axes" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "keep_dims" } + ] + }, + { + "name": "ReduceL2D", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axes" }, + { "name": "keep_dims" } + ] + }, + { + "name": "ReduceLogSumExp", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axes" }, + { "name": "keep_dims" } + ] + }, + { + "name": "ReduceMax", + "inputs": [ + { "name": "x" }, + { "name": "axes" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "keep_dims" } + ] + }, + { + "name": "ReduceMean", + "inputs": [ + { "name": "x" }, + { "name": "axes" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "keep_dims" } + ] + }, + { + "name": "ReduceMin", + "inputs": [ + { "name": "x" }, + { "name": "axes" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "keep_dims" } + ] + }, + { + "name": "ReduceProd", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "keep_dims" }, + { "name": "axes" } + ] + }, + { + "name": "ReduceProdD", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axes" }, + { "name": "keep_dims" } + ] + }, + { + "name": "ReduceSum", + "inputs": [ + { "name": "x" }, + { "name": "axes" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "keep_dims" } + ] + }, + { + "name": "Reduction", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "operation" }, + { "name": "axis" }, + { "name": "coeff" } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "inputs": [ + { 
"name": "x" }, + { "name": "shape" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "ResizeBilinear", + "inputs": [ + { "name": "x" }, + { "name": "size" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "align_corners" } + ] + }, + { + "name": "ResizeBilinearV2", + "inputs": [ + { "name": "x" }, + { "name": "size" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "align_corners" }, + { "name": "half_pixel_centers" } + ] + }, + { + "name": "ResizeNearestNeighbor", + "inputs": [ + { "name": "x" }, + { "name": "size" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "align_corners" } + ] + }, + { + "name": "ResizeNearestNeighborV2", + "inputs": [ + { "name": "x" }, + { "name": "size" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "align_corners" }, + { "name": "half_pixel_centers" } + ] + }, + { + "name": "Reverse", + "inputs": [ + { "name": "x" }, + { "name": "axis" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "ReverseSequence", + "inputs": [ + { "name": "x" }, + { "name": "seq_lengths" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "seq_dim" }, + { "name": "batch_dim" } + ] + }, + { + "name": "Rint", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "ROIAlignV2", + "inputs": [ + { "name": "features" }, + { "name": "rois" }, + { "name": "rois_n" }, + { "name": "batch_indices" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "spatial_scale" }, + { "name": "pooled_height" }, + { "name": "pooled_width" }, + { "name": "sample_num" }, + { "name": "roi_end_mode" }, + { "name": "mode" } + ] + }, + { + "name": "ROIPooling", + "category": "Pool", + "inputs": [ + { "name": "x" }, + { "name": "rois" }, + { "name": "roi_actual_num" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "pooled_h" }, + { "name": "pooled_w" }, + { "name": "spatial_scale_h" }, + { "name": "spatial_scale_w" } + ] + }, + { + "name": "Round", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Rsqrt", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Scale", + "category": "Layer", + "inputs": [ + { "name": "x" }, + { "name": "scale" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" }, + { "name": "num_axes" }, + { "name": "scale_from_blob" } + ] + }, + { + "name": "ScatterNd", + "inputs": [ + { "name": "indices" }, + { "name": "x" }, + { "name": "shape" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "ScatterUpdate", + "inputs": [ + { "name": "var" }, + { "name": "indices" }, + { "name": "updates" } + ], + "outputs": [ + { "name": "var" } + ], + "attributes": [ + { "name": "use_locking" }, + { "name": "axis" } + ] + }, + { + "name": "SegmentMax", + "inputs": [ + { "name": "x" }, + { "name": "segment_ids" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "SegmentMean", + "inputs": [ + { "name": "x" }, + { "name": "segment_ids" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "SegmentMin", + "inputs": [ + { "name": "x" }, + { "name": "segment_ids" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "SegmentProd", + "inputs": [ + { "name": "x" }, + { "name": "segment_ids" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "SegmentSum", + "inputs": [ + { "name": "x" }, + { "name": 
"segment_ids" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Select", + "inputs": [ + { "name": "condition" }, + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Shape", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "dtype" } + ] + }, + { + "name": "ShuffleChannel", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "group" } + ] + }, + { + "name": "ShuffleChannelV2", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" }, + { "name": "group" } + ] + }, + { + "name": "Sign", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Sigmoid", + "category": "Activation", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Sin", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Sinh", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Size", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "dtype" } + ] + }, + { + "name": "Slice", + "category": "Tensor", + "inputs": [ + { "name": "x" }, + { "name": "offsets" }, + { "name": "size" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Softmax", + "category": "Activation", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "SoftmaxV2", + "category": "Activation", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "SpaceToBatchND", + "inputs": [ + { "name": "x" }, + { "name": "block_shape" }, + { "name": "paddings" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "SpaceToDepth", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "block_size" }, + { "name": "data_format" } + ] + }, + { + "name": "SparseToDense", + "inputs": [ + { "name": "indices" }, + { "name": "output_shape" }, + { "name": "values" }, + { "name": "default_value" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Split", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "axis" }, + { "name": "output_num" }, + { "name": "slice_point" }, + { "name": "size_split" } + ] + }, + { + "name": "SplitD", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "split_dim" }, + { "name": "num_split" } + ] + }, + { + "name": "SplitV", + "inputs": [ + { "name": "x" }, + { "name": "size_splits" }, + { "name": "split_dim" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "num_split" } + ] + }, + { + "name": "SPP", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "pyramidHeight" }, + { "name": "poolingMode" } + ] + }, + { + "name": "Sqrt", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Square", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "SquaredDifference", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Squeeze", + "category": "Shape", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { 
"name": "axis" } + ] + }, + { + "name": "SSDDetectionOutput", + "inputs": [ + { "name": "mbox_conf" }, + { "name": "mbox_loc" }, + { "name": "mbox_priorbox" } + ], + "outputs": [ + { "name": "out_boxnum" }, + { "name": "regionProposal" } + ], + "attributes": [ + { "name": "num_classes" }, + { "name": "shared_location" }, + { "name": "background_label_id" }, + { "name": "nms_threshold" }, + { "name": "top_k" }, + { "name": "eta" }, + { "name": "variance_encoded_in_target" }, + { "name": "code_type" }, + { "name": "keep_top_k" }, + { "name": "confidence_threshold" } + ] + }, + { + "name": "StopGradient", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "StridedSlice", + "category": "Tensor", + "inputs": [ + { "name": "x" }, + { "name": "begin" }, + { "name": "end" }, + { "name": "strides" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "begin_mask" }, + { "name": "end_mask" }, + { "name": "ellipsis_mask" }, + { "name": "new_axis_mask" }, + { "name": "shrink_axis_mask" } + ] + }, + { + "name": "StridedSliceD", + "category": "Tensor", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "StridedSliceV2", + "inputs": [ + { "name": "x" }, + { "name": "begin" }, + { "name": "end" }, + { "name": "axes" }, + { "name": "strides" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "begin_mask" }, + { "name": "end_mask" }, + { "name": "ellipsis_mask" }, + { "name": "new_axis_mask" }, + { "name": "shrink_axis_mask" } + ] + }, + { + "name": "Sub", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "SVDF", + "inputs": [ + { "name": "x" }, + { "name": "weights_feature" }, + { "name": "weights_time" }, + { "name": "bias" }, + { "name": "state_in" } + ], + "outputs": [ + { "name": "state_out" }, + { "name": "y" } + ], + "attributes": [ + { "name": "rank" }, + { "name": "use_bias" } + ] + }, + { + "name": "Swish", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "scale" } + ] + }, + { + "name": "Tan", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "TensorArray", + "inputs": [ + { "name": "data" } + ], + "outputs": [ + { "name": "handle" }, + { "name": "flow" }, + { "name": "memory" } + ] + }, + { + "name": "TensorArrayGather", + "inputs": [ + { "name": "handle" }, + { "name": "indices" }, + { "name": "flow_in" } + ], + "outputs": [ + { "name": "flow" } + ] + }, + { + "name": "TensorArrayScatter", + "inputs": [ + { "name": "handle" }, + { "name": "indices" }, + { "name": "value" }, + { "name": "flow_in" } + ], + "outputs": [ + { "name": "flow" } + ] + }, + { + "name": "Threshold", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "threshold" } + ] + }, + { + "name": "Tile", + "inputs": [ + { "name": "x" }, + { "name": "multiples" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "TopK", + "inputs": [ + { "name": "x" }, + { "name": "k" } + ], + "outputs": [ + { "name": "values" }, + { "name": "indices" } + ], + "attributes": [ + { "name": "sorted" } + ] + }, + { + "name": "TransData", + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Transpose", + "category": "Shape", + "inputs": [ + { "name": "x" }, + { "name": "w" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": 
[ + { "name": "order" } + ] + }, + { + "name": "TruncateDiv", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "TruncateMod", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Undefined", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Unpack", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "num" }, + { "name": "axis" } + ] + }, + { + "name": "UnsortedSegmentSum", + "inputs": [ + { "name": "x" }, + { "name": "segment_ids" }, + { "name": "num_segments" } + ], + "outputs": [ + { "name": "y" } + ] + }, + { + "name": "Upsample", + "category": "Data", + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "y" } + ], + "attributes": [ + { "name": "stride_h" }, + { "name": "stride_w" }, + { "name": "scale" } + ] + }, + { + "name": "Xlogy", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + } +] \ No newline at end of file diff --git a/om-proto.js b/om-proto.js new file mode 100644 index 00000000000..6712bae64e2 --- /dev/null +++ b/om-proto.js @@ -0,0 +1,1065 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('om'); + +$root.ge = {}; + +$root.ge.proto = {}; + +$root.ge.proto.DataType = { + "DT_UNDEFINED": 0, + "DT_FLOAT": 1, + "DT_FLOAT16": 2, + "DT_INT8": 3, + "DT_UINT8": 4, + "DT_INT16": 5, + "DT_UINT16": 6, + "DT_INT32": 7, + "DT_INT64": 8, + "DT_UINT32": 9, + "DT_UINT64": 10, + "DT_BOOL": 11, + "DT_DOUBLE": 12, + "DT_STRING": 13, + "DT_DUAL_SUB_INT8": 14, + "DT_DUAL_SUB_UINT8": 15, + "DT_COMPLEX64": 16, + "DT_COMPLEX128": 17, + "DT_QINT8": 18, + "DT_QINT16": 19, + "DT_QINT32": 20, + "DT_QUINT8": 21, + "DT_QUINT16": 22, + "DT_RESOURCE": 23, + "DT_STRING_REF": 24, + "DT_DUAL": 25, + "DT_VARIANT": 26, + "DT_BF16": 27, + "DT_INT4": 28, + "DT_UINT1": 29, + "DT_INT2": 30, + "DT_UINT2": 31 +}; + +$root.ge.proto.AttrDef = class AttrDef { + + constructor() { + } + + get value() { + $root.ge.proto.AttrDef.valueSet = $root.ge.proto.AttrDef.valueSet || new Set([ "s", "i", "f", "b", "bt", "list", "func", "td", "t", "g", "list_list_int", "dt", "list_list_float"]); + return Object.keys(this).find((key) => $root.ge.proto.AttrDef.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.ge.proto.AttrDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.s = reader.bytes(); + break; + case 3: + message.i = reader.int64(); + break; + case 4: + message.f = reader.float(); + break; + case 5: + message.b = reader.bool(); + break; + case 7: + message.bt = reader.bytes(); + break; + case 1: + message.list = $root.ge.proto.AttrDef.ListValue.decode(reader, reader.uint32()); + break; + case 10: + message.func = $root.ge.proto.NamedAttrs.decode(reader, reader.uint32()); + break; + case 11: + message.td = $root.ge.proto.TensorDescriptor.decode(reader, reader.uint32()); + break; + case 12: + message.t = $root.ge.proto.TensorDef.decode(reader, reader.uint32()); + break; + case 13: + message.g = $root.ge.proto.GraphDef.decode(reader, reader.uint32()); + break; + case 14: + message.list_list_int = $root.ge.proto.AttrDef.ListListInt.decode(reader, reader.uint32()); + break; + case 15: + message.dt = reader.int64(); + break; + case 16: + message.list_list_float = $root.ge.proto.AttrDef.ListListFloat.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.AttrDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "s": + message.s = reader.bytes(); + break; + case "i": + message.i = reader.int64(); + break; + case "f": + message.f = reader.float(); + break; + case "b": + message.b = reader.bool(); + break; + case "bt": + message.bt = reader.bytes(); + break; + case "list": + message.list = $root.ge.proto.AttrDef.ListValue.decodeText(reader); + break; + case "func": + message.func = $root.ge.proto.NamedAttrs.decodeText(reader); + break; + case "td": + message.td = $root.ge.proto.TensorDescriptor.decodeText(reader); + break; + case "t": + message.t = $root.ge.proto.TensorDef.decodeText(reader); + break; + case "g": + message.g = $root.ge.proto.GraphDef.decodeText(reader); + break; + case "list_list_int": + message.list_list_int = $root.ge.proto.AttrDef.ListListInt.decodeText(reader); + break; + case "dt": + message.dt = reader.int64(); + break; + case "list_list_float": + message.list_list_float = $root.ge.proto.AttrDef.ListListFloat.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.AttrDef.ListValue = class ListValue { + + constructor() { + this.s = []; + this.i = []; + this.f = []; + this.b = []; + this.bt = []; + this.td = []; + this.t = []; + this.g = []; + this.na = []; + this.dt = []; + } + + static decode(reader, length) { + const message = new $root.ge.proto.AttrDef.ListValue(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.s.push(reader.bytes()); + break; + case 3: + message.i = reader.array(message.i, () => reader.int64(), tag); + break; + case 4: + message.f = reader.floats(message.f, tag); + break; + case 5: + message.b = reader.array(message.b, () => reader.bool(), tag); + break; + case 7: + message.bt.push(reader.bytes()); + break; + case 8: + message.td.push($root.ge.proto.TensorDescriptor.decode(reader, reader.uint32())); + break; + case 9: + message.t.push($root.ge.proto.TensorDef.decode(reader, reader.uint32())); + break; + case 10: + message.g.push($root.ge.proto.GraphDef.decode(reader, reader.uint32())); + break; + case 11: + message.na.push($root.ge.proto.NamedAttrs.decode(reader, reader.uint32())); + break; + case 12: + message.dt = reader.array(message.dt, () => reader.int64(), tag); + break; + case 20: + message.val_type = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.AttrDef.ListValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "s": + reader.array(message.s, () => reader.bytes()); + break; + case "i": + reader.array(message.i, () => reader.int64()); + break; + case "f": + reader.array(message.f, () => reader.float()); + break; + case "b": + reader.array(message.b, () => reader.bool()); + break; + case "bt": + reader.array(message.bt, () => reader.bytes()); + break; + case "td": + message.td.push($root.ge.proto.TensorDescriptor.decodeText(reader)); + break; + case "t": + message.t.push($root.ge.proto.TensorDef.decodeText(reader)); + break; + case "g": + message.g.push($root.ge.proto.GraphDef.decodeText(reader)); + break; + case "na": + message.na.push($root.ge.proto.NamedAttrs.decodeText(reader)); + break; + case "dt": + reader.array(message.dt, () => reader.int64()); + break; + case "val_type": + message.val_type = reader.enum($root.ge.proto.AttrDef.ListValue.ListValueType); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.AttrDef.ListValue.prototype.val_type = 0; + +$root.ge.proto.AttrDef.ListValue.ListValueType = { + "VT_LIST_NONE": 0, + "VT_LIST_STRING": 1, + "VT_LIST_INT": 2, + "VT_LIST_FLOAT": 3, + "VT_LIST_BOOL": 4, + "VT_LIST_BYTES": 5, + "VT_LIST_TENSOR_DESC": 6, + "VT_LIST_TENSOR": 7, + "VT_LIST_GRAPH": 8, + "VT_LIST_NAMED_ATTRS": 9, + "VT_LIST_DATA_TYPE": 10 +}; + +$root.ge.proto.AttrDef.ListListInt = class ListListInt { + + constructor() { + this.list_list_i = []; + } + + static decode(reader, length) { + const message = new $root.ge.proto.AttrDef.ListListInt(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.list_list_i.push($root.ge.proto.AttrDef.ListListInt.ListInt.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.AttrDef.ListListInt(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "list_list_i": + message.list_list_i.push($root.ge.proto.AttrDef.ListListInt.ListInt.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.AttrDef.ListListInt.ListInt = class ListInt { + + constructor() { + this.list_i = []; + } + + static decode(reader, length) { + const message = new $root.ge.proto.AttrDef.ListListInt.ListInt(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.list_i = reader.array(message.list_i, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.AttrDef.ListListInt.ListInt(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "list_i": + reader.array(message.list_i, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.AttrDef.ListListFloat = class ListListFloat { + + constructor() { + this.list_list_f = []; + } + + static decode(reader, length) { + const message = new $root.ge.proto.AttrDef.ListListFloat(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.list_list_f.push($root.ge.proto.AttrDef.ListListFloat.ListFloat.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.AttrDef.ListListFloat(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "list_list_f": + message.list_list_f.push($root.ge.proto.AttrDef.ListListFloat.ListFloat.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.AttrDef.ListListFloat.ListFloat = class ListFloat { + + constructor() { + this.list_f = []; + } + + static decode(reader, length) { + const message = new $root.ge.proto.AttrDef.ListListFloat.ListFloat(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.list_f = reader.floats(message.list_f, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.AttrDef.ListListFloat.ListFloat(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "list_f": + reader.array(message.list_f, () => reader.float()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.NamedAttrs = class NamedAttrs { + + constructor() { + this.attr = {}; + } + + static decode(reader, length) { + const message = new $root.ge.proto.NamedAttrs(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + reader.entry(message.attr, () => reader.string(), () => $root.ge.proto.AttrDef.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.NamedAttrs(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "attr": + reader.entry(message.attr, () => reader.string(), () => $root.ge.proto.AttrDef.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.NamedAttrs.prototype.name = ""; + +$root.ge.proto.ShapeDef = class ShapeDef { + + constructor() { + this.dim = []; + } + + static decode(reader, length) { + const message = new $root.ge.proto.ShapeDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dim = reader.array(message.dim, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.ShapeDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dim": + reader.array(message.dim, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.TensorDescriptor = class TensorDescriptor { + + constructor() { + this.attr = {}; + } + + static decode(reader, length) { + const message = new $root.ge.proto.TensorDescriptor(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.dtype = reader.int32(); + break; + case 3: + message.shape = $root.ge.proto.ShapeDef.decode(reader, reader.uint32()); + break; + case 4: + message.layout = reader.string(); + break; + case 9: + message.has_out_attr = reader.bool(); + break; + case 10: + message.size = reader.int64(); + break; + case 11: + message.weight_size = reader.int64(); + break; + case 12: + message.reuse_input = reader.bool(); + break; + case 13: + message.output_tensor = reader.bool(); + break; + case 14: + message.device_type = reader.string(); + break; + case 15: + message.input_tensor = reader.bool(); + break; + case 16: + message.real_dim_cnt = reader.int64(); + break; + case 17: + message.reuse_input_index = reader.int64(); + break; + case 18: + message.data_offset = reader.int64(); + break; + case 19: + message.cmps_size = reader.int64(); + break; + case 20: + message.cmps_tab = reader.string(); + break; + case 21: + message.cmps_tab_offset = reader.int64(); + break; + case 5: + reader.entry(message.attr, () => reader.string(), () => $root.ge.proto.AttrDef.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.TensorDescriptor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "dtype": + message.dtype = reader.enum($root.ge.proto.DataType); + break; + case "shape": + message.shape = $root.ge.proto.ShapeDef.decodeText(reader); + break; + case "layout": + message.layout = reader.string(); + break; + case "has_out_attr": + message.has_out_attr = reader.bool(); + break; + case "size": + message.size = reader.int64(); + break; + case "weight_size": + message.weight_size = reader.int64(); + break; + case "reuse_input": + message.reuse_input = reader.bool(); + break; + case "output_tensor": + message.output_tensor = reader.bool(); + break; + case "device_type": + message.device_type = reader.string(); + break; + case "input_tensor": + message.input_tensor = reader.bool(); + break; + case "real_dim_cnt": + message.real_dim_cnt = reader.int64(); + break; + case "reuse_input_index": + message.reuse_input_index = reader.int64(); + break; + case "data_offset": + message.data_offset = reader.int64(); + break; + case "cmps_size": + message.cmps_size = reader.int64(); + break; + case "cmps_tab": + message.cmps_tab = reader.string(); + break; + case "cmps_tab_offset": + message.cmps_tab_offset = reader.int64(); + break; + case "attr": + reader.entry(message.attr, () => reader.string(), () => $root.ge.proto.AttrDef.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.TensorDescriptor.prototype.name = ""; +$root.ge.proto.TensorDescriptor.prototype.dtype = 0; +$root.ge.proto.TensorDescriptor.prototype.shape = null; +$root.ge.proto.TensorDescriptor.prototype.layout = ""; +$root.ge.proto.TensorDescriptor.prototype.has_out_attr = false; +$root.ge.proto.TensorDescriptor.prototype.size = protobuf.Int64.create(0); +$root.ge.proto.TensorDescriptor.prototype.weight_size = protobuf.Int64.create(0); +$root.ge.proto.TensorDescriptor.prototype.reuse_input = false; +$root.ge.proto.TensorDescriptor.prototype.output_tensor = 
false; +$root.ge.proto.TensorDescriptor.prototype.device_type = ""; +$root.ge.proto.TensorDescriptor.prototype.input_tensor = false; +$root.ge.proto.TensorDescriptor.prototype.real_dim_cnt = protobuf.Int64.create(0); +$root.ge.proto.TensorDescriptor.prototype.reuse_input_index = protobuf.Int64.create(0); +$root.ge.proto.TensorDescriptor.prototype.data_offset = protobuf.Int64.create(0); +$root.ge.proto.TensorDescriptor.prototype.cmps_size = protobuf.Int64.create(0); +$root.ge.proto.TensorDescriptor.prototype.cmps_tab = ""; +$root.ge.proto.TensorDescriptor.prototype.cmps_tab_offset = protobuf.Int64.create(0); + +$root.ge.proto.TensorDef = class TensorDef { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.ge.proto.TensorDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.desc = $root.ge.proto.TensorDescriptor.decode(reader, reader.uint32()); + break; + case 2: + message.data = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.TensorDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "desc": + message.desc = $root.ge.proto.TensorDescriptor.decodeText(reader); + break; + case "data": + message.data = reader.bytes(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.TensorDef.prototype.desc = null; +$root.ge.proto.TensorDef.prototype.data = new Uint8Array([]); + +$root.ge.proto.OpDef = class OpDef { + + constructor() { + this.input = []; + this.attr = {}; + this.input_name = []; + this.src_name = []; + this.src_index = []; + this.dst_name = []; + this.dst_index = []; + this.input_i = []; + this.output_i = []; + this.workspace = []; + this.workspace_bytes = []; + this.is_input_const = []; + this.input_desc = []; + this.output_desc = []; + this.subgraph_name = []; + } + + static decode(reader, length) { + const message = new $root.ge.proto.OpDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 5: + message.input.push(reader.string()); + break; + case 10: + reader.entry(message.attr, () => reader.string(), () => $root.ge.proto.AttrDef.decode(reader, reader.uint32())); + break; + case 20: + message.has_out_attr = reader.bool(); + break; + case 21: + message.id = reader.int64(); + break; + case 22: + message.stream_id = reader.int64(); + break; + case 23: + message.input_name.push(reader.string()); + break; + case 24: + message.src_name.push(reader.string()); + break; + case 25: + message.src_index = reader.array(message.src_index, () => reader.int64(), tag); + break; + case 26: + message.dst_name.push(reader.string()); + break; + case 27: + message.dst_index = reader.array(message.dst_index, () => reader.int64(), tag); + break; + case 28: + message.input_i = reader.array(message.input_i, () => reader.int64(), tag); + break; + case 29: + message.output_i = reader.array(message.output_i, () => reader.int64(), tag); + break; + case 30: + message.workspace = reader.array(message.workspace, () => reader.int64(), tag); + break; + case 31: + message.workspace_bytes = reader.array(message.workspace_bytes, () => reader.int64(), tag); + break; + case 32: + message.is_input_const = reader.array(message.is_input_const, () => reader.bool(), tag); + break; + case 33: + message.input_desc.push($root.ge.proto.TensorDescriptor.decode(reader, reader.uint32())); + break; + case 34: + message.output_desc.push($root.ge.proto.TensorDescriptor.decode(reader, reader.uint32())); + break; + case 35: + message.subgraph_name.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.OpDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "input": + reader.array(message.input, () => reader.string()); + break; + case "attr": + reader.entry(message.attr, () => reader.string(), () => $root.ge.proto.AttrDef.decodeText(reader)); + break; + case "has_out_attr": + message.has_out_attr = reader.bool(); + break; + case "id": + message.id = reader.int64(); + break; + case "stream_id": + message.stream_id = reader.int64(); + break; + case "input_name": + reader.array(message.input_name, () => reader.string()); + break; + case "src_name": + reader.array(message.src_name, () => reader.string()); + break; + case "src_index": + reader.array(message.src_index, () => reader.int64()); + break; + case "dst_name": + reader.array(message.dst_name, () => reader.string()); + break; + case "dst_index": + reader.array(message.dst_index, () => reader.int64()); + break; + case "input_i": + reader.array(message.input_i, () => reader.int64()); + break; + case "output_i": + reader.array(message.output_i, () => reader.int64()); + break; + case "workspace": + reader.array(message.workspace, () => reader.int64()); + break; + case "workspace_bytes": + reader.array(message.workspace_bytes, () => reader.int64()); + break; + case "is_input_const": + reader.array(message.is_input_const, () => reader.bool()); + break; + case "input_desc": + message.input_desc.push($root.ge.proto.TensorDescriptor.decodeText(reader)); + break; + 
case "output_desc": + message.output_desc.push($root.ge.proto.TensorDescriptor.decodeText(reader)); + break; + case "subgraph_name": + reader.array(message.subgraph_name, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.OpDef.prototype.name = ""; +$root.ge.proto.OpDef.prototype.type = ""; +$root.ge.proto.OpDef.prototype.has_out_attr = false; +$root.ge.proto.OpDef.prototype.id = protobuf.Int64.create(0); +$root.ge.proto.OpDef.prototype.stream_id = protobuf.Int64.create(0); + +$root.ge.proto.GraphDef = class GraphDef { + + constructor() { + this.input = []; + this.output = []; + this.op = []; + this.attr = {}; + } + + static decode(reader, length) { + const message = new $root.ge.proto.GraphDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 4: + message.input.push(reader.string()); + break; + case 5: + message.output.push(reader.string()); + break; + case 6: + message.op.push($root.ge.proto.OpDef.decode(reader, reader.uint32())); + break; + case 11: + reader.entry(message.attr, () => reader.string(), () => $root.ge.proto.AttrDef.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.GraphDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "input": + reader.array(message.input, () => reader.string()); + break; + case "output": + reader.array(message.output, () => reader.string()); + break; + case "op": + message.op.push($root.ge.proto.OpDef.decodeText(reader)); + break; + case "attr": + reader.entry(message.attr, () => reader.string(), () => $root.ge.proto.AttrDef.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.GraphDef.prototype.name = ""; + +$root.ge.proto.ModelDef = class ModelDef { + + constructor() { + this.graph = []; + this.attr = {}; + } + + static decode(reader, length) { + const message = new $root.ge.proto.ModelDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.version = reader.uint32(); + break; + case 3: + message.custom_version = reader.string(); + break; + case 7: + message.graph.push($root.ge.proto.GraphDef.decode(reader, reader.uint32())); + break; + case 11: + reader.entry(message.attr, () => reader.string(), () => $root.ge.proto.AttrDef.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.ge.proto.ModelDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "version": + message.version = reader.uint32(); + break; + case "custom_version": + message.custom_version = reader.string(); + break; + case "graph": + message.graph.push($root.ge.proto.GraphDef.decodeText(reader)); + break; + case "attr": + reader.entry(message.attr, () => reader.string(), () => $root.ge.proto.AttrDef.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.ge.proto.ModelDef.prototype.name = ""; +$root.ge.proto.ModelDef.prototype.version = 0; +$root.ge.proto.ModelDef.prototype.custom_version = ""; diff --git a/om.js b/om.js new file mode 100644 index 00000000000..61fe683a045 --- /dev/null +++ b/om.js @@ -0,0 +1,793 @@ + +// Experimental + +import * as base from './base.js'; +import * as protobuf from './protobuf.js'; + +const om = {}; +const svp = {}; + +om.ModelFactory = class { + + match(context) { + return om.Container.open(context); + } + + async open(context, target) { + await target.read(); + const metadata = await context.metadata('om-metadata.json'); + return new om.Model(metadata, target); + } +}; + +om.Model = class { + + constructor(metadata, target) { + this.format = target.format; + const context = { + metadata: metadata, + signature: target.signature, + weights: target.weights + }; + this.graphs = target.model.graph.map((graph) => new om.Graph(context, graph)); + } +}; + +om.Graph = class { + + constructor(context, graph) { + switch (context.signature) { + case 'IMOD': this.name = graph.name; break; + case 'PICO': this.name = graph.id.toString(); break; + default: throw new om.Error(`Unsupported DaVinci OM ${context.signature} signature.`); + } + this.nodes = []; + this.inputs = []; + this.outputs = []; + const values = new Map(); + values.map = (name, type, tensor) => { + if (!values.has(name)) { + values.set(name, new om.Value(name, type || null, tensor || null)); + } else if ((type && !type.equals(values.get(name).type)) || + (tensor && tensor !== values.get(name).initializer)) { + throw new om.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + const tensors = new Map(); + const ops = []; + for (const op of graph.op) { + if (op.type === 'Const' && op.attr && op.attr.value) { + const desc = op.attr.value.t.desc; + let data = null; + if (op.attr.value.t.data.length !== 0) { + data = op.attr.value.t.data; + } else if (context.weights == null) { + data = null; + } else if (desc.attr.merged_offset) { + const offset = desc.attr.merged_offset.i; + data = context.weights.slice(offset, offset + desc.weight_size); + } else { + const offset = desc.data_offset; + data = context.weights.slice(offset, offset + desc.weight_size); + } + const type = 
om.Utility.tensorType(desc); + const tensor = new om.Tensor('Constant', type, data); + tensors.set(op.name, tensor); + continue; + } + ops.push(op); + } + for (const op of ops) { + const node = new om.Node(context, op, graph, values, tensors); + this.nodes.push(node); + } + } +}; + +om.Node = class { + + constructor(context, op, graph, values, tensors) { + this.name = op.name || ''; + this.type = context.metadata.type(op.type) || { name: op.type }; + this.inputs = []; + this.outputs = []; + this.attributes = []; + this.chain = []; + this.controlDependencies = []; + this.device = null; + if (op.input) { + let index = 0; + for (let i = 0; i < op.input.length; i++) { + const input = op.input[i]; + if (input === '') { + continue; + } + const name = this.type.inputs && i < this.type.inputs.length ? this.type.inputs[i].name : `input${index === 0 ? '' : index}`; + index++; + const end = this.type.inputs && i < this.type.inputs.length && this.type.inputs[i].type && this.type.inputs[i].type === 'Tensor[]' ? op.input.length : i + 1; + const list = []; + for (let j = i; j < end; j++) { + const input = op.input[j]; + if (input === '') { + continue; + } + const index = input.lastIndexOf(':'); + const identifier = input.substring(0, index); + const src_index = input.substring(index + 1); + if (src_index === '-1') { + // a ':-1' source index marks a control edge; register it by its tensor identifier + this.controlDependencies.push(values.map(identifier)); + continue; + } + const type = om.Utility.tensorType(op.input_desc[j]); + const tensor = tensors.get(identifier); + const value = values.map(input, type, tensor); + list.push(value); + } + const argument = new om.Argument(name, list); + this.inputs.push(argument); + i = end - 1; + } + } + if (op.output_desc) { + for (let i = 0; i < op.output_desc.length; i++) { + const identifier = `${this.name}:${i}`; + const type = om.Utility.tensorType(op.output_desc[i]); + const name = this.type.outputs && i < this.type.outputs.length ? this.type.outputs[i].name : `output${i === 0 ? 
'' : i}`; + const value = values.map(identifier, type); + const argument = new om.Argument(name, [ value ]); + this.outputs.push(argument); + } + } + for (const [name, value] of Object.entries(op.attr || {})) { + if (name === 'device') { + this.device = value; + continue; + } + if (name === 'original_op_names') { + continue; + } + if (name === 'relu_flag' && value.b) { + this.chain.push(new om.Node(context, { type: 'ReLU' }, graph, values, tensors)); + continue; + } + const attribute = new om.Attribute(context, name, value); + this.attributes.push(attribute); + } + } +}; + +om.Attribute = class { + + constructor(context, name, value) { + this.name = name; + this.value = value; + switch (value.value) { + case 'i': { + this.value = value.i; + this.type = 'int64'; + break; + } + case 'f': { + this.value = value.f; + this.type = 'float32'; + break; + } + case 'b': { + this.value = value.b; + this.type = 'boolean'; + break; + } + case 'bt': { + this.value = null; + if (value.bt.length !== 0) { + this.type = 'tensor'; + const shape = new om.TensorShape([ value.bt.length / 4 ]); + const type = new om.TensorType('float32', shape); + this.value = new om.Tensor('Constant', type, value.bt); + } + break; + } + case 'dt': { + this.type = 'DataType'; + this.value = om.Utility.dtype(value.dt.toNumber()); + break; + } + case 's': { + if (typeof value.s === 'string') { + this.value = value.s; + } else if (value.s.filter((c) => c <= 32 || c >= 128).length === 0) { + // decode bytes as UTF-8 text only when every byte is printable ASCII + this.value = om.Utility.decodeText(value.s); + } else { + this.value = value.s; + } + this.type = 'string'; + break; + } + case 'g': { + this.type = 'graph'; + this.value = new om.Graph(context, value.g); + break; + } + case 'func': { + break; + } + case 'list': { + const list = value.list; + this.value = []; + if (list.s && list.s.length > 0) { + this.value = list.s.map((v) => String.fromCharCode.apply(null, new Uint16Array(v))).join(', '); + this.type = 'string[]'; + } else if (list.b && list.b.length > 0) { + this.value = list.b; + this.type = 'boolean[]'; + } else if (list.i && list.i.length > 0) { + this.value = list.i; + this.type = 'int64[]'; + } else if (list.f && list.f.length > 0) { + this.value = list.f; + this.type = 'float32[]'; + } else if (list.type && list.type.length > 0) { + this.type = 'type[]'; + this.value = list.type.map((type) => om.Utility.dtype(type) || '?'); + } else if (list.shape && list.shape.length > 0) { + this.type = 'shape[]'; + this.value = list.shape.map((shape) => new om.TensorShape(shape)); + } + break; + } + case 'list_list_int': { + this.value = value.list_list_int.list_list_i.map((list) => list.list_i); + break; + } + case 't': { + const type = om.Utility.tensorType(value.t.desc); + this.value = new om.Tensor('Constant', type, value.t.data); + this.type = 'tensor'; + break; + } + case undefined: { + this.value = null; + break; + } + default: { + throw new om.Error(`Unsupported attribute type '${JSON.stringify(value).substring(0, 32)}'.`); + } + } + } +}; + +om.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +om.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new om.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = initializer ? 
initializer.type : type; + this.initializer = initializer || null; + } +}; + +om.Tensor = class { + + constructor(category, type, value) { + this.category = category; + this.type = type; + this.data = value; + } +}; + +om.TensorType = class { + + constructor(dataType, shape, denotation) { + this.dataType = dataType; + this.shape = shape; + this.denotation = denotation; + } + + equals(obj) { + return obj && this.dataType === obj.dataType && this.shape && this.shape.equals(obj.shape); + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +om.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions.map((dim) => !Number.isInteger(dim) && dim && dim.toNumber ? dim.toNumber() : dim); + } + + equals(obj) { + if (obj && Array.isArray(obj.dimensions) && Array.isArray(this.dimensions)) { + if (this.dimensions.length === obj.dimensions.length && + obj.dimensions.every((value, index) => this.dimensions[index] === value)) { + return true; + } + if (obj.dimensions.every((dim) => Number.isInteger(dim)) && this.dimensions.every((dim) => Number.isInteger(dim))) { + const a = obj.dimensions.reduce((a, b) => a * b, 1); + const b = this.dimensions.reduce((a, b) => a * b, 1); + return a === b; + } + } + return false; + } + + toString() { + if (this.dimensions && Array.isArray(this.dimensions) && this.dimensions.length > 0) { + return `[${this.dimensions.map((dim) => dim ? dim.toString() : '?').join(',')}]`; + } + return ''; + } +}; + +om.Container = class { + + static open(context) { + const stream = context.stream; + if (stream && stream.length >= 256) { + const buffer = stream.peek(4); + const signature = Array.from(buffer).map((c) => String.fromCharCode(c)).join(''); + if (signature === 'IMOD' || signature === 'PICO') { + return new om.Container(context, signature); + } + } + return null; + } + + constructor(context, signature) { + this._context = context; + this._signature = signature; + } + + async read() { + const stream = this._context.stream; + const reader = new base.BinaryReader(stream); + const buffer = reader.read(4); + this.signature = Array.from(buffer).map((c) => String.fromCharCode(c)).join(''); + switch (this.signature) { + case 'IMOD': { + const decoder = new TextDecoder('utf-8'); + this.format = 'DaVinci OM'; + const header = {}; + header.headsize = reader.uint32(); + header.version = reader.uint32(); + header.checksum = reader.read(64); + header.length = reader.uint32(); + header.is_encrypt = reader.byte(); + header.is_checksum = reader.byte(); + header.modeltype = reader.byte(); // 0=IR model, 1=standard model, 2=OM Tiny model + header.genmode = reader.byte(); // 0=offline, 1=online + header.name = decoder.decode(reader.read(32)); + header.ops = reader.uint32(); + header.userdefineinfo = reader.read(32); + header.om_ir_version = reader.uint32(); + header.model_num = header.version >= 0x20000000 ? 
reader.uint32() : 1; + header.platform_version = decoder.decode(reader.read(20)); + header.platform_type = reader.byte(); + header.padd = [ reader.byte(), reader.byte(), reader.byte() ]; + header.model_length = reader.uint64(); + header.need_check_os_cpu_info = reader.byte(); + header.is_unknow_model = reader.byte(); // 0:static model 1:dynamic model + header.reserved = reader.read(62); + const partitions = new Map(); + let size = -1; + for (let align = 4; align <= 8; align += 4) { + reader.seek(header.headsize); + const count = reader.uint32(); + reader.skip(align - 4); + size = 4 + (align - 4) + (count * 3 * align); + for (let i = 0; i < count; i++) { + const type = align === 4 ? reader.uint32() : reader.uint64(); + const offset = align === 4 ? reader.uint32() : reader.uint64(); + const size = align === 4 ? reader.uint32() : reader.uint64(); + if (type >= 32 || partitions.has(type) || (offset + size) >= stream.length) { + partitions.clear(); + break; + } + partitions.set(type, { offset: offset, size: size }); + } + if (partitions.size > 0) { + break; + } + } + if (!partitions.has(0)) { + throw new om.Error('File does not contain a model definition.'); + } + const offset = header.headsize + size; + for (const [type, partition] of partitions) { + reader.seek(offset + partition.offset); + const buffer = reader.read(partition.size); + switch (type) { + case 0: { // MODEL_DEF + this.model = buffer; + break; + } + case 1: { // WEIGHTS_DATA + this.weights = buffer; + break; + } + case 2: // TASK_INFO + case 3: // TBE_KERNELS + case 4: { // CUST_AICPU_KERNELS + break; + } + case 5: { // DEVICE_CONFIG, SO_BINS + this.devices = new Map(); + const decoder = new TextDecoder('ascii'); + const reader = new base.BinaryReader(buffer); + reader.uint32(); + for (let position = 4; position < partition.size;) { + const length = reader.uint32(); + const buffer = reader.read(length); + const name = decoder.decode(buffer); + const device = reader.uint32(); + this.devices.set(name, device); + position += 4 + length + 4; + } + break; + } + case 6: // FLOW_MODEL + case 7: // FLOW_SUBMODEL + case 8: // MODEL_INOUT_INFO + case 9: // STATIC_TASK_DESC + case 10: // DYNAMIC_TASK_DESC + case 11: // TASK_PARAM + case 20: // PRE_MODEL_DESC + case 21: // PRE_MODEL_SQE + case 22: { // PRE_KERNEL_ARGS + break; + } + default: { + throw new om.Error('Unsupported DaVinci OM partition type.'); + } + } + } + await this._context.require('./om-proto'); + try { + om.proto = protobuf.get('om').ge.proto; + const reader = protobuf.BinaryReader.open(this.model); + this.model = om.proto.ModelDef.decode(reader); + } catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new om.Error(`File format is not ge.proto.ModelDef (${message.replace(/\.$/, '')}).`); + } + break; + } + case 'PICO': { + this.format = 'DaVinci OM SVP'; // SVP = Smart Vision PICO + reader.uint32(); // reserved + this.size = reader.uint32(); + const param_size = reader.uint32(); + const param_offset = reader.uint32(); + reader.uint32(); // tmp_bufsize + const tfm_offset = reader.uint32(); + reader.uint32(); // tfm_size + reader.seek(param_offset); + this.param = reader.read(param_size); + const buffer = reader.read(tfm_offset - reader.position); + this.model = new svp.ModelDef(buffer); + break; + } + default: { + throw new om.Error(`Unsupported DaVinci OM ${this.signature} signature.`); + } + } + } +}; + +om.Utility = class { + + static dtype(value) { + om.Utility._types = om.Utility._types || [ + 'undefined', 'float32', 'float16', 'int8', 'uint8', 'int16', 'uint16', 'int32', + 'int64', 'uint32', 'uint64', 'boolean', 'float64', 'string', 'dual_sub_int8', 'dual_sub_uint8', + 'complex64', 'complex128', 'qint8', 'qint16', 'qint32', 'quint8', 'quint16', 'resource', + 'stringref', 'dual', 'variant', 'bfloat16', 'int4', 'uint1', 'int2', 'uint2' + ]; + if (value >= om.Utility._types.length) { + throw new om.Error(`Unsupported dtype '${value}'.`); + } + return om.Utility._types[value]; + } + + static tensorType(desc) { + if (desc.shape && Array.isArray(desc.shape.dim)) { + const dataType = desc && desc.dtype ? om.Utility.dtype(desc.dtype) : '?'; + const shape = new om.TensorShape(desc.shape.dim); + return new om.TensorType(dataType, shape, desc.layout); + } + return null; + } + + static decodeText(value) { + om.Utility._textDecoder = om.Utility._textDecoder || new TextDecoder('utf-8'); + return om.Utility._textDecoder.decode(value); + } +}; + +om.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading DaVinci OM model.'; + } +}; + +svp.ModelDef = class ModelDef { + + constructor(buffer) { + const reader = new svp.BinaryReader(buffer); + this.attr = {}; + this.graph = []; + this.name = reader.find(0x800D, 'string'); + this.batch_num = reader.find(0x600A); + while (reader.position < reader.length) { + const tag = reader.uint16(); + const value = reader.value(tag); + switch (tag & 0x1fff) { + case 0x0040: { + this.graph.push(new svp.GraphDef(value)); + break; + } + case 0x0111: { + const op = new svp.OpDef(value); + for (const item of this.graph) { + if (op.attr && op.attr.seg_id && op.attr.seg_id.i === item.id) { + let out_num; + if (typeof op.output_index == 'number') { + out_num = op.output_index + 1; + } else { + const input_num = op.input.map((element) => element.split(":")[1]); + out_num = input_num.length > 0 ? 
Math.max(...input_num) + 1 : 1; + } + const out_types = []; + if (op.data_flow && op.data_flow !== '') { + const data = op.data_flow; + if (data.indexOf('o[{t') !== -1) { + const outs = data.substring(data.indexOf('o[{t')).split(','); + for (const out of outs) { + const startIndex = out.indexOf("\""); + const endIndex = out.indexOf("\"", startIndex + 1); + out_types.push(out.substring(startIndex + 1, endIndex)); + } + } + } + const out_list = []; + while (out_num > 0) { + const output_desc = {}; + output_desc.shape = { dim: op.output_shape_vector }; + output_desc.layout = 'NCHW'; + if (op.data_flow && out_types.length >= out_num) { + output_desc.dtype = out_types[op.output_index + 1 - out_num]; + } + out_list.push(output_desc); + out_num--; + } + + let curr_op = null; + for (const op_item of item.op) { + if (op_item.id === op.id) { + curr_op = op_item; + break; + } + } + if (curr_op != null) { + curr_op.output_desc = curr_op.output_desc.concat(out_list); + } else { + op.output_desc = op.output_desc.concat(out_list); + item.op.push(op); + } + break; + } + } + break; + } + default: { + break; + } + } + } + if (this.graph.length > 1) { + for (let i = 1; i < this.graph.length; i++) { + this.graph[0].op = this.graph[0].op.concat(this.graph[i].op); + } + } + } +}; + +svp.GraphDef = class { + + constructor(buffer) { + this.input = []; + this.output = []; + this.op = []; + this.attr = {}; + const reader = new svp.BinaryReader(buffer); + const input = (buffer) => { + const input = {}; + const reader = new svp.BinaryReader(buffer); + while (reader.position < reader.length) { + const tag = reader.uint16(); + switch (tag & 0x1fff) { + case 0x0051: input.id = reader.value(tag); break; + case 0x0058: input.name = reader.value(tag, 'string').trim(); break; + case 0x005a: input.shape_vector = reader.value(tag, 'uint32[]'); break; + default: reader.value(tag); break; + } + } + return input; + }; + const output = (buffer) => { + const output = {}; + const reader = new svp.BinaryReader(buffer); + while (reader.position < reader.length) { + const tag = reader.uint16(); + switch (tag & 0x1fff) { + case 0x0061: output.id = reader.value(tag); break; + case 0x0066: output.name = reader.value(tag, 'string').trim(); break; + case 0x0069: output.shape_vector = reader.value(tag, 'uint32[]'); break; + case 0x0110: output.layer_num = reader.value(tag); break; + default: reader.value(tag); break; + } + } + return output; + }; + while (reader.position < reader.length) { + const tag = reader.uint16(); + const value = reader.value(tag); + switch (tag & 0x1fff) { + case 0x0041: this.id = value; break; + case 0x0050: this.input.push(input(value)); break; + case 0x0060: this.output.push(output(value)); break; + default: break; + } + } + } +}; + +svp.OpDef = class { + + constructor(buffer) { + this.input = []; + this.attr = {}; + this.input_i = []; + this.output_i = []; + this.input_desc = []; + this.output_desc = []; + const reader = new svp.BinaryReader(buffer); + while (reader.position < reader.length) { + const tag = reader.uint16(); + switch (tag & 0x1fff) { + case 0x0114: this.name = reader.value(tag, 'string').trim(); break; + case 0x0112: this.id = reader.value(tag); break; + case 0x0119: this.attr.output_m2m_flag = reader.attribute(tag, 'i'); break; + case 0x0121: this.attr.batch_flag = reader.attribute(tag, 'i'); break; + case 0x0124: this.attr.dequant_scale = reader.attribute(tag, 'i'); break; + case 0x0126: this.attr.output_address = reader.attribute(tag, 'i'); break; + case 0x0125: this.attr.dequant_offset = 
reader.attribute(tag, 'i'); break; + case 0x0127: this.attr.first_inst_addr = reader.attribute(tag, 'i'); break; + case 0x0128: this.attr.last_inst_addr = reader.attribute(tag, 'i'); break; + case 0x013B: this.attr.is_fusion_layer = reader.attribute(tag, 'i'); break; + case 0x013C: this.input = reader.value(tag, 'string').split(','); break; + case 0x014B: this.attr.seg_id = reader.attribute(tag, 'i'); break; + case 0x0150: this.attr.is_not_last_merge_layer = reader.attribute(tag, 'i'); break; + case 0x0151: this.attr.is_dump_available = reader.attribute(tag, 'i'); break; + case 0x0153: this.attr.debug_dump_offset = reader.attribute(tag, 'i'); break; + case 0x0152: this.type = reader.value(tag, 'string'); break; + case 0x0154: this.output_shape_vector = reader.value(tag, 'uint32[]'); break; + case 0x0155: this.input_index = reader.value(tag); break; + case 0x015B: this.output_index = reader.value(tag); break; + case 0x0156: this.attr.trap_inst_pc = reader.attribute(tag, 'i'); break; + case 0x0157: this.attr.profile_layer_id = reader.attribute(tag, 'i'); break; + case 0xA15A: + this.data_flow = reader.value(tag, 'string'); + this.attr.data_flow = new svp.AttrDef(this.data_flow.replace('i[{t', 'input[{type').replace(',f[{t', '\tforward[{type').replace(',o[{t', '\toutput[{type').replace(',{[t', ',{type'), 's'); + break; + default: reader.value(tag); break; + } + } + for (let i = 0; i < this.input.length; i++) { + this.input_desc.push({ layout: 'NCHW', shape: {} }); + } + } +}; + +svp.AttrDef = class { + + constructor(item, type) { + switch (type) { + case 's': this.s = item; break; + case 'i': this.i = item; break; + default: throw new svp.Error(`Unsupported attribute type '${type}'.`); + } + } + + get value() { + if (this.s !== undefined) { + return 's'; + } + if (this.i !== undefined) { + return 'i'; + } + return undefined; + } +}; + +svp.BinaryReader = class extends base.BinaryReader { + + value(tag, type) { + let value; + // the top 3 bits of a tag select the encoding (1-3 = inline int8/uint16/uint32, 4-6 = block with int8/uint16/uint32 length prefix); the low 13 bits are the field id + switch (tag >> 13) { + case 1: value = this.int8(); break; + case 2: value = this.uint16(); break; + case 3: value = this.uint32(); break; + case 4: value = this.read(this.int8()); break; + case 5: value = this.read(this.uint16()); break; + case 6: value = this.read(this.uint32()); break; + default: throw new svp.Error(`Unsupported value identifier '${tag}'.`); + } + return type ? this._cast(value, type, tag) : value; + } + + find(tag, type) { + let value = null; + let match = false; + while (!match && this.position < this.length) { + const current = this.uint16(); + value = this.value(current); + match = current === tag; + } + this.seek(0); + return match && type ? 
this._cast(value, type, tag) : value; + } + + attribute(tag, type) { + const value = this.value(tag); + return new svp.AttrDef(value, type); + } + + _cast(value, type, tag) { + switch (type) { + case 'string': { + if (value instanceof Uint8Array) { + svp.BinaryReader._decoder = svp.BinaryReader._decoder || new TextDecoder('utf-8'); + return svp.BinaryReader._decoder.decode(value).replace(/\0.*$/g, ''); + } + throw new svp.Error(`Invalid 'string' tag '${tag.toString(16)}'.`); + } + case 'uint32[]': { + const reader = new base.BinaryReader(value); + value = []; + while (reader.position < reader.length) { + value.push(reader.uint32()); + } + return value; + } + default: { + return value; + } + } + } +}; + +svp.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading DaVinci SVP model.'; + } +}; + +export const ModelFactory = om.ModelFactory; diff --git a/onednn-metadata.json b/onednn-metadata.json new file mode 100644 index 00000000000..d0846d1b76f --- /dev/null +++ b/onednn-metadata.json @@ -0,0 +1,414 @@ +[ + { + "name": "End" + }, + { + "name": "Wildcard" + }, + { + "name": "StaticReshape", + "category": "Shape" + }, + { + "name": "AvgPoolBackward", + "category": "Pool" + }, + { + "name": "AvgPool", + "category": "Pool" + }, + { + "name": "MaxPoolBackward", + "category": "Pool" + }, + { + "name": "MaxPool", + "category": "Pool" + }, + { + "name": "LayerNormBackward", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "diff_dst" }, + { "name": "mean" }, + { "name": "variance" }, + { "name": "gamma" }, + { "name": "beta" } + ], + "outputs": [ + { "name": "diff_src" }, + { "name": "diff_gamma" }, + { "name": "diff_beta" } + ] + }, + { + "name": "BatchNormInference", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" }, + { "name": "mean" }, + { "name": "variance" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "BatchNormTrainingBackward", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "diff_dst" }, + { "name": "mean" }, + { "name": "variance" }, + { "name": "gamma" } + ], + "outputs": [ + { "name": "diff_src" }, + { "name": "diff_gamma" }, + { "name": "diff_beta" } + ] + }, + { + "name": "BatchNormForwardTraining", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "mean" }, + { "name": "variance" }, + { "name": "gamma" }, + { "name": "beta" } + ], + "outputs": [ + { "name": "output" }, + { "name": "running_mean" }, + { "name": "running_variance" }, + { "name": "batch_mean" }, + { "name": "batch_variance" } + ] + }, + { + "name": "LayerNorm", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" } + ], + "outputs": [ + { "name": "dst" }, + { "name": "mean" }, + { "name": "variance" } + ] + }, + { + "name": "DynamicDequantize", + "category": "Quantization" + }, + { + "name": "DynamicQuantize", + "category": "Quantization" + }, + { + "name": "Dequantize", + "category": "Quantization" + }, + { + "name": "Quantize", + "category": "Quantization" + }, + { + "name": "TypeCast", + "category": "Quantization" + }, + { + "name": "ReLU", + "category": "Activation" + }, + { + "name": "Sigmoid", + "category": "Activation" + }, + { + "name": "SoftMaxBackward", + "category": "Activation" + }, + { + "name": "MishBackward", + "category": "Activation" + }, + { + "name": "GELU", + "category": "Activation" + }, + { + "name": "ClampBackward", + 
"category": "Activation" + }, + { + "name": "Mish", + "category": "Activation" + }, + { + "name": "PReLU", + "category": "Activation" + }, + { + "name": "SigmoidBackward", + "category": "Activation" + }, + { + "name": "LogSoftmax", + "category": "Activation" + }, + { + "name": "Clamp", + "category": "Activation" + }, + { + "name": "TanhBackward", + "category": "Activation" + }, + { + "name": "ReLUBackward", + "category": "Activation" + }, + { + "name": "Exp", + "category": "Activation" + }, + { + "name": "SoftMax", + "category": "Activation" + }, + { + "name": "HardSwish", + "category": "Activation" + }, + { + "name": "LeakyReLU", + "category": "Activation" + }, + { + "name": "LogSoftmaxBackward", + "category": "Activation" + }, + { + "name": "Tanh", + "category": "Activation" + }, + { + "name": "Elu", + "category": "Activation" + }, + { + "name": "SoftPlus", + "category": "Activation" + }, + { + "name": "EluBackward", + "category": "Activation" + }, + { + "name": "GELUBackward", + "category": "Activation" + }, + { + "name": "SoftPlusBackward", + "category": "Activation" + }, + { + "name": "PReLUBackward", + "category": "Activation" + }, + { + "name": "HardSwishBackward", + "category": "Activation" + }, + { + "name": "Interpolate" + }, + { + "name": "InterpolateBackward" + }, + { + "name": "ReduceMax" + }, + { + "name": "ReduceL2" + }, + { + "name": "ReduceL1" + }, + { + "name": "ReduceSum" + }, + { + "name": "ReduceProd" + }, + { + "name": "ReduceMean" + }, + { + "name": "ReduceMin" + }, + { + "name": "Reorder", + "category": "Transform" + }, + { + "name": "Concat", + "category": "Tensor" + }, + { + "name": "StaticTranspose", + "category": "Transform" + }, + { + "name": "Abs" + }, + { + "name": "Sqrt" + }, + { + "name": "Round" + }, + { + "name": "Subtract" + }, + { + "name": "Erf" + }, + { + "name": "Minimum" + }, + { + "name": "Reciprocal" + }, + { + "name": "BiasAddBackward" + }, + { + "name": "Divide" + }, + { + "name": "SquaredDifference" + }, + { + "name": "Multiply" + }, + { + "name": "AbsBackward" + }, + { + "name": "Square" + }, + { + "name": "Maximum" + }, + { + "name": "Add" + }, + { + "name": "SqrtBackward" + }, + { + "name": "Log" + }, + { + "name": "BiasAdd", + "inputs": [ + { "name": "input" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ConvTranspose", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Convolution", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ConvTransposeBackwardData", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ConvolutionBackwardData", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "dst_shape" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ConvolutionBackwardWeights", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "diff_dst" }, + { "name": "weights_shape" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ConvTransposeBackwardWeights", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "diff_dst" }, + { "name": "weights_shape" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "MatMul", + "category": "Layer", 
+ "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + } +] \ No newline at end of file diff --git a/onednn.js b/onednn.js new file mode 100644 index 00000000000..f4076a9d42b --- /dev/null +++ b/onednn.js @@ -0,0 +1,409 @@ + +const onednn = {}; + +onednn.ModelFactory = class { + + match(context) { + const obj = context.peek('json'); + if (obj && obj.version && obj.engine_kind && obj.fpmath_mode && obj.graph) { + return obj; + } + return null; + } + + async open(context, target) { + const metadata = await context.metadata('onednn-metadata.json'); + return new onednn.Model(metadata, target); + } +}; + +onednn.Model = class { + + constructor(metadata, symbol) { + const version = symbol.version; + this._format = `oneDNN Graph${version ? ` v${version}` : ''}`; + this._runtime = `${symbol.engine_kind} ${symbol.fpmath_mode}`; + this._graphs = [ new onednn.Graph(metadata, symbol) ]; + } + + get format() { + return this._format; + } + + get version() { + return this._version; + } + + get runtime() { + return this._runtime; + } + + get graphs() { + return this._graphs; + } +}; + +onednn.Graph = class { + + constructor(metadata, symbol) { + this._metadata = metadata; + this._nodes = []; + this._inputs = []; + this._outputs = []; + const nodes = []; + const tensors = new Set(); + for (const node of symbol.graph) { + if (node.kind == 'Wildcard' && node.inputs.length == 0) { + for (const output of node.outputs) { + tensors.add(output.id); + } + } else { + nodes.push(node); + } + } + const values = new Map(); + const value = (obj) => { + const id = obj.id; + const shape = !obj.shape || (obj.shape.length === 1 && obj.shape[0] === -1) ? null : new onednn.TensorShape(obj.shape); + const type = new onednn.TensorType(obj.dtype, shape); + const tensor = tensors.has(id) ? 
new onednn.Tensor(type, obj.property_type) : null; + if (!values.has(id)) { + values.set(id, new onednn.Value(id.toString(), type, tensor)); + } else if ((type && !type.equals(values.get(id).type)) || (tensor && !tensor.equals(values.get(id).initializer))) { + throw new onednn.Error(`Duplicate value '${id}'.`); + } + return values.get(id); + }; + for (const node of nodes) { + for (const input of node.inputs) { + value(input); + } + for (const output of node.outputs) { + value(output); + } + } + const engine = symbol.engine_kind; + for (const node of nodes) { + this._nodes.push(new onednn.Node(this._metadata, node, engine, value)); + } + const inputs = symbol.input_ports || []; + for (let i = 0; i < inputs.length; i++) { + const id = inputs[i]; + const value = values.get(id); + if (value) { + this._inputs.push(new onednn.Argument(id.toString(), [ value ])); + } + } + const outputs = symbol.output_ports || []; + for (let i = 0; i < outputs.length; i++) { + const id = outputs[i]; + const value = values.get(id); + if (value) { + this._outputs.push(new onednn.Argument(id.toString(), [ value ])); + } + } + } + + get name() { + return ''; + } + + get type() { + return this._type; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +onednn.Node = class { + + constructor(metadata, node, device, value) { + this._name = node.name; + this._attributes = []; + this._inputs = []; + this._outputs = []; + this._type = metadata.type(node.kind) || { name: node.kind }; + this._device = device; + this._location = node.id; + const attrs = node.attrs; + if (attrs) { + for (const [name, attr] of Object.entries(attrs)) { + this._attributes.push(new onednn.Attribute(name, attr.type, attr.value)); + } + } + const inputs = node.inputs || []; + for (let i = 0; i < inputs.length; i++) { + let name = inputs.length === 1 ? 'input' : i.toString(); + if (this._type && this._type.inputs && i < this._type.inputs.length) { + name = this._type.inputs[i].name; + } + this._inputs.push(new onednn.Argument(name, [ value(inputs[i]) ])); + } + const outputs = node.outputs || []; + for (let i = 0; i < outputs.length; i++) { + let name = outputs.length === 1 ? 'output' : i.toString(); + if (this._type && this._type.outputs && i < this._type.outputs.length) { + name = this._type.outputs[i].name; + } + this._outputs.push(new onednn.Argument(name, [ value(outputs[i]) ])); + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } + + get location() { + return this._location; + } + + get device() { + return this._device; + } +}; + +onednn.Attribute = class { + + constructor(name, type, value) { + this._name = name; + this._value = value; + let number; + switch (type) { + case 'bool': + this._type = 'boolean'; + switch (value) { + case 1: this._value = true; break; + case 0: this._value = false; break; + default: throw new onednn.Error(`Unsupported attribute boolean value '${value}'.`); + } + break; + case 's64': + this._type = 'int64'; + number = Number.parseInt(this._value, 10); + this._value = Number.isNaN(this._value - number) ?
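+/* Worked examples, added for illustration and not part of the original diff, of how this constructor coerces serialized attribute values, traced through the cases below: + { type: 'bool', value: 1 } -> true + { type: 's64', value: '3' } -> 3 + { type: 's64[]', value: '[2L, 2L]' } -> [ 2, 2 ] (trailing 'L' suffixes are stripped before parsing) + { type: 'f32', value: '0.5' } -> 0.5 + A value that does not parse as a number falls through unchanged. */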
value : number; + break; + case 's64[]': + this._type = 'int64[]'; + if (this._value.length > 2 && this._value.toString().startsWith('[') && this._value.toString().endsWith(']')) { + let array = []; + const items = this._value.substring(1, this._value.length - 1).split(',') + .map((item) => item.trim()) + .map((item) => item.endsWith('L') ? item.substring(0, item.length - 1) : item); + for (const item of items) { + number = Number.parseInt(item, 10); + if (Number.isNaN(item - number)) { + array = null; + } else if (array != null) { + array.push(number); + } + } + if (array != null) { + this._value = array; + } + } + break; + case 'f32': + this._type = 'float32'; + number = Number.parseFloat(this._value); + this._value = Number.isNaN(this._value - number) ? value : number; + break; + case 'f32[]': + this._type = 'float32[]'; + if (this._value.length > 2 && this._value.toString().startsWith('[') && this._value.toString().endsWith(']')) { + let array = []; + const items = this._value.substring(1, this._value.length - 1).split(',') + .map((item) => item.trim()) + .map((item) => item.endsWith('L') ? item.substring(0, item.length - 1) : item); + for (const item of items) { + number = Number.parseFloat(item); + if (Number.isNaN(item - number)) { + array = null; + } else if (array != null) { + array.push(number); + } + } + if (array != null) { + this._value = array; + } + } + break; + case 'string': + this._type = 'string'; + break; + default: { + throw new onednn.Error(`Unsupported attribute data type '${type}'.`); + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible === false ? false : true; + } +}; + +onednn.Argument = class { + + constructor(name, value) { + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +onednn.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new onednn.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +onednn.TensorType = class { + + constructor(dataType, shape) { + switch (dataType) { + case 'f8_e4m3': this._dataType = 'float8e4m3'; break; + case 'f8_e5m2': this._dataType = 'float8e5m2'; break; + case 'f16': this._dataType = 'float16'; break; + case 'f32': this._dataType = 'float32'; break; + case 's8': this._dataType = 'int8'; break; + case 's32': this._dataType = 'int32'; break; + case 'u8': this._dataType = 'uint8'; break; + case 'bf16': this._dataType = 'bfloat16'; break; + case 'boolean': this._dataType = 'boolean'; break; + case 'undef': this._dataType = '?'; break; + default: throw new onednn.Error(`Unsupported tensor data type '${dataType}'.`); + } + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + equals(obj) { + return obj && this._dataType === obj.dataType && + ((this._shape && this._shape.equals(obj.shape)) || (this._shape === null && obj.shape === null)); + } + + toString() { + return this._dataType + (this._shape ?
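+/* Illustrative rendering, an addition rather than part of the original diff: a value with dtype 'f32' and shape [ 1, 3, 224, 224 ] prints as 'float32[1,3,224,224]', while a shape serialized as [ -1 ] was normalized to null in the Graph value resolver and prints as 'float32[?]'. */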
this._shape.toString() : '[?]'); + } +}; + +onednn.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + equals(obj) { + return obj && Array.isArray(obj.dimensions) && + Array.isArray(this._dimensions) && this._dimensions.length === obj.dimensions.length + && obj.dimensions.every((value, index) => this._dimensions[index] === value); + } + + toString() { + return this._dimensions ? (`[${this._dimensions.map((dimension) => dimension ? dimension.toString() : '?').join(',')}]`) : ''; + } +}; + +onednn.Tensor = class { + + constructor(type, property_type) { + this._type = type; + this._category = property_type; + } + + get type() { + return this._type; + } + + get category() { + return this._category; + } + + equals(obj) { + return obj && this._type.equals(obj.type) && this.category === obj.category; + } +}; + +onednn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading oneDNN Graph model.'; + } +}; + +export const ModelFactory = onednn.ModelFactory; + diff --git a/onnx-metadata.json b/onnx-metadata.json new file mode 100644 index 00000000000..411ad7fb16f --- /dev/null +++ b/onnx-metadata.json @@ -0,0 +1,42594 @@ +[ + { + "name": "Abs", + "module": "ai.onnx", + "version": 1, + "description": "Absolute takes one input data (Tensor) and produces one output data\n(Tensor) where the absolute is, y = abs(x), is applied to\nthe tensor elementwise.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "abs", + "code": "node = onnx.helper.make_node(\n \"Abs\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = abs(x)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_abs\")" + } + ] + }, + { + "name": "Abs", + "module": "ai.onnx", + "version": 6, + "description": "Absolute takes one input data (Tensor) and produces one output data\n(Tensor) where the absolute is, y = abs(x), is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "abs", + "code": "node = onnx.helper.make_node(\n \"Abs\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = abs(x)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_abs\")" + } + ] + }, + { + "name": 
"Abs", + "module": "ai.onnx", + "version": 13, + "description": "Absolute takes one input data (Tensor) and produces one output data\n(Tensor) where absolute value, y = abs(x), is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "abs", + "code": "node = onnx.helper.make_node(\n \"Abs\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = abs(x)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_abs\")" + } + ] + }, + { + "name": "Acos", + "module": "ai.onnx", + "version": 7, + "description": "Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The arccosine of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "acos", + "code": "node = onnx.helper.make_node(\n \"Acos\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-0.5, 0, 0.5]).astype(np.float32)\ny = np.arccos(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_acos_example\")\n\nx = np.random.rand(3, 4, 5).astype(np.float32)\ny = np.arccos(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_acos\")" + } + ] + }, + { + "name": "Acosh", + "module": "ai.onnx", + "version": 9, + "description": "Calculates the hyperbolic arccosine of the given input tensor element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The hyperbolic arccosine values of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "acosh", + "code": "node = onnx.helper.make_node(\n \"Acosh\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([10, np.e, 1]).astype(np.float32)\ny = np.arccosh(x) # expected output [2.99322295, 1.65745449, 0.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_acosh_example\")\n\nx = np.random.uniform(1.0, 10.0, (3, 4, 5)).astype(np.float32)\ny = np.arccosh(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_acosh\")" + } + ] + }, + { + "name": "Adagrad", + "module": "ai.onnx.preview.training", + "version": 1, + "description": "Compute one iteration 
of ADAGRAD, a stochastic gradient based optimization\n algorithm. This operator can conduct the optimization of multiple tensor variables.\n\n Let's define the behavior of this operator. As you can imagine, ADAGRAD requires\n some parameters:\n\n - The initial learning-rate \"R\".\n - The update count \"T\". That is, the number of training iterations conducted.\n - A L2-norm regularization coefficient \"norm_coefficient\".\n - A learning-rate decay factor \"decay_factor\".\n - A small constant \"epsilon\" to avoid dividing-by-zero.\n\n At each ADAGRAD iteration, the optimized tensors are moved along a direction\n computed based on their estimated gradient and accumulated squared gradient. Assume\n that only a single tensor \"X\" is updated by this operator. We need the value of \"X\",\n its gradient \"G\", and its accumulated squared gradient \"H\". Therefore, variables in\n this operator's input list are sequentially \"R\", \"T\", \"X\", \"G\", and \"H\". Other\n parameters are given as attributes because they are usually constants. Also, the\n corresponding output tensors are the new value of \"X\" (called \"X_new\"), and then\n the new accumulated squared gradient (called \"H_new\"). Those outputs are computed\n from the given inputs following the pseudo code below.\n\n Let \"+\", \"-\", \"*\", and \"/\" are all element-wise arithmetic operations with\n numpy-style broadcasting support. The pseudo code to compute those outputs is:\n\n // Compute a scalar learning-rate factor. At the first update of X, T is generally\n // 0 (0-based update index) or 1 (1-based update index).\n r = R / (1 + T * decay_factor);\n\n // Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm.\n G_regularized = norm_coefficient * X + G;\n\n // Compute new accumulated squared gradient.\n H_new = H + G_regularized * G_regularized;\n\n // Compute the adaptive part of per-coordinate learning rate. Note that Sqrt(...)\n // computes element-wise square-root.\n H_adaptive = Sqrt(H_new) + epsilon\n\n // Compute the new value of \"X\".\n X_new = X - r * G_regularized / H_adaptive;\n\n If one assign this operators to optimize multiple inputs, for example, \"X_1\" and \"X_2\", the same\n pseudo code may be extended to handle all tensors jointly. More specifically, we can view \"X\" as a\n concatenation of \"X_1\" and \"X_2\" (of course, their gradient and accumulate gradient should\n be concatenated too) and then just reuse the entire pseudo code.\n\n Note that ADAGRAD was first proposed in http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.\n In that reference paper, this operator is a special case of the Figure 1's composite mirror\n descent update.\n", + "attributes": [ + { + "name": "decay_factor", + "type": "float32", + "required": false, + "description": "The decay factor of learning rate after one update.The effective learning rate is computed by r = R / (1 + T * decay_factor). Default to 0 so that increasing update counts doesn't reduce the learning rate." + }, + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999974752427e-07, + "description": "Small scalar to avoid dividing by zero." + }, + { + "name": "norm_coefficient", + "type": "float32", + "required": false, + "description": "Regularization coefficient in 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization." + } + ], + "inputs": [ + { + "name": "R", + "type": "T1", + "description": "The initial learning rate." 
+ }, + { + "name": "T", + "type": "T2", + "description": "The update count of \"X\". It should be a scalar." + }, + { + "name": "inputs", + "type": "T3", + "list": true, + "description": "The current values of optimized tensors, followed by their respective gradients, followed by their respective accumulated squared gradients.For example, if two tensor \"X_1\" and \"X_2\" are optimized, The input list would be [\"X_1\", \"X_2\", gradient of \"X_1\", gradient of \"X_2\", accumulated squared gradient of \"X_1\", accumulated squared gradient of \"X_2\"]." + } + ], + "min_input": 3, + "max_input": 2147483647, + "outputs": [ + { + "name": "outputs", + "type": "T3", + "list": true, + "description": "Updated values of optimized tensors, followed by their updated values of accumulated squared gradients. For example, if two tensor \"X_1\" and \"X_2\" are optimized, the output list would be [new value of \"X_1,\" new value of \"X_2\" new accumulated squared gradient of \"X_1\", new accumulated squared gradient of \"X_2\"]." + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "3 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input types to float scalars.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain input types to 64-bit integer scalars.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "adagrad", + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nepsilon = 1e-5\ndecay_factor = 0.1\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"Adagrad\",\n inputs=[\"R\", \"T\", \"X\", \"G\", \"H\"],\n outputs=[\"X_new\", \"H_new\"],\n norm_coefficient=norm_coefficient,\n epsilon=epsilon,\n decay_factor=decay_factor,\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,\n)\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\nx = np.array([1.0], dtype=np.float32)\ng = np.array([-1.0], dtype=np.float32)\nh = np.array([2.0], dtype=np.float32)\n\n# Compute expected outputs of Adagrad.\nx_new, h_new = apply_adagrad(\n r, t, x, g, h, norm_coefficient, epsilon, decay_factor\n)\n\n# Check results.\nexpect(\n node,\n inputs=[r, t, x, g, h],\n outputs=[x_new, h_new],\n name=\"test_adagrad\",\n opset_imports=[\n onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)\n ],\n)" + }, + { + "summary": "adagrad_multiple", + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nepsilon = 1e-5\ndecay_factor = 0.1\n\nnode = onnx.helper.make_node(\n \"Adagrad\",\n inputs=[\"R\", \"T\", \"X1\", \"X2\", \"G1\", \"G2\", \"H1\", \"H2\"],\n outputs=[\"X1_new\", \"X2_new\", \"H1_new\", \"H2_new\"],\n norm_coefficient=norm_coefficient,\n epsilon=epsilon,\n decay_factor=decay_factor,\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,\n)\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\n\nx1 = np.array([1.0], dtype=np.float32)\ng1 = np.array([-1.0], dtype=np.float32)\nh1 = np.array([2.0], dtype=np.float32)\n\nx2 = np.array([1.0, 2.0], dtype=np.float32)\ng2 = np.array([-1.0, -3.0], dtype=np.float32)\nh2 = np.array([4.0, 1.0], dtype=np.float32)\n\n# Compute expected outputs of Adagrad.\nx1_new, h1_new = 
apply_adagrad(\n r, t, x1, g1, h1, norm_coefficient, epsilon, decay_factor\n)\nx2_new, h2_new = apply_adagrad(\n r, t, x2, g2, h2, norm_coefficient, epsilon, decay_factor\n)\n\n# Check results.\nexpect(\n node,\n inputs=[r, t, x1, x2, g1, g2, h1, h2],\n outputs=[x1_new, x2_new, h1_new, h2_new],\n name=\"test_adagrad_multiple\",\n opset_imports=[\n onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)\n ],\n)" + } + ] + }, + { + "name": "Adam", + "module": "ai.onnx.preview.training", + "version": 1, + "description": "Compute one iteration of Adam, a stochastic gradient based optimization\n algorithm. This operator can conduct the optimization of multiple tensor variables.\n\n Let's define the behavior of this operator. First of all, Adam requires\n some parameters:\n\n - The learning-rate \"R\".\n - The update count \"T\". That is, the number of training iterations conducted.\n - A L2-norm regularization coefficient \"norm_coefficient\".\n - A small constant \"epsilon\" to avoid dividing-by-zero.\n - Two coefficients, \"alpha\" and \"beta\".\n\n At each Adam iteration, the optimized tensors are moved along a direction\n computed based on their exponentially-averaged historical gradient and\n exponentially-averaged historical squared gradient. Assume that only a tensor\n \"X\" is being optimized. The rest of required information is\n\n - the value of \"X\",\n - \"X\"'s gradient (denoted by \"G\"),\n - \"X\"'s exponentially-averaged historical gradient (denoted by \"V\"), and\n - \"X\"'s exponentially-averaged historical squared gradient (denoted by \"H\").\n\n Some of those parameters are passed into this operator as input tensors and others\n are stored as this operator's attributes. Specifically, this operator's input tensor\n list is [\"R\", \"T\", \"X\", \"G\", \"V\", \"H\"]. That is, \"R\" is the first input, \"T\" is\n the second input, and so on. Other parameters are given as attributes because they\n are constants. Moreover, the corresponding output tensors are\n\n - the new value of \"X\" (called \"X_new\"),\n - the new exponentially-averaged historical gradient (denoted by \"V_new\"), and\n - the new exponentially-averaged historical squared gradient (denoted by \"H_new\").\n\n Those outputs are computed following the pseudo code below.\n\n Let \"+\", \"-\", \"*\", and \"/\" are all element-wise arithmetic operations with\n numpy-style broadcasting support. The pseudo code to compute those outputs is:\n\n // Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm.\n G_regularized = norm_coefficient * X + G\n\n // Update exponentially-averaged historical gradient.\n V_new = alpha * V + (1 - alpha) * G_regularized\n\n // Update exponentially-averaged historical squared gradient.\n H_new = beta * H + (1 - beta) * G_regularized * G_regularized\n\n // Compute the element-wise square-root of H_new. V_new will be element-wisely\n // divided by H_sqrt for a better update direction.\n H_sqrt = Sqrt(H_new) + epsilon\n\n // Compute learning-rate. Note that \"alpha**T\"/\"beta**T\" is alpha's/beta's T-th power.\n R_adjusted = T > 0 ? 
R * Sqrt(1 - beta**T) / (1 - alpha**T) : R\n\n // Compute new value of \"X\".\n X_new = X - R_adjusted * V_new / H_sqrt\n\n // Post-update regularization.\n X_final = (1 - norm_coefficient_post) * X_new\n\n If there are multiple inputs to be optimized, the pseudo code will be applied\n independently to each of them.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 0.8999999761581421, + "description": "Coefficient of previously accumulated gradient in running average. Default to 0.9." + }, + { + "name": "beta", + "type": "float32", + "required": false, + "default": 0.9990000128746033, + "description": "Coefficient of previously accumulated squared-gradient in running average. Default to 0.999." + }, + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999974752427e-07, + "description": "Small scalar to avoid dividing by zero." + }, + { + "name": "norm_coefficient", + "type": "float32", + "required": false, + "description": "Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization." + }, + { + "name": "norm_coefficient_post", + "type": "float32", + "required": false, + "description": "Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization." + } + ], + "inputs": [ + { + "name": "R", + "type": "T1", + "description": "The initial learning rate." + }, + { + "name": "T", + "type": "T2", + "description": "The update count of \"X\". It should be a scalar." + }, + { + "name": "inputs", + "type": "T3", + "list": true, + "description": "The tensors to be optimized, followed by their respective gradients, followed by their respective accumulated gradients (aka momentum), followed by their respective accumulated squared gradients. For example, to optimize tensors \"X_1\" and \"X_2,\", the input list would be [\"X_1\", \"X_2\", gradient of \"X_1\", gradient of \"X_2\", accumulated gradient of \"X_1\", accumulated gradient of \"X_2\", accumulated squared gradient of \"X_1\", accumulated squared gradient of \"X_2\"]." + } + ], + "min_input": 3, + "max_input": 2147483647, + "outputs": [ + { + "name": "outputs", + "type": "T3", + "list": true, + "description": "New values of optimized tensors, followed by their respective new accumulated gradients, followed by their respective new accumulated squared gradients. For example, if two tensors \"X_1\" and \"X_2\" are optimized, the outputs list would be [new value of \"X_1\", new value of \"X_2\", new accumulated gradient of \"X_1\", new accumulated gradient of \"X_2\", new accumulated squared gradient of \"X_1\", new accumulated squared gradient of \"X_2\"]." 
+ } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "3 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input types to float scalars.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain input types to 64-bit integer scalars.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "adam", + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nalpha = 0.95\nbeta = 0.1\nepsilon = 1e-7\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"Adam\",\n inputs=[\"R\", \"T\", \"X\", \"G\", \"V\", \"H\"],\n outputs=[\"X_new\", \"V_new\", \"H_new\"],\n norm_coefficient=norm_coefficient,\n alpha=alpha,\n beta=beta,\n epsilon=epsilon,\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,\n)\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\nx = np.array([1.2, 2.8], dtype=np.float32)\ng = np.array([-0.94, -2.5], dtype=np.float32)\nv = np.array([1.7, 3.6], dtype=np.float32)\nh = np.array([0.1, 0.1], dtype=np.float32)\n\n# Compute expected outputs of Adam.\nx_new, v_new, h_new = apply_adam(\n r, t, x, g, v, h, norm_coefficient, 0.0, alpha, beta, epsilon\n)\n\n# Check results.\nexpect(\n node,\n inputs=[r, t, x, g, v, h],\n outputs=[x_new, v_new, h_new],\n name=\"test_adam\",\n opset_imports=[\n onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)\n ],\n)" + }, + { + "summary": "adam_multiple", + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nalpha = 0.95\nbeta = 0.85\nepsilon = 1e-2\n\nnode = onnx.helper.make_node(\n \"Adam\",\n inputs=[\"R\", \"T\", \"X1\", \"X2\", \"G1\", \"G2\", \"V1\", \"V2\", \"H1\", \"H2\"],\n outputs=[\"X1_new\", \"X2_new\", \"V1_new\", \"V2_new\", \"H1_new\", \"H2_new\"],\n norm_coefficient=norm_coefficient,\n alpha=alpha,\n beta=beta,\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,\n)\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\n\nx1 = np.array([1.0], dtype=np.float32)\ng1 = np.array([-1.0], dtype=np.float32)\nv1 = np.array([2.0], dtype=np.float32)\nh1 = np.array([0.5], dtype=np.float32)\n\nx2 = np.array([1.0, 2.0], dtype=np.float32)\ng2 = np.array([-1.0, -3.0], dtype=np.float32)\nv2 = np.array([4.0, 1.0], dtype=np.float32)\nh2 = np.array([1.0, 10.0], dtype=np.float32)\n\n# Compute expected outputs of Adam.\nx1_new, v1_new, h1_new = apply_adam(\n r, t, x1, g1, v1, h1, norm_coefficient, 0.0, alpha, beta, epsilon\n)\nx2_new, v2_new, h2_new = apply_adam(\n r, t, x2, g2, v2, h2, norm_coefficient, 0.0, alpha, beta, epsilon\n)\n\n# Check results.\nexpect(\n node,\n inputs=[r, t, x1, x2, g1, g2, v1, v2, h1, h2],\n outputs=[x1_new, x2_new, v1_new, v2_new, h1_new, h2_new],\n name=\"test_adam_multiple\",\n opset_imports=[\n onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)\n ],\n)" + } + ] + }, + { + "name": "Add", + "module": "ai.onnx", + "version": 1, + "description": "Performs element-wise binary addition (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. 
When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions. See doc for details." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Pass 1 to enable broadcasting" + }, + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand, should share the type with the second operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same dimensions and type as A" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "add", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add\")" + }, + { + "summary": "add_broadcast", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add_bcast\")" + }, + { + "summary": "add_uint8", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add_uint8\")" + } + ] + }, + { + "name": "Add", + "module": "ai.onnx", + "version": 6, + "description": "Performs element-wise binary addition (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. 
The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions. See doc for details." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Pass 1 to enable broadcasting" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand, should share the type with the second operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same dimensions and type as A" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "add", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add\")" + }, + { + "summary": "add_broadcast", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add_bcast\")" + }, + { + "summary": "add_uint8", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add_uint8\")" + } + ] + }, + { + "name": "Add", + "module": "ai.onnx", + "version": 7, + "description": "Performs element-wise binary addition (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "add", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add\")" + }, + { + "summary": "add_broadcast", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add_bcast\")" + }, + { + "summary": "add_uint8", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add_uint8\")" + } + ] + }, + { + "name": "Add", + "module": "ai.onnx", + "version": 13, + "description": "Performs element-wise binary addition (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "add", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add\")" + }, + { + "summary": "add_broadcast", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add_bcast\")" + }, + { + "summary": "add_uint8", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add_uint8\")" + } + ] + }, + { + "name": "Add", + "module": "ai.onnx", + "version": 14, + "description": "Performs element-wise binary addition (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n\n(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "add", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add\")" + }, + { + "summary": "add_broadcast", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add_bcast\")" + }, + { + "summary": "add_uint8", + "code": "node = onnx.helper.make_node(\n \"Add\",\n inputs=[\"x\", \"y\"],\n outputs=[\"sum\"],\n)\n\nx = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\nexpect(node, inputs=[x, y], outputs=[x + y], name=\"test_add_uint8\")" + } + ] + }, + { + "name": "AffineGrid", + "module": "ai.onnx", + "version": 20, + "description": "Generates a 2D or 3D flow field (sampling grid), given a batch of affine matrices theta\n(https://pytorch.org/docs/stable/generated/torch.nn.functional.affine_grid.html).\nAn affine matrix `theta` is applied to a position tensor represented in its homogeneous expression. Here is an example in 3D:\n```\n[r00, r01, r02, t0] [x] [x']\n[r10, r11, r12, t1] * [y] = [y']\n[r20, r21, r22, t2] [z] [z']\n[0, 0, 0, 1 ] [1] [1 ]\n```\nwhere `(x, y, z)` is the position in the original space, `(x', y', z')` is the position in the output space.\nThe last row is always `[0, 0, 0, 1]` and is not stored in the affine matrix. Therefore we have `theta` of shape `(N, 2, 3)` for 2D or `(N, 3, 4)` for 3D.\n\nInput `size` is used to define grid of positions evenly spaced in the original 2D or 3D space, with dimensions ranging from `-1` to `1`.\nThe output `grid` contains positions in the output space.\n\nWhen `align_corners=1`, consider `-1` and `1` to refer to the centers of the corner pixels (mark `v` in illustration).\n```\nv v v v\n|-------------------|------------------|\n-1 0 1\n```\nWhen `align_corners=0`, consider `-1` and `1` to refer to the outer edge of the corner pixels.\n```\n v v v v\n|------------------|-------------------|\n-1 0 1\n```\n", + "attributes": [ + { + "name": "align_corners", + "type": "int64", + "required": false, + "description": "if align_corners=1, consider -1 and 1 to refer to the centers of the corner pixels. if align_corners=0, consider -1 and 1 to refer to the outer edge the corner pixels." 
+ } + ], + "inputs": [ + { + "name": "theta", + "type": "T1", + "description": "input batch of affine matrices with shape (N, 2, 3) for 2D or (N, 3, 4) for 3D" + }, + { + "name": "size", + "type": "T2", + "description": "the target output image size (N, C, H, W) for 2D or (N, C, D, H, W) for 3D" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "grid", + "type": "T1", + "description": "output tensor of shape (N, H, W, 2) of 2D sample coordinates or (N, D, H, W, 3) of 3D sample coordinates." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain grid types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain size's type to int64 tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "2d_no_reference_evaluator", + "code": "theta_2d = create_theta_2d()\nN, C, H, W = len(theta_2d), 3, 5, 6\ndata_size = (H, W)\nfor align_corners in (0, 1):\n node = onnx.helper.make_node(\n \"AffineGrid\",\n inputs=[\"theta\", \"size\"],\n outputs=[\"grid\"],\n align_corners=align_corners,\n )\n\n original_grid = construct_original_grid(data_size, align_corners)\n grid = apply_affine_transform(theta_2d, original_grid)\n\n test_name = \"test_affine_grid_2d\"\n if align_corners == 1:\n test_name += \"_align_corners\"\n expect(\n node,\n inputs=[theta_2d, np.array([N, C, H, W], dtype=np.int64)],\n outputs=[grid],\n name=test_name,\n )" + }, + { + "summary": "3d_no_reference_evaluator", + "code": "theta_3d = create_theta_3d()\nN, C, D, H, W = len(theta_3d), 3, 4, 5, 6\ndata_size = (D, H, W)\nfor align_corners in (0, 1):\n node = onnx.helper.make_node(\n \"AffineGrid\",\n inputs=[\"theta\", \"size\"],\n outputs=[\"grid\"],\n align_corners=align_corners,\n )\n\n original_grid = construct_original_grid(data_size, align_corners)\n grid = apply_affine_transform(theta_3d, original_grid)\n\n test_name = \"test_affine_grid_3d\"\n if align_corners == 1:\n test_name += \"_align_corners\"\n expect(\n node,\n inputs=[theta_3d, np.array([N, C, D, H, W], dtype=np.int64)],\n outputs=[grid],\n name=test_name,\n )" + } + ] + }, + { + "name": "And", + "module": "ai.onnx", + "version": 1, + "description": "Returns the tensor resulted from performing the `and` logical operation\nelementwise on the input tensors `A` and `B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Enable broadcasting" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Left input tensor for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Right input tensor for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to boolean tensor.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "and", + "code": "node = onnx.helper.make_node(\n \"And\",\n inputs=[\"x\", \"y\"],\n outputs=[\"and\"],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(bool)\ny = (np.random.randn(3, 4) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and2d\")\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and3d\")\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and4d\")" + }, + { + "summary": "and_broadcast", + "code": "node = onnx.helper.make_node(\n \"And\",\n inputs=[\"x\", \"y\"],\n outputs=[\"and\"],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(5) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and_bcast3v1d\")\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(4, 5) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and_bcast3v2d\")\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(5, 6) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and_bcast4v2d\")\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and_bcast4v3d\")\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(bool)\ny = (np.random.randn(3, 1, 5, 6) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and_bcast4v4d\")" + } + ] + }, + { + "name": "And", + "module": "ai.onnx", + "version": 7, + "description": "Returns the tensor resulted from performing the `and` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to boolean tensor.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "and", + "code": "node = onnx.helper.make_node(\n \"And\",\n inputs=[\"x\", \"y\"],\n outputs=[\"and\"],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(bool)\ny = (np.random.randn(3, 4) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and2d\")\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and3d\")\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and4d\")" + }, + { + "summary": "and_broadcast", + "code": "node = onnx.helper.make_node(\n \"And\",\n inputs=[\"x\", \"y\"],\n outputs=[\"and\"],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(5) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and_bcast3v1d\")\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(4, 5) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and_bcast3v2d\")\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(5, 6) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and_bcast4v2d\")\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and_bcast4v3d\")\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(bool)\ny = (np.random.randn(3, 1, 5, 6) > 0).astype(bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_and_bcast4v4d\")" + } + ] + }, + { + "name": "ArgMax", + "module": "ai.onnx", + "version": 1, + "description": "Computes the indices of the max elements of the input tensor's element along the\nprovided axis. The resulting tensor has the same rank as the input if keepdims equals 1.\nIf keepdims equal 0, then the resulted tensor have the reduced dimension pruned.\nThe type of the output tensor is integer.", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "The axis in which to compute the arg indices." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "tensor(int64)", + "description": "Reduced output tensor with integer data type." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], keepdims=keepdims\n)\n\n# result: [[1, 1]]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_random\",\n)" + }, + { + "summary": "default_axes_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n keepdims=keepdims,\n select_last_index=True,\n)\n\n# result: [[1, 1]]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_random_select_last_index\",\n)" + }, + { + "summary": "keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmax_keepdims_example\"\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmax_keepdims_random\"\n)" + }, + { + "summary": "keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "negative_axis_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], 
outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_random\",\n)" + }, + { + "summary": "negative_axis_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "no_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [0, 1]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmax_no_keepdims_random\"\n)" + }, + { + "summary": "no_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [1, 1]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_random_select_last_index\",\n)" + } + ] + }, + { + "name": "ArgMax", + "module": "ai.onnx", + "version": 11, + "description": "Computes the indices of the max elements of the input tensor's element along the\nprovided axis. The resulting tensor has the same rank as the input if keepdims equals 1.\nIf keepdims equal 0, then the resulting tensor has the reduced dimension pruned.\nThe input tensor must not be empty.\nThe type of the output tensor is integer.", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data)." 
+ }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "tensor(int64)", + "description": "Reduced output tensor with integer data type." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], keepdims=keepdims\n)\n\n# result: [[1, 1]]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_random\",\n)" + }, + { + "summary": "default_axes_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n keepdims=keepdims,\n select_last_index=True,\n)\n\n# result: [[1, 1]]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_random_select_last_index\",\n)" + }, + { + "summary": "keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmax_keepdims_example\"\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmax_keepdims_random\"\n)" + }, + { + "summary": "keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 
4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "negative_axis_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_random\",\n)" + }, + { + "summary": "negative_axis_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "no_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [0, 1]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmax_no_keepdims_random\"\n)" + }, + { + "summary": "no_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [1, 1]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_random_select_last_index\",\n)" + },
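+ { + "summary": "tie_breaking_first_index (illustrative)", + "code": "# Editor's sketch, not an official ONNX backend test: opset 11 ArgMax has no\n# select_last_index attribute, so a repeated maximum resolves to its first\n# occurrence, matching np.argmax.\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], axis=1, keepdims=1\n)\n\ndata = np.array([[7, 7], [3, 10]], dtype=np.float32)\nresult = np.expand_dims(np.argmax(data, axis=1), axis=1).astype(np.int64)\n# result: [[0], [1]] -- the tied max 7 yields index 0, its first occurrence" + }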
+ ] + }, + { + "name": "ArgMax", + "module": "ai.onnx", + "version": 12, + "description": "Computes the indices of the max elements of the input tensor along the\nprovided axis. The resulting tensor has the same rank as the input if keepdims equals 1.\nIf keepdims equals 0, then the resulting tensor has the reduced dimension pruned.\nIf select_last_index is True (default False), the index of the last occurrence of the max\nis selected if the max appears more than once in the input. Otherwise the index of the\nfirst occurrence is selected.\nThe type of the output tensor is integer.", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "The axis along which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "select_last_index", + "type": "int64", + "required": false, + "description": "Whether to select the last index or the first index if the max appears in multiple indices, default is False (first index)." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "tensor(int64)", + "description": "Reduced output tensor with integer data type." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], keepdims=keepdims\n)\n\n# result: [[1, 1]]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_random\",\n)" + }, + { + "summary": "default_axes_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n keepdims=keepdims,\n select_last_index=True,\n)\n\n# result: [[1, 1]]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_random_select_last_index\",\n)" + }, + { + "summary": "keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result],
name=\"test_argmax_keepdims_example\"\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmax_keepdims_random\"\n)" + }, + { + "summary": "keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "negative_axis_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_random\",\n)" + }, + { + "summary": "negative_axis_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "no_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [0, 1]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmax_no_keepdims_random\"\n)" + }, + { + "summary": "no_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n 
outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [1, 1]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_random_select_last_index\",\n)" + } + ] + }, + { + "name": "ArgMax", + "module": "ai.onnx", + "version": 13, + "description": "Computes the indices of the max elements of the input tensor's element along the\nprovided axis. The resulting tensor has the same rank as the input if keepdims equals 1.\nIf keepdims equals 0, then the resulting tensor has the reduced dimension pruned.\nIf select_last_index is True (default False), the index of the last occurrence of the max\nis selected if the max appears more than once in the input. Otherwise the index of the\nfirst occurrence is selected.\nThe type of the output tensor is integer.", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "select_last_index", + "type": "int64", + "required": false, + "description": "Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index)." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "tensor(int64)", + "description": "Reduced output tensor with integer data type." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], keepdims=keepdims\n)\n\n# result: [[1, 1]]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_random\",\n)" + }, + { + "summary": "default_axes_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n keepdims=keepdims,\n select_last_index=True,\n)\n\n# result: [[1, 1]]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_default_axis_random_select_last_index\",\n)" + }, + { + "summary": "keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmax_keepdims_example\"\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmax_keepdims_random\"\n)" + }, + { + "summary": "keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "negative_axis_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\", 
inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_random\",\n)" + }, + { + "summary": "negative_axis_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_negative_axis_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "no_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# result: [0, 1]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmax_no_keepdims_random\"\n)" + }, + { + "summary": "no_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMax\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [1, 1]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmax_no_keepdims_random_select_last_index\",\n)" + } + ] + }, + { + "name": "ArgMin", + "module": "ai.onnx", + "version": 1, + "description": "Computes the indices of the min elements of the input tensor's element along the\nprovided axis. The resulting tensor has the same rank as the input if keepdims equals 1.\nIf keepdims equal 0, then the resulted tensor have the reduced dimension pruned.\nThe type of the output tensor is integer.", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "The axis in which to compute the arg indices." 
+ }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "tensor(int64)", + "description": "Reduced output tensor with integer data type." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], keepdims=keepdims\n)\n\n# The content of result is : [[0], [0]]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_random\",\n)" + }, + { + "summary": "default_axes_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n keepdims=keepdims,\n select_last_index=True,\n)\n\n# result: [[0, 0]]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_random_select_last_index\",\n)" + }, + { + "summary": "keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmin_keepdims_example\"\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmin_keepdims_random\"\n)" + }, + { + "summary": "keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n 
name=\"test_argmin_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "negative_axis_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_random\",\n)" + }, + { + "summary": "negative_axis_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "no_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1, 0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmin_no_keepdims_random\"\n)" + }, + { + "summary": "no_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1, 0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_random_select_last_index\",\n)" + } + ] + }, + { + "name": "ArgMin", + "module": "ai.onnx", + "version": 11, + "description": "Computes 
the indices of the min elements of the input tensor along the\nprovided axis. The resulting tensor has the same rank as the input if keepdims equals 1.\nIf keepdims equals 0, then the resulting tensor has the reduced dimension pruned.\nThe input tensor must not be empty.\nThe type of the output tensor is integer.", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "The axis along which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "tensor(int64)", + "description": "Reduced output tensor with integer data type." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], keepdims=keepdims\n)\n\n# The content of result is : [[0], [0]]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_random\",\n)" + }, + { + "summary": "default_axes_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n keepdims=keepdims,\n select_last_index=True,\n)\n\n# result: [[0, 0]]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_random_select_last_index\",\n)" + }, + { + "summary": "keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmin_keepdims_example\"\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result],
name=\"test_argmin_keepdims_random\"\n)" + }, + { + "summary": "keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "negative_axis_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_random\",\n)" + }, + { + "summary": "negative_axis_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "no_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1, 0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmin_no_keepdims_random\"\n)" + }, + { + "summary": "no_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1, 0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n 
inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_random_select_last_index\",\n)" + } + ] + }, + { + "name": "ArgMin", + "module": "ai.onnx", + "version": 12, + "description": "Computes the indices of the min elements of the input tensor along the\nprovided axis. The resulting tensor has the same rank as the input if keepdims equals 1.\nIf keepdims equals 0, then the resulting tensor has the reduced dimension pruned.\nIf select_last_index is True (default False), the index of the last occurrence of the min\nis selected if the min appears more than once in the input. Otherwise the index of the\nfirst occurrence is selected.\nThe type of the output tensor is integer.", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "The axis along which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "select_last_index", + "type": "int64", + "required": false, + "description": "Whether to select the last index or the first index if the min appears in multiple indices, default is False (first index)." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "tensor(int64)", + "description": "Reduced output tensor with integer data type."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], keepdims=keepdims\n)\n\n# The content of result is : [[0], [0]]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_random\",\n)" + }, + { + "summary": "default_axes_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n keepdims=keepdims,\n select_last_index=True,\n)\n\n# result: [[0, 0]]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_random_select_last_index\",\n)" + }, + { + "summary": "keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmin_keepdims_example\"\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmin_keepdims_random\"\n)" + }, + { + "summary": "keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "negative_axis_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = 
onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_random\",\n)" + }, + { + "summary": "negative_axis_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "no_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1, 0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmin_no_keepdims_random\"\n)" + }, + { + "summary": "no_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1, 0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_random_select_last_index\",\n)" + },
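+ { + "summary": "tie_breaking_last_index (illustrative)", + "code": "# Editor's sketch, not an official ONNX backend test: with select_last_index=1\n# (new in opset 12), a repeated minimum resolves to its last occurrence;\n# flipping the axis reduces it to a plain np.argmin, mirroring\n# argmin_use_numpy_select_last_index.\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=1,\n keepdims=1,\n select_last_index=True,\n)\n\ndata = np.array([[2, 2], [3, 10]], dtype=np.float32)\nlast = data.shape[1] - 1 - np.argmin(np.flip(data, axis=1), axis=1)\nresult = np.expand_dims(last, axis=1).astype(np.int64)\n# result: [[1], [0]] -- the tied min 2 yields index 1, its last occurrence" + }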
+ ] + }, + { + "name": "ArgMin", + "module": "ai.onnx", + "version": 13, + "description": "Computes the indices of the min elements of the input tensor along the\nprovided axis. The resulting tensor has the same rank as the input if keepdims equals 1.\nIf keepdims equals 0, then the resulting tensor has the reduced dimension pruned.\nIf select_last_index is True (default False), the index of the last occurrence of the min\nis selected if the min appears more than once in the input. Otherwise the index of the\nfirst occurrence is selected.\nThe type of the output tensor is integer.", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "The axis along which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "select_last_index", + "type": "int64", + "required": false, + "description": "Whether to select the last index or the first index if the min appears in multiple indices, default is False (first index)." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "tensor(int64)", + "description": "Reduced output tensor with integer data type." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], keepdims=keepdims\n)\n\n# The content of result is : [[0], [0]]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_random\",\n)" + }, + { + "summary": "default_axes_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n keepdims=keepdims,\n select_last_index=True,\n)\n\n# result: [[0, 0]]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_default_axis_random_select_last_index\",\n)" + }, + { + "summary": "keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmin_keepdims_example\"\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result],
name=\"test_argmin_keepdims_random\"\n)" + }, + { + "summary": "keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "negative_axis_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_random\",\n)" + }, + { + "summary": "negative_axis_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_negative_axis_keepdims_random_select_last_index\",\n)" + }, + { + "summary": "no_keepdims", + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"data\"], outputs=[\"result\"], axis=axis, keepdims=keepdims\n)\n# The content of result is : [[1, 0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_example\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(\n node, inputs=[data], outputs=[result], name=\"test_argmin_no_keepdims_random\"\n)" + }, + { + "summary": "no_keepdims_select_last_index", + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ArgMin\",\n inputs=[\"data\"],\n outputs=[\"result\"],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True,\n)\n# result: [[1, 0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n 
inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_example_select_last_index\",\n)\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(\n node,\n inputs=[data],\n outputs=[result],\n name=\"test_argmin_no_keepdims_random_select_last_index\",\n)" + } + ] + }, + { + "name": "ArrayFeatureExtractor", + "module": "ai.onnx.ml", + "version": 1, + "description": "Select elements of the input tensor based on the indices passed.
\n The indices are applied to the last axes of the tensor.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Data to be selected" + }, + { + "name": "Y", + "type": "tensor(int64)", + "description": "The indices, based on 0 as the first index of any dimension." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Z", + "type": "T", + "description": "Selected output data as an array" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input must be a tensor of a numeric type or string. The output will be of the same tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)", + "tensor(string)" + ] + } + ], + "examples": [ + { + "summary": "arrayfeatureextractor", + "code": "node = onnx.helper.make_node(\n \"ArrayFeatureExtractor\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n domain=\"ai.onnx.ml\",\n)\n\nx = np.arange(12).reshape((3, 4)).astype(np.float32)\ny = np.array([0, 1], dtype=np.int64)\nz = np.array([[0, 4, 8], [1, 5, 9]], dtype=np.float32).T\nexpect(\n node,\n inputs=[x, y],\n outputs=[z],\n name=\"test_ai_onnx_ml_array_feature_extractor\",\n)" + } + ] + }, + { + "name": "Asin", + "module": "ai.onnx", + "version": 7, + "description": "Calculates the arcsine (inverse of sine) of the given input tensor, element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The arcsine of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "asin", + "code": "node = onnx.helper.make_node(\n \"Asin\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-0.5, 0, 0.5]).astype(np.float32)\ny = np.arcsin(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_asin_example\")\n\nx = np.random.rand(3, 4, 5).astype(np.float32)\ny = np.arcsin(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_asin\")" + } + ] + }, + { + "name": "Asinh", + "module": "ai.onnx", + "version": 9, + "description": "Calculates the hyperbolic arcsine of the given input tensor element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The hyperbolic arcsine values of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "asinh", + "code": "node = onnx.helper.make_node(\n \"Asinh\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.arcsinh(x) # expected output [-0.88137358, 0., 0.88137358]\nexpect(node, inputs=[x], outputs=[y], name=\"test_asinh_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.arcsinh(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_asinh\")" + } + ] + }, + { + "name": "Atan", + "module": 
"ai.onnx", + "version": 7, + "description": "Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The arctangent of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "atan", + "code": "node = onnx.helper.make_node(\n \"Atan\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.arctan(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_atan_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.arctan(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_atan\")" + } + ] + }, + { + "name": "Atanh", + "module": "ai.onnx", + "version": 9, + "description": "Calculates the hyperbolic arctangent of the given input tensor element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The hyperbolic arctangent values of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "atanh", + "code": "node = onnx.helper.make_node(\n \"Atanh\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-0.5, 0, 0.5]).astype(np.float32)\ny = np.arctanh(x) # expected output [-0.54930615, 0., 0.54930615]\nexpect(node, inputs=[x], outputs=[y], name=\"test_atanh_example\")\n\nx = np.random.uniform(0.0, 1.0, (3, 4, 5)).astype(np.float32)\ny = np.arctanh(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_atanh\")" + } + ] + }, + { + "name": "AveragePool", + "module": "ai.onnx", + "version": 1, + "description": "AveragePool consumes an input tensor X and applies average pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n average pooling consisting of computing the average on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. 
If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is divided by the number of elements exclude pad.\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. 
Floor value of the dimension is used" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "averagepool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2]\nstrides = [1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_1d_default\")" + }, + { + "summary": "averagepool_2d_ceil", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[6, 7.5], [12, 13.5]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_ceil\")" + }, + { + "summary": "averagepool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_default\")" + }, + { + "summary": "averagepool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n ceil_mode=True,\n)\n\n# input shape: [1, 1, 4, 4]\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array([[[[6, 7], [10, 11]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_dilations\")" + }, + { + "summary": "averagepool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides, ceil_mode=False\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])),\n mode=\"constant\",\n constant_values=np.nan,\n)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, 
\"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_pads\")" + }, + { + "summary": "averagepool_2d_pads_count_include_pad", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\ndilations = (1, 1)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides, dilations, ceil_mode=False\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(\n padded,\n x_shape,\n kernel_shape,\n strides,\n out_shape,\n \"AVG\",\n pads,\n count_include_pad=1,\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_pads_count_include_pad\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [7, 7.5, 8, 8.5, 9],\n [9.5, 10, 10.5, 11, 11.5],\n [12, 12.5, 13, 13.5, 14],\n [14.5, 15, 15.5, 16, 16.5],\n [17, 17.5, 18, 18.5, 19],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_precomputed_pads\"\n)" + }, + { + "summary": "averagepool_2d_precomputed_pads_count_include_pad", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [2.5200, 3.6000, 4.8000, 4.0800, 3.2400],\n [4.5600, 6.4000, 8.4000, 7.0400, 5.5200],\n [7.2000, 10.0000, 13.0000, 10.8000, 8.4000],\n [6.9600, 9.6000, 12.4000, 10.2400, 7.9200],\n [6.1200, 8.4000, 10.8000, 8.8800, 6.8400],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_pads_count_include_pad\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_same_upper", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[4, 5.5, 7], [11.5, 13, 14.5], [19, 20.5, 22]]]]).astype(\n np.float32\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n 
name=\"test_averagepool_2d_precomputed_same_upper\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[4, 6], [14, 16]]]]).astype(np.float32)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_strides\",\n)" + }, + { + "summary": "averagepool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = (pad_top, pad_left, pad_bottom, pad_right)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_same_lower\")" + }, + { + "summary": "averagepool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = (pad_top, pad_left, pad_bottom, pad_right)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_same_upper\")" + }, + { + "summary": "averagepool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n strides=[3, 3],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape[2:], kernel_shape, strides, ceil_mode=False\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_strides\")" + }, + { 
+ "summary": "averagepool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_3d_default\")" + }, + { + "summary": "averagepool_3d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=[2, 2, 2],\n ceil_mode=True,\n)\n\n# input shape: [1, 1, 4, 4, 4]\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array([[[[[6, 7], [10, 11]], [[6, 7], [10, 11]]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_averagepool_3d_dilations_small\"\n)" + }, + { + "summary": "averagepool_3d_dilations_large", + "code": "x_shape = (32, 32, 32)\ndilations = (2, 2, 2)\nkernel_shape = (5, 5, 5)\nstrides = (3, 3, 3)\ncount_include_pad = 0\n\nfor count_include_pad in (0, 1):\n for ceil_mode in (True, False):\n node = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n dilations=dilations,\n count_include_pad=count_include_pad,\n ceil_mode=ceil_mode,\n )\n\n x = np.random.randn(1, 1, *x_shape).astype(np.float32)\n out_shape, pads = get_output_shape_explicit_padding(\n None,\n x_shape,\n kernel_shape,\n strides,\n dilations=dilations,\n ceil_mode=ceil_mode,\n )\n padded = np.pad(\n x,\n (\n (0, 0),\n (0, 0),\n (pads[0], pads[3]),\n (pads[1], pads[4]),\n (pads[2], pads[5]),\n ),\n mode=\"constant\",\n constant_values=0 if count_include_pad == 1 else np.nan,\n )\n y = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"AVG\",\n pads=pads,\n dilations=dilations,\n count_include_pad=count_include_pad,\n )\n\n test_name = f\"test_averagepool_3d_dilations_large_count_include_pad_is_{count_include_pad}_ceil_mode_is_{ceil_mode}\"\n expect(node, inputs=[x], outputs=[y], name=test_name)" + } + ], + "category": "Pool" + }, + { + "name": "AveragePool", + "module": "ai.onnx", + "version": 7, + "description": "AveragePool consumes an input tensor X and applies average pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n average pooling consisting of computing the average on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. 
If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding." + }, + { + "name": "count_include_pad", + "type": "int64", + "required": false, + "description": "Whether include pad pixels when calculating values for the edges. Default is 0, doesn't count include pad." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. 
Floor value of the dimension is used" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "averagepool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2]\nstrides = [1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_1d_default\")" + }, + { + "summary": "averagepool_2d_ceil", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[6, 7.5], [12, 13.5]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_ceil\")" + }, + { + "summary": "averagepool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_default\")" + }, + { + "summary": "averagepool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n ceil_mode=True,\n)\n\n# input shape: [1, 1, 4, 4]\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array([[[[6, 7], [10, 11]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_dilations\")" + }, + { + "summary": "averagepool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides, ceil_mode=False\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])),\n mode=\"constant\",\n constant_values=np.nan,\n)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, 
\"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_pads\")" + }, + { + "summary": "averagepool_2d_pads_count_include_pad", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\ndilations = (1, 1)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides, dilations, ceil_mode=False\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(\n padded,\n x_shape,\n kernel_shape,\n strides,\n out_shape,\n \"AVG\",\n pads,\n count_include_pad=1,\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_pads_count_include_pad\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [7, 7.5, 8, 8.5, 9],\n [9.5, 10, 10.5, 11, 11.5],\n [12, 12.5, 13, 13.5, 14],\n [14.5, 15, 15.5, 16, 16.5],\n [17, 17.5, 18, 18.5, 19],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_precomputed_pads\"\n)" + }, + { + "summary": "averagepool_2d_precomputed_pads_count_include_pad", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [2.5200, 3.6000, 4.8000, 4.0800, 3.2400],\n [4.5600, 6.4000, 8.4000, 7.0400, 5.5200],\n [7.2000, 10.0000, 13.0000, 10.8000, 8.4000],\n [6.9600, 9.6000, 12.4000, 10.2400, 7.9200],\n [6.1200, 8.4000, 10.8000, 8.8800, 6.8400],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_pads_count_include_pad\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_same_upper", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[4, 5.5, 7], [11.5, 13, 14.5], [19, 20.5, 22]]]]).astype(\n np.float32\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n 
name=\"test_averagepool_2d_precomputed_same_upper\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[4, 6], [14, 16]]]]).astype(np.float32)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_strides\",\n)" + }, + { + "summary": "averagepool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = (pad_top, pad_left, pad_bottom, pad_right)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_same_lower\")" + }, + { + "summary": "averagepool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = (pad_top, pad_left, pad_bottom, pad_right)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_same_upper\")" + }, + { + "summary": "averagepool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n strides=[3, 3],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape[2:], kernel_shape, strides, ceil_mode=False\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_strides\")" + }, + { 
+ "summary": "averagepool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_3d_default\")" + }, + { + "summary": "averagepool_3d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=[2, 2, 2],\n ceil_mode=True,\n)\n\n# input shape: [1, 1, 4, 4, 4]\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array([[[[[6, 7], [10, 11]], [[6, 7], [10, 11]]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_averagepool_3d_dilations_small\"\n)" + }, + { + "summary": "averagepool_3d_dilations_large", + "code": "x_shape = (32, 32, 32)\ndilations = (2, 2, 2)\nkernel_shape = (5, 5, 5)\nstrides = (3, 3, 3)\ncount_include_pad = 0\n\nfor count_include_pad in (0, 1):\n for ceil_mode in (True, False):\n node = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n dilations=dilations,\n count_include_pad=count_include_pad,\n ceil_mode=ceil_mode,\n )\n\n x = np.random.randn(1, 1, *x_shape).astype(np.float32)\n out_shape, pads = get_output_shape_explicit_padding(\n None,\n x_shape,\n kernel_shape,\n strides,\n dilations=dilations,\n ceil_mode=ceil_mode,\n )\n padded = np.pad(\n x,\n (\n (0, 0),\n (0, 0),\n (pads[0], pads[3]),\n (pads[1], pads[4]),\n (pads[2], pads[5]),\n ),\n mode=\"constant\",\n constant_values=0 if count_include_pad == 1 else np.nan,\n )\n y = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"AVG\",\n pads=pads,\n dilations=dilations,\n count_include_pad=count_include_pad,\n )\n\n test_name = f\"test_averagepool_3d_dilations_large_count_include_pad_is_{count_include_pad}_ceil_mode_is_{ceil_mode}\"\n expect(node, inputs=[x], outputs=[y], name=test_name)" + } + ], + "category": "Pool" + }, + { + "name": "AveragePool", + "module": "ai.onnx", + "version": 10, + "description": "AveragePool consumes an input tensor X and applies average pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n average pooling consisting of computing the average on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. 
The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding." + }, + { + "name": "ceil_mode", + "type": "int64", + "required": false, + "description": "Whether to use ceil or floor (default) to compute the output shape." + }, + { + "name": "count_include_pad", + "type": "int64", + "required": false, + "description": "Whether include pad pixels when calculating values for the edges. Default is 0, doesn't count include pad." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." 
+ } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "averagepool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2]\nstrides = [1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_1d_default\")" + }, + { + "summary": "averagepool_2d_ceil", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[6, 7.5], [12, 13.5]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_ceil\")" + }, + { + "summary": "averagepool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_default\")" + }, + { + "summary": "averagepool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n ceil_mode=True,\n)\n\n# input shape: [1, 1, 4, 4]\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array([[[[6, 7], [10, 11]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_dilations\")" + }, + { + "summary": "averagepool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n 
pads, x_shape[2:], kernel_shape, strides, ceil_mode=False\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])),\n mode=\"constant\",\n constant_values=np.nan,\n)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_pads\")" + }, + { + "summary": "averagepool_2d_pads_count_include_pad", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\ndilations = (1, 1)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides, dilations, ceil_mode=False\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(\n padded,\n x_shape,\n kernel_shape,\n strides,\n out_shape,\n \"AVG\",\n pads,\n count_include_pad=1,\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_pads_count_include_pad\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [7, 7.5, 8, 8.5, 9],\n [9.5, 10, 10.5, 11, 11.5],\n [12, 12.5, 13, 13.5, 14],\n [14.5, 15, 15.5, 16, 16.5],\n [17, 17.5, 18, 18.5, 19],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_precomputed_pads\"\n)" + }, + { + "summary": "averagepool_2d_precomputed_pads_count_include_pad", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [2.5200, 3.6000, 4.8000, 4.0800, 3.2400],\n [4.5600, 6.4000, 8.4000, 7.0400, 5.5200],\n [7.2000, 10.0000, 13.0000, 10.8000, 8.4000],\n [6.9600, 9.6000, 12.4000, 10.2400, 7.9200],\n [6.1200, 8.4000, 10.8000, 8.8800, 6.8400],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_pads_count_include_pad\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_same_upper", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 
14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[4, 5.5, 7], [11.5, 13, 14.5], [19, 20.5, 22]]]]).astype(\n np.float32\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_same_upper\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[4, 6], [14, 16]]]]).astype(np.float32)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_strides\",\n)" + }, + { + "summary": "averagepool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = (pad_top, pad_left, pad_bottom, pad_right)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_same_lower\")" + }, + { + "summary": "averagepool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = (pad_top, pad_left, pad_bottom, pad_right)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_same_upper\")" + }, + { + "summary": "averagepool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n strides=[3, 3],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, pads = get_output_shape_explicit_padding(\n None, 
x_shape[2:], kernel_shape, strides, ceil_mode=False\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_strides\")" + }, + { + "summary": "averagepool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_3d_default\")" + }, + { + "summary": "averagepool_3d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=[2, 2, 2],\n ceil_mode=True,\n)\n\n# input shape: [1, 1, 4, 4, 4]\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array([[[[[6, 7], [10, 11]], [[6, 7], [10, 11]]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_averagepool_3d_dilations_small\"\n)" + }, + { + "summary": "averagepool_3d_dilations_large", + "code": "x_shape = (32, 32, 32)\ndilations = (2, 2, 2)\nkernel_shape = (5, 5, 5)\nstrides = (3, 3, 3)\ncount_include_pad = 0\n\nfor count_include_pad in (0, 1):\n for ceil_mode in (True, False):\n node = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n dilations=dilations,\n count_include_pad=count_include_pad,\n ceil_mode=ceil_mode,\n )\n\n x = np.random.randn(1, 1, *x_shape).astype(np.float32)\n out_shape, pads = get_output_shape_explicit_padding(\n None,\n x_shape,\n kernel_shape,\n strides,\n dilations=dilations,\n ceil_mode=ceil_mode,\n )\n padded = np.pad(\n x,\n (\n (0, 0),\n (0, 0),\n (pads[0], pads[3]),\n (pads[1], pads[4]),\n (pads[2], pads[5]),\n ),\n mode=\"constant\",\n constant_values=0 if count_include_pad == 1 else np.nan,\n )\n y = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"AVG\",\n pads=pads,\n dilations=dilations,\n count_include_pad=count_include_pad,\n )\n\n test_name = f\"test_averagepool_3d_dilations_large_count_include_pad_is_{count_include_pad}_ceil_mode_is_{ceil_mode}\"\n expect(node, inputs=[x], outputs=[y], name=test_name)" + } + ], + "category": "Pool" + }, + { + "name": "AveragePool", + "module": "ai.onnx", + "version": 11, + "description": "AveragePool consumes an input tensor X and applies average pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n average pooling consisting of computing the average on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. 
The output spatial shape will be the following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is the sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If it is used, the output spatial shape will be the following when ceil_mode is enabled:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\nor when ceil_mode is disabled:\n ```\n VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n\n And the pad shape will be the following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is divided by the number of elements (excluding pad elements when the attribute count_include_pad is zero).\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER." + }, + { + "name": "ceil_mode", + "type": "int64", + "required": false, + "description": "Whether to use ceil or floor (default) to compute the output shape." + }, + { + "name": "count_include_pad", + "type": "int64", + "required": false, + "description": "Whether to include pad pixels when calculating values for the edges. Default is 0 (pad pixels are not counted)." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end part of the corresponding axis. The `pads` format should be as follows: [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along the start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "averagepool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2]\nstrides = [1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_1d_default\")" + }, + { + "summary": "averagepool_2d_ceil", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[6, 7.5], [12, 13.5]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_ceil\")" + }, + { + "summary": "averagepool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_default\")" + }, + { + "summary": "averagepool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n ceil_mode=True,\n)\n\n# input shape: [1, 1, 4, 4]\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array([[[[6, 7], [10, 11]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_dilations\")" + }, + { 
+ "summary": "averagepool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides, ceil_mode=False\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])),\n mode=\"constant\",\n constant_values=np.nan,\n)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_pads\")" + }, + { + "summary": "averagepool_2d_pads_count_include_pad", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\ndilations = (1, 1)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides, dilations, ceil_mode=False\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(\n padded,\n x_shape,\n kernel_shape,\n strides,\n out_shape,\n \"AVG\",\n pads,\n count_include_pad=1,\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_pads_count_include_pad\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [7, 7.5, 8, 8.5, 9],\n [9.5, 10, 10.5, 11, 11.5],\n [12, 12.5, 13, 13.5, 14],\n [14.5, 15, 15.5, 16, 16.5],\n [17, 17.5, 18, 18.5, 19],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_precomputed_pads\"\n)" + }, + { + "summary": "averagepool_2d_precomputed_pads_count_include_pad", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [2.5200, 3.6000, 4.8000, 4.0800, 3.2400],\n [4.5600, 6.4000, 8.4000, 7.0400, 5.5200],\n [7.2000, 10.0000, 13.0000, 10.8000, 8.4000],\n [6.9600, 9.6000, 12.4000, 10.2400, 7.9200],\n [6.1200, 8.4000, 10.8000, 8.8800, 
6.8400],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_pads_count_include_pad\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_same_upper", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[4, 5.5, 7], [11.5, 13, 14.5], [19, 20.5, 22]]]]).astype(\n np.float32\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_same_upper\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[4, 6], [14, 16]]]]).astype(np.float32)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_strides\",\n)" + }, + { + "summary": "averagepool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = (pad_top, pad_left, pad_bottom, pad_right)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_same_lower\")" + }, + { + "summary": "averagepool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = (pad_top, pad_left, pad_bottom, pad_right)\ny = 
pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_same_upper\")" + }, + { + "summary": "averagepool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n    \"AveragePool\",\n    inputs=[\"x\"],\n    outputs=[\"y\"],\n    kernel_shape=[5, 5],\n    strides=[3, 3],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, pads = get_output_shape_explicit_padding(\n    None, x_shape[2:], kernel_shape, strides, ceil_mode=False\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_strides\")" + }, + { + "summary": "averagepool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n    \"AveragePool\",\n    inputs=[\"x\"],\n    outputs=[\"y\"],\n    kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n    pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_3d_default\")" + }, + { + "summary": "averagepool_3d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n    \"AveragePool\",\n    inputs=[\"x\"],\n    outputs=[\"y\"],\n    kernel_shape=[2, 2, 2],\n    strides=[1, 1, 1],\n    dilations=[2, 2, 2],\n    ceil_mode=True,\n)\n\n# input shape: [1, 1, 4, 4, 4]\nx = np.array(\n    [\n        [\n            [\n                [\n                    [1, 2, 3, 4],\n                    [5, 6, 7, 8],\n                    [9, 10, 11, 12],\n                    [13, 14, 15, 16],\n                ],\n                [\n                    [1, 2, 3, 4],\n                    [5, 6, 7, 8],\n                    [9, 10, 11, 12],\n                    [13, 14, 15, 16],\n                ],\n                [\n                    [1, 2, 3, 4],\n                    [5, 6, 7, 8],\n                    [9, 10, 11, 12],\n                    [13, 14, 15, 16],\n                ],\n                [\n                    [1, 2, 3, 4],\n                    [5, 6, 7, 8],\n                    [9, 10, 11, 12],\n                    [13, 14, 15, 16],\n                ],\n            ]\n        ]\n    ]\n).astype(np.float32)\n\ny = np.array([[[[[6, 7], [10, 11]], [[6, 7], [10, 11]]]]]).astype(np.float32)\n\nexpect(\n    node, inputs=[x], outputs=[y], name=\"test_averagepool_3d_dilations_small\"\n)" + }, + { + "summary": "averagepool_3d_dilations_large", + "code": "x_shape = (32, 32, 32)\ndilations = (2, 2, 2)\nkernel_shape = (5, 5, 5)\nstrides = (3, 3, 3)\ncount_include_pad = 0\n\nfor count_include_pad in (0, 1):\n    for ceil_mode in (True, False):\n        node = onnx.helper.make_node(\n            \"AveragePool\",\n            inputs=[\"x\"],\n            outputs=[\"y\"],\n            kernel_shape=kernel_shape,\n            strides=strides,\n            dilations=dilations,\n            count_include_pad=count_include_pad,\n            ceil_mode=ceil_mode,\n        )\n\n        x = np.random.randn(1, 1, *x_shape).astype(np.float32)\n        out_shape, pads = get_output_shape_explicit_padding(\n            None,\n            x_shape,\n            kernel_shape,\n            strides,\n            dilations=dilations,\n            ceil_mode=ceil_mode,\n        )\n        padded = np.pad(\n            x,\n            (\n                (0, 0),\n                (0, 0),\n                (pads[0], pads[3]),\n                (pads[1], pads[4]),\n                (pads[2], pads[5]),\n            ),\n            mode=\"constant\",\n            constant_values=0 if count_include_pad == 1 else np.nan,\n        )\n        y = pool(\n            padded,\n            (1, 1, *x_shape),\n            kernel_shape,\n            strides,\n            out_shape,\n            \"AVG\",\n            pads=pads,\n            dilations=dilations,\n            count_include_pad=count_include_pad,\n        )\n\n        test_name = 
f\"test_averagepool_3d_dilations_large_count_include_pad_is_{count_include_pad}_ceil_mode_is_{ceil_mode}\"\n expect(node, inputs=[x], outputs=[y], name=test_name)" + } + ], + "category": "Pool" + }, + { + "name": "AveragePool", + "module": "ai.onnx", + "version": 19, + "description": "AveragePool consumes an input tensor X and applies average pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n average pooling consisting of computing the average on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape is calculated differently\n depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized.\n With explicit padding (https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d):\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`.\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n or when ceil_mode is disabled (https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D):\n ```\n VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]) + 1\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor((input_spatial_shape[i] - 1) / strides_spatial_shape[i]) + 1\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER." + }, + { + "name": "ceil_mode", + "type": "int64", + "required": false, + "description": "Whether to use ceil or floor (default) to compute the output shape." + }, + { + "name": "count_include_pad", + "type": "int64", + "required": false, + "description": "Whether include pad pixels when calculating values for the edges. Default is 0, doesn't count include pad." 
+ }, + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end part of the corresponding axis. The `pads` format should be as follows: [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along the start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. 
Floor value of the dimension is used" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "averagepool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2]\nstrides = [1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_1d_default\")" + }, + { + "summary": "averagepool_2d_ceil", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[6, 7.5], [12, 13.5]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_ceil\")" + }, + { + "summary": "averagepool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_default\")" + }, + { + "summary": "averagepool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n ceil_mode=True,\n)\n\n# input shape: [1, 1, 4, 4]\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array([[[[6, 7], [10, 11]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_dilations\")" + }, + { + "summary": "averagepool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides, ceil_mode=False\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])),\n mode=\"constant\",\n constant_values=np.nan,\n)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, 
\"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_pads\")" + }, + { + "summary": "averagepool_2d_pads_count_include_pad", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\ndilations = (1, 1)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides, dilations, ceil_mode=False\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(\n padded,\n x_shape,\n kernel_shape,\n strides,\n out_shape,\n \"AVG\",\n pads,\n count_include_pad=1,\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_pads_count_include_pad\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [7, 7.5, 8, 8.5, 9],\n [9.5, 10, 10.5, 11, 11.5],\n [12, 12.5, 13, 13.5, 14],\n [14.5, 15, 15.5, 16, 16.5],\n [17, 17.5, 18, 18.5, 19],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_precomputed_pads\"\n)" + }, + { + "summary": "averagepool_2d_precomputed_pads_count_include_pad", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [2.5200, 3.6000, 4.8000, 4.0800, 3.2400],\n [4.5600, 6.4000, 8.4000, 7.0400, 5.5200],\n [7.2000, 10.0000, 13.0000, 10.8000, 8.4000],\n [6.9600, 9.6000, 12.4000, 10.2400, 7.9200],\n [6.1200, 8.4000, 10.8000, 8.8800, 6.8400],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_pads_count_include_pad\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_same_upper", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[4, 5.5, 7], [11.5, 13, 14.5], [19, 20.5, 22]]]]).astype(\n np.float32\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n 
name=\"test_averagepool_2d_precomputed_same_upper\",\n)" + }, + { + "summary": "averagepool_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[4, 6], [14, 16]]]]).astype(np.float32)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_averagepool_2d_precomputed_strides\",\n)" + }, + { + "summary": "averagepool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = (pad_top, pad_left, pad_bottom, pad_right)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_same_lower\")" + }, + { + "summary": "averagepool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = (pad_top, pad_left, pad_bottom, pad_right)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_same_upper\")" + }, + { + "summary": "averagepool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n strides=[3, 3],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape[2:], kernel_shape, strides, ceil_mode=False\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_2d_strides\")" + }, + { 
+ "summary": "averagepool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"AVG\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_averagepool_3d_default\")" + }, + { + "summary": "averagepool_3d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=[2, 2, 2],\n ceil_mode=True,\n)\n\n# input shape: [1, 1, 4, 4, 4]\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array([[[[[6, 7], [10, 11]], [[6, 7], [10, 11]]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_averagepool_3d_dilations_small\"\n)" + }, + { + "summary": "averagepool_3d_dilations_large", + "code": "x_shape = (32, 32, 32)\ndilations = (2, 2, 2)\nkernel_shape = (5, 5, 5)\nstrides = (3, 3, 3)\ncount_include_pad = 0\n\nfor count_include_pad in (0, 1):\n for ceil_mode in (True, False):\n node = onnx.helper.make_node(\n \"AveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n dilations=dilations,\n count_include_pad=count_include_pad,\n ceil_mode=ceil_mode,\n )\n\n x = np.random.randn(1, 1, *x_shape).astype(np.float32)\n out_shape, pads = get_output_shape_explicit_padding(\n None,\n x_shape,\n kernel_shape,\n strides,\n dilations=dilations,\n ceil_mode=ceil_mode,\n )\n padded = np.pad(\n x,\n (\n (0, 0),\n (0, 0),\n (pads[0], pads[3]),\n (pads[1], pads[4]),\n (pads[2], pads[5]),\n ),\n mode=\"constant\",\n constant_values=0 if count_include_pad == 1 else np.nan,\n )\n y = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"AVG\",\n pads=pads,\n dilations=dilations,\n count_include_pad=count_include_pad,\n )\n\n test_name = f\"test_averagepool_3d_dilations_large_count_include_pad_is_{count_include_pad}_ceil_mode_is_{ceil_mode}\"\n expect(node, inputs=[x], outputs=[y], name=test_name)" + } + ], + "category": "Pool" + }, + { + "name": "BatchNormalization", + "module": "ai.onnx", + "version": 1, + "description": "Carries out batch normalization as described in the paper\nhttps://arxiv.org/abs/1502.03167. Depending on the mode it is being run,\nthere are multiple cases for the number of outputs, which we list below:\n\nOutput case #1: Y, mean, var, saved_mean, saved_var (training mode)\nOutput case #2: Y (test mode)\n ", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": true, + "description": "legacy optimization attribute." 
+ }, + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero, default is 1e-5f." + }, + { + "name": "is_test", + "type": "int64", + "required": false, + "description": "If set to nonzero, run spatial batch normalization in test mode, default is 0." + }, + { + "name": "momentum", + "type": "float32", + "required": false, + "default": 0.8999999761581421, + "description": "Factor used in computing the running mean and variance, e.g., running_mean = running_mean * momentum + mean * (1 - momentum), default is 0.9f." + }, + { + "name": "spatial", + "type": "int64", + "required": false, + "default": 1, + "description": "If true, compute the mean and variance across all spatial elements. If false, compute the mean and variance per feature. Default is 1." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "The input 4-dimensional tensor of shape NCHW." + }, + { + "name": "scale", + "type": "T", + "description": "The scale as a 1-dimensional tensor of size C to be applied to the output." + }, + { + "name": "B", + "type": "T", + "description": "The bias as a 1-dimensional tensor of size C to be applied to the output." + }, + { + "name": "mean", + "type": "T", + "description": "The running mean (training) or the estimated mean (testing) as a 1-dimensional tensor of size C." + }, + { + "name": "var", + "type": "T", + "description": "The running variance (training) or the estimated variance (testing) as a 1-dimensional tensor of size C." + } + ], + "min_input": 5, + "max_input": 5, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "The output 4-dimensional tensor of the same shape as X." + }, + { + "name": "mean", + "type": "T", + "option": "optional", + "description": "The running mean after the BatchNormalization operator. Must be in-place with the input mean. Should not be used for testing." + }, + { + "name": "var", + "type": "T", + "option": "optional", + "description": "The running variance after the BatchNormalization operator. Must be in-place with the input var. Should not be used for testing." + }, + { + "name": "saved_mean", + "type": "T", + "option": "optional", + "description": "Saved mean used during training to speed up gradient computation. Should not be used for testing." + }, + { + "name": "saved_var", + "type": "T", + "option": "optional", + "description": "Saved variance used during training to speed up gradient computation. Should not be used for testing." 
+ } + ], + "min_output": 1, + "max_output": 5, + "outputs_range": "1 - 5", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "batchnormalization", + "code": "# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ny = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_example\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\nepsilon = 1e-2\ny = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_epsilon\",\n)" + }, + { + "summary": "train", + "code": "# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\n# using np.bool(1) while generating test data with \"'bool' object has no attribute 'dtype'\"\n# working around by using np.byte(1).astype(bool)\ntraining_mode = 1\ny, output_mean, output_var = _batchnorm_training_mode(x, s, bias, mean, var)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_example_training_mode\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ntraining_mode = 1\nmomentum = 0.9\nepsilon = 1e-2\ny, output_mean, output_var = _batchnorm_training_mode(\n x, s, bias, mean, var, momentum, epsilon\n)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n epsilon=epsilon,\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_epsilon_training_mode\",\n)" + } + ], + "category": "Normalization" + }, + { + "name": "BatchNormalization", + "module": "ai.onnx", + "version": 6, + "description": "Carries out batch normalization as described in the paper\nhttps://arxiv.org/abs/1502.03167. 
Depending on the mode it is being run,\nthere are multiple cases for the number of outputs, which we list below:\n\nOutput case #1: Y, mean, var, saved_mean, saved_var (training mode)\nOutput case #2: Y (test mode)\n", + "attributes": [ + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero, default is 1e-5f." + }, + { + "name": "is_test", + "type": "int64", + "required": false, + "description": "If set to nonzero, run spatial batch normalization in test mode, default is 0." + }, + { + "name": "momentum", + "type": "float32", + "required": false, + "default": 0.8999999761581421, + "description": "Factor used in computing the running mean and variance, e.g., running_mean = running_mean * momentum + mean * (1 - momentum), default is 0.9f." + }, + { + "name": "spatial", + "type": "int64", + "required": false, + "default": 1, + "description": "If true, compute the mean and variance across all spatial elements. If false, compute the mean and variance per feature. Default is 1." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size." + }, + { + "name": "scale", + "type": "T", + "description": "The scale as a 1-dimensional tensor of size C to be applied to the output." + }, + { + "name": "B", + "type": "T", + "description": "The bias as a 1-dimensional tensor of size C to be applied to the output." + }, + { + "name": "mean", + "type": "T", + "description": "The running mean (training) or the estimated mean (testing) as a 1-dimensional tensor of size C." + }, + { + "name": "var", + "type": "T", + "description": "The running variance (training) or the estimated variance (testing) as a 1-dimensional tensor of size C." + } + ], + "min_input": 5, + "max_input": 5, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "The output tensor of the same shape as X." + }, + { + "name": "mean", + "type": "T", + "option": "optional", + "description": "The running mean after the BatchNormalization operator. Must be in-place with the input mean. Should not be used for testing." + }, + { + "name": "var", + "type": "T", + "option": "optional", + "description": "The running variance after the BatchNormalization operator. Must be in-place with the input var. Should not be used for testing." + }, + { + "name": "saved_mean", + "type": "T", + "option": "optional", + "description": "Saved mean used during training to speed up gradient computation. Should not be used for testing." + }, + { + "name": "saved_var", + "type": "T", + "option": "optional", + "description": "Saved variance used during training to speed up gradient computation. Should not be used for testing." 
+ } + ], + "min_output": 1, + "max_output": 5, + "outputs_range": "1 - 5", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "batchnormalization", + "code": "# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ny = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_example\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\nepsilon = 1e-2\ny = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_epsilon\",\n)" + }, + { + "summary": "train", + "code": "# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\n# using np.bool(1) while generating test data with \"'bool' object has no attribute 'dtype'\"\n# working around by using np.byte(1).astype(bool)\ntraining_mode = 1\ny, output_mean, output_var = _batchnorm_training_mode(x, s, bias, mean, var)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_example_training_mode\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ntraining_mode = 1\nmomentum = 0.9\nepsilon = 1e-2\ny, output_mean, output_var = _batchnorm_training_mode(\n x, s, bias, mean, var, momentum, epsilon\n)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n epsilon=epsilon,\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_epsilon_training_mode\",\n)" + } + ], + "category": "Normalization" + }, + { + "name": "BatchNormalization", + "module": "ai.onnx", + "version": 7, + "description": "Carries out batch normalization as described in the paper\n 
https://arxiv.org/abs/1502.03167. Depending on the mode it is being run,\n there are multiple cases for the number of outputs, which we list below:\n\n Output case #1: Y, mean, var, saved_mean, saved_var (training mode)\n Output case #2: Y (test mode)\n This operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero." + }, + { + "name": "momentum", + "type": "float32", + "required": false, + "default": 0.8999999761581421, + "description": "Factor used in computing the running mean and variance, e.g., running_mean = running_mean * momentum + mean * (1 - momentum)." + }, + { + "name": "spatial", + "type": "int64", + "required": false, + "default": 1, + "description": "If true, compute the mean and variance per activation. If false, compute the mean and variance per feature over each mini-batch." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size." + }, + { + "name": "scale", + "type": "T", + "description": "If spatial is true, the dimension of scale is (C). If spatial is false, the dimensions of scale are (C x D1 x ... x Dn)" + }, + { + "name": "B", + "type": "T", + "description": "If spatial is true, the dimension of bias is (C). If spatial is false, the dimensions of bias are (C x D1 x ... x Dn)" + }, + { + "name": "mean", + "type": "T", + "description": "If spatial is true, the dimension of the running mean (training) or the estimated mean (testing) is (C). If spatial is false, the dimensions of the running mean (training) or the estimated mean (testing) are (C x D1 x ... x Dn)." + }, + { + "name": "var", + "type": "T", + "description": "If spatial is true, the dimension of the running variance (training) or the estimated variance (testing) is (C). If spatial is false, the dimensions of the running variance (training) or the estimated variance (testing) are (C x D1 x ... x Dn)." + } + ], + "min_input": 5, + "max_input": 5, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "The output tensor of the same shape as X" + }, + { + "name": "mean", + "type": "T", + "option": "optional", + "description": "The running mean after the BatchNormalization operator." + }, + { + "name": "var", + "type": "T", + "option": "optional", + "description": "The running variance after the BatchNormalization operator." + }, + { + "name": "saved_mean", + "type": "T", + "option": "optional", + "description": "Saved mean used during training to speed up gradient computation." + }, + { + "name": "saved_var", + "type": "T", + "option": "optional", + "description": "Saved variance used during training to speed up gradient computation." 
+ } + ], + "min_output": 1, + "max_output": 5, + "outputs_range": "1 - 5", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "batchnormalization", + "code": "# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ny = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_example\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\nepsilon = 1e-2\ny = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_epsilon\",\n)" + }, + { + "summary": "train", + "code": "# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\n# using np.bool(1) while generating test data with \"'bool' object has no attribute 'dtype'\"\n# working around by using np.byte(1).astype(bool)\ntraining_mode = 1\ny, output_mean, output_var = _batchnorm_training_mode(x, s, bias, mean, var)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_example_training_mode\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ntraining_mode = 1\nmomentum = 0.9\nepsilon = 1e-2\ny, output_mean, output_var = _batchnorm_training_mode(\n x, s, bias, mean, var, momentum, epsilon\n)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n epsilon=epsilon,\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_epsilon_training_mode\",\n)" + } + ], + "category": "Normalization" + }, + { + "name": "BatchNormalization", + "module": "ai.onnx", + "version": 9, + "description": "Carries out batch normalization as described in the paper\nhttps://arxiv.org/abs/1502.03167. 
Depending on the mode it is being run in,\nthere are multiple cases for the number of outputs, which we list below:\n\nOutput case #1: Y, mean, var, saved_mean, saved_var (training mode)\nOutput case #2: Y (test mode)\n\nFor previous (deprecated) non-spatial cases, implementors are suggested\nto flatten the input shape to (N x C*D1*D2*...*Dn) before a BatchNormalization Op.\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero." + }, + { + "name": "momentum", + "type": "float32", + "required": false, + "default": 0.8999999761581421, + "description": "Factor used in computing the running mean and variance, e.g., running_mean = running_mean * momentum + mean * (1 - momentum)." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N, in which case C is assumed to be 1." + }, + { + "name": "scale", + "type": "T", + "description": "Scale tensor of shape (C)." + }, + { + "name": "B", + "type": "T", + "description": "Bias tensor of shape (C)." + }, + { + "name": "mean", + "type": "T", + "description": "running (training) or estimated (testing) mean tensor of shape (C)." + }, + { + "name": "var", + "type": "T", + "description": "running (training) or estimated (testing) variance tensor of shape (C)." + } + ], + "min_input": 5, + "max_input": 5, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "The output tensor, of the same shape as X." + }, + { + "name": "mean", + "type": "T", + "option": "optional", + "description": "The running mean after the BatchNormalization operator." + }, + { + "name": "var", + "type": "T", + "option": "optional", + "description": "The running variance after the BatchNormalization operator." + }, + { + "name": "saved_mean", + "type": "T", + "option": "optional", + "description": "Saved mean used during training to speed up gradient computation." + }, + { + "name": "saved_var", + "type": "T", + "option": "optional", + "description": "Saved variance used during training to speed up gradient computation."
+ } + ], + "min_output": 1, + "max_output": 5, + "outputs_range": "1 - 5", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "batchnormalization", + "code": "# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ny = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_example\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\nepsilon = 1e-2\ny = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_epsilon\",\n)" + }, + { + "summary": "train", + "code": "# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\n# using np.bool(1) while generating test data with \"'bool' object has no attribute 'dtype'\"\n# working around by using np.byte(1).astype(bool)\ntraining_mode = 1\ny, output_mean, output_var = _batchnorm_training_mode(x, s, bias, mean, var)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_example_training_mode\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ntraining_mode = 1\nmomentum = 0.9\nepsilon = 1e-2\ny, output_mean, output_var = _batchnorm_training_mode(\n x, s, bias, mean, var, momentum, epsilon\n)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n epsilon=epsilon,\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_epsilon_training_mode\",\n)" + } + ], + "category": "Normalization" + }, + { + "name": "BatchNormalization", + "module": "ai.onnx", + "version": 14, + "description": "Carries out batch normalization as described in the 
paper\nhttps://arxiv.org/abs/1502.03167.\nThere are five required inputs 'X', 'scale', 'B', 'input_mean' and\n'input_var'.\nNote that 'input_mean' and 'input_var' are expected to be the estimated\nstatistics in inference mode (training_mode=False, default),\nand the running statistics in training mode (training_mode=True).\nThere are multiple cases for the number of outputs, which we list below:\n\nOutput case #1: Y, running_mean, running_var (training_mode=True)\nOutput case #2: Y (training_mode=False)\n\nWhen training_mode=False, extra outputs are invalid.\nThe outputs are updated as follows when training_mode=True:\n```\nrunning_mean = input_mean * momentum + current_mean * (1 - momentum)\nrunning_var = input_var * momentum + current_var * (1 - momentum)\n\nY = (X - current_mean) / sqrt(current_var + epsilon) * scale + B\n\nwhere:\n\ncurrent_mean = ReduceMean(X, axis=all_except_channel_index)\ncurrent_var = ReduceVar(X, axis=all_except_channel_index)\n\nNotice that ReduceVar refers to the population variance, and it equals\nsum(sqrd(x_i - x_avg)) / N\nwhere N is the population size (this formula does not use sample size N - 1).\n\n```\n\nWhen training_mode=False:\n```\nY = (X - input_mean) / sqrt(input_var + epsilon) * scale + B\n```\n\nFor previous (deprecated) non-spatial cases, implementors are suggested\nto flatten the input shape to (N x C * D1 * D2 * ... * Dn) before a BatchNormalization Op.\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero." + }, + { + "name": "momentum", + "type": "float32", + "required": false, + "default": 0.8999999761581421, + "description": "Factor used in computing the running mean and variance, e.g., running_mean = running_mean * momentum + mean * (1 - momentum)." + }, + { + "name": "training_mode", + "type": "int64", + "required": false, + "description": "If set to true, it indicates BatchNormalization is being used for training, and the optional outputs running_mean and running_var would be populated." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N, in which case C is assumed to be 1." + }, + { + "name": "scale", + "type": "T", + "description": "Scale tensor of shape (C)." + }, + { + "name": "B", + "type": "T", + "description": "Bias tensor of shape (C)." + }, + { + "name": "input_mean", + "type": "U", + "description": "running (training) or estimated (testing) mean tensor of shape (C)." + }, + { + "name": "input_var", + "type": "U", + "description": "running (training) or estimated (testing) variance tensor of shape (C)."
+ } + ], + "min_input": 5, + "max_input": 5, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "The output tensor of the same shape as X" + }, + { + "name": "running_mean", + "type": "U", + "option": "optional", + "description": "The running mean after the BatchNormalization operator." + }, + { + "name": "running_var", + "type": "U", + "option": "optional", + "description": "The running variance after the BatchNormalization operator. This op uses the population size (N) for calculating variance, and not the sample size N-1." + } + ], + "min_output": 1, + "max_output": 3, + "outputs_range": "1 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain mean and variance types to float tensors. It allows all float type for U.", + "type_param_str": "U", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "batchnormalization", + "code": "# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ny = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_example\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\nepsilon = 1e-2\ny = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_epsilon\",\n)" + }, + { + "summary": "train", + "code": "# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\n# using np.bool(1) while generating test data with \"'bool' object has no attribute 'dtype'\"\n# working around by using np.byte(1).astype(bool)\ntraining_mode = 1\ny, output_mean, output_var = _batchnorm_training_mode(x, s, bias, mean, var)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_example_training_mode\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = 
np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ntraining_mode = 1\nmomentum = 0.9\nepsilon = 1e-2\ny, output_mean, output_var = _batchnorm_training_mode(\n x, s, bias, mean, var, momentum, epsilon\n)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n epsilon=epsilon,\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_epsilon_training_mode\",\n)" + } + ], + "category": "Normalization" + }, + { + "name": "BatchNormalization", + "module": "ai.onnx", + "version": 15, + "description": "Carries out batch normalization as described in the paper\nhttps://arxiv.org/abs/1502.03167.\nThere are five required inputs 'X', 'scale', 'B', 'input_mean' and\n'input_var'.\nNote that 'input_mean' and 'input_var' are expected to be the estimated\nstatistics in inference mode (training_mode=False, default),\nand the running statistics in training mode (training_mode=True).\nThere are multiple cases for the number of outputs, which we list below:\n\n* Output case #1: Y, running_mean, running_var (training_mode=True)\n* Output case #2: Y (training_mode=False)\n\nWhen training_mode=False, extra outputs are invalid.\nThe outputs are updated as follows when training_mode=True:\n```\nrunning_mean = input_mean * momentum + current_mean * (1 - momentum)\nrunning_var = input_var * momentum + current_var * (1 - momentum)\n\nY = (X - current_mean) / sqrt(current_var + epsilon) * scale + B\n```\nwhere:\n```\ncurrent_mean = ReduceMean(X, axis=all_except_channel_index)\ncurrent_var = ReduceVar(X, axis=all_except_channel_index)\n```\nNotice that `ReduceVar` refers to the population variance, and it equals\n`sum(sqrd(x_i - x_avg)) / N`\nwhere `N` is the population size (this formula does not use sample size `N - 1`).\n\nThe computation of ReduceMean and ReduceVar uses float to avoid overflow for float16 inputs.\n\nWhen training_mode=False:\n```\nY = (X - input_mean) / sqrt(input_var + epsilon) * scale + B\n```\n\nFor previous (deprecated) non-spatial cases, implementors are suggested\nto flatten the input shape to (N x C * D1 * D2 * ... * Dn) before a BatchNormalization Op.\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero." + }, + { + "name": "momentum", + "type": "float32", + "required": false, + "default": 0.8999999761581421, + "description": "Factor used in computing the running mean and variance, e.g., running_mean = running_mean * momentum + mean * (1 - momentum)." + }, + { + "name": "training_mode", + "type": "int64", + "required": false, + "description": "If set to true, it indicates BatchNormalization is being used for training, and the optional outputs running_mean and running_var would be populated."
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N in which case C is assumed to be 1" + }, + { + "name": "scale", + "type": "T1", + "description": "Scale tensor of shape (C)." + }, + { + "name": "B", + "type": "T1", + "description": "Bias tensor of shape (C)." + }, + { + "name": "input_mean", + "type": "T2", + "description": "running (training) or estimated (testing) mean tensor of shape (C)." + }, + { + "name": "input_var", + "type": "T2", + "description": "running (training) or estimated (testing) variance tensor of shape (C)." + } + ], + "min_input": 5, + "max_input": 5, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "The output tensor of the same shape as X" + }, + { + "name": "running_mean", + "type": "T2", + "option": "optional", + "description": "The running mean after the BatchNormalization operator." + }, + { + "name": "running_var", + "type": "T2", + "option": "optional", + "description": "The running variance after the BatchNormalization operator. This op uses the population size (N) for calculating variance, and not the sample size N-1." + } + ], + "min_output": 1, + "max_output": 3, + "outputs_range": "1 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain scale and bias types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain mean and variance types to float tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "batchnormalization", + "code": "# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ny = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_example\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\nepsilon = 1e-2\ny = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y],\n name=\"test_batchnorm_epsilon\",\n)" + }, + { + "summary": "train", + "code": 
"# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\n# using np.bool(1) while generating test data with \"'bool' object has no attribute 'dtype'\"\n# working around by using np.byte(1).astype(bool)\ntraining_mode = 1\ny, output_mean, output_var = _batchnorm_training_mode(x, s, bias, mean, var)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_example_training_mode\",\n)\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\ntraining_mode = 1\nmomentum = 0.9\nepsilon = 1e-2\ny, output_mean, output_var = _batchnorm_training_mode(\n x, s, bias, mean, var, momentum, epsilon\n)\n\nnode = onnx.helper.make_node(\n \"BatchNormalization\",\n inputs=[\"x\", \"s\", \"bias\", \"mean\", \"var\"],\n outputs=[\"y\", \"output_mean\", \"output_var\"],\n epsilon=epsilon,\n training_mode=training_mode,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(\n node,\n inputs=[x, s, bias, mean, var],\n outputs=[y, output_mean, output_var],\n name=\"test_batchnorm_epsilon_training_mode\",\n)" + } + ], + "category": "Normalization" + }, + { + "name": "Bernoulli", + "module": "ai.onnx", + "version": 15, + "description": "Draws binary random numbers (0 or 1) from a Bernoulli distribution. The input tensor should be a tensor\ncontaining probabilities p (a value in the range [0,1]) to be used for drawing the binary random number,\nwhere an output of 1 is produced with probability p and an output of 0 is produced with probability (1-p).\n\nThis operator is non-deterministic and may not produce the same values in different\nimplementations (even if a seed is specified).\n", + "attributes": [ + { + "name": "dtype", + "type": "int64", + "required": false, + "description": "The data type for the elements of the output tensor. if not specified, we will use the data type of the input tensor." + }, + { + "name": "seed", + "type": "float32", + "required": false, + "description": "(Optional) Seed to the random generator, if not specified we will auto generate one." + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "All values in input have to be in the range:[0, 1]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "The returned output tensor only has values 0 or 1, same shape as input tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output types to all numeric tensors and bool tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "bernoulli_with_dtype", + "code": "node = onnx.helper.make_node(\n \"Bernoulli\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n dtype=onnx.TensorProto.DOUBLE,\n)\n\nx = np.random.uniform(0.0, 1.0, 10).astype(np.float32)\ny = bernoulli_reference_implementation(x, float)\nexpect(node, inputs=[x], outputs=[y], name=\"test_bernoulli_double\")" + }, + { + "summary": "bernoulli_with_seed", + "code": "seed = float(0)\nnode = onnx.helper.make_node(\n \"Bernoulli\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n seed=seed,\n)\n\nx = np.random.uniform(0.0, 1.0, 10).astype(np.float32)\ny = bernoulli_reference_implementation(x, np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_bernoulli_seed\")" + }, + { + "summary": "bernoulli_without_dtype", + "code": "node = onnx.helper.make_node(\n \"Bernoulli\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.random.uniform(0.0, 1.0, 10).astype(float)\ny = bernoulli_reference_implementation(x, float)\nexpect(node, inputs=[x], outputs=[y], name=\"test_bernoulli\")" + } + ] + }, + { + "name": "Binarizer", + "module": "ai.onnx.ml", + "version": 1, + "description": "Maps the values of the input tensor to either 0 or 1, element-wise, based on the outcome of a comparison against a threshold value.\n", + "attributes": [ + { + "name": "threshold", + "type": "float32", + "required": false, + "description": "Values greater than this are mapped to 1, others to 0." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Data to be binarized" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Binarized output data" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input must be a tensor of a numeric type. The output will be of the same tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "binarizer", + "code": "threshold = 1.0\nnode = onnx.helper.make_node(\n \"Binarizer\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n threshold=threshold,\n domain=\"ai.onnx.ml\",\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = compute_binarizer(x, threshold)[0]\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_ai_onnx_ml_binarizer\")" + } + ] + }, + { + "name": "BitShift", + "module": "ai.onnx", + "version": 11, + "description": "Bitwise shift operator performs element-wise operation. For each input element, if the\nattribute \"direction\" is \"RIGHT\", this operator moves its binary representation toward\nthe right side so that the input value is effectively decreased. If the attribute \"direction\"\nis \"LEFT\", bits of binary representation moves toward the left side, which results the\nincrease of its actual value. 
The input X is the tensor to be shifted and another input\nY specifies the amounts of shifting. For example, if \"direction\" is \"RIGHT\", X is [1, 4],\nand Y is [1, 1], the corresponding output Z would be [0, 2]. If \"direction\" is \"LEFT\" with\nX=[1, 2] and Y=[1, 2], the corresponding output Z would be [2, 8].\n\nBecause this operator supports Numpy-style broadcasting, X's and Y's shapes are\nnot necessarily identical.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "attributes": [ + { + "name": "direction", + "type": "string", + "required": true, + "description": "Direction of moving bits. It can be either \"RIGHT\" (for right shift) or \"LEFT\" (for left shift)." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "First operand, input to be shifted." + }, + { + "name": "Y", + "type": "T", + "description": "Second operand, amounts of shift." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Z", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to integer tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)" + ] + } + ], + "examples": [ + { + "summary": "left_unit16", + "code": "node = onnx.helper.make_node(\n \"BitShift\", inputs=[\"x\", \"y\"], outputs=[\"z\"], direction=\"LEFT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint16)\ny = np.array([1, 2, 3]).astype(np.uint16)\nz = x << y # expected output [32, 16, 8]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitshift_left_uint16\")" + }, + { + "summary": "left_unit32", + "code": "node = onnx.helper.make_node(\n \"BitShift\", inputs=[\"x\", \"y\"], outputs=[\"z\"], direction=\"LEFT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint32)\ny = np.array([1, 2, 3]).astype(np.uint32)\nz = x << y # expected output [32, 16, 8]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitshift_left_uint32\")" + }, + { + "summary": "left_unit64", + "code": "node = onnx.helper.make_node(\n \"BitShift\", inputs=[\"x\", \"y\"], outputs=[\"z\"], direction=\"LEFT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint64)\ny = np.array([1, 2, 3]).astype(np.uint64)\nz = x << y # expected output [32, 16, 8]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitshift_left_uint64\")" + }, + { + "summary": "left_unit8", + "code": "node = onnx.helper.make_node(\n \"BitShift\", inputs=[\"x\", \"y\"], outputs=[\"z\"], direction=\"LEFT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint8)\ny = np.array([1, 2, 3]).astype(np.uint8)\nz = x << y # expected output [32, 16, 8]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitshift_left_uint8\")" + }, + { + "summary": "right_unit16", + "code": "node = onnx.helper.make_node(\n \"BitShift\", inputs=[\"x\", \"y\"], outputs=[\"z\"], direction=\"RIGHT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint16)\ny = np.array([1, 2, 3]).astype(np.uint16)\nz = x >> y # expected output [8, 1, 0]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitshift_right_uint16\")" + }, + { + "summary": "right_unit32", + "code": "node = onnx.helper.make_node(\n \"BitShift\", inputs=[\"x\", \"y\"], outputs=[\"z\"], direction=\"RIGHT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint32)\ny = np.array([1, 2, 3]).astype(np.uint32)\nz = x >> y # expected output
[8, 1, 0]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitshift_right_uint32\")" + }, + { + "summary": "right_unit64", + "code": "node = onnx.helper.make_node(\n \"BitShift\", inputs=[\"x\", \"y\"], outputs=[\"z\"], direction=\"RIGHT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint64)\ny = np.array([1, 2, 3]).astype(np.uint64)\nz = x >> y # expected output [8, 1, 0]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitshift_right_uint64\")" + }, + { + "summary": "right_unit8", + "code": "node = onnx.helper.make_node(\n \"BitShift\", inputs=[\"x\", \"y\"], outputs=[\"z\"], direction=\"RIGHT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint8)\ny = np.array([1, 2, 3]).astype(np.uint8)\nz = x >> y # expected output [8, 1, 0]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitshift_right_uint8\")" + } + ] + }, + { + "name": "BitwiseAnd", + "module": "ai.onnx", + "version": 18, + "description": "Returns the tensor resulting from performing the bitwise `and` operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the bitwise operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the bitwise operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to integer tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "bitwiseand", + "code": "node = onnx.helper.make_node(\n \"BitwiseAnd\",\n inputs=[\"x\", \"y\"],\n outputs=[\"bitwiseand\"],\n)\n\n# 2d\nx = create_random_int((3, 4), np.int32)\ny = create_random_int((3, 4), np.int32)\nz = np.bitwise_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitwise_and_i32_2d\")\n\n# 3d\nx = create_random_int((3, 4, 5), np.int16)\ny = create_random_int((3, 4, 5), np.int16)\nz = np.bitwise_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitwise_and_i16_3d\")" + }, + { + "summary": "bitwiseand_broadcast", + "code": "node = onnx.helper.make_node(\n \"BitwiseAnd\",\n inputs=[\"x\", \"y\"],\n outputs=[\"bitwiseand\"],\n)\n\n# 3d vs 1d\nx = create_random_int((3, 4, 5), np.uint64)\ny = create_random_int((5,), np.uint64)\nz = np.bitwise_and(x, y)\nexpect(\n node, inputs=[x, y], outputs=[z], name=\"test_bitwise_and_ui64_bcast_3v1d\"\n)\n\n# 4d vs 3d\nx = create_random_int((3, 4, 5, 6), np.uint8)\ny = create_random_int((4, 5, 6), np.uint8)\nz = np.bitwise_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitwise_and_ui8_bcast_4v3d\")" + } + ] + }, + { + "name": "BitwiseNot", + "module": "ai.onnx", + "version": 18, + "description": "Returns the bitwise not of the input tensor element-wise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + 
"description": "Constrain input/output to integer tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "bitwisenot", + "code": "node = onnx.helper.make_node(\n \"BitwiseNot\",\n inputs=[\"x\"],\n outputs=[\"bitwise_not\"],\n)\n\n# 2d\nx = create_random_int((3, 4), np.int32)\ny = np.bitwise_not(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_bitwise_not_2d\")\n\n# 3d\nx = create_random_int((3, 4, 5), np.uint16)\ny = np.bitwise_not(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_bitwise_not_3d\")\n\n# 4d\nx = create_random_int((3, 4, 5, 6), np.uint8)\ny = np.bitwise_not(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_bitwise_not_4d\")" + } + ] + }, + { + "name": "BitwiseOr", + "module": "ai.onnx", + "version": 18, + "description": "Returns the tensor resulting from performing the bitwise `or` operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the bitwise operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the bitwise operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to integer tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "bitwiseor", + "code": "node = onnx.helper.make_node(\n \"BitwiseOr\",\n inputs=[\"x\", \"y\"],\n outputs=[\"bitwiseor\"],\n)\n# 2d\nx = create_random_int((3, 4), np.int32)\ny = create_random_int((3, 4), np.int32)\nz = np.bitwise_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitwise_or_i32_2d\")\n\n# 4d\nx = create_random_int((3, 4, 5, 6), np.int8)\ny = create_random_int((3, 4, 5, 6), np.int8)\nz = np.bitwise_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitwise_or_i16_4d\")" + }, + { + "summary": "bitwiseor_broadcast", + "code": "node = onnx.helper.make_node(\n \"BitwiseOr\",\n inputs=[\"x\", \"y\"],\n outputs=[\"bitwiseor\"],\n)\n\n# 3d vs 1d\nx = create_random_int((3, 4, 5), np.uint64)\ny = create_random_int((5,), np.uint64)\nz = np.bitwise_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitwise_or_ui64_bcast_3v1d\")\n\n# 4d vs 3d\nx = create_random_int((3, 4, 5, 6), np.uint8)\ny = create_random_int((4, 5, 6), np.uint8)\nz = np.bitwise_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitwise_or_ui8_bcast_4v3d\")" + } + ] + }, + { + "name": "BitwiseXor", + "module": "ai.onnx", + "version": 18, + "description": "Returns the tensor resulting from performing the bitwise `xor` operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the 
doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the bitwise operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the bitwise operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to integer tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "bitwiseor_broadcast", + "code": "node = onnx.helper.make_node(\n \"BitwiseXor\",\n inputs=[\"x\", \"y\"],\n outputs=[\"bitwisexor\"],\n)\n\n# 3d vs 1d\nx = create_random_int((3, 4, 5), np.uint64)\ny = create_random_int((5,), np.uint64)\nz = np.bitwise_xor(x, y)\nexpect(\n node, inputs=[x, y], outputs=[z], name=\"test_bitwise_xor_ui64_bcast_3v1d\"\n)\n\n# 4d vs 3d\nx = create_random_int((3, 4, 5, 6), np.uint8)\ny = create_random_int((4, 5, 6), np.uint8)\nz = np.bitwise_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitwise_xor_ui8_bcast_4v3d\")" + }, + { + "summary": "bitwisexor", + "code": "node = onnx.helper.make_node(\n \"BitwiseXor\",\n inputs=[\"x\", \"y\"],\n outputs=[\"bitwisexor\"],\n)\n\n# 2d\nx = create_random_int((3, 4), np.int32)\ny = create_random_int((3, 4), np.int32)\nz = np.bitwise_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitwise_xor_i32_2d\")\n\n# 3d\nx = create_random_int((3, 4, 5), np.int16)\ny = create_random_int((3, 4, 5), np.int16)\nz = np.bitwise_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_bitwise_xor_i16_3d\")" + } + ] + }, + { + "name": "BlackmanWindow", + "module": "ai.onnx", + "version": 17, + "description": "Generates a Blackman window as described in the paper https://ieeexplore.ieee.org/document/1455106.\n", + "attributes": [ + { + "name": "output_datatype", + "type": "int64", + "required": false, + "default": 1, + "description": "The data type of the output tensor. Strictly must be one of the values from DataType enum in TensorProto whose values correspond to T2. The default value is 1 = FLOAT. " + }, + { + "name": "periodic", + "type": "int64", + "required": false, + "default": 1, + "description": "If 1, returns a window to be used as a periodic function. If 0, returns a symmetric window. When 'periodic' is specified, the operator computes a window of length size + 1 and returns the first size points. The default value is 1. " + } + ], + "inputs": [ + { + "name": "size", + "type": "T1", + "description": "A scalar value indicating the length of the window." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "A Blackman window with length: size. The output has the shape: [size]."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain the input size to int64_t.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + }, + { + "description": "Constrain output types to numeric tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "blackmanwindow", + "code": "# Test periodic window\nnode = onnx.helper.make_node(\n \"BlackmanWindow\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nsize = np.int32(10)\na0 = 0.42\na1 = -0.5\na2 = 0.08\ny = a0\ny += a1 * np.cos(2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size)\ny += a2 * np.cos(4 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size)\nexpect(node, inputs=[size], outputs=[y], name=\"test_blackmanwindow\")\n\n# Test symmetric window\nnode = onnx.helper.make_node(\n \"BlackmanWindow\", inputs=[\"x\"], outputs=[\"y\"], periodic=0\n)\nsize = np.int32(10)\na0 = 0.42\na1 = -0.5\na2 = 0.08\ny = a0\ny += a1 * np.cos(\n 2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1)\n)\ny += a2 * np.cos(\n 4 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1)\n)\nexpect(node, inputs=[size], outputs=[y], name=\"test_blackmanwindow_symmetric\")" + } + ] + }, + { + "name": "Cast", + "module": "ai.onnx", + "version": 1, + "description": "The operator casts the elements of a given input tensor to a data type\nspecified by the 'to' argument and returns an output tensor of the same size in\nthe converted type. The 'to' argument must be one of the data types specified\nin the 'DataType' enum field in the TensorProto message.\nNOTE: Casting to and from strings is not supported yet.\n", + "attributes": [ + { + "name": "to", + "type": "DataType", + "required": true, + "description": "The data type to which the elements of the input tensor are cast. Strictly must be one of the types from DataType enum in TensorProto" + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor to be cast." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor with the same shape as input with type specified by the 'to' argument" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types. Casting from strings and complex are not supported.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ] + }, + { + "description": "Constrain output types. 
Casting to strings and complex are not supported.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "cast", + "code": "shape = (3, 4)\ntest_cases = [\n (\"FLOAT\", \"FLOAT16\"),\n (\"FLOAT\", \"DOUBLE\"),\n (\"FLOAT16\", \"FLOAT\"),\n (\"FLOAT16\", \"DOUBLE\"),\n (\"DOUBLE\", \"FLOAT\"),\n (\"DOUBLE\", \"FLOAT16\"),\n (\"FLOAT\", \"STRING\"),\n (\"STRING\", \"FLOAT\"),\n (\"FLOAT\", \"BFLOAT16\"),\n (\"BFLOAT16\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT8E4M3FN\", \"FLOAT\"),\n (\"FLOAT8E4M3FN\", \"FLOAT16\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT16\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT8E5M2\", \"FLOAT\"),\n (\"FLOAT8E5M2\", \"FLOAT16\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"UINT4\"),\n (\"FLOAT16\", \"UINT4\"),\n (\"FLOAT\", \"INT4\"),\n (\"FLOAT16\", \"INT4\"),\n (\"UINT4\", \"FLOAT\"),\n (\"UINT4\", \"FLOAT16\"),\n (\"UINT4\", \"UINT8\"),\n (\"INT4\", \"FLOAT\"),\n (\"INT4\", \"FLOAT16\"),\n (\"INT4\", \"INT8\"),\n]\n\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\nvect_float32_to_uint4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=False)\n)\nvect_float32_to_int4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=True)\n)\n\nf8_types = (\"FLOAT8E4M3FN\", \"FLOAT8E4M3FNUZ\", \"FLOAT8E5M2\", \"FLOAT8E5M2FNUZ\")\n\nfor from_type, to_type in test_cases:\n input_type_proto = None\n output_type_proto = None\n if from_type == \"BFLOAT16\" or to_type == \"BFLOAT16\":\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n little_endisan = sys.byteorder == \"little\"\n np_uint16_view = np_fp32.view(dtype=np.uint16)\n np_bfp16 = (\n np_uint16_view[1::2] if little_endisan else np_uint16_view[0::2]\n )\n if to_type == \"BFLOAT16\":\n assert from_type == \"FLOAT\"\n input = np_fp32.reshape([3, 4])\n output = np_bfp16.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), output.shape\n )\n else:\n assert to_type == \"FLOAT\"\n input = np_bfp16.reshape([3, 4])\n # convert bfloat to FLOAT\n np_fp32_zeros = np.zeros((len(np_bfp16) * 2,), dtype=np.uint16)\n if little_endisan:\n np_fp32_zeros[1::2] = np_bfp16\n else:\n np_fp32_zeros[0::2] = np_bfp16\n np_fp32_from_bfloat = np_fp32_zeros.view(dtype=np.float32)\n output = np_fp32_from_bfloat.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), output.shape\n )\n elif from_type in f8_types or to_type in f8_types:\n np_fp32 = np.array(\n [\n \"0.47892547\",\n 
\"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.7229038\",\n \"1000000\",\n \"1e-7\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n \"-0.0000001\",\n \"0.0000001\",\n \"-1000000\",\n ],\n dtype=np.float32,\n )\n\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16).astype(np.float32)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FN\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FNUZ\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32, uz=True), uz=True\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2FNUZ\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [3, 5], input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n\n if to_type == \"FLOAT8E4M3FN\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values)\n )\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values, uz=True), uz=True\n )\n elif to_type == \"FLOAT8E5M2\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values)\n )\n elif to_type == \"FLOAT8E5M2FNUZ\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16).astype(np.float32)\n elif to_type == \"FLOAT\":\n expected = input_values\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"x\", getattr(TensorProto, to_type), [3, 5], expected.tolist()\n )\n output = expected_tensor\n elif from_type in (\"UINT4\", \"INT4\") or to_type in (\"UINT4\", \"INT4\"):\n np_fp32 = np.arange(-9, 16).astype(np.float32)\n input_shape = (5, 5)\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, input_shape, input_values.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, input_shape, input_values.tolist()\n )\n elif from_type == \"UINT4\":\n input_values = vect_float32_to_uint4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.UINT4, input_shape, input_values.tolist()\n )\n elif from_type == \"INT4\":\n input_values = vect_float32_to_int4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.INT4, input_shape, input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n if to_type == \"UINT4\":\n expected = vect_float32_to_uint4(input_values).astype(custom.uint4)\n elif to_type == \"INT4\":\n expected = 
vect_float32_to_int4(input_values).astype(custom.int4)\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16)\n elif to_type == \"FLOAT\":\n expected = input_values\n elif to_type == \"UINT8\":\n expected = input_values.astype(np.uint8)\n elif to_type == \"INT8\":\n expected = input_values.astype(np.int8)\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"y\", getattr(TensorProto, to_type), input_shape, expected.tolist()\n )\n output = expected_tensor\n input_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, from_type), input_shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, to_type), input_shape\n )\n\n elif from_type != \"STRING\":\n input = np.random.random_sample(shape).astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, from_type))\n )\n if to_type == \"STRING\":\n # Converting input to str, then give it object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode(\"utf-8\")\n su = s.decode(\"utf-8\")\n ss.append(su)\n\n output = np.array(ss).astype(object).reshape([3, 4])\n else:\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n else:\n input = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.dtype(object),\n ).reshape([3, 4])\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n node = onnx.helper.make_node(\n \"Cast\",\n inputs=[\"input\"],\n outputs=[\"output\"],\n to=getattr(TensorProto, to_type),\n )\n if input_type_proto and output_type_proto:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" + to_type,\n input_type_protos=[input_type_proto],\n output_type_protos=[output_type_proto],\n )\n else:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" + to_type,\n )" + }, + { + "summary": "saturate_false", + "code": "test_cases = [\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT16\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n]\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\n\nfor from_type, to_type in test_cases:\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.7229038\",\n \"1000000\",\n \"1e-7\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n \"-0.0000001\",\n \"0.0000001\",\n \"-1000000\",\n ],\n dtype=np.float32,\n )\n\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist())\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16).astype(np.float32)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n\n if to_type == \"FLOAT8E4M3FN\":\n expected = vect_float32_to_float8e4m3(input_values, saturate=False)\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = 
vect_float32_to_float8e4m3(\n input_values, uz=True, saturate=False\n )\n elif to_type == \"FLOAT8E5M2\":\n expected = vect_float32_to_float8e5m2(input_values, saturate=False)\n elif to_type == \"FLOAT8E5M2FNUZ\":\n expected = vect_float32_to_float8e5m2(\n input_values, fn=True, uz=True, saturate=False\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n\n ivals = bytes([int(i) for i in expected])\n tensor = TensorProto()\n tensor.data_type = getattr(TensorProto, to_type)\n tensor.name = \"x\"\n tensor.dims.extend([3, 5])\n field = tensor_dtype_to_field(tensor.data_type)\n getattr(tensor, field).extend(ivals)\n\n output = tensor\n\n node = onnx.helper.make_node(\n \"Cast\",\n inputs=[\"input\"],\n outputs=[\"output\"],\n to=getattr(TensorProto, to_type),\n saturate=0,\n )\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_no_saturate_\" + from_type + \"_to_\" + to_type,\n )" + } + ] + }, + { + "name": "Cast", + "module": "ai.onnx", + "version": 6, + "description": "The operator casts the elements of a given input tensor to a data type\nspecified by the 'to' argument and returns an output tensor of the same size in\nthe converted type. The 'to' argument must be one of the data types specified\nin the 'DataType' enum field in the TensorProto message.\nNOTE: Casting to and from strings is not supported yet.\n", + "attributes": [ + { + "name": "to", + "type": "DataType", + "required": true, + "description": "The data type to which the elements of the input tensor are cast. Strictly must be one of the types from DataType enum in TensorProto" + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor to be cast." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor with the same shape as input with type specified by the 'to' argument" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types. Casting from strings and complex are not supported.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ] + }, + { + "description": "Constrain output types. 
Casting to strings and complex are not supported.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "cast", + "code": "shape = (3, 4)\ntest_cases = [\n (\"FLOAT\", \"FLOAT16\"),\n (\"FLOAT\", \"DOUBLE\"),\n (\"FLOAT16\", \"FLOAT\"),\n (\"FLOAT16\", \"DOUBLE\"),\n (\"DOUBLE\", \"FLOAT\"),\n (\"DOUBLE\", \"FLOAT16\"),\n (\"FLOAT\", \"STRING\"),\n (\"STRING\", \"FLOAT\"),\n (\"FLOAT\", \"BFLOAT16\"),\n (\"BFLOAT16\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT8E4M3FN\", \"FLOAT\"),\n (\"FLOAT8E4M3FN\", \"FLOAT16\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT16\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT8E5M2\", \"FLOAT\"),\n (\"FLOAT8E5M2\", \"FLOAT16\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"UINT4\"),\n (\"FLOAT16\", \"UINT4\"),\n (\"FLOAT\", \"INT4\"),\n (\"FLOAT16\", \"INT4\"),\n (\"UINT4\", \"FLOAT\"),\n (\"UINT4\", \"FLOAT16\"),\n (\"UINT4\", \"UINT8\"),\n (\"INT4\", \"FLOAT\"),\n (\"INT4\", \"FLOAT16\"),\n (\"INT4\", \"INT8\"),\n]\n\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\nvect_float32_to_uint4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=False)\n)\nvect_float32_to_int4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=True)\n)\n\nf8_types = (\"FLOAT8E4M3FN\", \"FLOAT8E4M3FNUZ\", \"FLOAT8E5M2\", \"FLOAT8E5M2FNUZ\")\n\nfor from_type, to_type in test_cases:\n input_type_proto = None\n output_type_proto = None\n if from_type == \"BFLOAT16\" or to_type == \"BFLOAT16\":\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n little_endisan = sys.byteorder == \"little\"\n np_uint16_view = np_fp32.view(dtype=np.uint16)\n np_bfp16 = (\n np_uint16_view[1::2] if little_endisan else np_uint16_view[0::2]\n )\n if to_type == \"BFLOAT16\":\n assert from_type == \"FLOAT\"\n input = np_fp32.reshape([3, 4])\n output = np_bfp16.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), output.shape\n )\n else:\n assert to_type == \"FLOAT\"\n input = np_bfp16.reshape([3, 4])\n # convert bfloat to FLOAT\n np_fp32_zeros = np.zeros((len(np_bfp16) * 2,), dtype=np.uint16)\n if little_endisan:\n np_fp32_zeros[1::2] = np_bfp16\n else:\n np_fp32_zeros[0::2] = np_bfp16\n np_fp32_from_bfloat = np_fp32_zeros.view(dtype=np.float32)\n output = np_fp32_from_bfloat.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), output.shape\n )\n elif from_type in f8_types or to_type in f8_types:\n np_fp32 = np.array(\n [\n \"0.47892547\",\n 
\"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.7229038\",\n \"1000000\",\n \"1e-7\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n \"-0.0000001\",\n \"0.0000001\",\n \"-1000000\",\n ],\n dtype=np.float32,\n )\n\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16).astype(np.float32)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FN\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FNUZ\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32, uz=True), uz=True\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2FNUZ\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [3, 5], input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n\n if to_type == \"FLOAT8E4M3FN\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values)\n )\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values, uz=True), uz=True\n )\n elif to_type == \"FLOAT8E5M2\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values)\n )\n elif to_type == \"FLOAT8E5M2FNUZ\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16).astype(np.float32)\n elif to_type == \"FLOAT\":\n expected = input_values\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"x\", getattr(TensorProto, to_type), [3, 5], expected.tolist()\n )\n output = expected_tensor\n elif from_type in (\"UINT4\", \"INT4\") or to_type in (\"UINT4\", \"INT4\"):\n np_fp32 = np.arange(-9, 16).astype(np.float32)\n input_shape = (5, 5)\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, input_shape, input_values.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, input_shape, input_values.tolist()\n )\n elif from_type == \"UINT4\":\n input_values = vect_float32_to_uint4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.UINT4, input_shape, input_values.tolist()\n )\n elif from_type == \"INT4\":\n input_values = vect_float32_to_int4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.INT4, input_shape, input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n if to_type == \"UINT4\":\n expected = vect_float32_to_uint4(input_values).astype(custom.uint4)\n elif to_type == \"INT4\":\n expected = 
vect_float32_to_int4(input_values).astype(custom.int4)\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16)\n elif to_type == \"FLOAT\":\n expected = input_values\n elif to_type == \"UINT8\":\n expected = input_values.astype(np.uint8)\n elif to_type == \"INT8\":\n expected = input_values.astype(np.int8)\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"y\", getattr(TensorProto, to_type), input_shape, expected.tolist()\n )\n output = expected_tensor\n input_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, from_type), input_shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, to_type), input_shape\n )\n\n elif from_type != \"STRING\":\n input = np.random.random_sample(shape).astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, from_type))\n )\n if to_type == \"STRING\":\n # Converting input to str, then give it object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode(\"utf-8\")\n su = s.decode(\"utf-8\")\n ss.append(su)\n\n output = np.array(ss).astype(object).reshape([3, 4])\n else:\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n else:\n input = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.dtype(object),\n ).reshape([3, 4])\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n node = onnx.helper.make_node(\n \"Cast\",\n inputs=[\"input\"],\n outputs=[\"output\"],\n to=getattr(TensorProto, to_type),\n )\n if input_type_proto and output_type_proto:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" + to_type,\n input_type_protos=[input_type_proto],\n output_type_protos=[output_type_proto],\n )\n else:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" + to_type,\n )" + }, + { + "summary": "saturate_false", + "code": "test_cases = [\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT16\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n]\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\n\nfor from_type, to_type in test_cases:\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.7229038\",\n \"1000000\",\n \"1e-7\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n \"-0.0000001\",\n \"0.0000001\",\n \"-1000000\",\n ],\n dtype=np.float32,\n )\n\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist())\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16).astype(np.float32)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n\n if to_type == \"FLOAT8E4M3FN\":\n expected = vect_float32_to_float8e4m3(input_values, saturate=False)\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = 
vect_float32_to_float8e4m3(\n            input_values, uz=True, saturate=False\n        )\n    elif to_type == \"FLOAT8E5M2\":\n        expected = vect_float32_to_float8e5m2(input_values, saturate=False)\n    elif to_type == \"FLOAT8E5M2FNUZ\":\n        expected = vect_float32_to_float8e5m2(\n            input_values, fn=True, uz=True, saturate=False\n        )\n    else:\n        raise ValueError(\n            f\"Conversion from {from_type} to {to_type} is not tested.\"\n        )\n\n    ivals = bytes([int(i) for i in expected])\n    tensor = TensorProto()\n    tensor.data_type = getattr(TensorProto, to_type)\n    tensor.name = \"x\"\n    tensor.dims.extend([3, 5])\n    field = tensor_dtype_to_field(tensor.data_type)\n    getattr(tensor, field).extend(ivals)\n\n    output = tensor\n\n    node = onnx.helper.make_node(\n        \"Cast\",\n        inputs=[\"input\"],\n        outputs=[\"output\"],\n        to=getattr(TensorProto, to_type),\n        saturate=0,\n    )\n    expect(\n        node,\n        inputs=[input],\n        outputs=[output],\n        name=\"test_cast_no_saturate_\" + from_type + \"_to_\" + to_type,\n    )" + } + ] + }, + { + "name": "Cast", + "module": "ai.onnx", + "version": 9, + "description": "The operator casts the elements of a given input tensor to a data type\nspecified by the 'to' argument and returns an output tensor of the same size in\nthe converted type. The 'to' argument must be one of the data types specified\nin the 'DataType' enum field in the TensorProto message.\n\nCasting from a string tensor in plain (e.g., \"3.14\" and \"1000\") and scientific numeric representations\n(e.g., \"1e-5\" and \"1E8\") to float types is supported. For example, converting the string \"100.5\" to an integer may\nyield the result 100. There are some string literals reserved for special floating-point values;\n\"+INF\" (and \"INF\"), \"-INF\", and \"NaN\" are positive infinity, negative infinity, and not-a-number, respectively.\nAny string that exactly matches \"+INF\" in a case-insensitive way is mapped to positive infinity. Similarly,\nthis case-insensitive rule is applied to \"INF\" and \"NaN\". When casting from numeric tensors\nto string tensors, a plain floating-point representation (such as \"314.15926\") is used.\nConverting a non-numerical-literal string such as \"Hello World!\" is undefined behavior. Likewise,\nconverting a string that represents a floating-point value, such as \"2.718\", to INT is undefined behavior.\n\nConversion from a numerical type to any numerical type is always allowed.\nUsers must be aware of precision loss and value changes caused by the range difference between the two types.\nFor example, a 64-bit float 3.1415926459 may be rounded to a 32-bit float 3.141592. Similarly, converting\nthe integer 36 to Boolean may produce 1 because we truncate bits that can't be stored in the targeted type.\n", + "attributes": [ + { + "name": "to", + "type": "DataType", + "required": true, + "description": "The data type to which the elements of the input tensor are cast. Strictly must be one of the types from DataType enum in TensorProto" + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor to be cast." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor with the same shape as input with type specified by the 'to' argument" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types. 
Casting from complex is not supported.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)" + ] + }, + { + "description": "Constrain output types. Casting to complex is not supported.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)" + ] + } + ], + "examples": [ + { + "summary": "cast", + "code": "shape = (3, 4)\ntest_cases = [\n (\"FLOAT\", \"FLOAT16\"),\n (\"FLOAT\", \"DOUBLE\"),\n (\"FLOAT16\", \"FLOAT\"),\n (\"FLOAT16\", \"DOUBLE\"),\n (\"DOUBLE\", \"FLOAT\"),\n (\"DOUBLE\", \"FLOAT16\"),\n (\"FLOAT\", \"STRING\"),\n (\"STRING\", \"FLOAT\"),\n (\"FLOAT\", \"BFLOAT16\"),\n (\"BFLOAT16\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT8E4M3FN\", \"FLOAT\"),\n (\"FLOAT8E4M3FN\", \"FLOAT16\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT16\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT8E5M2\", \"FLOAT\"),\n (\"FLOAT8E5M2\", \"FLOAT16\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"UINT4\"),\n (\"FLOAT16\", \"UINT4\"),\n (\"FLOAT\", \"INT4\"),\n (\"FLOAT16\", \"INT4\"),\n (\"UINT4\", \"FLOAT\"),\n (\"UINT4\", \"FLOAT16\"),\n (\"UINT4\", \"UINT8\"),\n (\"INT4\", \"FLOAT\"),\n (\"INT4\", \"FLOAT16\"),\n (\"INT4\", \"INT8\"),\n]\n\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\nvect_float32_to_uint4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=False)\n)\nvect_float32_to_int4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=True)\n)\n\nf8_types = (\"FLOAT8E4M3FN\", \"FLOAT8E4M3FNUZ\", \"FLOAT8E5M2\", \"FLOAT8E5M2FNUZ\")\n\nfor from_type, to_type in test_cases:\n input_type_proto = None\n output_type_proto = None\n if from_type == \"BFLOAT16\" or to_type == \"BFLOAT16\":\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n little_endisan = sys.byteorder == \"little\"\n np_uint16_view = np_fp32.view(dtype=np.uint16)\n np_bfp16 = (\n np_uint16_view[1::2] if little_endisan else np_uint16_view[0::2]\n )\n if to_type == \"BFLOAT16\":\n assert from_type == \"FLOAT\"\n input = np_fp32.reshape([3, 4])\n output = np_bfp16.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), output.shape\n )\n else:\n assert to_type == \"FLOAT\"\n input = np_bfp16.reshape([3, 4])\n # convert bfloat to FLOAT\n np_fp32_zeros = np.zeros((len(np_bfp16) * 2,), dtype=np.uint16)\n if little_endisan:\n np_fp32_zeros[1::2] = np_bfp16\n else:\n np_fp32_zeros[0::2] = np_bfp16\n 
np_fp32_from_bfloat = np_fp32_zeros.view(dtype=np.float32)\n output = np_fp32_from_bfloat.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), output.shape\n )\n elif from_type in f8_types or to_type in f8_types:\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.7229038\",\n \"1000000\",\n \"1e-7\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n \"-0.0000001\",\n \"0.0000001\",\n \"-1000000\",\n ],\n dtype=np.float32,\n )\n\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16).astype(np.float32)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FN\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FNUZ\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32, uz=True), uz=True\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2FNUZ\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [3, 5], input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n\n if to_type == \"FLOAT8E4M3FN\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values)\n )\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values, uz=True), uz=True\n )\n elif to_type == \"FLOAT8E5M2\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values)\n )\n elif to_type == \"FLOAT8E5M2FNUZ\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16).astype(np.float32)\n elif to_type == \"FLOAT\":\n expected = input_values\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"x\", getattr(TensorProto, to_type), [3, 5], expected.tolist()\n )\n output = expected_tensor\n elif from_type in (\"UINT4\", \"INT4\") or to_type in (\"UINT4\", \"INT4\"):\n np_fp32 = np.arange(-9, 16).astype(np.float32)\n input_shape = (5, 5)\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, input_shape, input_values.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, input_shape, input_values.tolist()\n )\n elif from_type == \"UINT4\":\n input_values = vect_float32_to_uint4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.UINT4, input_shape, input_values.tolist()\n )\n elif from_type == \"INT4\":\n 
input_values = vect_float32_to_int4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.INT4, input_shape, input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n if to_type == \"UINT4\":\n expected = vect_float32_to_uint4(input_values).astype(custom.uint4)\n elif to_type == \"INT4\":\n expected = vect_float32_to_int4(input_values).astype(custom.int4)\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16)\n elif to_type == \"FLOAT\":\n expected = input_values\n elif to_type == \"UINT8\":\n expected = input_values.astype(np.uint8)\n elif to_type == \"INT8\":\n expected = input_values.astype(np.int8)\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"y\", getattr(TensorProto, to_type), input_shape, expected.tolist()\n )\n output = expected_tensor\n input_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, from_type), input_shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, to_type), input_shape\n )\n\n elif from_type != \"STRING\":\n input = np.random.random_sample(shape).astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, from_type))\n )\n if to_type == \"STRING\":\n # Converting input to str, then give it object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode(\"utf-8\")\n su = s.decode(\"utf-8\")\n ss.append(su)\n\n output = np.array(ss).astype(object).reshape([3, 4])\n else:\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n else:\n input = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.dtype(object),\n ).reshape([3, 4])\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n node = onnx.helper.make_node(\n \"Cast\",\n inputs=[\"input\"],\n outputs=[\"output\"],\n to=getattr(TensorProto, to_type),\n )\n if input_type_proto and output_type_proto:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" + to_type,\n input_type_protos=[input_type_proto],\n output_type_protos=[output_type_proto],\n )\n else:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" + to_type,\n )" + }, + { + "summary": "saturate_false", + "code": "test_cases = [\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT16\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n]\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\n\nfor from_type, to_type in test_cases:\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.7229038\",\n \"1000000\",\n \"1e-7\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n \"-0.0000001\",\n \"0.0000001\",\n \"-1000000\",\n ],\n dtype=np.float32,\n )\n\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist())\n elif from_type == \"FLOAT16\":\n input_values = 
np_fp32.astype(np.float16).astype(np.float32)\n        input = make_tensor(\n            \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n        )\n    else:\n        raise ValueError(\n            f\"Conversion from {from_type} to {to_type} is not tested.\"\n        )\n\n    if to_type == \"FLOAT8E4M3FN\":\n        expected = vect_float32_to_float8e4m3(input_values, saturate=False)\n    elif to_type == \"FLOAT8E4M3FNUZ\":\n        expected = vect_float32_to_float8e4m3(\n            input_values, uz=True, saturate=False\n        )\n    elif to_type == \"FLOAT8E5M2\":\n        expected = vect_float32_to_float8e5m2(input_values, saturate=False)\n    elif to_type == \"FLOAT8E5M2FNUZ\":\n        expected = vect_float32_to_float8e5m2(\n            input_values, fn=True, uz=True, saturate=False\n        )\n    else:\n        raise ValueError(\n            f\"Conversion from {from_type} to {to_type} is not tested.\"\n        )\n\n    ivals = bytes([int(i) for i in expected])\n    tensor = TensorProto()\n    tensor.data_type = getattr(TensorProto, to_type)\n    tensor.name = \"x\"\n    tensor.dims.extend([3, 5])\n    field = tensor_dtype_to_field(tensor.data_type)\n    getattr(tensor, field).extend(ivals)\n\n    output = tensor\n\n    node = onnx.helper.make_node(\n        \"Cast\",\n        inputs=[\"input\"],\n        outputs=[\"output\"],\n        to=getattr(TensorProto, to_type),\n        saturate=0,\n    )\n    expect(\n        node,\n        inputs=[input],\n        outputs=[output],\n        name=\"test_cast_no_saturate_\" + from_type + \"_to_\" + to_type,\n    )" + } + ] + }, + { + "name": "Cast", + "module": "ai.onnx", + "version": 13, + "description": "The operator casts the elements of a given input tensor to a data type\nspecified by the 'to' argument and returns an output tensor of the same size in\nthe converted type. The 'to' argument must be one of the data types specified\nin the 'DataType' enum field in the TensorProto message.\n\nCasting from a string tensor in plain (e.g., \"3.14\" and \"1000\") and scientific numeric representations\n(e.g., \"1e-5\" and \"1E8\") to float types is supported. For example, converting the string \"100.5\" to an integer may\nyield the result 100. There are some string literals reserved for special floating-point values;\n\"+INF\" (and \"INF\"), \"-INF\", and \"NaN\" are positive infinity, negative infinity, and not-a-number, respectively.\nAny string that exactly matches \"+INF\" in a case-insensitive way is mapped to positive infinity. Similarly,\nthis case-insensitive rule is applied to \"INF\" and \"NaN\". When casting from numeric tensors\nto string tensors, a plain floating-point representation (such as \"314.15926\") is used.\nConverting a non-numerical-literal string such as \"Hello World!\" is undefined behavior. Likewise,\nconverting a string that represents a floating-point value, such as \"2.718\", to INT is undefined behavior.\n\nConversion from a numerical type to any numerical type is always allowed.\nUsers must be aware of precision loss and value changes caused by the range difference between the two types.\nFor example, a 64-bit float 3.1415926459 may be rounded to a 32-bit float 3.141592. Similarly, converting\nthe integer 36 to Boolean may produce 1 because we truncate bits that can't be stored in the targeted type.\n\nIn more detail, the conversion among numerical types should follow these rules:\n\n* Casting from floating point to:\n  * floating point: +/- infinity if OOR (out of range).\n  * fixed point: undefined if OOR.\n  * bool: +/- 0.0 to False; all else to True.\n* Casting from fixed point to:\n  * floating point: +/- infinity if OOR. 
(+ infinity in the case of uint)\n * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for\n signed types). For example, 200 (int16) -> -56 (int8).\n * bool: zero to False; nonzero to True.\n* Casting from bool to:\n * floating point: `{1.0, 0.0}`.\n * fixed point: `{1, 0}`.\n * bool: no change.\n", + "attributes": [ + { + "name": "to", + "type": "DataType", + "required": true, + "description": "The data type to which the elements of the input tensor are cast. Strictly must be one of the types from DataType enum in TensorProto" + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor to be cast." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor with the same shape as input with type specified by the 'to' argument" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types. Casting from complex is not supported.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain output types. Casting to complex is not supported.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "cast", + "code": "shape = (3, 4)\ntest_cases = [\n (\"FLOAT\", \"FLOAT16\"),\n (\"FLOAT\", \"DOUBLE\"),\n (\"FLOAT16\", \"FLOAT\"),\n (\"FLOAT16\", \"DOUBLE\"),\n (\"DOUBLE\", \"FLOAT\"),\n (\"DOUBLE\", \"FLOAT16\"),\n (\"FLOAT\", \"STRING\"),\n (\"STRING\", \"FLOAT\"),\n (\"FLOAT\", \"BFLOAT16\"),\n (\"BFLOAT16\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT8E4M3FN\", \"FLOAT\"),\n (\"FLOAT8E4M3FN\", \"FLOAT16\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT16\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT8E5M2\", \"FLOAT\"),\n (\"FLOAT8E5M2\", \"FLOAT16\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"UINT4\"),\n (\"FLOAT16\", \"UINT4\"),\n (\"FLOAT\", \"INT4\"),\n (\"FLOAT16\", \"INT4\"),\n (\"UINT4\", \"FLOAT\"),\n (\"UINT4\", \"FLOAT16\"),\n (\"UINT4\", \"UINT8\"),\n (\"INT4\", \"FLOAT\"),\n (\"INT4\", \"FLOAT16\"),\n (\"INT4\", \"INT8\"),\n]\n\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\nvect_float32_to_uint4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=False)\n)\nvect_float32_to_int4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=True)\n)\n\nf8_types = (\"FLOAT8E4M3FN\", \"FLOAT8E4M3FNUZ\", \"FLOAT8E5M2\", \"FLOAT8E5M2FNUZ\")\n\nfor from_type, to_type in test_cases:\n input_type_proto = None\n output_type_proto = None\n if from_type == \"BFLOAT16\" or 
to_type == \"BFLOAT16\":\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n little_endisan = sys.byteorder == \"little\"\n np_uint16_view = np_fp32.view(dtype=np.uint16)\n np_bfp16 = (\n np_uint16_view[1::2] if little_endisan else np_uint16_view[0::2]\n )\n if to_type == \"BFLOAT16\":\n assert from_type == \"FLOAT\"\n input = np_fp32.reshape([3, 4])\n output = np_bfp16.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), output.shape\n )\n else:\n assert to_type == \"FLOAT\"\n input = np_bfp16.reshape([3, 4])\n # convert bfloat to FLOAT\n np_fp32_zeros = np.zeros((len(np_bfp16) * 2,), dtype=np.uint16)\n if little_endisan:\n np_fp32_zeros[1::2] = np_bfp16\n else:\n np_fp32_zeros[0::2] = np_bfp16\n np_fp32_from_bfloat = np_fp32_zeros.view(dtype=np.float32)\n output = np_fp32_from_bfloat.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), output.shape\n )\n elif from_type in f8_types or to_type in f8_types:\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.7229038\",\n \"1000000\",\n \"1e-7\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n \"-0.0000001\",\n \"0.0000001\",\n \"-1000000\",\n ],\n dtype=np.float32,\n )\n\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16).astype(np.float32)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FN\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FNUZ\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32, uz=True), uz=True\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2FNUZ\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [3, 5], input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n\n if to_type == \"FLOAT8E4M3FN\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values)\n )\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values, uz=True), uz=True\n )\n elif to_type == \"FLOAT8E5M2\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values)\n )\n elif to_type == \"FLOAT8E5M2FNUZ\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values, fn=True, uz=True),\n fn=True,\n uz=True,\n 
)\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16).astype(np.float32)\n elif to_type == \"FLOAT\":\n expected = input_values\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"x\", getattr(TensorProto, to_type), [3, 5], expected.tolist()\n )\n output = expected_tensor\n elif from_type in (\"UINT4\", \"INT4\") or to_type in (\"UINT4\", \"INT4\"):\n np_fp32 = np.arange(-9, 16).astype(np.float32)\n input_shape = (5, 5)\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, input_shape, input_values.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, input_shape, input_values.tolist()\n )\n elif from_type == \"UINT4\":\n input_values = vect_float32_to_uint4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.UINT4, input_shape, input_values.tolist()\n )\n elif from_type == \"INT4\":\n input_values = vect_float32_to_int4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.INT4, input_shape, input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n if to_type == \"UINT4\":\n expected = vect_float32_to_uint4(input_values).astype(custom.uint4)\n elif to_type == \"INT4\":\n expected = vect_float32_to_int4(input_values).astype(custom.int4)\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16)\n elif to_type == \"FLOAT\":\n expected = input_values\n elif to_type == \"UINT8\":\n expected = input_values.astype(np.uint8)\n elif to_type == \"INT8\":\n expected = input_values.astype(np.int8)\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"y\", getattr(TensorProto, to_type), input_shape, expected.tolist()\n )\n output = expected_tensor\n input_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, from_type), input_shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, to_type), input_shape\n )\n\n elif from_type != \"STRING\":\n input = np.random.random_sample(shape).astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, from_type))\n )\n if to_type == \"STRING\":\n # Converting input to str, then give it object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode(\"utf-8\")\n su = s.decode(\"utf-8\")\n ss.append(su)\n\n output = np.array(ss).astype(object).reshape([3, 4])\n else:\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n else:\n input = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.dtype(object),\n ).reshape([3, 4])\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n node = onnx.helper.make_node(\n \"Cast\",\n inputs=[\"input\"],\n outputs=[\"output\"],\n to=getattr(TensorProto, to_type),\n )\n if input_type_proto and output_type_proto:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" + to_type,\n input_type_protos=[input_type_proto],\n output_type_protos=[output_type_proto],\n )\n else:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" 
+ to_type,\n    )" + }, + { + "summary": "saturate_false", + "code": "test_cases = [\n    (\"FLOAT\", \"FLOAT8E4M3FN\"),\n    (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n    (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n    (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n    (\"FLOAT\", \"FLOAT8E5M2\"),\n    (\"FLOAT16\", \"FLOAT8E5M2\"),\n    (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n    (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n]\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\n\nfor from_type, to_type in test_cases:\n    np_fp32 = np.array(\n        [\n            \"0.47892547\",\n            \"0.48033667\",\n            \"0.49968487\",\n            \"0.81910545\",\n            \"0.47031248\",\n            \"0.7229038\",\n            \"1000000\",\n            \"1e-7\",\n            \"NaN\",\n            \"INF\",\n            \"+INF\",\n            \"-INF\",\n            \"-0.0000001\",\n            \"0.0000001\",\n            \"-1000000\",\n        ],\n        dtype=np.float32,\n    )\n\n    if from_type == \"FLOAT\":\n        input_values = np_fp32\n        input = make_tensor(\"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist())\n    elif from_type == \"FLOAT16\":\n        input_values = np_fp32.astype(np.float16).astype(np.float32)\n        input = make_tensor(\n            \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n        )\n    else:\n        raise ValueError(\n            f\"Conversion from {from_type} to {to_type} is not tested.\"\n        )\n\n    if to_type == \"FLOAT8E4M3FN\":\n        expected = vect_float32_to_float8e4m3(input_values, saturate=False)\n    elif to_type == \"FLOAT8E4M3FNUZ\":\n        expected = vect_float32_to_float8e4m3(\n            input_values, uz=True, saturate=False\n        )\n    elif to_type == \"FLOAT8E5M2\":\n        expected = vect_float32_to_float8e5m2(input_values, saturate=False)\n    elif to_type == \"FLOAT8E5M2FNUZ\":\n        expected = vect_float32_to_float8e5m2(\n            input_values, fn=True, uz=True, saturate=False\n        )\n    else:\n        raise ValueError(\n            f\"Conversion from {from_type} to {to_type} is not tested.\"\n        )\n\n    ivals = bytes([int(i) for i in expected])\n    tensor = TensorProto()\n    tensor.data_type = getattr(TensorProto, to_type)\n    tensor.name = \"x\"\n    tensor.dims.extend([3, 5])\n    field = tensor_dtype_to_field(tensor.data_type)\n    getattr(tensor, field).extend(ivals)\n\n    output = tensor\n\n    node = onnx.helper.make_node(\n        \"Cast\",\n        inputs=[\"input\"],\n        outputs=[\"output\"],\n        to=getattr(TensorProto, to_type),\n        saturate=0,\n    )\n    expect(\n        node,\n        inputs=[input],\n        outputs=[output],\n        name=\"test_cast_no_saturate_\" + from_type + \"_to_\" + to_type,\n    )" + } + ] + }, + { + "name": "Cast", + "module": "ai.onnx", + "version": 19, + "description": "The operator casts the elements of a given input tensor to a data type\nspecified by the 'to' argument and returns an output tensor of the same size in\nthe converted type. The 'to' argument must be one of the data types specified\nin the 'DataType' enum field in the TensorProto message.\n\nCasting from a string tensor in plain (e.g., \"3.14\" and \"1000\") and scientific numeric representations\n(e.g., \"1e-5\" and \"1E8\") to float types is supported. For example, converting the string \"100.5\" to an integer may\nyield the result 100. There are some string literals reserved for special floating-point values;\n\"+INF\" (and \"INF\"), \"-INF\", and \"NaN\" are positive infinity, negative infinity, and not-a-number, respectively.\nAny string that exactly matches \"+INF\" in a case-insensitive way is mapped to positive infinity. Similarly,\nthis case-insensitive rule is applied to \"INF\" and \"NaN\". 
When casting from numeric tensors\nto string tensors, a plain floating-point representation (such as \"314.15926\") is used.\nConverting a non-numerical-literal string such as \"Hello World!\" is undefined behavior. Likewise,\nconverting a string that represents a floating-point value, such as \"2.718\", to INT is undefined behavior.\n\nConversion from a numerical type to any numerical type is always allowed.\nUsers must be aware of precision loss and value changes caused by the range difference between the two types.\nFor example, a 64-bit float 3.1415926459 may be rounded to a 32-bit float 3.141592. Similarly, converting\nthe integer 36 to Boolean may produce 1 because we truncate bits that can't be stored in the targeted type.\n\nIn more detail, the conversion among numerical types should follow these rules\nif the destination type is not a float 8 type.\n\n* Casting from floating point to:\n  * floating point: +/- infinity if OOR (out of range).\n  * fixed point: undefined if OOR.\n  * bool: +/- 0.0 to False; all else to True.\n* Casting from fixed point to:\n  * floating point: +/- infinity if OOR. (+ infinity in the case of uint)\n  * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for\n    signed types). For example, 200 (int16) -> -56 (int8).\n  * bool: zero to False; nonzero to True.\n* Casting from bool to:\n  * floating point: `{1.0, 0.0}`.\n  * fixed point: `{1, 0}`.\n  * bool: no change.\n\nFloat 8 types were introduced to speed up the training of\ndeep models. By default, the conversion of a float *x* obeys\nthe following rules. `[x]` means the value rounded to\nthe target mantissa width.\n\n| x | E4M3FN | E4M3FNUZ | E5M2 | E5M2FNUZ |\n|------|----|----|----|----|\n| 0 | 0 | 0 | 0 | 0 |\n|-0 | -0 | 0 | -0 | 0 |\n| NaN | NaN | NaN | NaN | NaN |\n| +/- Inf | +/- FLT_MAX | NaN | FLT_MAX | NaN |\n| [x] > FLT_MAX | FLT_MAX | FLT_MAX | FLT_MAX | FLT_MAX |\n| [x] < -FLT_MAX | -FLT_MAX | -FLT_MAX | -FLT_MAX | -FLT_MAX |\n| else | RNE | RNE | RNE | RNE |\n\nThe behavior changes if the parameter 'saturate' is set to False.\nThe rules then become:\n\n| x | E4M3FN | E4M3FNUZ | E5M2 | E5M2FNUZ |\n|------|----|----|----|----|\n| 0 | 0 | 0 | 0 | 0 |\n|-0 | -0 | 0 | -0 | 0 |\n| NaN | NaN | NaN | NaN | NaN |\n| +/- Inf | NaN | NaN | +/- Inf | NaN |\n| [x] > FLT_MAX | NaN | NaN | Inf | NaN |\n| [x] < -FLT_MAX | NaN | NaN | -Inf | NaN |\n| else | RNE | RNE | RNE | RNE |\n", + "attributes": [ + { + "name": "saturate", + "type": "int64", + "required": false, + "default": 1, + "description": "The parameter defines how the conversion behaves if an input value is out of range of the destination type. It only applies for float 8 conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. All cases are fully described in two tables inserted in the operator description." + }, + { + "name": "to", + "type": "DataType", + "required": true, + "description": "The data type to which the elements of the input tensor are cast. Strictly must be one of the types from DataType enum in TensorProto" + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor to be cast." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor with the same shape as input with type specified by the 'to' argument" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types. 
Casting from complex is not supported.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + }, + { + "description": "Constrain output types. Casting to complex is not supported.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + } + ], + "examples": [ + { + "summary": "cast", + "code": "shape = (3, 4)\ntest_cases = [\n (\"FLOAT\", \"FLOAT16\"),\n (\"FLOAT\", \"DOUBLE\"),\n (\"FLOAT16\", \"FLOAT\"),\n (\"FLOAT16\", \"DOUBLE\"),\n (\"DOUBLE\", \"FLOAT\"),\n (\"DOUBLE\", \"FLOAT16\"),\n (\"FLOAT\", \"STRING\"),\n (\"STRING\", \"FLOAT\"),\n (\"FLOAT\", \"BFLOAT16\"),\n (\"BFLOAT16\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT8E4M3FN\", \"FLOAT\"),\n (\"FLOAT8E4M3FN\", \"FLOAT16\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT16\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT8E5M2\", \"FLOAT\"),\n (\"FLOAT8E5M2\", \"FLOAT16\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"UINT4\"),\n (\"FLOAT16\", \"UINT4\"),\n (\"FLOAT\", \"INT4\"),\n (\"FLOAT16\", \"INT4\"),\n (\"UINT4\", \"FLOAT\"),\n (\"UINT4\", \"FLOAT16\"),\n (\"UINT4\", \"UINT8\"),\n (\"INT4\", \"FLOAT\"),\n (\"INT4\", \"FLOAT16\"),\n (\"INT4\", \"INT8\"),\n]\n\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\nvect_float32_to_uint4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=False)\n)\nvect_float32_to_int4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=True)\n)\n\nf8_types = (\"FLOAT8E4M3FN\", \"FLOAT8E4M3FNUZ\", \"FLOAT8E5M2\", \"FLOAT8E5M2FNUZ\")\n\nfor from_type, to_type in test_cases:\n input_type_proto = None\n output_type_proto = None\n if from_type == \"BFLOAT16\" or to_type == \"BFLOAT16\":\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n little_endisan = sys.byteorder == \"little\"\n np_uint16_view = np_fp32.view(dtype=np.uint16)\n np_bfp16 = (\n np_uint16_view[1::2] if little_endisan else np_uint16_view[0::2]\n )\n if to_type == \"BFLOAT16\":\n assert from_type == \"FLOAT\"\n input = np_fp32.reshape([3, 4])\n output = np_bfp16.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), output.shape\n )\n else:\n assert to_type 
== \"FLOAT\"\n input = np_bfp16.reshape([3, 4])\n # convert bfloat to FLOAT\n np_fp32_zeros = np.zeros((len(np_bfp16) * 2,), dtype=np.uint16)\n if little_endisan:\n np_fp32_zeros[1::2] = np_bfp16\n else:\n np_fp32_zeros[0::2] = np_bfp16\n np_fp32_from_bfloat = np_fp32_zeros.view(dtype=np.float32)\n output = np_fp32_from_bfloat.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), output.shape\n )\n elif from_type in f8_types or to_type in f8_types:\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.7229038\",\n \"1000000\",\n \"1e-7\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n \"-0.0000001\",\n \"0.0000001\",\n \"-1000000\",\n ],\n dtype=np.float32,\n )\n\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16).astype(np.float32)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FN\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FNUZ\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32, uz=True), uz=True\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2FNUZ\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [3, 5], input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n\n if to_type == \"FLOAT8E4M3FN\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values)\n )\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values, uz=True), uz=True\n )\n elif to_type == \"FLOAT8E5M2\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values)\n )\n elif to_type == \"FLOAT8E5M2FNUZ\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16).astype(np.float32)\n elif to_type == \"FLOAT\":\n expected = input_values\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"x\", getattr(TensorProto, to_type), [3, 5], expected.tolist()\n )\n output = expected_tensor\n elif from_type in (\"UINT4\", \"INT4\") or to_type in (\"UINT4\", \"INT4\"):\n np_fp32 = np.arange(-9, 16).astype(np.float32)\n input_shape = (5, 5)\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, input_shape, input_values.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, 
input_shape, input_values.tolist()\n )\n elif from_type == \"UINT4\":\n input_values = vect_float32_to_uint4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.UINT4, input_shape, input_values.tolist()\n )\n elif from_type == \"INT4\":\n input_values = vect_float32_to_int4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.INT4, input_shape, input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n if to_type == \"UINT4\":\n expected = vect_float32_to_uint4(input_values).astype(custom.uint4)\n elif to_type == \"INT4\":\n expected = vect_float32_to_int4(input_values).astype(custom.int4)\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16)\n elif to_type == \"FLOAT\":\n expected = input_values\n elif to_type == \"UINT8\":\n expected = input_values.astype(np.uint8)\n elif to_type == \"INT8\":\n expected = input_values.astype(np.int8)\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"y\", getattr(TensorProto, to_type), input_shape, expected.tolist()\n )\n output = expected_tensor\n input_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, from_type), input_shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, to_type), input_shape\n )\n\n elif from_type != \"STRING\":\n input = np.random.random_sample(shape).astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, from_type))\n )\n if to_type == \"STRING\":\n # Converting input to str, then give it object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode(\"utf-8\")\n su = s.decode(\"utf-8\")\n ss.append(su)\n\n output = np.array(ss).astype(object).reshape([3, 4])\n else:\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n else:\n input = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.dtype(object),\n ).reshape([3, 4])\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n node = onnx.helper.make_node(\n \"Cast\",\n inputs=[\"input\"],\n outputs=[\"output\"],\n to=getattr(TensorProto, to_type),\n )\n if input_type_proto and output_type_proto:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" + to_type,\n input_type_protos=[input_type_proto],\n output_type_protos=[output_type_proto],\n )\n else:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" + to_type,\n )" + }, + { + "summary": "saturate_false", + "code": "test_cases = [\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT16\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n]\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\n\nfor from_type, to_type in test_cases:\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.7229038\",\n \"1000000\",\n \"1e-7\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n \"-0.0000001\",\n \"0.0000001\",\n \"-1000000\",\n ],\n 
dtype=np.float32,\n    )\n\n    if from_type == \"FLOAT\":\n        input_values = np_fp32\n        input = make_tensor(\"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist())\n    elif from_type == \"FLOAT16\":\n        input_values = np_fp32.astype(np.float16).astype(np.float32)\n        input = make_tensor(\n            \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n        )\n    else:\n        raise ValueError(\n            f\"Conversion from {from_type} to {to_type} is not tested.\"\n        )\n\n    if to_type == \"FLOAT8E4M3FN\":\n        expected = vect_float32_to_float8e4m3(input_values, saturate=False)\n    elif to_type == \"FLOAT8E4M3FNUZ\":\n        expected = vect_float32_to_float8e4m3(\n            input_values, uz=True, saturate=False\n        )\n    elif to_type == \"FLOAT8E5M2\":\n        expected = vect_float32_to_float8e5m2(input_values, saturate=False)\n    elif to_type == \"FLOAT8E5M2FNUZ\":\n        expected = vect_float32_to_float8e5m2(\n            input_values, fn=True, uz=True, saturate=False\n        )\n    else:\n        raise ValueError(\n            f\"Conversion from {from_type} to {to_type} is not tested.\"\n        )\n\n    ivals = bytes([int(i) for i in expected])\n    tensor = TensorProto()\n    tensor.data_type = getattr(TensorProto, to_type)\n    tensor.name = \"x\"\n    tensor.dims.extend([3, 5])\n    field = tensor_dtype_to_field(tensor.data_type)\n    getattr(tensor, field).extend(ivals)\n\n    output = tensor\n\n    node = onnx.helper.make_node(\n        \"Cast\",\n        inputs=[\"input\"],\n        outputs=[\"output\"],\n        to=getattr(TensorProto, to_type),\n        saturate=0,\n    )\n    expect(\n        node,\n        inputs=[input],\n        outputs=[output],\n        name=\"test_cast_no_saturate_\" + from_type + \"_to_\" + to_type,\n    )" + } + ] + }, + { + "name": "Cast", + "module": "ai.onnx", + "version": 21, + "description": "The operator casts the elements of a given input tensor to a data type\nspecified by the 'to' argument and returns an output tensor of the same size in\nthe converted type. The 'to' argument must be one of the data types specified\nin the 'DataType' enum field in the TensorProto message.\n\nCasting from a string tensor in plain (e.g., \"3.14\" and \"1000\") and scientific numeric representations\n(e.g., \"1e-5\" and \"1E8\") to float types is supported. For example, converting the string \"100.5\" to an integer may\nyield the result 100. There are some string literals reserved for special floating-point values;\n\"+INF\" (and \"INF\"), \"-INF\", and \"NaN\" are positive infinity, negative infinity, and not-a-number, respectively.\nAny string that exactly matches \"+INF\" in a case-insensitive way is mapped to positive infinity. Similarly,\nthis case-insensitive rule is applied to \"INF\" and \"NaN\". When casting from numeric tensors\nto string tensors, a plain floating-point representation (such as \"314.15926\") is used.\nConverting a non-numerical-literal string such as \"Hello World!\" is undefined behavior. Likewise,\nconverting a string that represents a floating-point value, such as \"2.718\", to INT is undefined behavior.\n\nConversion from a numerical type to any numerical type is always allowed.\nUsers must be aware of precision loss and value changes caused by the range difference between the two types.\nFor example, a 64-bit float 3.1415926459 may be rounded to a 32-bit float 3.141592. 
Similarly, converting\nthe integer 36 to Boolean may produce 1 because we truncate bits that can't be stored in the targeted type.\n\nIn more detail, the conversion among numerical types should follow these rules\nif the destination type is not a float 8 type.\n\n* Casting from floating point to:\n  * floating point: +/- infinity if OOR (out of range).\n  * fixed point: undefined if OOR.\n  * bool: +/- 0.0 to False; all else to True.\n* Casting from fixed point to:\n  * floating point: +/- infinity if OOR. (+ infinity in the case of uint)\n  * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for\n    signed types). For example, 200 (int16) -> -56 (int8).\n  * bool: zero to False; nonzero to True.\n* Casting from bool to:\n  * floating point: `{1.0, 0.0}`.\n  * fixed point: `{1, 0}`.\n  * bool: no change.\n\nFloat 8 types were introduced to speed up the training of\ndeep models. By default, the conversion of a float *x* obeys\nthe following rules. `[x]` means the value rounded to\nthe target mantissa width.\n\n| x | E4M3FN | E4M3FNUZ | E5M2 | E5M2FNUZ |\n|------|----|----|----|----|\n| 0 | 0 | 0 | 0 | 0 |\n|-0 | -0 | 0 | -0 | 0 |\n| NaN | NaN | NaN | NaN | NaN |\n| +/- Inf | +/- FLT_MAX | NaN | FLT_MAX | NaN |\n| [x] > FLT_MAX | FLT_MAX | FLT_MAX | FLT_MAX | FLT_MAX |\n| [x] < -FLT_MAX | -FLT_MAX | -FLT_MAX | -FLT_MAX | -FLT_MAX |\n| else | RNE | RNE | RNE | RNE |\n\nThe behavior changes if the parameter 'saturate' is set to False.\nThe rules then become:\n\n| x | E4M3FN | E4M3FNUZ | E5M2 | E5M2FNUZ |\n|------|----|----|----|----|\n| 0 | 0 | 0 | 0 | 0 |\n|-0 | -0 | 0 | -0 | 0 |\n| NaN | NaN | NaN | NaN | NaN |\n| +/- Inf | NaN | NaN | +/- Inf | NaN |\n| [x] > FLT_MAX | NaN | NaN | Inf | NaN |\n| [x] < -FLT_MAX | NaN | NaN | -Inf | NaN |\n| else | RNE | RNE | RNE | RNE |\n", + "attributes": [ + { + "name": "saturate", + "type": "int64", + "required": false, + "default": 1, + "description": "The parameter defines how the conversion behaves if an input value is out of range of the destination type. It only applies for float 8 conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. All cases are fully described in two tables inserted in the operator description." + }, + { + "name": "to", + "type": "DataType", + "required": true, + "description": "The data type to which the elements of the input tensor are cast. Strictly must be one of the types from DataType enum in TensorProto" + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor to be cast." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor with the same shape as input with type specified by the 'to' argument" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types. Casting from complex is not supported.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + }, + { + "description": "Constrain output types. 
Casting to complex is not supported.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + } + ], + "examples": [ + { + "summary": "cast", + "code": "shape = (3, 4)\ntest_cases = [\n (\"FLOAT\", \"FLOAT16\"),\n (\"FLOAT\", \"DOUBLE\"),\n (\"FLOAT16\", \"FLOAT\"),\n (\"FLOAT16\", \"DOUBLE\"),\n (\"DOUBLE\", \"FLOAT\"),\n (\"DOUBLE\", \"FLOAT16\"),\n (\"FLOAT\", \"STRING\"),\n (\"STRING\", \"FLOAT\"),\n (\"FLOAT\", \"BFLOAT16\"),\n (\"BFLOAT16\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT8E4M3FN\", \"FLOAT\"),\n (\"FLOAT8E4M3FN\", \"FLOAT16\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT16\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT8E5M2\", \"FLOAT\"),\n (\"FLOAT8E5M2\", \"FLOAT16\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT16\"),\n (\"FLOAT\", \"UINT4\"),\n (\"FLOAT16\", \"UINT4\"),\n (\"FLOAT\", \"INT4\"),\n (\"FLOAT16\", \"INT4\"),\n (\"UINT4\", \"FLOAT\"),\n (\"UINT4\", \"FLOAT16\"),\n (\"UINT4\", \"UINT8\"),\n (\"INT4\", \"FLOAT\"),\n (\"INT4\", \"FLOAT16\"),\n (\"INT4\", \"INT8\"),\n]\n\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\nvect_float32_to_uint4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=False)\n)\nvect_float32_to_int4 = np.vectorize(\n lambda x: subbyte.float32_to_4bit_unpacked(x, signed=True)\n)\n\nf8_types = (\"FLOAT8E4M3FN\", \"FLOAT8E4M3FNUZ\", \"FLOAT8E5M2\", \"FLOAT8E5M2FNUZ\")\n\nfor from_type, to_type in test_cases:\n input_type_proto = None\n output_type_proto = None\n if from_type == \"BFLOAT16\" or to_type == \"BFLOAT16\":\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n little_endisan = sys.byteorder == \"little\"\n np_uint16_view = np_fp32.view(dtype=np.uint16)\n np_bfp16 = (\n np_uint16_view[1::2] if little_endisan else np_uint16_view[0::2]\n )\n if to_type == \"BFLOAT16\":\n assert from_type == \"FLOAT\"\n input = np_fp32.reshape([3, 4])\n output = np_bfp16.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), output.shape\n )\n else:\n assert to_type == \"FLOAT\"\n input = np_bfp16.reshape([3, 4])\n # convert bfloat to FLOAT\n np_fp32_zeros = np.zeros((len(np_bfp16) * 2,), dtype=np.uint16)\n if little_endisan:\n np_fp32_zeros[1::2] = np_bfp16\n else:\n np_fp32_zeros[0::2] = np_bfp16\n np_fp32_from_bfloat = np_fp32_zeros.view(dtype=np.float32)\n output = np_fp32_from_bfloat.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), input.shape\n )\n output_type_proto = 
onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), output.shape\n )\n elif from_type in f8_types or to_type in f8_types:\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.7229038\",\n \"1000000\",\n \"1e-7\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n \"-0.0000001\",\n \"0.0000001\",\n \"-1000000\",\n ],\n dtype=np.float32,\n )\n\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16).astype(np.float32)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FN\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E4M3FNUZ\":\n input_values = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32, uz=True), uz=True\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32)\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [3, 5], input_values.tolist()\n )\n elif from_type == \"FLOAT8E5M2FNUZ\":\n input_values = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n input = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [3, 5], input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n\n if to_type == \"FLOAT8E4M3FN\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values)\n )\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(input_values, uz=True), uz=True\n )\n elif to_type == \"FLOAT8E5M2\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values)\n )\n elif to_type == \"FLOAT8E5M2FNUZ\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(input_values, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16).astype(np.float32)\n elif to_type == \"FLOAT\":\n expected = input_values\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"x\", getattr(TensorProto, to_type), [3, 5], expected.tolist()\n )\n output = expected_tensor\n elif from_type in (\"UINT4\", \"INT4\") or to_type in (\"UINT4\", \"INT4\"):\n np_fp32 = np.arange(-9, 16).astype(np.float32)\n input_shape = (5, 5)\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\n \"x\", TensorProto.FLOAT, input_shape, input_values.tolist()\n )\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, input_shape, input_values.tolist()\n )\n elif from_type == \"UINT4\":\n input_values = vect_float32_to_uint4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.UINT4, input_shape, input_values.tolist()\n )\n elif from_type == \"INT4\":\n input_values = vect_float32_to_int4(np_fp32)\n input = make_tensor(\n \"x\", TensorProto.INT4, input_shape, input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n if 
to_type == \"UINT4\":\n expected = vect_float32_to_uint4(input_values).astype(custom.uint4)\n elif to_type == \"INT4\":\n expected = vect_float32_to_int4(input_values).astype(custom.int4)\n elif to_type == \"FLOAT16\":\n expected = input_values.astype(np.float16)\n elif to_type == \"FLOAT\":\n expected = input_values\n elif to_type == \"UINT8\":\n expected = input_values.astype(np.uint8)\n elif to_type == \"INT8\":\n expected = input_values.astype(np.int8)\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n expected_tensor = make_tensor(\n \"y\", getattr(TensorProto, to_type), input_shape, expected.tolist()\n )\n output = expected_tensor\n input_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, from_type), input_shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n getattr(TensorProto, to_type), input_shape\n )\n\n elif from_type != \"STRING\":\n input = np.random.random_sample(shape).astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, from_type))\n )\n if to_type == \"STRING\":\n # Converting input to str, then give it object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode(\"utf-8\")\n su = s.decode(\"utf-8\")\n ss.append(su)\n\n output = np.array(ss).astype(object).reshape([3, 4])\n else:\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n else:\n input = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.dtype(object),\n ).reshape([3, 4])\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n node = onnx.helper.make_node(\n \"Cast\",\n inputs=[\"input\"],\n outputs=[\"output\"],\n to=getattr(TensorProto, to_type),\n )\n if input_type_proto and output_type_proto:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" + to_type,\n input_type_protos=[input_type_proto],\n output_type_protos=[output_type_proto],\n )\n else:\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_\" + from_type + \"_to_\" + to_type,\n )" + }, + { + "summary": "saturate_false", + "code": "test_cases = [\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT16\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT16\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT16\", \"FLOAT8E5M2FNUZ\"),\n]\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\n\nfor from_type, to_type in test_cases:\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.7229038\",\n \"1000000\",\n \"1e-7\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n \"-0.0000001\",\n \"0.0000001\",\n \"-1000000\",\n ],\n dtype=np.float32,\n )\n\n if from_type == \"FLOAT\":\n input_values = np_fp32\n input = make_tensor(\"x\", TensorProto.FLOAT, [3, 5], np_fp32.tolist())\n elif from_type == \"FLOAT16\":\n input_values = np_fp32.astype(np.float16).astype(np.float32)\n input = make_tensor(\n \"x\", TensorProto.FLOAT16, [3, 5], input_values.tolist()\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n\n if to_type == \"FLOAT8E4M3FN\":\n expected 
= vect_float32_to_float8e4m3(input_values, saturate=False)\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = vect_float32_to_float8e4m3(\n input_values, uz=True, saturate=False\n )\n elif to_type == \"FLOAT8E5M2\":\n expected = vect_float32_to_float8e5m2(input_values, saturate=False)\n elif to_type == \"FLOAT8E5M2FNUZ\":\n expected = vect_float32_to_float8e5m2(\n input_values, fn=True, uz=True, saturate=False\n )\n else:\n raise ValueError(\n \"Conversion from {from_type} to {to_type} is not tested.\"\n )\n\n ivals = bytes([int(i) for i in expected])\n tensor = TensorProto()\n tensor.data_type = getattr(TensorProto, to_type)\n tensor.name = \"x\"\n tensor.dims.extend([3, 5])\n field = tensor_dtype_to_field(tensor.data_type)\n getattr(tensor, field).extend(ivals)\n\n output = tensor\n\n node = onnx.helper.make_node(\n \"Cast\",\n inputs=[\"input\"],\n outputs=[\"output\"],\n to=getattr(TensorProto, to_type),\n saturate=0,\n )\n expect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_cast_no_saturate_\" + from_type + \"_to_\" + to_type,\n )" + } + ] + }, + { + "name": "CastLike", + "module": "ai.onnx", + "version": 15, + "description": "The operator casts the elements of a given input tensor (the first input) to\nthe same data type as the elements of the second input tensor.\nSee documentation of the Cast operator for further details.\n", + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor to be cast." + }, + { + "name": "target_type", + "type": "T2", + "description": "The (first) input tensor will be cast to produce a tensor of the same type as this (second input) tensor." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor produced by casting the first input tensor to have the same type as the second input tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types. Casting from complex is not supported.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain output types. 
Casting to complex is not supported.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "castlike", + "code": "shape = (3, 4)\ntest_cases = [\n (\"FLOAT\", \"FLOAT16\"),\n (\"FLOAT\", \"DOUBLE\"),\n (\"FLOAT16\", \"FLOAT\"),\n (\"FLOAT16\", \"DOUBLE\"),\n (\"DOUBLE\", \"FLOAT\"),\n (\"DOUBLE\", \"FLOAT16\"),\n (\"FLOAT\", \"STRING\"),\n (\"STRING\", \"FLOAT\"),\n (\"FLOAT\", \"BFLOAT16\"),\n (\"BFLOAT16\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT8E4M3FN\", \"FLOAT\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT8E5M2\", \"FLOAT\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT\"),\n]\n\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\n\nfor from_type, to_type in test_cases:\n input_type_proto = None\n output_type_proto = None\n if from_type == \"BFLOAT16\" or to_type == \"BFLOAT16\":\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n little_endisan = sys.byteorder == \"little\"\n np_uint16_view = np_fp32.view(dtype=np.uint16)\n np_bfp16 = (\n np_uint16_view[1::2] if little_endisan else np_uint16_view[0::2]\n )\n if to_type == \"BFLOAT16\":\n assert from_type == \"FLOAT\"\n input = np_fp32.reshape([3, 4])\n output = np_bfp16.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), output.shape\n )\n else:\n assert to_type == \"FLOAT\"\n input = np_bfp16.reshape([3, 4])\n # convert bfloat to FLOAT\n np_fp32_zeros = np.zeros((len(np_bfp16) * 2,), dtype=np.uint16)\n if little_endisan:\n np_fp32_zeros[1::2] = np_bfp16\n else:\n np_fp32_zeros[0::2] = np_bfp16\n np_fp32_from_bfloat = np_fp32_zeros.view(dtype=np.float32)\n output = np_fp32_from_bfloat.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), output.shape\n )\n like = output.flatten()[0:1]\n elif from_type in (\n \"FLOAT8E4M3FN\",\n \"FLOAT8E4M3FNUZ\",\n \"FLOAT8E5M2\",\n \"FLOAT8E5M2FNUZ\",\n ) or to_type in (\n \"FLOAT8E4M3FN\",\n \"FLOAT8E4M3FNUZ\",\n \"FLOAT8E5M2\",\n \"FLOAT8E5M2FNUZ\",\n ):\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n if to_type == \"FLOAT8E4M3FN\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32)\n )\n expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [1], expected[:1]\n )\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32, uz=True), uz=True\n )\n 
expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [1], expected[:1]\n )\n elif to_type == \"FLOAT8E5M2\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32)\n )\n expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [1], expected[:1]\n )\n elif to_type == \"FLOAT8E5M2FNUZ\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [1], expected[:1]\n )\n if from_type == \"FLOAT\":\n input = np_fp32.reshape((3, 4))\n output = expected_tensor\n like = like_tensor\n else:\n assert to_type == \"FLOAT\"\n input = expected_tensor\n output = expected.reshape((3, 4))\n like = output.flatten()[:1]\n elif from_type != \"STRING\":\n input = np.random.random_sample(shape).astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, from_type))\n )\n if to_type == \"STRING\":\n # Converting input to str, then give it object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode(\"utf-8\")\n su = s.decode(\"utf-8\")\n ss.append(su)\n\n output = np.array(ss).astype(object).reshape([3, 4])\n else:\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n like = output.flatten()[0:1]\n else:\n input = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.dtype(object),\n ).reshape([3, 4])\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n like = output.flatten()[0:1]\n node = onnx.helper.make_node(\n \"CastLike\",\n inputs=[\"input\", \"like\"],\n outputs=[\"output\"],\n )\n if input_type_proto and output_type_proto:\n like_type_proto = onnx.helper.make_tensor_type_proto(\n output_type_proto.tensor_type.elem_type, like.shape\n )\n\n expect(\n node,\n inputs=[input, like],\n outputs=[output],\n name=\"test_castlike_\" + from_type + \"_to_\" + to_type,\n input_type_protos=[input_type_proto, like_type_proto],\n output_type_protos=[output_type_proto],\n )\n else:\n expect(\n node,\n inputs=[input, like],\n outputs=[output],\n name=\"test_castlike_\" + from_type + \"_to_\" + to_type,\n )" + } + ] + }, + { + "name": "CastLike", + "module": "ai.onnx", + "version": 19, + "description": "The operator casts the elements of a given input tensor (the first input) to\nthe same data type as the elements of the second input tensor.\nSee documentation of the Cast operator for further details.\n", + "attributes": [ + { + "name": "saturate", + "type": "int64", + "required": false, + "default": 1, + "description": "The parameter defines how the conversion behaves if an input value is out of range of the destination type. It only applies for float 8 conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. Please refer to operator Cast description for further details." + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor to be cast." 
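As a sketch of how CastLike is wired up in practice (a hypothetical one-node model, assuming the standard onnx.helper API), the output element type is taken from the second input rather than from an attribute:

```
import onnx
from onnx import TensorProto, helper

# 'input' is cast to the element type of 'like'.
node = helper.make_node("CastLike", inputs=["input", "like"], outputs=["output"])
graph = helper.make_graph(
    [node],
    "castlike_sketch",
    inputs=[
        helper.make_tensor_value_info("input", TensorProto.FLOAT, [3, 4]),
        helper.make_tensor_value_info("like", TensorProto.FLOAT16, [1]),
    ],
    outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT16, [3, 4])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 19)])
onnx.checker.check_model(model)   # output type follows 'like', not 'input'
```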
+ }, + { + "name": "target_type", + "type": "T2", + "description": "The (first) input tensor will be cast to produce a tensor of the same type as this (second input) tensor." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor produced by casting the first input tensor to have the same type as the second input tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types. Casting from complex is not supported.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + }, + { + "description": "Constrain output types. Casting to complex is not supported.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + } + ], + "examples": [ + { + "summary": "castlike", + "code": "shape = (3, 4)\ntest_cases = [\n (\"FLOAT\", \"FLOAT16\"),\n (\"FLOAT\", \"DOUBLE\"),\n (\"FLOAT16\", \"FLOAT\"),\n (\"FLOAT16\", \"DOUBLE\"),\n (\"DOUBLE\", \"FLOAT\"),\n (\"DOUBLE\", \"FLOAT16\"),\n (\"FLOAT\", \"STRING\"),\n (\"STRING\", \"FLOAT\"),\n (\"FLOAT\", \"BFLOAT16\"),\n (\"BFLOAT16\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT8E4M3FN\", \"FLOAT\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT8E5M2\", \"FLOAT\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT\"),\n]\n\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\n\nfor from_type, to_type in test_cases:\n input_type_proto = None\n output_type_proto = None\n if from_type == \"BFLOAT16\" or to_type == \"BFLOAT16\":\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n little_endisan = sys.byteorder == \"little\"\n np_uint16_view = np_fp32.view(dtype=np.uint16)\n np_bfp16 = (\n np_uint16_view[1::2] if little_endisan else np_uint16_view[0::2]\n )\n if to_type == \"BFLOAT16\":\n assert from_type == \"FLOAT\"\n input = np_fp32.reshape([3, 4])\n output = np_bfp16.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), output.shape\n )\n else:\n assert to_type == \"FLOAT\"\n input = np_bfp16.reshape([3, 4])\n # convert bfloat to FLOAT\n np_fp32_zeros = np.zeros((len(np_bfp16) * 2,), dtype=np.uint16)\n if little_endisan:\n np_fp32_zeros[1::2] = np_bfp16\n else:\n np_fp32_zeros[0::2] = np_bfp16\n np_fp32_from_bfloat = np_fp32_zeros.view(dtype=np.float32)\n output = 
np_fp32_from_bfloat.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), output.shape\n )\n like = output.flatten()[0:1]\n elif from_type in (\n \"FLOAT8E4M3FN\",\n \"FLOAT8E4M3FNUZ\",\n \"FLOAT8E5M2\",\n \"FLOAT8E5M2FNUZ\",\n ) or to_type in (\n \"FLOAT8E4M3FN\",\n \"FLOAT8E4M3FNUZ\",\n \"FLOAT8E5M2\",\n \"FLOAT8E5M2FNUZ\",\n ):\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n if to_type == \"FLOAT8E4M3FN\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32)\n )\n expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [1], expected[:1]\n )\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32, uz=True), uz=True\n )\n expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [1], expected[:1]\n )\n elif to_type == \"FLOAT8E5M2\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32)\n )\n expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [1], expected[:1]\n )\n elif to_type == \"FLOAT8E5M2FNUZ\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [1], expected[:1]\n )\n if from_type == \"FLOAT\":\n input = np_fp32.reshape((3, 4))\n output = expected_tensor\n like = like_tensor\n else:\n assert to_type == \"FLOAT\"\n input = expected_tensor\n output = expected.reshape((3, 4))\n like = output.flatten()[:1]\n elif from_type != \"STRING\":\n input = np.random.random_sample(shape).astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, from_type))\n )\n if to_type == \"STRING\":\n # Converting input to str, then give it object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode(\"utf-8\")\n su = s.decode(\"utf-8\")\n ss.append(su)\n\n output = np.array(ss).astype(object).reshape([3, 4])\n else:\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n like = output.flatten()[0:1]\n else:\n input = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.dtype(object),\n ).reshape([3, 4])\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n like = output.flatten()[0:1]\n node = onnx.helper.make_node(\n \"CastLike\",\n inputs=[\"input\", \"like\"],\n outputs=[\"output\"],\n )\n if input_type_proto and output_type_proto:\n like_type_proto = onnx.helper.make_tensor_type_proto(\n output_type_proto.tensor_type.elem_type, like.shape\n )\n\n expect(\n node,\n inputs=[input, like],\n outputs=[output],\n name=\"test_castlike_\" + from_type + \"_to_\" + 
to_type,\n input_type_protos=[input_type_proto, like_type_proto],\n output_type_protos=[output_type_proto],\n )\n else:\n expect(\n node,\n inputs=[input, like],\n outputs=[output],\n name=\"test_castlike_\" + from_type + \"_to_\" + to_type,\n )" + } + ] + }, + { + "name": "CastLike", + "module": "ai.onnx", + "version": 21, + "description": "The operator casts the elements of a given input tensor (the first input) to\nthe same data type as the elements of the second input tensor.\nSee documentation of the Cast operator for further details.\n", + "attributes": [ + { + "name": "saturate", + "type": "int64", + "required": false, + "default": 1, + "description": "The parameter defines how the conversion behaves if an input value is out of range of the destination type. It only applies for float 8 conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. Please refer to operator Cast description for further details." + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor to be cast." + }, + { + "name": "target_type", + "type": "T2", + "description": "The (first) input tensor will be cast to produce a tensor of the same type as this (second input) tensor." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor produced by casting the first input tensor to have the same type as the second input tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types. Casting from complex is not supported.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + }, + { + "description": "Constrain output types. 
Casting to complex is not supported.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)", + "tensor(bfloat16)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + } + ], + "examples": [ + { + "summary": "castlike", + "code": "shape = (3, 4)\ntest_cases = [\n (\"FLOAT\", \"FLOAT16\"),\n (\"FLOAT\", \"DOUBLE\"),\n (\"FLOAT16\", \"FLOAT\"),\n (\"FLOAT16\", \"DOUBLE\"),\n (\"DOUBLE\", \"FLOAT\"),\n (\"DOUBLE\", \"FLOAT16\"),\n (\"FLOAT\", \"STRING\"),\n (\"STRING\", \"FLOAT\"),\n (\"FLOAT\", \"BFLOAT16\"),\n (\"BFLOAT16\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E4M3FN\"),\n (\"FLOAT\", \"FLOAT8E4M3FNUZ\"),\n (\"FLOAT8E4M3FN\", \"FLOAT\"),\n (\"FLOAT8E4M3FNUZ\", \"FLOAT\"),\n (\"FLOAT\", \"FLOAT8E5M2\"),\n (\"FLOAT\", \"FLOAT8E5M2FNUZ\"),\n (\"FLOAT8E5M2\", \"FLOAT\"),\n (\"FLOAT8E5M2FNUZ\", \"FLOAT\"),\n]\n\nvect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3)\nvect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2)\n\nfor from_type, to_type in test_cases:\n input_type_proto = None\n output_type_proto = None\n if from_type == \"BFLOAT16\" or to_type == \"BFLOAT16\":\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n little_endisan = sys.byteorder == \"little\"\n np_uint16_view = np_fp32.view(dtype=np.uint16)\n np_bfp16 = (\n np_uint16_view[1::2] if little_endisan else np_uint16_view[0::2]\n )\n if to_type == \"BFLOAT16\":\n assert from_type == \"FLOAT\"\n input = np_fp32.reshape([3, 4])\n output = np_bfp16.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), output.shape\n )\n else:\n assert to_type == \"FLOAT\"\n input = np_bfp16.reshape([3, 4])\n # convert bfloat to FLOAT\n np_fp32_zeros = np.zeros((len(np_bfp16) * 2,), dtype=np.uint16)\n if little_endisan:\n np_fp32_zeros[1::2] = np_bfp16\n else:\n np_fp32_zeros[0::2] = np_bfp16\n np_fp32_from_bfloat = np_fp32_zeros.view(dtype=np.float32)\n output = np_fp32_from_bfloat.reshape([3, 4])\n input_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.BFLOAT16), input.shape\n )\n output_type_proto = onnx.helper.make_tensor_type_proto(\n int(TensorProto.FLOAT), output.shape\n )\n like = output.flatten()[0:1]\n elif from_type in (\n \"FLOAT8E4M3FN\",\n \"FLOAT8E4M3FNUZ\",\n \"FLOAT8E5M2\",\n \"FLOAT8E5M2FNUZ\",\n ) or to_type in (\n \"FLOAT8E4M3FN\",\n \"FLOAT8E4M3FNUZ\",\n \"FLOAT8E5M2\",\n \"FLOAT8E5M2FNUZ\",\n ):\n np_fp32 = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.float32,\n )\n if to_type == \"FLOAT8E4M3FN\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32)\n )\n expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FN, [1], expected[:1]\n 
)\n elif to_type == \"FLOAT8E4M3FNUZ\":\n expected = float8e4m3_to_float32(\n vect_float32_to_float8e4m3(np_fp32, uz=True), uz=True\n )\n expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E4M3FNUZ, [1], expected[:1]\n )\n elif to_type == \"FLOAT8E5M2\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32)\n )\n expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2, [1], expected[:1]\n )\n elif to_type == \"FLOAT8E5M2FNUZ\":\n expected = float8e5m2_to_float32(\n vect_float32_to_float8e5m2(np_fp32, fn=True, uz=True),\n fn=True,\n uz=True,\n )\n expected_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [3, 4], expected.tolist()\n )\n like_tensor = make_tensor(\n \"x\", TensorProto.FLOAT8E5M2FNUZ, [1], expected[:1]\n )\n if from_type == \"FLOAT\":\n input = np_fp32.reshape((3, 4))\n output = expected_tensor\n like = like_tensor\n else:\n assert to_type == \"FLOAT\"\n input = expected_tensor\n output = expected.reshape((3, 4))\n like = output.flatten()[:1]\n elif from_type != \"STRING\":\n input = np.random.random_sample(shape).astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, from_type))\n )\n if to_type == \"STRING\":\n # Converting input to str, then give it object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode(\"utf-8\")\n su = s.decode(\"utf-8\")\n ss.append(su)\n\n output = np.array(ss).astype(object).reshape([3, 4])\n else:\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n like = output.flatten()[0:1]\n else:\n input = np.array(\n [\n \"0.47892547\",\n \"0.48033667\",\n \"0.49968487\",\n \"0.81910545\",\n \"0.47031248\",\n \"0.816468\",\n \"0.21087195\",\n \"0.7229038\",\n \"NaN\",\n \"INF\",\n \"+INF\",\n \"-INF\",\n ],\n dtype=np.dtype(object),\n ).reshape([3, 4])\n output = input.astype(\n helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type))\n )\n like = output.flatten()[0:1]\n node = onnx.helper.make_node(\n \"CastLike\",\n inputs=[\"input\", \"like\"],\n outputs=[\"output\"],\n )\n if input_type_proto and output_type_proto:\n like_type_proto = onnx.helper.make_tensor_type_proto(\n output_type_proto.tensor_type.elem_type, like.shape\n )\n\n expect(\n node,\n inputs=[input, like],\n outputs=[output],\n name=\"test_castlike_\" + from_type + \"_to_\" + to_type,\n input_type_protos=[input_type_proto, like_type_proto],\n output_type_protos=[output_type_proto],\n )\n else:\n expect(\n node,\n inputs=[input, like],\n outputs=[output],\n name=\"test_castlike_\" + from_type + \"_to_\" + to_type,\n )" + } + ] + }, + { + "name": "CastMap", + "module": "ai.onnx.ml", + "version": 1, + "description": "Converts a map to a tensor.
The map key must be an int64 and the values will be ordered\n in ascending order based on this key.
The operator supports dense packing or sparse packing.\n If using sparse packing, the key cannot exceed max_map - 1.\n", + "attributes": [ + { + "name": "cast_to", + "type": "string", + "required": false, + "default": "TO_FLOAT", + "description": "A string indicating the desired element type of the output tensor, one of 'TO_FLOAT', 'TO_STRING', 'TO_INT64'." + }, + { + "name": "map_form", + "type": "string", + "required": false, + "default": "DENSE", + "description": "Indicates whether to only output as many values as are in the input (dense), or position the input based on using the key of the map as the index of the output (sparse). 
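A plain-Python emulation of the two packing modes (the cast_map helper and its defaults are illustrative, not the runtime API):

```
def cast_map(pairs, map_form="DENSE", max_map=1):
    # pairs: a map with int64 keys; output values are ordered by ascending key.
    keys = sorted(pairs)
    if map_form == "DENSE":
        return [pairs[k] for k in keys]
    # SPARSE: the key becomes the output index, so every key must be < max_map.
    out = [0.0] * max_map
    for k in keys:
        out[k] = pairs[k]
    return out

cast_map({5: 1.0, 2: 3.0})                        # [3.0, 1.0]
cast_map({0: 1.0, 3: 2.0}, "SPARSE", max_map=5)   # [1.0, 0.0, 0.0, 2.0, 0.0]
```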
One of 'DENSE', 'SPARSE'." + }, + { + "name": "max_map", + "type": "int64", + "required": false, + "default": 1, + "description": "If the value of map_form is 'SPARSE,' this attribute indicates the total length of the output tensor." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "The input map that is to be cast to a tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "A tensor representing the same data as the input map, ordered by their keys" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input must be an integer map to either string or float.", + "type_param_str": "T1", + "allowed_type_strs": [ + "map(int64, string)", + "map(int64, float)" + ] + }, + { + "description": "The output is a 1-D tensor of string, float, or integer.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(string)", + "tensor(float)", + "tensor(int64)" + ] + } + ] + }, + { + "name": "CategoryMapper", + "module": "ai.onnx.ml", + "version": 1, + "description": "Converts strings to integers and vice versa.
\n Two sequences of equal length are used to map between integers and strings,\n with strings and integers at the same index detailing the mapping.
\n The operator converts either integers to strings or strings to integers, depending\n on which default value attribute is provided. Only one default value attribute\n should be defined.
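The mapping behavior can be emulated in plain Python (category_mapper here is an illustrative helper, not the runtime API):

```
def category_mapper(values, cats_strings, cats_int64s,
                    default_int64=-1, default_string="_Unused"):
    # String inputs map to integers; integer inputs map to strings.
    if all(isinstance(v, str) for v in values):
        table = dict(zip(cats_strings, cats_int64s))
        return [table.get(v, default_int64) for v in values]
    table = dict(zip(cats_int64s, cats_strings))
    return [table.get(v, default_string) for v in values]

category_mapper(["cat", "dog", "fish"], ["cat", "dog"], [0, 1])  # [0, 1, -1]
category_mapper([1, 0, 9], ["cat", "dog"], [0, 1])  # ['dog', 'cat', '_Unused']
```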
\n If the string default value is set, it will convert integers to strings.\n If the int default value is set, it will convert strings to integers.\n", + "attributes": [ + { + "name": "cats_int64s", + "type": "int64[]", + "required": false, + "description": "The integers of the map. This sequence must be the same length as the 'cats_strings' sequence." + }, + { + "name": "cats_strings", + "type": "string[]", + "required": false, + "description": "The strings of the map. This sequence must be the same length as the 'cats_int64s' sequence" + }, + { + "name": "default_int64", + "type": "int64", + "required": false, + "default": -1, + "description": "An integer to use when an input string value is not found in the map.
One and only one of the 'default_*' attributes must be defined." + }, + { + "name": "default_string", + "type": "string", + "required": false, + "default": "_Unused", + "description": "A string to use when an input integer value is not found in the map.
One and only one of the 'default_*' attributes must be defined." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input data" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "Output data. If strings are input, the output values are integers, and vice versa." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input must be a tensor of strings or integers, either [N,C] or [C].", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ] + }, + { + "description": "The output is a tensor of strings or integers. Its shape will be the same as the input shape.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ] + } + ] + }, + { + "name": "Ceil", + "module": "ai.onnx", + "version": 1, + "description": "Ceil takes one input data (Tensor) and produces one output data\n(Tensor) where the ceil is, y = ceil(x), is applied to\nthe tensor elementwise.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "ceil", + "code": "node = onnx.helper.make_node(\n \"Ceil\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.5, 1.2]).astype(np.float32)\ny = np.ceil(x) # expected output [-1., 2.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_ceil_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.ceil(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_ceil\")" + } + ] + }, + { + "name": "Ceil", + "module": "ai.onnx", + "version": 6, + "description": "Ceil takes one input data (Tensor) and produces one output data\n(Tensor) where the ceil is, y = ceil(x), is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "ceil", + "code": "node = onnx.helper.make_node(\n \"Ceil\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.5, 1.2]).astype(np.float32)\ny = np.ceil(x) # expected output [-1., 2.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_ceil_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.ceil(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_ceil\")" + } + ] + }, + { + "name": "Ceil", + "module": "ai.onnx", + "version": 13, + "description": "Ceil takes one input data (Tensor) and produces one output data\n(Tensor) where the ceil is, y = ceil(x), is applied to\nthe tensor elementwise. 
If x is integral, +0, -0, NaN, or infinite, x itself is returned.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "ceil", + "code": "node = onnx.helper.make_node(\n \"Ceil\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.5, 1.2]).astype(np.float32)\ny = np.ceil(x) # expected output [-1., 2.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_ceil_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.ceil(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_ceil\")" + } + ] + }, + { + "name": "Celu", + "module": "ai.onnx", + "version": 12, + "description": "Continuously Differentiable Exponential Linear Units:\nPerform the linear unit element-wise on the input tensor X\nusing formula:\n\n```\nmax(0,x) + min(0,alpha*(exp(x/alpha)-1))\n```\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.0, + "description": "The Alpha value in Celu formula which control the shape of the unit. The default value is 1.0." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float32 tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)" + ] + } + ], + "examples": [ + { + "summary": "celu", + "code": "alpha = 2.0\nnode = onnx.helper.make_node(\n \"Celu\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n alpha=alpha,\n)\n\ninput_data = np.array(\n [\n [\n [[0.8439683], [0.5665144], [0.05836735]],\n [[0.02916367], [0.12964272], [0.5060197]],\n [[0.79538304], [0.9411346], [0.9546573]],\n ],\n [\n [[0.17730942], [0.46192095], [0.26480448]],\n [[0.6746842], [0.01665257], [0.62473077]],\n [[0.9240844], [0.9722341], [0.11965699]],\n ],\n [\n [[0.41356155], [0.9129373], [0.59330076]],\n [[0.81929934], [0.7862604], [0.11799799]],\n [[0.69248444], [0.54119414], [0.07513223]],\n ],\n ],\n dtype=np.float32,\n)\n\n# Calculate expected output data\npositive_input = np.maximum(0, input_data)\nnegative_input = np.minimum(0, alpha * (np.exp(input_data / alpha) - 1))\nexpected_output = positive_input + negative_input\n\nexpect(node, inputs=[input_data], outputs=[expected_output], name=\"test_celu\")" + } + ] + }, + { + "name": "CenterCropPad", + "module": "ai.onnx", + "version": 18, + "description": "Center crop or pad an input to given dimensions.\n\nThe crop/pad dimensions can be specified for a subset of the `axes`. 
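The centering rule (crop window starts at floor((size - target) / 2), and for uneven padding the extra element goes at the end) can be written as a small NumPy helper consistent with the examples below; center_crop_pad is an illustrative sketch, not the runtime implementation:

```
import numpy as np

def center_crop_pad(x, shape):
    # Crop a centered window where the input is larger than the target;
    # pad evenly (extra element at the end) where it is smaller.
    out = x
    for axis, target in enumerate(shape):
        size = out.shape[axis]
        if size >= target:
            start = (size - target) // 2
            idx = [slice(None)] * out.ndim
            idx[axis] = slice(start, start + target)
            out = out[tuple(idx)]
        else:
            before = (target - size) // 2
            pads = [(0, 0)] * out.ndim
            pads[axis] = (before, target - size - before)
            out = np.pad(out, pads)
    return out

x = np.random.randn(20, 10, 3).astype(np.float32)
center_crop_pad(x, (10, 7, 3))   # equals x[5:15, 1:8, :]
```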
Non-specified dimensions will not be\ncropped or padded.\n\nIf the input dimensions are bigger than the crop shape, a centered cropping window is extracted from the input.\nIf the input dimensions are smaller than the crop shape, the input is padded on each side equally,\nso that the input is centered in the output.\n", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "If provided, it specifies a subset of axes that 'shape' refer to. If not provided, all axes are assumed [0, 1, ..., r-1], where r = rank(data). Negative value means counting dimensions from the back. Accepted range is [-r, r-1], where r = rank(data). Behavior is undefined if an axis is repeated." + } + ], + "inputs": [ + { + "name": "input_data", + "type": "T", + "description": "Input to extract the centered crop from." + }, + { + "name": "shape", + "type": "Tind", + "description": "1-D tensor representing the cropping window dimensions." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output_data", + "type": "T", + "description": "Output data." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "center_crop_pad_crop", + "code": "node = onnx.helper.make_node(\n \"CenterCropPad\",\n inputs=[\"x\", \"shape\"],\n outputs=[\"y\"],\n)\n\n# First dim is even diff, second is uneven\nx = np.random.randn(20, 10, 3).astype(np.float32)\nshape = np.array([10, 7, 3], dtype=np.int64)\ny = x[5:15, 1:8, :]\n\nexpect(node, inputs=[x, shape], outputs=[y], name=\"test_center_crop_pad_crop\")" + }, + { + "summary": "center_crop_pad_crop_and_pad", + "code": "node = onnx.helper.make_node(\n \"CenterCropPad\",\n inputs=[\"x\", \"shape\"],\n outputs=[\"y\"],\n)\n\n# Cropping on first dim, padding on second, third stays the same\nx = np.random.randn(20, 8, 3).astype(np.float32)\nshape = np.array([10, 10, 3], dtype=np.int64)\ny = np.zeros([10, 10, 3], dtype=np.float32)\ny[:, 1:9, :] = x[5:15, :, :]\n\nexpect(\n node,\n inputs=[x, shape],\n outputs=[y],\n name=\"test_center_crop_pad_crop_and_pad\",\n)" + }, + { + "summary": "center_crop_pad_crop_axes_chw", + "code": "node = onnx.helper.make_node(\n \"CenterCropPad\",\n inputs=[\"x\", \"shape\"],\n outputs=[\"y\"],\n axes=[1, 2],\n)\n\n# Cropping on second dim, padding on third, first stays the same\nx = np.random.randn(3, 20, 8).astype(np.float32)\nshape = np.array([10, 9], dtype=np.int64)\ny = np.zeros([3, 10, 9], dtype=np.float32)\ny[:, :, :8] = x[:, 5:15, :]\n\nexpect(\n node,\n inputs=[x, shape],\n outputs=[y],\n name=\"test_center_crop_pad_crop_axes_chw\",\n)" + }, + { + "summary": "center_crop_pad_crop_axes_hwc", + "code": "node = onnx.helper.make_node(\n \"CenterCropPad\",\n inputs=[\"x\", \"shape\"],\n outputs=[\"y\"],\n axes=[0, 1],\n)\n\n# Cropping on first dim, padding on second, third stays the same\nx = np.random.randn(20, 8, 3).astype(np.float32)\nshape 
= np.array([10, 9], dtype=np.int64)\ny = np.zeros([10, 9, 3], dtype=np.float32)\ny[:, :8, :] = x[5:15, :, :]\n\nexpect(\n node,\n inputs=[x, shape],\n outputs=[y],\n name=\"test_center_crop_pad_crop_axes_hwc\",\n)" + }, + { + "summary": "center_crop_pad_crop_negative_axes_hwc", + "code": "node = onnx.helper.make_node(\n \"CenterCropPad\",\n inputs=[\"x\", \"shape\"],\n outputs=[\"y\"],\n axes=[-3, -2],\n)\n\n# Cropping on first dim, padding on second, third stays the same\nx = np.random.randn(20, 8, 3).astype(np.float32)\nshape = np.array([10, 9], dtype=np.int64)\ny = np.zeros([10, 9, 3], dtype=np.float32)\ny[:, :8, :] = x[5:15, :, :]\n\nexpect(\n node,\n inputs=[x, shape],\n outputs=[y],\n name=\"test_center_crop_pad_crop_negative_axes_hwc\",\n)" + }, + { + "summary": "center_crop_pad_pad", + "code": "node = onnx.helper.make_node(\n \"CenterCropPad\",\n inputs=[\"x\", \"shape\"],\n outputs=[\"y\"],\n)\n\n# First dim is even diff, second is uneven\nx = np.random.randn(10, 7, 3).astype(np.float32)\nshape = np.array([20, 10, 3], dtype=np.int64)\ny = np.zeros([20, 10, 3], dtype=np.float32)\ny[5:15, 1:8, :] = x\n\nexpect(node, inputs=[x, shape], outputs=[y], name=\"test_center_crop_pad_pad\")" + } + ] + }, + { + "name": "Clip", + "module": "ai.onnx", + "version": 1, + "description": "Clip operator limits the given input within an interval. The interval is\nspecified with arguments 'min' and 'max'. They default to\nnumeric_limits::lowest() and numeric_limits::max() respectively.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + }, + { + "name": "max", + "type": "float32", + "required": false, + "description": "Maximum value, above which element is replaced by max" + }, + { + "name": "min", + "type": "float32", + "required": false, + "description": "Minimum value, under which element is replaced by min" + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor whose elements to be clipped" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor with clipped input elements" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "clip", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\", \"max\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-2, 0, 2]).astype(np.float32)\nmin_val = np.float32(-1)\nmax_val = np.float32(1)\ny = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.]\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_example\"\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, max_val)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip\")\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\", \"max\"],\n outputs=[\"y\"],\n)\n\nmin_val = np.float32(-5)\nmax_val = np.float32(5)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_inbounds\"\n)\n\nx = np.array([-6, 0, 6]).astype(np.float32)\ny = np.array([-5, 0, 5]).astype(np.float32)\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], 
name=\"test_clip_outbounds\"\n)\n\nx = np.array([-1, 0, 6]).astype(np.float32)\ny = np.array([-1, 0, 5]).astype(np.float32)\nexpect(\n node,\n inputs=[x, min_val, max_val],\n outputs=[y],\n name=\"test_clip_splitbounds\",\n)" + }, + { + "summary": "clip_default", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\"],\n outputs=[\"y\"],\n)\nmin_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, np.inf)\nexpect(node, inputs=[x, min_val], outputs=[y], name=\"test_clip_default_min\")\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, \"max\"],\n outputs=[\"y\"],\n)\nmax_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, -np.inf, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y], name=\"test_clip_default_max\")\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, no_max],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_clip_default_inbounds\")" + }, + { + "summary": "clip_default_int8", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\"],\n outputs=[\"y\"],\n)\nmin_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, min_val, np.iinfo(np.int8).max)\nexpect(\n node, inputs=[x, min_val], outputs=[y], name=\"test_clip_default_int8_min\"\n)\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, \"max\"],\n outputs=[\"y\"],\n)\nmax_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, np.iinfo(np.int8).min, max_val)\nexpect(\n node, inputs=[x, max_val], outputs=[y], name=\"test_clip_default_int8_max\"\n)\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, no_max],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.int8)\ny = np.array([-1, 0, 1]).astype(np.int8)\nexpect(node, inputs=[x], outputs=[y], name=\"test_clip_default_int8_inbounds\")" + } + ], + "category": "Activation" + }, + { + "name": "Clip", + "module": "ai.onnx", + "version": 6, + "description": "Clip operator limits the given input within an interval. The interval is\nspecified with arguments 'min' and 'max'. 
They default to\nnumeric_limits::lowest() and numeric_limits::max() respectively.\n", + "attributes": [ + { + "name": "max", + "type": "float32", + "required": false, + "default": 3.4028234663852886e+38, + "description": "Maximum value, above which element is replaced by max" + }, + { + "name": "min", + "type": "float32", + "required": false, + "default": -3.4028234663852886e+38, + "description": "Minimum value, under which element is replaced by min" + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor whose elements to be clipped" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor with clipped input elements" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "clip", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\", \"max\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-2, 0, 2]).astype(np.float32)\nmin_val = np.float32(-1)\nmax_val = np.float32(1)\ny = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.]\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_example\"\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, max_val)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip\")\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\", \"max\"],\n outputs=[\"y\"],\n)\n\nmin_val = np.float32(-5)\nmax_val = np.float32(5)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_inbounds\"\n)\n\nx = np.array([-6, 0, 6]).astype(np.float32)\ny = np.array([-5, 0, 5]).astype(np.float32)\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_outbounds\"\n)\n\nx = np.array([-1, 0, 6]).astype(np.float32)\ny = np.array([-1, 0, 5]).astype(np.float32)\nexpect(\n node,\n inputs=[x, min_val, max_val],\n outputs=[y],\n name=\"test_clip_splitbounds\",\n)" + }, + { + "summary": "clip_default", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\"],\n outputs=[\"y\"],\n)\nmin_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, np.inf)\nexpect(node, inputs=[x, min_val], outputs=[y], name=\"test_clip_default_min\")\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, \"max\"],\n outputs=[\"y\"],\n)\nmax_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, -np.inf, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y], name=\"test_clip_default_max\")\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, no_max],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_clip_default_inbounds\")" + }, + { + "summary": "clip_default_int8", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\"],\n outputs=[\"y\"],\n)\nmin_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, min_val, np.iinfo(np.int8).max)\nexpect(\n node, 
inputs=[x, min_val], outputs=[y], name=\"test_clip_default_int8_min\"\n)\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, \"max\"],\n outputs=[\"y\"],\n)\nmax_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, np.iinfo(np.int8).min, max_val)\nexpect(\n node, inputs=[x, max_val], outputs=[y], name=\"test_clip_default_int8_max\"\n)\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, no_max],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.int8)\ny = np.array([-1, 0, 1]).astype(np.int8)\nexpect(node, inputs=[x], outputs=[y], name=\"test_clip_default_int8_inbounds\")" + } + ], + "category": "Activation" + }, + { + "name": "Clip", + "module": "ai.onnx", + "version": 11, + "description": "Clip operator limits the given input within an interval. The interval is\nspecified by the inputs 'min' and 'max'. They default to\nnumeric_limits::lowest() and numeric_limits::max(), respectively.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor whose elements to be clipped" + }, + { + "name": "min", + "type": "T", + "option": "optional", + "description": "Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape)." + }, + { + "name": "max", + "type": "T", + "option": "optional", + "description": "Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape)." + } + ], + "min_input": 1, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor with clipped input elements" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "clip", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\", \"max\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-2, 0, 2]).astype(np.float32)\nmin_val = np.float32(-1)\nmax_val = np.float32(1)\ny = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.]\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_example\"\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, max_val)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip\")\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\", \"max\"],\n outputs=[\"y\"],\n)\n\nmin_val = np.float32(-5)\nmax_val = np.float32(5)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_inbounds\"\n)\n\nx = np.array([-6, 0, 6]).astype(np.float32)\ny = np.array([-5, 0, 5]).astype(np.float32)\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_outbounds\"\n)\n\nx = np.array([-1, 0, 6]).astype(np.float32)\ny = np.array([-1, 0, 5]).astype(np.float32)\nexpect(\n node,\n inputs=[x, min_val, max_val],\n outputs=[y],\n name=\"test_clip_splitbounds\",\n)" + }, + { + "summary": "clip_default", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\"],\n outputs=[\"y\"],\n)\nmin_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, np.inf)\nexpect(node, inputs=[x, 
min_val], outputs=[y], name=\"test_clip_default_min\")\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, \"max\"],\n outputs=[\"y\"],\n)\nmax_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, -np.inf, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y], name=\"test_clip_default_max\")\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, no_max],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_clip_default_inbounds\")" + }, + { + "summary": "clip_default_int8", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\"],\n outputs=[\"y\"],\n)\nmin_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, min_val, np.iinfo(np.int8).max)\nexpect(\n node, inputs=[x, min_val], outputs=[y], name=\"test_clip_default_int8_min\"\n)\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, \"max\"],\n outputs=[\"y\"],\n)\nmax_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, np.iinfo(np.int8).min, max_val)\nexpect(\n node, inputs=[x, max_val], outputs=[y], name=\"test_clip_default_int8_max\"\n)\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, no_max],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.int8)\ny = np.array([-1, 0, 1]).astype(np.int8)\nexpect(node, inputs=[x], outputs=[y], name=\"test_clip_default_int8_inbounds\")" + } + ], + "category": "Activation" + }, + { + "name": "Clip", + "module": "ai.onnx", + "version": 12, + "description": "Clip operator limits the given input within an interval. The interval is\nspecified by the inputs 'min' and 'max'. They default to\nnumeric_limits::lowest() and numeric_limits::max(), respectively.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor whose elements to be clipped" + }, + { + "name": "min", + "type": "T", + "option": "optional", + "description": "Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape)." + }, + { + "name": "max", + "type": "T", + "option": "optional", + "description": "Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape)." 
+ } + ], + "min_input": 1, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor with clipped input elements" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "clip", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\", \"max\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-2, 0, 2]).astype(np.float32)\nmin_val = np.float32(-1)\nmax_val = np.float32(1)\ny = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.]\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_example\"\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, max_val)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip\")\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\", \"max\"],\n outputs=[\"y\"],\n)\n\nmin_val = np.float32(-5)\nmax_val = np.float32(5)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_inbounds\"\n)\n\nx = np.array([-6, 0, 6]).astype(np.float32)\ny = np.array([-5, 0, 5]).astype(np.float32)\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_outbounds\"\n)\n\nx = np.array([-1, 0, 6]).astype(np.float32)\ny = np.array([-1, 0, 5]).astype(np.float32)\nexpect(\n node,\n inputs=[x, min_val, max_val],\n outputs=[y],\n name=\"test_clip_splitbounds\",\n)" + }, + { + "summary": "clip_default", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\"],\n outputs=[\"y\"],\n)\nmin_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, np.inf)\nexpect(node, inputs=[x, min_val], outputs=[y], name=\"test_clip_default_min\")\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, \"max\"],\n outputs=[\"y\"],\n)\nmax_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, -np.inf, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y], name=\"test_clip_default_max\")\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, no_max],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_clip_default_inbounds\")" + }, + { + "summary": "clip_default_int8", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\"],\n outputs=[\"y\"],\n)\nmin_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, min_val, np.iinfo(np.int8).max)\nexpect(\n node, inputs=[x, min_val], outputs=[y], name=\"test_clip_default_int8_min\"\n)\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, \"max\"],\n outputs=[\"y\"],\n)\nmax_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, np.iinfo(np.int8).min, max_val)\nexpect(\n node, inputs=[x, max_val], outputs=[y], 
name=\"test_clip_default_int8_max\"\n)\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, no_max],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.int8)\ny = np.array([-1, 0, 1]).astype(np.int8)\nexpect(node, inputs=[x], outputs=[y], name=\"test_clip_default_int8_inbounds\")" + } + ], + "category": "Activation" + }, + { + "name": "Clip", + "module": "ai.onnx", + "version": 13, + "description": "Clip operator limits the given input within an interval. The interval is\nspecified by the inputs 'min' and 'max'. They default to\nnumeric_limits::lowest() and numeric_limits::max(), respectively.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor whose elements to be clipped" + }, + { + "name": "min", + "type": "T", + "option": "optional", + "description": "Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape)." + }, + { + "name": "max", + "type": "T", + "option": "optional", + "description": "Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape)." + } + ], + "min_input": 1, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor with clipped input elements" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "clip", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\", \"max\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-2, 0, 2]).astype(np.float32)\nmin_val = np.float32(-1)\nmax_val = np.float32(1)\ny = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.]\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_example\"\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, max_val)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip\")\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\", \"max\"],\n outputs=[\"y\"],\n)\n\nmin_val = np.float32(-5)\nmax_val = np.float32(5)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_inbounds\"\n)\n\nx = np.array([-6, 0, 6]).astype(np.float32)\ny = np.array([-5, 0, 5]).astype(np.float32)\nexpect(\n node, inputs=[x, min_val, max_val], outputs=[y], name=\"test_clip_outbounds\"\n)\n\nx = np.array([-1, 0, 6]).astype(np.float32)\ny = np.array([-1, 0, 5]).astype(np.float32)\nexpect(\n node,\n inputs=[x, min_val, max_val],\n outputs=[y],\n name=\"test_clip_splitbounds\",\n)" + }, + { + "summary": "clip_default", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\"],\n outputs=[\"y\"],\n)\nmin_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, np.inf)\nexpect(node, inputs=[x, min_val], outputs=[y], name=\"test_clip_default_min\")\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, \"max\"],\n 
outputs=[\"y\"],\n)\nmax_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, -np.inf, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y], name=\"test_clip_default_max\")\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, no_max],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_clip_default_inbounds\")" + }, + { + "summary": "clip_default_int8", + "code": "node = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", \"min\"],\n outputs=[\"y\"],\n)\nmin_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, min_val, np.iinfo(np.int8).max)\nexpect(\n node, inputs=[x, min_val], outputs=[y], name=\"test_clip_default_int8_min\"\n)\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, \"max\"],\n outputs=[\"y\"],\n)\nmax_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, np.iinfo(np.int8).min, max_val)\nexpect(\n node, inputs=[x, max_val], outputs=[y], name=\"test_clip_default_int8_max\"\n)\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Clip\",\n inputs=[\"x\", no_min, no_max],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.int8)\ny = np.array([-1, 0, 1]).astype(np.int8)\nexpect(node, inputs=[x], outputs=[y], name=\"test_clip_default_int8_inbounds\")" + } + ], + "category": "Activation" + }, + { + "name": "Col2Im", + "module": "ai.onnx", + "version": 18, + "description": "The operator rearranges column blocks back into a multidimensional image\n\nCol2Im behaves similarly to PyTorch's fold https://pytorch.org/docs/stable/generated/torch.nn.Fold.html,\nbut it only supports *batched* multi-dimensional image tensors.\nAnother implementation in Python with N-dimension support can be found at https://github.com/f-dangel/unfoldNd/.\n\nNOTE:\n Although specifying image_shape looks redundant because it could be calculated from\n convolution formulas, it is required as input for more advanced scenarios as explained\n at PyTorch's implementation (https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Col2Im.cpp#L10)\n", + "attributes": [ + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "1-dimensional tensor with dilation value along each spatial axis of the image. If not present, the dilation defaults to 1 along each spatial axis of the image." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "1-dimensional tensor with padding value for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "1-dimensional tensor with stride value along each spatial axis. If not present, the stride defaults to 1 along each spatial axis." 
+ } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input data tensor to be rearranged from column blocks back into an image. This is a 3-dimensional tensor containing [N, C * n-ary-product(block_shape), L], where N is batch dimension, C is image channel dimension and L is number of blocks.The blocks are enumerated in increasing lexicographic-order of their indices.For example, with an image-size 10*20 and block-size 9*18, there would be 2*3 blocks, enumerated in the order block(0, 0), block(0, 1), block(0, 2), block(1, 0), block(1, 1), block(1, 2)." + }, + { + "name": "image_shape", + "type": "tensor(int64)", + "description": "The shape of the spatial dimensions of the image after rearranging the column blocks.This is a 1-dimensional tensor with size of at least 2, containing the value [H_img, W_img] for a 2-D image or [dim_i1, dim_i2, ..., dim_iN] for a N-D image." + }, + { + "name": "block_shape", + "type": "tensor(int64)", + "description": "The shape of the block to apply on the input.This is a 1-dimensional tensor of size of at least 2, containing the value [H_block, W_block] for a 2-D image or [dim_b1, dim_b2, ..., dim_bN] for a N-D block.This is the block-shape before dilation is applied to it." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor produced by rearranging blocks into an image." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "col2im", + "code": "input = np.array(\n [\n [\n [1.0, 6.0, 11.0, 16.0, 21.0], # (1, 5, 5)\n [2.0, 7.0, 12.0, 17.0, 22.0],\n [3.0, 8.0, 13.0, 18.0, 23.0],\n [4.0, 9.0, 14.0, 19.0, 24.0],\n [5.0, 0.0, 15.0, 20.0, 25.0],\n ]\n ]\n).astype(np.float32)\n\nimage_shape = np.array([5, 5]).astype(np.int64)\nblock_shape = np.array([1, 5]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Col2Im\", [\"input\", \"image_shape\", \"block_shape\"], [\"output\"]\n)\n\noutput = np.array(\n [\n [\n [\n [1.0, 2.0, 3.0, 4.0, 5.0], # (1, 1, 5, 5)\n [6.0, 7.0, 8.0, 9.0, 0.0],\n [11.0, 12.0, 13.0, 14.0, 15.0],\n [16.0, 17.0, 18.0, 19.0, 20.0],\n [21.0, 22.0, 23.0, 24.0, 25.0],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[input, image_shape, block_shape],\n outputs=[output],\n name=\"test_col2im\",\n)" + }, + { + "summary": "col2im_5d", + "code": "input = np.array(\n [\n [\n [1, 6, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56], # (1, 10, 12)\n [2, 7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57],\n [3, 8, 13, 18, 23, 28, 33, 38, 43, 48, 53, 58],\n [4, 9, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59],\n [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60],\n [61, 66, 71, 76, 81, 86, 91, 96, 101, 106, 111, 116],\n [62, 67, 72, 77, 82, 87, 92, 97, 102, 107, 112, 117],\n [63, 68, 73, 78, 83, 88, 93, 98, 103, 108, 113, 118],\n [64, 69, 74, 79, 84, 89, 94, 99, 104, 109, 114, 119],\n [65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115, 120],\n ]\n ]\n).astype(np.float32)\nimage_shape = np.array([3, 4, 5]).astype(np.int64)\nblock_shape = np.array([1, 1, 
5]).astype(np.int64)\n\noutput = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4, 5], # (1, 2, 3, 4, 5)\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n ],\n [\n [21, 22, 23, 24, 25],\n [26, 27, 28, 29, 30],\n [31, 32, 33, 34, 35],\n [36, 37, 38, 39, 40],\n ],\n [\n [41, 42, 43, 44, 45],\n [46, 47, 48, 49, 50],\n [51, 52, 53, 54, 55],\n [56, 57, 58, 59, 60],\n ],\n ],\n [\n [\n [61, 62, 63, 64, 65],\n [66, 67, 68, 69, 70],\n [71, 72, 73, 74, 75],\n [76, 77, 78, 79, 80],\n ],\n [\n [81, 82, 83, 84, 85],\n [86, 87, 88, 89, 90],\n [91, 92, 93, 94, 95],\n [96, 97, 98, 99, 100],\n ],\n [\n [101, 102, 103, 104, 105],\n [106, 107, 108, 109, 110],\n [111, 112, 113, 114, 115],\n [116, 117, 118, 119, 120],\n ],\n ],\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Col2Im\", [\"input\", \"image_shape\", \"block_shape\"], [\"output\"]\n)\nexpect(\n node,\n inputs=[input, image_shape, block_shape],\n outputs=[output],\n name=\"test_col2im_5d\",\n)" + }, + { + "summary": "col2im_dilations", + "code": "input = np.array(\n [\n [\n [1.0, 5.0, 9.0, 13.0, 17], # (1, 4, 5)\n [2.0, 6.0, 10.0, 14.0, 18],\n [3.0, 7.0, 11.0, 15.0, 19],\n [4.0, 8.0, 12.0, 16.0, 20],\n ]\n ]\n).astype(np.float32)\nimage_shape = np.array([6, 6]).astype(np.int64)\nblock_shape = np.array([2, 2]).astype(np.int64)\n\noutput = np.array(\n [\n [\n [\n [1.0, 0.0, 0.0, 0.0, 0.0, 2.0], # (1, 1, 6, 6)\n [8.0, 0.0, 0.0, 0.0, 0.0, 10.0],\n [16.0, 0.0, 0.0, 0.0, 0.0, 18.0],\n [24.0, 0.0, 0.0, 0.0, 0.0, 26.0],\n [32.0, 0.0, 0.0, 0.0, 0.0, 34.0],\n [19.0, 0.0, 0.0, 0.0, 0.0, 20.0],\n ]\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Col2Im\",\n [\"input\", \"image_shape\", \"block_shape\"],\n [\"output\"],\n dilations=[1, 5],\n)\nexpect(\n node,\n inputs=[input, image_shape, block_shape],\n outputs=[output],\n name=\"test_col2im_dilations\",\n)" + }, + { + "summary": "col2im_pads", + "code": "input = np.array(\n [\n [\n [\n 1.0,\n 6.0,\n 11.0,\n 16.0,\n 21.0,\n 26,\n 31,\n 36,\n 41,\n 46,\n 51,\n 56,\n 61,\n 66,\n 71,\n ], # (1, 5, 15)\n [\n 2.0,\n 7.0,\n 12.0,\n 17.0,\n 22.0,\n 27,\n 32,\n 37,\n 42,\n 47,\n 52,\n 57,\n 62,\n 67,\n 72,\n ],\n [\n 3.0,\n 8.0,\n 13.0,\n 18.0,\n 23.0,\n 28,\n 33,\n 38,\n 43,\n 48,\n 53,\n 58,\n 63,\n 68,\n 73,\n ],\n [\n 4.0,\n 9.0,\n 14.0,\n 19.0,\n 24.0,\n 29,\n 34,\n 39,\n 44,\n 49,\n 54,\n 59,\n 64,\n 69,\n 74,\n ],\n [\n 5.0,\n 10.0,\n 15.0,\n 20.0,\n 25.0,\n 30,\n 35,\n 40,\n 45,\n 50,\n 55,\n 60,\n 65,\n 70,\n 75,\n ],\n ]\n ]\n).astype(np.float32)\nimage_shape = np.array([5, 5]).astype(np.int64)\nblock_shape = np.array([1, 5]).astype(np.int64)\n\noutput = np.array(\n [\n [\n [\n [8.0, 21.0, 24.0, 27.0, 24.0], # (1, 1, 5, 5)\n [38.0, 66.0, 69.0, 72.0, 54.0],\n [68.0, 111.0, 114.0, 117.0, 84.0],\n [98.0, 156.0, 159.0, 162.0, 114.0],\n [128.0, 201.0, 204.0, 207.0, 144.0],\n ]\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Col2Im\",\n [\"input\", \"image_shape\", \"block_shape\"],\n [\"output\"],\n pads=[0, 1, 0, 1],\n)\nexpect(\n node,\n inputs=[input, image_shape, block_shape],\n outputs=[output],\n name=\"test_col2im_pads\",\n)" + }, + { + "summary": "col2im_strides", + "code": "input = np.array(\n [\n [\n [0.0, 0.0, 0.0, 0.0], # (1, 9, 4)\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0],\n ]\n ]\n).astype(np.float32)\nimage_shape = np.array([5, 5]).astype(np.int64)\nblock_shape = np.array([3, 
3]).astype(np.int64)\n\noutput = np.array(\n    [\n        [\n            [\n                [0.0, 1.0, 1.0, 1.0, 1.0],  # (1, 1, 5, 5)\n                [1.0, 0.0, 1.0, 0.0, 0.0],\n                [0.0, 2.0, 1.0, 2.0, 1.0],\n                [1.0, 0.0, 1.0, 0.0, 0.0],\n                [0.0, 1.0, 0.0, 1.0, 0.0],\n            ]\n        ]\n    ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n    \"Col2Im\",\n    [\"input\", \"image_shape\", \"block_shape\"],\n    [\"output\"],\n    strides=[2, 2],\n)\nexpect(\n    node,\n    inputs=[input, image_shape, block_shape],\n    outputs=[output],\n    name=\"test_col2im_strides\",\n)" + } + ] + }, + { + "name": "Compress", + "module": "ai.onnx", + "version": 9, + "description": "Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index.\n    In case axis is not provided, input is flattened before elements are selected.\n    Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html\n    ", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "(Optional) Axis along which to take slices. If not specified, input is flattened before elements being selected." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "condition", + "type": "T1", + "description": "Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length along the axis or the flattened input size if axis is not specified. In such cases data slices or elements exceeding the condition length are discarded." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r if axis is specified. Otherwise output is a Tensor of rank 1." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain to boolean tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "compress_0", + "code": "node = onnx.helper.make_node(\n    \"Compress\",\n    inputs=[\"input\", \"condition\"],\n    outputs=[\"output\"],\n    axis=0,\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1, 1])\noutput = np.compress(condition, input, axis=0)\n# print(output)\n# [[ 3.  4.]\n#  [ 5.  
6.]]\n\nexpect(\n node,\n inputs=[input, condition.astype(bool)],\n outputs=[output],\n name=\"test_compress_0\",\n)" + }, + { + "summary": "compress_1", + "code": "node = onnx.helper.make_node(\n \"Compress\",\n inputs=[\"input\", \"condition\"],\n outputs=[\"output\"],\n axis=1,\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1])\noutput = np.compress(condition, input, axis=1)\n# print(output)\n# [[ 2.]\n# [ 4.]\n# [ 6.]]\n\nexpect(\n node,\n inputs=[input, condition.astype(bool)],\n outputs=[output],\n name=\"test_compress_1\",\n)" + }, + { + "summary": "compress_default_axis", + "code": "node = onnx.helper.make_node(\n \"Compress\",\n inputs=[\"input\", \"condition\"],\n outputs=[\"output\"],\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1, 0, 0, 1])\noutput = np.compress(condition, input)\n# print(output)\n# [ 2., 5.]\n\nexpect(\n node,\n inputs=[input, condition.astype(bool)],\n outputs=[output],\n name=\"test_compress_default_axis\",\n)" + }, + { + "summary": "compress_negative_axis", + "code": "node = onnx.helper.make_node(\n \"Compress\",\n inputs=[\"input\", \"condition\"],\n outputs=[\"output\"],\n axis=-1,\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1])\noutput = np.compress(condition, input, axis=-1)\n# print(output)\n# [[ 2.]\n# [ 4.]\n# [ 6.]]\nexpect(\n node,\n inputs=[input, condition.astype(bool)],\n outputs=[output],\n name=\"test_compress_negative_axis\",\n)" + } + ] + }, + { + "name": "Compress", + "module": "ai.onnx", + "version": 11, + "description": "Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index.\n In case axis is not provided, input is flattened before elements are selected.\n Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html\n ", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "(Optional) Axis along which to take slices. If not specified, input is flattened before elements being selected. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "condition", + "type": "T1", + "description": "Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length along the axis or the flattened input size if axis is not specified. In such cases data slices or elements exceeding the condition length are discarded." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r if axis is specified. Otherwise output is a Tensor of rank 1." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain to boolean tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "compress_0", + "code": "node = onnx.helper.make_node(\n \"Compress\",\n inputs=[\"input\", \"condition\"],\n outputs=[\"output\"],\n axis=0,\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1, 1])\noutput = np.compress(condition, input, axis=0)\n# print(output)\n# [[ 3. 4.]\n# [ 5. 6.]]\n\nexpect(\n node,\n inputs=[input, condition.astype(bool)],\n outputs=[output],\n name=\"test_compress_0\",\n)" + }, + { + "summary": "compress_1", + "code": "node = onnx.helper.make_node(\n \"Compress\",\n inputs=[\"input\", \"condition\"],\n outputs=[\"output\"],\n axis=1,\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1])\noutput = np.compress(condition, input, axis=1)\n# print(output)\n# [[ 2.]\n# [ 4.]\n# [ 6.]]\n\nexpect(\n node,\n inputs=[input, condition.astype(bool)],\n outputs=[output],\n name=\"test_compress_1\",\n)" + }, + { + "summary": "compress_default_axis", + "code": "node = onnx.helper.make_node(\n \"Compress\",\n inputs=[\"input\", \"condition\"],\n outputs=[\"output\"],\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1, 0, 0, 1])\noutput = np.compress(condition, input)\n# print(output)\n# [ 2., 5.]\n\nexpect(\n node,\n inputs=[input, condition.astype(bool)],\n outputs=[output],\n name=\"test_compress_default_axis\",\n)" + }, + { + "summary": "compress_negative_axis", + "code": "node = onnx.helper.make_node(\n \"Compress\",\n inputs=[\"input\", \"condition\"],\n outputs=[\"output\"],\n axis=-1,\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1])\noutput = np.compress(condition, input, axis=-1)\n# print(output)\n# [[ 2.]\n# [ 4.]\n# [ 6.]]\nexpect(\n node,\n inputs=[input, condition.astype(bool)],\n outputs=[output],\n name=\"test_compress_negative_axis\",\n)" + } + ] + }, + { + "name": "Concat", + "module": "ai.onnx", + "version": 1, + "description": "Concatenate a list of tensors into a single tensor", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to concat on. Default value is 1." 
+ } + ], + "inputs": [ + { + "name": "inputs", + "type": "T", + "list": true, + "description": "List of tensors for concatenation" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "concat_result", + "type": "T", + "description": "Concatenated tensor" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "concat", + "code": "test_cases: Dict[str, Sequence[Any]] = {\n \"1d\": ([1, 2], [3, 4]),\n \"2d\": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]),\n \"3d\": (\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\n [[[9, 10], [11, 12]], [[13, 14], [15, 16]]],\n ),\n}\n\nfor test_case, values_ in test_cases.items():\n values = [np.asarray(v, dtype=np.float32) for v in values_]\n for i in range(len(values[0].shape)):\n in_args = [\"value\" + str(k) for k in range(len(values))]\n node = onnx.helper.make_node(\n \"Concat\", inputs=list(in_args), outputs=[\"output\"], axis=i\n )\n output = np.concatenate(values, i)\n expect(\n node,\n inputs=list(values),\n outputs=[output],\n name=\"test_concat_\" + test_case + \"_axis_\" + str(i),\n )\n\n for i in range(-len(values[0].shape), 0):\n in_args = [\"value\" + str(k) for k in range(len(values))]\n node = onnx.helper.make_node(\n \"Concat\", inputs=list(in_args), outputs=[\"output\"], axis=i\n )\n output = np.concatenate(values, i)\n expect(\n node,\n inputs=list(values),\n outputs=[output],\n name=\"test_concat_\" + test_case + \"_axis_negative_\" + str(abs(i)),\n )" + } + ], + "category": "Tensor" + }, + { + "name": "Concat", + "module": "ai.onnx", + "version": 4, + "description": "Concatenate a list of tensors into a single tensor", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": true, + "description": "Which axis to concat on" + } + ], + "inputs": [ + { + "name": "inputs", + "type": "T", + "list": true, + "description": "List of tensors for concatenation" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "concat_result", + "type": "T", + "description": "Concatenated tensor" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "concat", + "code": "test_cases: Dict[str, Sequence[Any]] = {\n \"1d\": ([1, 2], [3, 4]),\n \"2d\": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]),\n \"3d\": (\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\n [[[9, 10], [11, 12]], [[13, 14], [15, 16]]],\n ),\n}\n\nfor test_case, values_ in test_cases.items():\n values = [np.asarray(v, dtype=np.float32) for v in values_]\n for i in range(len(values[0].shape)):\n in_args = [\"value\" + str(k) for k in range(len(values))]\n node = onnx.helper.make_node(\n \"Concat\", inputs=list(in_args), outputs=[\"output\"], axis=i\n )\n output = np.concatenate(values, i)\n expect(\n node,\n inputs=list(values),\n outputs=[output],\n name=\"test_concat_\" + test_case + \"_axis_\" + 
str(i),\n        )\n\n    for i in range(-len(values[0].shape), 0):\n        in_args = [\"value\" + str(k) for k in range(len(values))]\n        node = onnx.helper.make_node(\n            \"Concat\", inputs=list(in_args), outputs=[\"output\"], axis=i\n        )\n        output = np.concatenate(values, i)\n        expect(\n            node,\n            inputs=list(values),\n            outputs=[output],\n            name=\"test_concat_\" + test_case + \"_axis_negative_\" + str(abs(i)),\n        )" + } + ], + "category": "Tensor" + }, + { + "name": "Concat", + "module": "ai.onnx", + "version": 11, + "description": "Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on.", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": true, + "description": "Which axis to concat on. A negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(inputs)." + } + ], + "inputs": [ + { + "name": "inputs", + "type": "T", + "list": true, + "description": "List of tensors for concatenation" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "concat_result", + "type": "T", + "description": "Concatenated tensor" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "concat", + "code": "test_cases: Dict[str, Sequence[Any]] = {\n    \"1d\": ([1, 2], [3, 4]),\n    \"2d\": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]),\n    \"3d\": (\n        [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\n        [[[9, 10], [11, 12]], [[13, 14], [15, 16]]],\n    ),\n}\n\nfor test_case, values_ in test_cases.items():\n    values = [np.asarray(v, dtype=np.float32) for v in values_]\n    for i in range(len(values[0].shape)):\n        in_args = [\"value\" + str(k) for k in range(len(values))]\n        node = onnx.helper.make_node(\n            \"Concat\", inputs=list(in_args), outputs=[\"output\"], axis=i\n        )\n        output = np.concatenate(values, i)\n        expect(\n            node,\n            inputs=list(values),\n            outputs=[output],\n            name=\"test_concat_\" + test_case + \"_axis_\" + str(i),\n        )\n\n    for i in range(-len(values[0].shape), 0):\n        in_args = [\"value\" + str(k) for k in range(len(values))]\n        node = onnx.helper.make_node(\n            \"Concat\", inputs=list(in_args), outputs=[\"output\"], axis=i\n        )\n        output = np.concatenate(values, i)\n        expect(\n            node,\n            inputs=list(values),\n            outputs=[output],\n            name=\"test_concat_\" + test_case + \"_axis_negative_\" + str(abs(i)),\n        )" + } + ], + "category": "Tensor" + }, + { + "name": "Concat", + "module": "ai.onnx", + "version": 13, + "description": "Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on.", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": true, + "description": "Which axis to concat on. A negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(inputs)."
+ } + ], + "inputs": [ + { + "name": "inputs", + "type": "T", + "list": true, + "description": "List of tensors for concatenation" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "concat_result", + "type": "T", + "description": "Concatenated tensor" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "concat", + "code": "test_cases: Dict[str, Sequence[Any]] = {\n \"1d\": ([1, 2], [3, 4]),\n \"2d\": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]),\n \"3d\": (\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\n [[[9, 10], [11, 12]], [[13, 14], [15, 16]]],\n ),\n}\n\nfor test_case, values_ in test_cases.items():\n values = [np.asarray(v, dtype=np.float32) for v in values_]\n for i in range(len(values[0].shape)):\n in_args = [\"value\" + str(k) for k in range(len(values))]\n node = onnx.helper.make_node(\n \"Concat\", inputs=list(in_args), outputs=[\"output\"], axis=i\n )\n output = np.concatenate(values, i)\n expect(\n node,\n inputs=list(values),\n outputs=[output],\n name=\"test_concat_\" + test_case + \"_axis_\" + str(i),\n )\n\n for i in range(-len(values[0].shape), 0):\n in_args = [\"value\" + str(k) for k in range(len(values))]\n node = onnx.helper.make_node(\n \"Concat\", inputs=list(in_args), outputs=[\"output\"], axis=i\n )\n output = np.concatenate(values, i)\n expect(\n node,\n inputs=list(values),\n outputs=[output],\n name=\"test_concat_\" + test_case + \"_axis_negative_\" + str(abs(i)),\n )" + } + ], + "category": "Tensor" + }, + { + "name": "ConcatFromSequence", + "module": "ai.onnx", + "version": 11, + "description": "Concatenate a sequence of tensors into a single tensor.\nAll input tensors must have the same shape, except for the dimension size of the axis to concatenate on.\nBy default 'new_axis' is 0, the behavior is similar to numpy.concatenate.\nWhen 'new_axis' is 1, the behavior is similar to numpy.stack.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": true, + "description": "Which axis to concat on. Accepted range in `[-r, r - 1]`, where `r` is the rank of input tensors. When `new_axis` is 1, accepted range is `[-r - 1, r]`. " + }, + { + "name": "new_axis", + "type": "int64", + "required": false, + "description": "Insert and concatenate on a new axis or not, default 0 means do not insert new axis." 
+ } + ], + "inputs": [ + { + "name": "input_sequence", + "type": "S", + "description": "Sequence of tensors for concatenation" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "concat_result", + "type": "T", + "description": "Concatenated tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to any tensor type.", + "type_param_str": "S", + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + }, + { + "description": "Constrain output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ] + }, + { + "name": "Constant", + "module": "ai.onnx", + "version": 1, + "description": "A constant tensor.", + "attributes": [ + { + "name": "value", + "type": "tensor", + "required": true, + "description": "The value for the elements of the output tensor." + } + ], + "min_input": 0, + "max_input": 0, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor containing the same value of the provided tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "constant", + "code": "values = np.random.randn(5, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"values\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.FLOAT,\n dims=values.shape,\n vals=values.flatten().astype(float),\n ),\n)\n\nexpect(node, inputs=[], outputs=[values], name=\"test_constant\")" + } + ], + "category": "Constant" + }, + { + "name": "Constant", + "module": "ai.onnx", + "version": 9, + "description": "A constant tensor.", + "attributes": [ + { + "name": "value", + "type": "tensor", + "required": true, + "description": "The value for the elements of the output tensor." + } + ], + "min_input": 0, + "max_input": 0, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor containing the same value of the provided tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "constant", + "code": "values = np.random.randn(5, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"values\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.FLOAT,\n dims=values.shape,\n vals=values.flatten().astype(float),\n ),\n)\n\nexpect(node, inputs=[], outputs=[values], name=\"test_constant\")" + } + ], + "category": "Constant" + }, + { + "name": "Constant", + "module": "ai.onnx", + "version": 11, + "description": "A constant tensor. Exactly one of the two attributes, either value or sparse_value,\nmust be specified.\n", + "attributes": [ + { + "name": "sparse_value", + "required": false, + "description": "The value for the elements of the output tensor in sparse format." + }, + { + "name": "value", + "type": "tensor", + "required": false, + "description": "The value for the elements of the output tensor." + } + ], + "min_input": 0, + "max_input": 0, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor containing the same value of the provided tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "constant", + "code": "values = np.random.randn(5, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"values\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.FLOAT,\n dims=values.shape,\n vals=values.flatten().astype(float),\n ),\n)\n\nexpect(node, inputs=[], outputs=[values], name=\"test_constant\")" + } + ], + "category": "Constant" + }, + { + "name": "Constant", + "module": "ai.onnx", + "version": 12, + "description": "This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value,\nor value_* must be specified.\n", + "attributes": [ + { + "name": "sparse_value", + "required": false, + "description": "The value for the elements of the output tensor in sparse format." + }, + { + "name": "value", + "type": "tensor", + "required": false, + "description": "The value for the elements of the output tensor." + }, + { + "name": "value_float", + "type": "float32", + "required": false, + "description": "The value for the sole element for the scalar, float32, output tensor." + }, + { + "name": "value_floats", + "type": "float32[]", + "required": false, + "description": "The values for the elements for the 1D, float32, output tensor." 
+ }, + { + "name": "value_int", + "type": "int64", + "required": false, + "description": "The value for the sole element for the scalar, int64, output tensor." + }, + { + "name": "value_ints", + "type": "int64[]", + "required": false, + "description": "The values for the elements for the 1D, int64, output tensor." + }, + { + "name": "value_string", + "type": "string", + "required": false, + "description": "The value for the sole element for the scalar, UTF-8 string, output tensor." + }, + { + "name": "value_strings", + "type": "string[]", + "required": false, + "description": "The values for the elements for the 1D, UTF-8 string, output tensor." + } + ], + "min_input": 0, + "max_input": 0, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor containing the same value of the provided tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "constant", + "code": "values = np.random.randn(5, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"values\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.FLOAT,\n dims=values.shape,\n vals=values.flatten().astype(float),\n ),\n)\n\nexpect(node, inputs=[], outputs=[values], name=\"test_constant\")" + } + ], + "category": "Constant" + }, + { + "name": "Constant", + "module": "ai.onnx", + "version": 13, + "description": "This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value,\nor value_* must be specified.\n", + "attributes": [ + { + "name": "sparse_value", + "required": false, + "description": "The value for the elements of the output tensor in sparse format." + }, + { + "name": "value", + "type": "tensor", + "required": false, + "description": "The value for the elements of the output tensor." + }, + { + "name": "value_float", + "type": "float32", + "required": false, + "description": "The value for the sole element for the scalar, float32, output tensor." + }, + { + "name": "value_floats", + "type": "float32[]", + "required": false, + "description": "The values for the elements for the 1D, float32, output tensor." + }, + { + "name": "value_int", + "type": "int64", + "required": false, + "description": "The value for the sole element for the scalar, int64, output tensor." + }, + { + "name": "value_ints", + "type": "int64[]", + "required": false, + "description": "The values for the elements for the 1D, int64, output tensor." + }, + { + "name": "value_string", + "type": "string", + "required": false, + "description": "The value for the sole element for the scalar, UTF-8 string, output tensor." + }, + { + "name": "value_strings", + "type": "string[]", + "required": false, + "description": "The values for the elements for the 1D, UTF-8 string, output tensor." + } + ], + "min_input": 0, + "max_input": 0, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor containing the same value of the provided tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "constant", + "code": "values = np.random.randn(5, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"values\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.FLOAT,\n dims=values.shape,\n vals=values.flatten().astype(float),\n ),\n)\n\nexpect(node, inputs=[], outputs=[values], name=\"test_constant\")" + } + ], + "category": "Constant" + }, + { + "name": "Constant", + "module": "ai.onnx", + "version": 19, + "description": "This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value,\nor value_* must be specified.\n", + "attributes": [ + { + "name": "sparse_value", + "required": false, + "description": "The value for the elements of the output tensor in sparse format." + }, + { + "name": "value", + "type": "tensor", + "required": false, + "description": "The value for the elements of the output tensor." + }, + { + "name": "value_float", + "type": "float32", + "required": false, + "description": "The value for the sole element for the scalar, float32, output tensor." + }, + { + "name": "value_floats", + "type": "float32[]", + "required": false, + "description": "The values for the elements for the 1D, float32, output tensor." + }, + { + "name": "value_int", + "type": "int64", + "required": false, + "description": "The value for the sole element for the scalar, int64, output tensor." + }, + { + "name": "value_ints", + "type": "int64[]", + "required": false, + "description": "The values for the elements for the 1D, int64, output tensor." + }, + { + "name": "value_string", + "type": "string", + "required": false, + "description": "The value for the sole element for the scalar, UTF-8 string, output tensor." + }, + { + "name": "value_strings", + "type": "string[]", + "required": false, + "description": "The values for the elements for the 1D, UTF-8 string, output tensor." + } + ], + "min_input": 0, + "max_input": 0, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor containing the same value of the provided tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + } + ], + "examples": [ + { + "summary": "constant", + "code": "values = np.random.randn(5, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"values\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.FLOAT,\n dims=values.shape,\n vals=values.flatten().astype(float),\n ),\n)\n\nexpect(node, inputs=[], outputs=[values], name=\"test_constant\")" + } + ], + "category": "Constant" + }, + { + "name": "Constant", + "module": "ai.onnx", + "version": 21, + "description": "This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value,\nor value_* must be specified.\n", + "attributes": [ + { + "name": "sparse_value", + "required": false, + "description": "The value for the elements of the output tensor in sparse format." + }, + { + "name": "value", + "type": "tensor", + "required": false, + "description": "The value for the elements of the output tensor." + }, + { + "name": "value_float", + "type": "float32", + "required": false, + "description": "The value for the sole element for the scalar, float32, output tensor." + }, + { + "name": "value_floats", + "type": "float32[]", + "required": false, + "description": "The values for the elements for the 1D, float32, output tensor." + }, + { + "name": "value_int", + "type": "int64", + "required": false, + "description": "The value for the sole element for the scalar, int64, output tensor." + }, + { + "name": "value_ints", + "type": "int64[]", + "required": false, + "description": "The values for the elements for the 1D, int64, output tensor." + }, + { + "name": "value_string", + "type": "string", + "required": false, + "description": "The value for the sole element for the scalar, UTF-8 string, output tensor." + }, + { + "name": "value_strings", + "type": "string[]", + "required": false, + "description": "The values for the elements for the 1D, UTF-8 string, output tensor." + } + ], + "min_input": 0, + "max_input": 0, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor containing the same value of the provided tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + } + ], + "examples": [ + { + "summary": "constant", + "code": "values = np.random.randn(5, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"values\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.FLOAT,\n dims=values.shape,\n vals=values.flatten().astype(float),\n ),\n)\n\nexpect(node, inputs=[], outputs=[values], name=\"test_constant\")" + } + ], + "category": "Constant" + }, + { + "name": "ConstantOfShape", + "module": "ai.onnx", + "version": 9, + "description": "Generate a tensor with given value and shape.\n", + "attributes": [ + { + "name": "value", + "type": "tensor", + "required": false, + "description": "(Optional) The value of the output elements. Should be a one-element tensor. If not specified, it defaults to a tensor of value 0 and datatype float32." + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "1D tensor. The shape of the expected output tensor. If an empty tensor is given, the output is a scalar. All values must be >= 0." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor of shape specified by 'input'. If attribute 'value' is specified, the value and datatype of the output tensor are taken from 'value'. If attribute 'value' is not specified, the value in the output defaults to 0, and the datatype defaults to float32."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "Constrain output types to be numerics.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "float_ones", + "code": "x = np.array([4, 3, 2]).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\n \"value\", onnx.TensorProto.FLOAT, [1], [1]\n)\nnode = onnx.helper.make_node(\n \"ConstantOfShape\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n value=tensor_value,\n)\n\ny = np.ones(x, dtype=np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_constantofshape_float_ones\")" + }, + { + "summary": "int32_shape_zero", + "code": "x = np.array(\n [\n 0,\n ]\n).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\n \"value\", onnx.TensorProto.INT32, [1], [0]\n)\nnode = onnx.helper.make_node(\n \"ConstantOfShape\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n value=tensor_value,\n)\ny = np.zeros(x, dtype=np.int32)\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_constantofshape_int_shape_zero\"\n)" + }, + { + "summary": "int32_zeros", + "code": "x = np.array([10, 6]).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\n \"value\", onnx.TensorProto.INT32, [1], [0]\n)\nnode = onnx.helper.make_node(\n \"ConstantOfShape\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n value=tensor_value,\n)\ny = np.zeros(x, dtype=np.int32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_constantofshape_int_zeros\")" + } + ] + }, + { + "name": "ConstantOfShape", + "module": "ai.onnx", + "version": 20, + "description": "Generate a tensor with given value and shape.\n", + "attributes": [ + { + "name": "value", + "type": "tensor", + "required": false, + "description": "(Optional) The value of the output elements. Should be a one-element tensor. If not specified, it defaults to a tensor of value 0 and datatype float32." + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "1D tensor. The shape of the expected output tensor. If an empty tensor is given, the output is a scalar. All values must be >= 0." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor of shape specified by 'input'. If attribute 'value' is specified, the value and datatype of the output tensor are taken from 'value'. If attribute 'value' is not specified, the value in the output defaults to 0, and the datatype defaults to float32."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "Constrain output types to be numerics.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(bfloat16)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + } + ], + "examples": [ + { + "summary": "float_ones", + "code": "x = np.array([4, 3, 2]).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\n \"value\", onnx.TensorProto.FLOAT, [1], [1]\n)\nnode = onnx.helper.make_node(\n \"ConstantOfShape\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n value=tensor_value,\n)\n\ny = np.ones(x, dtype=np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_constantofshape_float_ones\")" + }, + { + "summary": "int32_shape_zero", + "code": "x = np.array(\n [\n 0,\n ]\n).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\n \"value\", onnx.TensorProto.INT32, [1], [0]\n)\nnode = onnx.helper.make_node(\n \"ConstantOfShape\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n value=tensor_value,\n)\ny = np.zeros(x, dtype=np.int32)\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_constantofshape_int_shape_zero\"\n)" + }, + { + "summary": "int32_zeros", + "code": "x = np.array([10, 6]).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\n \"value\", onnx.TensorProto.INT32, [1], [0]\n)\nnode = onnx.helper.make_node(\n \"ConstantOfShape\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n value=tensor_value,\n)\ny = np.zeros(x, dtype=np.int32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_constantofshape_int_zeros\")" + } + ] + }, + { + "name": "ConstantOfShape", + "module": "ai.onnx", + "version": 21, + "description": "Generate a tensor with given value and shape.\n", + "attributes": [ + { + "name": "value", + "type": "tensor", + "required": false, + "description": "(Optional) The value of the output elements. Should be a one-element tensor. If not specified, it defaults to a tensor of value 0 and datatype float32." + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "1D tensor. The shape of the expected output tensor. If an empty tensor is given, the output is a scalar. All values must be >= 0." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor of shape specified by 'input'. If attribute 'value' is specified, the value and datatype of the output tensor are taken from 'value'. If attribute 'value' is not specified, the value in the output defaults to 0, and the datatype defaults to float32."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "Constrain output types to be numerics.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + } + ], + "examples": [ + { + "summary": "float_ones", + "code": "x = np.array([4, 3, 2]).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\n \"value\", onnx.TensorProto.FLOAT, [1], [1]\n)\nnode = onnx.helper.make_node(\n \"ConstantOfShape\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n value=tensor_value,\n)\n\ny = np.ones(x, dtype=np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_constantofshape_float_ones\")" + }, + { + "summary": "int32_shape_zero", + "code": "x = np.array(\n [\n 0,\n ]\n).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\n \"value\", onnx.TensorProto.INT32, [1], [0]\n)\nnode = onnx.helper.make_node(\n \"ConstantOfShape\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n value=tensor_value,\n)\ny = np.zeros(x, dtype=np.int32)\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_constantofshape_int_shape_zero\"\n)" + }, + { + "summary": "int32_zeros", + "code": "x = np.array([10, 6]).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\n \"value\", onnx.TensorProto.INT32, [1], [0]\n)\nnode = onnx.helper.make_node(\n \"ConstantOfShape\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n value=tensor_value,\n)\ny = np.zeros(x, dtype=np.int32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_constantofshape_int_zeros\")" + } + ] + }, + { + "name": "Conv", + "module": "ai.onnx", + "version": 1, + "description": "The convolution operator consumes an input tensor and a filter, and\ncomputes the output.", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size matches the input. In case of an odd padding amount, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no padding." + }, + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "dilation value along each spatial axis of the filter." + }, + { + "name": "group", + "type": "int64", + "required": false, + "default": 1, + "description": "number of groups that input channels and output channels are divided into." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": false, + "description": "The shape of the convolution kernel. If not present, should be inferred from input W." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end of the corresponding axis. 
`pads` format should be as follows [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL ...]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "Optional 1D bias to be added to the convolution, has size of M." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths."
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "conv", + "code": "x = np.array(\n [\n [\n [\n [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 5, 5) input tensor\n [5.0, 6.0, 7.0, 8.0, 9.0],\n [10.0, 11.0, 12.0, 13.0, 14.0],\n [15.0, 16.0, 17.0, 18.0, 19.0],\n [20.0, 21.0, 22.0, 23.0, 24.0],\n ]\n ]\n ]\n).astype(np.float32)\nW = np.array(\n [\n [\n [\n [1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n ]\n ]\n).astype(np.float32)\n\n# Convolution with padding\nnode_with_padding = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1\n pads=[1, 1, 1, 1],\n)\ny_with_padding = np.array(\n [\n [\n [\n [12.0, 21.0, 27.0, 33.0, 24.0], # (1, 1, 5, 5) output tensor\n [33.0, 54.0, 63.0, 72.0, 51.0],\n [63.0, 99.0, 108.0, 117.0, 81.0],\n [93.0, 144.0, 153.0, 162.0, 111.0],\n [72.0, 111.0, 117.0, 123.0, 84.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_with_padding,\n inputs=[x, W],\n outputs=[y_with_padding],\n name=\"test_basic_conv_with_padding\",\n)\n\n# Convolution without padding\nnode_without_padding = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1\n pads=[0, 0, 0, 0],\n)\ny_without_padding = np.array(\n [\n [\n [\n [54.0, 63.0, 72.0], # (1, 1, 3, 3) output tensor\n [99.0, 108.0, 117.0],\n [144.0, 153.0, 162.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_without_padding,\n inputs=[x, W],\n outputs=[y_without_padding],\n name=\"test_basic_conv_without_padding\",\n)" + }, + { + "summary": "conv_with_autopad_same", + "code": "x = np.array(\n [\n [\n [\n [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 5, 5) input tensor\n [5.0, 6.0, 7.0, 8.0, 9.0],\n [10.0, 11.0, 12.0, 13.0, 14.0],\n [15.0, 16.0, 17.0, 18.0, 19.0],\n [20.0, 21.0, 22.0, 23.0, 24.0],\n ]\n ]\n ]\n).astype(np.float32)\nW = np.array(\n [\n [\n [\n [1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n ]\n ]\n).astype(np.float32)\n\n# Convolution with auto_pad='SAME_LOWER' and strides=2\nnode = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n auto_pad=\"SAME_LOWER\",\n kernel_shape=[3, 3],\n strides=[2, 2],\n)\ny = np.array(\n [[[[12.0, 27.0, 24.0], [63.0, 108.0, 81.0], [72.0, 117.0, 84.0]]]]\n).astype(np.float32)\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_conv_with_autopad_same\")" + }, + { + "summary": "conv_with_strides", + "code": "x = np.array(\n [\n [\n [\n [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 7, 5) input tensor\n [5.0, 6.0, 7.0, 8.0, 9.0],\n [10.0, 11.0, 12.0, 13.0, 14.0],\n [15.0, 16.0, 17.0, 18.0, 19.0],\n [20.0, 21.0, 22.0, 23.0, 24.0],\n [25.0, 26.0, 27.0, 28.0, 29.0],\n [30.0, 31.0, 32.0, 33.0, 34.0],\n ]\n ]\n ]\n).astype(np.float32)\nW = np.array(\n [\n [\n [\n [1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n ]\n ]\n).astype(np.float32)\n\n# Convolution with strides=2 and padding\nnode_with_padding = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n 
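# For this 7x5 input, pads=[1, 1, 1, 1] with strides=[2, 2] gives H_out = floor((7 + 2 - 3) / 2) + 1 = 4 and W_out = floor((5 + 2 - 3) / 2) + 1 = 3\n 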
kernel_shape=[3, 3],\n pads=[1, 1, 1, 1],\n strides=[\n 2,\n 2,\n ], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_with_padding = np.array(\n [\n [\n [\n [12.0, 27.0, 24.0], # (1, 1, 4, 3) output tensor\n [63.0, 108.0, 81.0],\n [123.0, 198.0, 141.0],\n [112.0, 177.0, 124.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_with_padding,\n inputs=[x, W],\n outputs=[y_with_padding],\n name=\"test_conv_with_strides_padding\",\n)\n\n# Convolution with strides=2 and no padding\nnode_without_padding = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[0, 0, 0, 0],\n strides=[\n 2,\n 2,\n ], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_without_padding = np.array(\n [\n [\n [\n [54.0, 72.0], # (1, 1, 3, 2) output tensor\n [144.0, 162.0],\n [234.0, 252.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_without_padding,\n inputs=[x, W],\n outputs=[y_without_padding],\n name=\"test_conv_with_strides_no_padding\",\n)\n\n# Convolution with strides=2 and padding only along one dimension (the H dimension in NxCxHxW tensor)\nnode_with_asymmetric_padding = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[1, 0, 1, 0],\n strides=[\n 2,\n 2,\n ], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_with_asymmetric_padding = np.array(\n [\n [\n [\n [21.0, 33.0], # (1, 1, 4, 2) output tensor\n [99.0, 117.0],\n [189.0, 207.0],\n [171.0, 183.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_with_asymmetric_padding,\n inputs=[x, W],\n outputs=[y_with_asymmetric_padding],\n name=\"test_conv_with_strides_and_asymmetric_padding\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "Conv", + "module": "ai.onnx", + "version": 11, + "description": "The convolution operator consumes an input tensor and a filter, and\ncomputes the output.", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER." + }, + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis." + }, + { + "name": "group", + "type": "int64", + "required": false, + "default": 1, + "description": "number of groups that input channels and output channels are divided into." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": false, + "description": "The shape of the convolution kernel. If not present, should be inferred from input W." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end of the corresponding axis. 
`pads` format should be as follows [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL ...]. Assuming zero based indices for the shape array, X.shape[1] == (W.shape[1] * group) == C and W.shape[0] mod G == 0. Or in other words FILTER_IN_CHANNEL multiplied by the number of groups should be equal to DATA_CHANNEL and the number of feature maps M should be a multiple of the number of groups G." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "Optional 1D bias to be added to the convolution, has size of M." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths."
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "conv", + "code": "x = np.array(\n [\n [\n [\n [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 5, 5) input tensor\n [5.0, 6.0, 7.0, 8.0, 9.0],\n [10.0, 11.0, 12.0, 13.0, 14.0],\n [15.0, 16.0, 17.0, 18.0, 19.0],\n [20.0, 21.0, 22.0, 23.0, 24.0],\n ]\n ]\n ]\n).astype(np.float32)\nW = np.array(\n [\n [\n [\n [1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n ]\n ]\n).astype(np.float32)\n\n# Convolution with padding\nnode_with_padding = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1\n pads=[1, 1, 1, 1],\n)\ny_with_padding = np.array(\n [\n [\n [\n [12.0, 21.0, 27.0, 33.0, 24.0], # (1, 1, 5, 5) output tensor\n [33.0, 54.0, 63.0, 72.0, 51.0],\n [63.0, 99.0, 108.0, 117.0, 81.0],\n [93.0, 144.0, 153.0, 162.0, 111.0],\n [72.0, 111.0, 117.0, 123.0, 84.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_with_padding,\n inputs=[x, W],\n outputs=[y_with_padding],\n name=\"test_basic_conv_with_padding\",\n)\n\n# Convolution without padding\nnode_without_padding = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1\n pads=[0, 0, 0, 0],\n)\ny_without_padding = np.array(\n [\n [\n [\n [54.0, 63.0, 72.0], # (1, 1, 3, 3) output tensor\n [99.0, 108.0, 117.0],\n [144.0, 153.0, 162.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_without_padding,\n inputs=[x, W],\n outputs=[y_without_padding],\n name=\"test_basic_conv_without_padding\",\n)" + }, + { + "summary": "conv_with_autopad_same", + "code": "x = np.array(\n [\n [\n [\n [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 5, 5) input tensor\n [5.0, 6.0, 7.0, 8.0, 9.0],\n [10.0, 11.0, 12.0, 13.0, 14.0],\n [15.0, 16.0, 17.0, 18.0, 19.0],\n [20.0, 21.0, 22.0, 23.0, 24.0],\n ]\n ]\n ]\n).astype(np.float32)\nW = np.array(\n [\n [\n [\n [1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n ]\n ]\n).astype(np.float32)\n\n# Convolution with auto_pad='SAME_LOWER' and strides=2\nnode = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n auto_pad=\"SAME_LOWER\",\n kernel_shape=[3, 3],\n strides=[2, 2],\n)\ny = np.array(\n [[[[12.0, 27.0, 24.0], [63.0, 108.0, 81.0], [72.0, 117.0, 84.0]]]]\n).astype(np.float32)\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_conv_with_autopad_same\")" + }, + { + "summary": "conv_with_strides", + "code": "x = np.array(\n [\n [\n [\n [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 7, 5) input tensor\n [5.0, 6.0, 7.0, 8.0, 9.0],\n [10.0, 11.0, 12.0, 13.0, 14.0],\n [15.0, 16.0, 17.0, 18.0, 19.0],\n [20.0, 21.0, 22.0, 23.0, 24.0],\n [25.0, 26.0, 27.0, 28.0, 29.0],\n [30.0, 31.0, 32.0, 33.0, 34.0],\n ]\n ]\n ]\n).astype(np.float32)\nW = np.array(\n [\n [\n [\n [1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n ]\n ]\n).astype(np.float32)\n\n# Convolution with strides=2 and padding\nnode_with_padding = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n 
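# Output spatial size per axis is floor((I + pad_begin + pad_end - K) / S) + 1; here the 7x5 input maps to 4x3\n 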
kernel_shape=[3, 3],\n pads=[1, 1, 1, 1],\n strides=[\n 2,\n 2,\n ], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_with_padding = np.array(\n [\n [\n [\n [12.0, 27.0, 24.0], # (1, 1, 4, 3) output tensor\n [63.0, 108.0, 81.0],\n [123.0, 198.0, 141.0],\n [112.0, 177.0, 124.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_with_padding,\n inputs=[x, W],\n outputs=[y_with_padding],\n name=\"test_conv_with_strides_padding\",\n)\n\n# Convolution with strides=2 and no padding\nnode_without_padding = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[0, 0, 0, 0],\n strides=[\n 2,\n 2,\n ], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_without_padding = np.array(\n [\n [\n [\n [54.0, 72.0], # (1, 1, 3, 2) output tensor\n [144.0, 162.0],\n [234.0, 252.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_without_padding,\n inputs=[x, W],\n outputs=[y_without_padding],\n name=\"test_conv_with_strides_no_padding\",\n)\n\n# Convolution with strides=2 and padding only along one dimension (the H dimension in NxCxHxW tensor)\nnode_with_asymmetric_padding = onnx.helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[1, 0, 1, 0],\n strides=[\n 2,\n 2,\n ], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_with_asymmetric_padding = np.array(\n [\n [\n [\n [21.0, 33.0], # (1, 1, 4, 2) output tensor\n [99.0, 117.0],\n [189.0, 207.0],\n [171.0, 183.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_with_asymmetric_padding,\n inputs=[x, W],\n outputs=[y_with_asymmetric_padding],\n name=\"test_conv_with_strides_and_asymmetric_padding\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "ConvInteger", + "module": "ai.onnx", + "version": 10, + "description": "The integer convolution operator consumes an input tensor, its zero-point, a filter, and its zero-point,\nand computes the output. The production MUST never overflow. The accumulation may overflow if and only if it is performed in 32 bits.\n", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER." + }, + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each axis." + }, + { + "name": "group", + "type": "int64", + "required": false, + "default": 1, + "description": "number of groups that input channels and output channels are divided into. Default is 1." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": false, + "description": "The shape of the convolution kernel. If not present, should be inferred from input 'w'."
+ }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end of the corresponding axis. `pads` format should be as follows [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each axis." + } + ], + "inputs": [ + { + "name": "x", + "type": "T1", + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + }, + { + "name": "w", + "type": "T2", + "description": "The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL ...]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL." + }, + { + "name": "x_zero_point", + "type": "T1", + "option": "optional", + "description": "Zero point tensor for input 'x'. It's optional and default value is 0. It's a scalar, which means a per-tensor/layer quantization." + }, + { + "name": "w_zero_point", + "type": "T2", + "option": "optional", + "description": "Zero point tensor for input 'w'. It's optional and default value is 0. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it's a 1-D tensor, its number of elements should be equal to the number of output channels (M)." + } + ], + "min_input": 2, + "max_input": 4, + "outputs": [ + { + "name": "y", + "type": "T3", + "description": "Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths."
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 4", + "type_constraints": [ + { + "description": "Constrain input x and its zero point data type to 8-bit integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + }, + { + "description": "Constrain input w and its zero point data type to 8-bit integer tensor.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + }, + { + "description": "Constrain output y data type to 32-bit integer tensor.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "with_padding", + "code": "x = (\n np.array([2, 3, 4, 5, 6, 7, 8, 9, 10])\n .astype(np.uint8)\n .reshape((1, 1, 3, 3))\n)\nx_zero_point = np.uint8(1)\nw = np.array([1, 1, 1, 1]).astype(np.uint8).reshape((1, 1, 2, 2))\n\ny = (\n np.array([1, 3, 5, 3, 5, 12, 16, 9, 11, 24, 28, 15, 7, 15, 17, 9])\n .astype(np.int32)\n .reshape((1, 1, 4, 4))\n)\n\n# ConvInteger with padding\nconvinteger_node_with_padding = onnx.helper.make_node(\n \"ConvInteger\",\n inputs=[\"x\", \"w\", \"x_zero_point\"],\n outputs=[\"y\"],\n pads=[1, 1, 1, 1],\n)\n\nexpect(\n convinteger_node_with_padding,\n inputs=[x, w, x_zero_point],\n outputs=[y],\n name=\"test_convinteger_with_padding\",\n)" + }, + { + "summary": "without_padding", + "code": "x = (\n np.array([2, 3, 4, 5, 6, 7, 8, 9, 10])\n .astype(np.uint8)\n .reshape((1, 1, 3, 3))\n)\nx_zero_point = np.uint8(1)\nw = np.array([1, 1, 1, 1]).astype(np.uint8).reshape((1, 1, 2, 2))\n\ny = np.array([12, 16, 24, 28]).astype(np.int32).reshape(1, 1, 2, 2)\n\n# ConvInteger without padding\nconvinteger_node = onnx.helper.make_node(\n \"ConvInteger\", inputs=[\"x\", \"w\", \"x_zero_point\"], outputs=[\"y\"]\n)\n\nexpect(\n convinteger_node,\n inputs=[x, w, x_zero_point],\n outputs=[y],\n name=\"test_convinteger_without_padding\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "ConvTranspose", + "module": "ai.onnx", + "version": 1, + "description": "The convolution transpose operator consumes an input tensor and a filter,\nand computes the output.\n\nIf the pads parameter is provided the shape of the output is calculated via the following equation:\n\n output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i]\n\noutput_shape can also be explicitly specified in which case pads values are auto generated using these equations:\n\n total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i]\n If (auto_pads != SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2)\n Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2).\n\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size matches the input. In case of an odd padding amount, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no padding." + }, + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "dilation value along each spatial axis of the filter."
+ }, + { + "name": "group", + "type": "int64", + "required": false, + "default": 1, + "description": "number of groups that input channels and output channels are divided into." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": false, + "description": "The shape of the convolution kernel. If not present, should be inferred from input W." + }, + { + "name": "output_padding", + "type": "int64[]", + "required": false, + "description": "The zero-padding added to one side of the output. This is also called adjs/adjustment in some frameworks." + }, + { + "name": "output_shape", + "type": "int64[]", + "required": false, + "description": "The shape of the output can be explicitly set, which will cause pads values to be auto generated. If output_shape is specified, pads values are ignored. See the doc for details on the equations used to generate pads." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end of the corresponding axis. `pads` format should be as follows [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn)" + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor that will be used in the convolutions; has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x ... x kn), where (k1 x k2 x ... x kn) is the dimension of the kernel. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)" + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "Optional 1D bias to be added to the convolution, has size of M." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, pad lengths and group count. 
The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "convtranspose", + "code": "x = np.array(\n [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)\n).astype(np.float32)\n\nW = np.array(\n [\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array(\n [\n [\n [\n [0.0, 1.0, 3.0, 3.0, 2.0], # (1, 2, 5, 5)\n [3.0, 8.0, 15.0, 12.0, 7.0],\n [9.0, 21.0, 36.0, 27.0, 15.0],\n [9.0, 20.0, 33.0, 24.0, 13.0],\n [6.0, 13.0, 21.0, 15.0, 8.0],\n ],\n [\n [0.0, 1.0, 3.0, 3.0, 2.0],\n [3.0, 8.0, 15.0, 12.0, 7.0],\n [9.0, 21.0, 36.0, 27.0, 15.0],\n [9.0, 20.0, 33.0, 24.0, 13.0],\n [6.0, 13.0, 21.0, 15.0, 8.0],\n ],\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose\")" + }, + { + "summary": "convtranspose_1d", + "code": "x = np.array([[[0.0, 1.0, 2.0]]]).astype(np.float32) # (1, 1, 3)\n\nW = np.array([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]).astype( # (1, 2, 3)\n np.float32\n)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array(\n [[[0.0, 1.0, 3.0, 3.0, 2.0], [0.0, 1.0, 3.0, 3.0, 2.0]]] # (1, 2, 5)\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_1d\")" + }, + { + "summary": "convtranspose_3d", + "code": "x = np.array(\n [\n [\n [\n [\n [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 3, 4, 5)\n [5.0, 6.0, 7.0, 8.0, 9.0],\n [10.0, 11.0, 12.0, 13.0, 14.0],\n [15.0, 16.0, 17.0, 18.0, 19.0],\n ],\n [\n [20.0, 21.0, 22.0, 23.0, 24.0],\n [25.0, 26.0, 27.0, 28.0, 29.0],\n [30.0, 31.0, 32.0, 33.0, 34.0],\n [35.0, 36.0, 37.0, 38.0, 39.0],\n ],\n [\n [40.0, 41.0, 42.0, 43.0, 44.0],\n [45.0, 46.0, 47.0, 48.0, 49.0],\n [50.0, 51.0, 52.0, 53.0, 54.0],\n [55.0, 56.0, 57.0, 58.0, 59.0],\n ],\n ]\n ]\n ]\n).astype(np.float32)\n\nW = np.array(\n [\n [\n [\n [\n [1.0, 1.0, 1.0], # (1, 2, 3, 3, 3)\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ],\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ],\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ],\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array(\n [\n [\n [\n [\n [0.0, 1.0, 3.0, 6.0, 9.0, 7.0, 4.0], # (1, 2, 5, 6, 7)\n [5.0, 12.0, 21.0, 27.0, 33.0, 24.0, 13.0],\n [15.0, 33.0, 54.0, 63.0, 72.0, 51.0, 27.0],\n [30.0, 63.0, 99.0, 108.0, 117.0, 81.0, 42.0],\n [25.0, 52.0, 81.0, 87.0, 93.0, 64.0, 33.0],\n [15.0, 31.0, 48.0, 51.0, 54.0, 37.0, 19.0],\n ],\n [\n [20.0, 42.0, 66.0, 72.0, 78.0, 54.0, 28.0],\n [50.0, 104.0, 162.0, 174.0, 186.0, 128.0, 66.0],\n [90.0, 186.0, 288.0, 306.0, 324.0, 222.0, 114.0],\n [120.0, 246.0, 378.0, 396.0, 414.0, 282.0, 144.0],\n [90.0, 184.0, 282.0, 294.0, 306.0, 208.0, 106.0],\n [50.0, 102.0, 156.0, 162.0, 168.0, 114.0, 58.0],\n ],\n [\n [60.0, 123.0, 189.0, 198.0, 207.0, 141.0, 
72.0],\n [135.0, 276.0, 423.0, 441.0, 459.0, 312.0, 159.0],\n [225.0, 459.0, 702.0, 729.0, 756.0, 513.0, 261.0],\n [270.0, 549.0, 837.0, 864.0, 891.0, 603.0, 306.0],\n [195.0, 396.0, 603.0, 621.0, 639.0, 432.0, 219.0],\n [105.0, 213.0, 324.0, 333.0, 342.0, 231.0, 117.0],\n ],\n [\n [60.0, 122.0, 186.0, 192.0, 198.0, 134.0, 68.0],\n [130.0, 264.0, 402.0, 414.0, 426.0, 288.0, 146.0],\n [210.0, 426.0, 648.0, 666.0, 684.0, 462.0, 234.0],\n [240.0, 486.0, 738.0, 756.0, 774.0, 522.0, 264.0],\n [170.0, 344.0, 522.0, 534.0, 546.0, 368.0, 186.0],\n [90.0, 182.0, 276.0, 282.0, 288.0, 194.0, 98.0],\n ],\n [\n [40.0, 81.0, 123.0, 126.0, 129.0, 87.0, 44.0],\n [85.0, 172.0, 261.0, 267.0, 273.0, 184.0, 93.0],\n [135.0, 273.0, 414.0, 423.0, 432.0, 291.0, 147.0],\n [150.0, 303.0, 459.0, 468.0, 477.0, 321.0, 162.0],\n [105.0, 212.0, 321.0, 327.0, 333.0, 224.0, 113.0],\n [55.0, 111.0, 168.0, 171.0, 174.0, 117.0, 59.0],\n ],\n ],\n [\n [\n [0.0, 1.0, 3.0, 6.0, 9.0, 7.0, 4.0],\n [5.0, 12.0, 21.0, 27.0, 33.0, 24.0, 13.0],\n [15.0, 33.0, 54.0, 63.0, 72.0, 51.0, 27.0],\n [30.0, 63.0, 99.0, 108.0, 117.0, 81.0, 42.0],\n [25.0, 52.0, 81.0, 87.0, 93.0, 64.0, 33.0],\n [15.0, 31.0, 48.0, 51.0, 54.0, 37.0, 19.0],\n ],\n [\n [20.0, 42.0, 66.0, 72.0, 78.0, 54.0, 28.0],\n [50.0, 104.0, 162.0, 174.0, 186.0, 128.0, 66.0],\n [90.0, 186.0, 288.0, 306.0, 324.0, 222.0, 114.0],\n [120.0, 246.0, 378.0, 396.0, 414.0, 282.0, 144.0],\n [90.0, 184.0, 282.0, 294.0, 306.0, 208.0, 106.0],\n [50.0, 102.0, 156.0, 162.0, 168.0, 114.0, 58.0],\n ],\n [\n [60.0, 123.0, 189.0, 198.0, 207.0, 141.0, 72.0],\n [135.0, 276.0, 423.0, 441.0, 459.0, 312.0, 159.0],\n [225.0, 459.0, 702.0, 729.0, 756.0, 513.0, 261.0],\n [270.0, 549.0, 837.0, 864.0, 891.0, 603.0, 306.0],\n [195.0, 396.0, 603.0, 621.0, 639.0, 432.0, 219.0],\n [105.0, 213.0, 324.0, 333.0, 342.0, 231.0, 117.0],\n ],\n [\n [60.0, 122.0, 186.0, 192.0, 198.0, 134.0, 68.0],\n [130.0, 264.0, 402.0, 414.0, 426.0, 288.0, 146.0],\n [210.0, 426.0, 648.0, 666.0, 684.0, 462.0, 234.0],\n [240.0, 486.0, 738.0, 756.0, 774.0, 522.0, 264.0],\n [170.0, 344.0, 522.0, 534.0, 546.0, 368.0, 186.0],\n [90.0, 182.0, 276.0, 282.0, 288.0, 194.0, 98.0],\n ],\n [\n [40.0, 81.0, 123.0, 126.0, 129.0, 87.0, 44.0],\n [85.0, 172.0, 261.0, 267.0, 273.0, 184.0, 93.0],\n [135.0, 273.0, 414.0, 423.0, 432.0, 291.0, 147.0],\n [150.0, 303.0, 459.0, 468.0, 477.0, 321.0, 162.0],\n [105.0, 212.0, 321.0, 327.0, 333.0, 224.0, 113.0],\n [55.0, 111.0, 168.0, 171.0, 174.0, 117.0, 59.0],\n ],\n ],\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_3d\")" + }, + { + "summary": "convtranspose_attributes", + "code": "x = np.array(\n [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)\n).astype(np.float32)\n\nW = np.array(\n [\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ]\n ]\n).astype(np.float32)\n\ny = np.array(\n [\n [\n [\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0], # (1, 2, 10, 8)\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0],\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n ],\n [\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0],\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0],\n [0.0, 0.0, 
1.0, 1.0, 3.0, 2.0, 2.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n ],\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], strides=[3, 2], output_shape=[10, 8]\n)\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_output_shape\")\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], strides=[3, 2], output_padding=[1, 1]\n)\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_pad\")\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\",\n [\"X\", \"W\"],\n [\"Y\"],\n name=\"test\",\n strides=[3, 2],\n output_shape=[10, 8],\n kernel_shape=[3, 3],\n output_padding=[1, 1],\n)\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_kernel_shape\")" + }, + { + "summary": "convtranspose_autopad_same", + "code": "x = np.array(\n [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)\n).astype(np.float32)\n\nW = np.array(\n [\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], auto_pad=\"SAME_UPPER\", strides=[2, 2]\n)\n\ny = np.array(\n [\n [\n [\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0],\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0],\n [3.0, 3.0, 8.0, 5.0, 12.0, 7.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0],\n [9.0, 9.0, 20.0, 11.0, 24.0, 13.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0],\n ],\n [\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0],\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0],\n [3.0, 3.0, 8.0, 5.0, 12.0, 7.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0],\n [9.0, 9.0, 20.0, 11.0, 24.0, 13.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0],\n ],\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_autopad_same\")" + }, + { + "summary": "convtranspose_dilations", + "code": "x = np.array(\n [[[[3.0, 8.0, 1.0], [9.0, 5.0, 7.0], [3.0, 2.0, 6.0]]]] # (1, 1, 3, 3)\n).astype(np.float32)\nW = np.array([[[[7.0, 2.0], [1.0, 9.0]]]]).astype(np.float32) # (1, 1, 2, 2)\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], dilations=[2, 2]\n)\n\ny = np.array(\n [\n [\n [\n [21.0, 56.0, 13.0, 16.0, 2.0], # [1, 1, 5, 5]\n [63.0, 35.0, 67.0, 10.0, 14.0],\n [24.0, 22.0, 76.0, 76.0, 21.0],\n [9.0, 5.0, 88.0, 45.0, 63.0],\n [3.0, 2.0, 33.0, 18.0, 54.0],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_dilations\")" + }, + { + "summary": "convtranspose_pads", + "code": "x = np.array(\n [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)\n).astype(np.float32)\n\nW = np.array(\n [\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], strides=[3, 2], pads=[1, 2, 1, 2]\n)\n\ny = np.array(\n [\n [\n [\n [1.0, 1.0, 3.0], # (1, 2, 7, 3)\n [1.0, 1.0, 3.0],\n [7.0, 4.0, 9.0],\n [7.0, 4.0, 9.0],\n [7.0, 4.0, 9.0],\n [13.0, 7.0, 15.0],\n [13.0, 7.0, 15.0],\n ],\n [\n [1.0, 1.0, 3.0],\n [1.0, 1.0, 3.0],\n [7.0, 4.0, 9.0],\n [7.0, 4.0, 9.0],\n [7.0, 4.0, 9.0],\n [13.0, 7.0, 15.0],\n 
[13.0, 7.0, 15.0],\n ],\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_pads\")" + } + ], + "category": "Layer" + }, + { + "name": "ConvTranspose", + "module": "ai.onnx", + "version": 11, + "description": "The convolution transpose operator consumes an input tensor and a filter,\nand computes the output.\n\nIf the pads parameter is provided the shape of the output is calculated via the following equation:\n\n output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i]\n\noutput_shape can also be explicitly specified in which case pads values are auto generated using these equations:\n\n total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i]\n If (auto_pads == SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2)\n Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2).\n\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = input_shape[i] * strides[i]` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER." + }, + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis." + }, + { + "name": "group", + "type": "int64", + "required": false, + "default": 1, + "description": "number of groups that input channels and output channels are divided into." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": false, + "description": "The shape of the convolution kernel. If not present, should be inferred from input W." + }, + { + "name": "output_padding", + "type": "int64[]", + "required": false, + "description": "Additional elements added to the side with higher coordinate indices in the output. Each padding value in \"output_padding\" must be less than the corresponding stride/dilation dimension. By default, this attribute is a zero vector. Note that this attribute doesn't directly affect the computed output values. It only controls the selection of the computed values, so changing this attribute only adds or removes output elements. If \"output_shape\" is explicitly provided, \"output_padding\" does not contribute additional size to \"output_shape\" but participates in the computation of the needed padding amount. This is also called adjs or adjustment in some frameworks." + }, + { + "name": "output_shape", + "type": "int64[]", + "required": false, + "description": "The shape of the output can be explicitly set, which will cause pads values to be auto generated. If output_shape is specified, pads values are ignored. See the doc for details on the equations used to generate pads. Note that the output_shape attribute value should not include dimensions for batch size and channels, which are automatically inferred."
+ }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end of the corresponding axis. `pads` format should be as follows [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn)" + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor that will be used in the convolutions; has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x ... x kn), where (k1 x k2 x ... x kn) is the dimension of the kernel. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)" + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "Optional 1D bias to be added to the convolution, has size of M." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, pad lengths and group count. 
The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "convtranspose", + "code": "x = np.array(\n [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)\n).astype(np.float32)\n\nW = np.array(\n [\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array(\n [\n [\n [\n [0.0, 1.0, 3.0, 3.0, 2.0], # (1, 2, 5, 5)\n [3.0, 8.0, 15.0, 12.0, 7.0],\n [9.0, 21.0, 36.0, 27.0, 15.0],\n [9.0, 20.0, 33.0, 24.0, 13.0],\n [6.0, 13.0, 21.0, 15.0, 8.0],\n ],\n [\n [0.0, 1.0, 3.0, 3.0, 2.0],\n [3.0, 8.0, 15.0, 12.0, 7.0],\n [9.0, 21.0, 36.0, 27.0, 15.0],\n [9.0, 20.0, 33.0, 24.0, 13.0],\n [6.0, 13.0, 21.0, 15.0, 8.0],\n ],\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose\")" + }, + { + "summary": "convtranspose_1d", + "code": "x = np.array([[[0.0, 1.0, 2.0]]]).astype(np.float32) # (1, 1, 3)\n\nW = np.array([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]).astype( # (1, 2, 3)\n np.float32\n)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array(\n [[[0.0, 1.0, 3.0, 3.0, 2.0], [0.0, 1.0, 3.0, 3.0, 2.0]]] # (1, 2, 5)\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_1d\")" + }, + { + "summary": "convtranspose_3d", + "code": "x = np.array(\n [\n [\n [\n [\n [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 3, 4, 5)\n [5.0, 6.0, 7.0, 8.0, 9.0],\n [10.0, 11.0, 12.0, 13.0, 14.0],\n [15.0, 16.0, 17.0, 18.0, 19.0],\n ],\n [\n [20.0, 21.0, 22.0, 23.0, 24.0],\n [25.0, 26.0, 27.0, 28.0, 29.0],\n [30.0, 31.0, 32.0, 33.0, 34.0],\n [35.0, 36.0, 37.0, 38.0, 39.0],\n ],\n [\n [40.0, 41.0, 42.0, 43.0, 44.0],\n [45.0, 46.0, 47.0, 48.0, 49.0],\n [50.0, 51.0, 52.0, 53.0, 54.0],\n [55.0, 56.0, 57.0, 58.0, 59.0],\n ],\n ]\n ]\n ]\n).astype(np.float32)\n\nW = np.array(\n [\n [\n [\n [\n [1.0, 1.0, 1.0], # (1, 2, 3, 3, 3)\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ],\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ],\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ],\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array(\n [\n [\n [\n [\n [0.0, 1.0, 3.0, 6.0, 9.0, 7.0, 4.0], # (1, 2, 5, 6, 7)\n [5.0, 12.0, 21.0, 27.0, 33.0, 24.0, 13.0],\n [15.0, 33.0, 54.0, 63.0, 72.0, 51.0, 27.0],\n [30.0, 63.0, 99.0, 108.0, 117.0, 81.0, 42.0],\n [25.0, 52.0, 81.0, 87.0, 93.0, 64.0, 33.0],\n [15.0, 31.0, 48.0, 51.0, 54.0, 37.0, 19.0],\n ],\n [\n [20.0, 42.0, 66.0, 72.0, 78.0, 54.0, 28.0],\n [50.0, 104.0, 162.0, 174.0, 186.0, 128.0, 66.0],\n [90.0, 186.0, 288.0, 306.0, 324.0, 222.0, 114.0],\n [120.0, 246.0, 378.0, 396.0, 414.0, 282.0, 144.0],\n [90.0, 184.0, 282.0, 294.0, 306.0, 208.0, 106.0],\n [50.0, 102.0, 156.0, 162.0, 168.0, 114.0, 58.0],\n ],\n [\n [60.0, 123.0, 189.0, 198.0, 207.0, 141.0, 
72.0],\n [135.0, 276.0, 423.0, 441.0, 459.0, 312.0, 159.0],\n [225.0, 459.0, 702.0, 729.0, 756.0, 513.0, 261.0],\n [270.0, 549.0, 837.0, 864.0, 891.0, 603.0, 306.0],\n [195.0, 396.0, 603.0, 621.0, 639.0, 432.0, 219.0],\n [105.0, 213.0, 324.0, 333.0, 342.0, 231.0, 117.0],\n ],\n [\n [60.0, 122.0, 186.0, 192.0, 198.0, 134.0, 68.0],\n [130.0, 264.0, 402.0, 414.0, 426.0, 288.0, 146.0],\n [210.0, 426.0, 648.0, 666.0, 684.0, 462.0, 234.0],\n [240.0, 486.0, 738.0, 756.0, 774.0, 522.0, 264.0],\n [170.0, 344.0, 522.0, 534.0, 546.0, 368.0, 186.0],\n [90.0, 182.0, 276.0, 282.0, 288.0, 194.0, 98.0],\n ],\n [\n [40.0, 81.0, 123.0, 126.0, 129.0, 87.0, 44.0],\n [85.0, 172.0, 261.0, 267.0, 273.0, 184.0, 93.0],\n [135.0, 273.0, 414.0, 423.0, 432.0, 291.0, 147.0],\n [150.0, 303.0, 459.0, 468.0, 477.0, 321.0, 162.0],\n [105.0, 212.0, 321.0, 327.0, 333.0, 224.0, 113.0],\n [55.0, 111.0, 168.0, 171.0, 174.0, 117.0, 59.0],\n ],\n ],\n [\n [\n [0.0, 1.0, 3.0, 6.0, 9.0, 7.0, 4.0],\n [5.0, 12.0, 21.0, 27.0, 33.0, 24.0, 13.0],\n [15.0, 33.0, 54.0, 63.0, 72.0, 51.0, 27.0],\n [30.0, 63.0, 99.0, 108.0, 117.0, 81.0, 42.0],\n [25.0, 52.0, 81.0, 87.0, 93.0, 64.0, 33.0],\n [15.0, 31.0, 48.0, 51.0, 54.0, 37.0, 19.0],\n ],\n [\n [20.0, 42.0, 66.0, 72.0, 78.0, 54.0, 28.0],\n [50.0, 104.0, 162.0, 174.0, 186.0, 128.0, 66.0],\n [90.0, 186.0, 288.0, 306.0, 324.0, 222.0, 114.0],\n [120.0, 246.0, 378.0, 396.0, 414.0, 282.0, 144.0],\n [90.0, 184.0, 282.0, 294.0, 306.0, 208.0, 106.0],\n [50.0, 102.0, 156.0, 162.0, 168.0, 114.0, 58.0],\n ],\n [\n [60.0, 123.0, 189.0, 198.0, 207.0, 141.0, 72.0],\n [135.0, 276.0, 423.0, 441.0, 459.0, 312.0, 159.0],\n [225.0, 459.0, 702.0, 729.0, 756.0, 513.0, 261.0],\n [270.0, 549.0, 837.0, 864.0, 891.0, 603.0, 306.0],\n [195.0, 396.0, 603.0, 621.0, 639.0, 432.0, 219.0],\n [105.0, 213.0, 324.0, 333.0, 342.0, 231.0, 117.0],\n ],\n [\n [60.0, 122.0, 186.0, 192.0, 198.0, 134.0, 68.0],\n [130.0, 264.0, 402.0, 414.0, 426.0, 288.0, 146.0],\n [210.0, 426.0, 648.0, 666.0, 684.0, 462.0, 234.0],\n [240.0, 486.0, 738.0, 756.0, 774.0, 522.0, 264.0],\n [170.0, 344.0, 522.0, 534.0, 546.0, 368.0, 186.0],\n [90.0, 182.0, 276.0, 282.0, 288.0, 194.0, 98.0],\n ],\n [\n [40.0, 81.0, 123.0, 126.0, 129.0, 87.0, 44.0],\n [85.0, 172.0, 261.0, 267.0, 273.0, 184.0, 93.0],\n [135.0, 273.0, 414.0, 423.0, 432.0, 291.0, 147.0],\n [150.0, 303.0, 459.0, 468.0, 477.0, 321.0, 162.0],\n [105.0, 212.0, 321.0, 327.0, 333.0, 224.0, 113.0],\n [55.0, 111.0, 168.0, 171.0, 174.0, 117.0, 59.0],\n ],\n ],\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_3d\")" + }, + { + "summary": "convtranspose_attributes", + "code": "x = np.array(\n [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)\n).astype(np.float32)\n\nW = np.array(\n [\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ]\n ]\n).astype(np.float32)\n\ny = np.array(\n [\n [\n [\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0], # (1, 2, 10, 8)\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0],\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n ],\n [\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0],\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0],\n [0.0, 0.0, 
1.0, 1.0, 3.0, 2.0, 2.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n ],\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], strides=[3, 2], output_shape=[10, 8]\n)\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_output_shape\")\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], strides=[3, 2], output_padding=[1, 1]\n)\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_pad\")\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\",\n [\"X\", \"W\"],\n [\"Y\"],\n name=\"test\",\n strides=[3, 2],\n output_shape=[10, 8],\n kernel_shape=[3, 3],\n output_padding=[1, 1],\n)\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_kernel_shape\")" + }, + { + "summary": "convtranspose_autopad_same", + "code": "x = np.array(\n [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)\n).astype(np.float32)\n\nW = np.array(\n [\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], auto_pad=\"SAME_UPPER\", strides=[2, 2]\n)\n\ny = np.array(\n [\n [\n [\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0],\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0],\n [3.0, 3.0, 8.0, 5.0, 12.0, 7.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0],\n [9.0, 9.0, 20.0, 11.0, 24.0, 13.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0],\n ],\n [\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0],\n [0.0, 0.0, 1.0, 1.0, 3.0, 2.0],\n [3.0, 3.0, 8.0, 5.0, 12.0, 7.0],\n [3.0, 3.0, 7.0, 4.0, 9.0, 5.0],\n [9.0, 9.0, 20.0, 11.0, 24.0, 13.0],\n [6.0, 6.0, 13.0, 7.0, 15.0, 8.0],\n ],\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_autopad_same\")" + }, + { + "summary": "convtranspose_dilations", + "code": "x = np.array(\n [[[[3.0, 8.0, 1.0], [9.0, 5.0, 7.0], [3.0, 2.0, 6.0]]]] # (1, 1, 3, 3)\n).astype(np.float32)\nW = np.array([[[[7.0, 2.0], [1.0, 9.0]]]]).astype(np.float32) # (1, 1, 2, 2)\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], dilations=[2, 2]\n)\n\ny = np.array(\n [\n [\n [\n [21.0, 56.0, 13.0, 16.0, 2.0], # [1, 1, 5, 5]\n [63.0, 35.0, 67.0, 10.0, 14.0],\n [24.0, 22.0, 76.0, 76.0, 21.0],\n [9.0, 5.0, 88.0, 45.0, 63.0],\n [3.0, 2.0, 33.0, 18.0, 54.0],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_dilations\")" + }, + { + "summary": "convtranspose_pads", + "code": "x = np.array(\n [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)\n).astype(np.float32)\n\nW = np.array(\n [\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n ]\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], strides=[3, 2], pads=[1, 2, 1, 2]\n)\n\ny = np.array(\n [\n [\n [\n [1.0, 1.0, 3.0], # (1, 2, 7, 3)\n [1.0, 1.0, 3.0],\n [7.0, 4.0, 9.0],\n [7.0, 4.0, 9.0],\n [7.0, 4.0, 9.0],\n [13.0, 7.0, 15.0],\n [13.0, 7.0, 15.0],\n ],\n [\n [1.0, 1.0, 3.0],\n [1.0, 1.0, 3.0],\n [7.0, 4.0, 9.0],\n [7.0, 4.0, 9.0],\n [7.0, 4.0, 9.0],\n [13.0, 7.0, 15.0],\n 
[13.0, 7.0, 15.0],\n ],\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name=\"test_convtranspose_pads\")" + } + ], + "category": "Layer" + }, + { + "name": "Cos", + "module": "ai.onnx", + "version": 7, + "description": "Calculates the cosine of the given input tensor, element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The cosine of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "cos", + "code": "node = onnx.helper.make_node(\n \"Cos\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.cos(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_cos_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.cos(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_cos\")" + } + ] + }, + { + "name": "Cosh", + "module": "ai.onnx", + "version": 9, + "description": "Calculates the hyperbolic cosine of the given input tensor element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The hyperbolic cosine values of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "cosh", + "code": "node = onnx.helper.make_node(\n \"Cosh\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.cosh(x) # expected output [1.54308069, 1., 1.54308069]\nexpect(node, inputs=[x], outputs=[y], name=\"test_cosh_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.cosh(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_cosh\")" + } + ] + }, + { + "name": "CumSum", + "module": "ai.onnx", + "version": 11, + "description": "Performs cumulative sum of the input elements along the given axis.\nBy default, it will do the sum inclusively meaning the first element is copied as is.\nThrough an `exclusive` attribute, this behavior can change to exclude the first element.\nIt can also perform summation in the opposite direction of the axis. For that, set `reverse` attribute to 1.\n\nExample:\n```\ninput_x = [1, 2, 3]\naxis=0\noutput = [1, 3, 6]\nexclusive=1\noutput = [0, 1, 3]\nexclusive=0\nreverse=1\noutput = [6, 5, 3]\nexclusive=1\nreverse=1\noutput = [5, 3, 0]\n```\n ", + "attributes": [ + { + "name": "exclusive", + "type": "int64", + "required": false, + "description": "If set to 1 will return exclusive sum in which the top element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements." + }, + { + "name": "reverse", + "type": "int64", + "required": false, + "description": "If set to 1 will perform the sums in reverse direction." 
+ } + ], + "inputs": [ + { + "name": "x", + "type": "T", + "description": "An input tensor that is to be processed." + }, + { + "name": "axis", + "type": "T2", + "description": "A 0-D tensor. Must be in the range [-rank(x), rank(x)-1]. Negative value means counting dimensions from the back." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "y", + "type": "T", + "description": "Output tensor of the same type as 'x' with cumulative sums of the x's elements" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input can be of any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "axis tensor can be int32 or int64 only", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "cumsum_1d", + "code": "node = onnx.helper.make_node(\"CumSum\", inputs=[\"x\", \"axis\"], outputs=[\"y\"])\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64)\naxis = np.int32(0)\ny = np.array([1.0, 3.0, 6.0, 10.0, 15.0]).astype(np.float64)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_1d\")" + }, + { + "summary": "cumsum_1d_exclusive", + "code": "node = onnx.helper.make_node(\n \"CumSum\", inputs=[\"x\", \"axis\"], outputs=[\"y\"], exclusive=1\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64)\naxis = np.int32(0)\ny = np.array([0.0, 1.0, 3.0, 6.0, 10.0]).astype(np.float64)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_1d_exclusive\")" + }, + { + "summary": "cumsum_1d_reverse", + "code": "node = onnx.helper.make_node(\n \"CumSum\", inputs=[\"x\", \"axis\"], outputs=[\"y\"], reverse=1\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64)\naxis = np.int32(0)\ny = np.array([15.0, 14.0, 12.0, 9.0, 5.0]).astype(np.float64)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_1d_reverse\")" + }, + { + "summary": "cumsum_1d_reverse_exclusive", + "code": "node = onnx.helper.make_node(\n \"CumSum\", inputs=[\"x\", \"axis\"], outputs=[\"y\"], reverse=1, exclusive=1\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64)\naxis = np.int32(0)\ny = np.array([14.0, 12.0, 9.0, 5.0, 0.0]).astype(np.float64)\nexpect(\n node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_1d_reverse_exclusive\"\n)" + }, + { + "summary": "cumsum_2d_axis_0", + "code": "node = onnx.helper.make_node(\n \"CumSum\",\n inputs=[\"x\", \"axis\"],\n outputs=[\"y\"],\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float64).reshape((2, 3))\naxis = np.int32(0)\ny = np.array([1.0, 2.0, 3.0, 5.0, 7.0, 9.0]).astype(np.float64).reshape((2, 3))\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_2d_axis_0\")" + }, + { + "summary": "cumsum_2d_axis_1", + "code": "node = onnx.helper.make_node(\n \"CumSum\",\n inputs=[\"x\", \"axis\"],\n outputs=[\"y\"],\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float64).reshape((2, 3))\naxis = np.int32(1)\ny = np.array([1.0, 3.0, 6.0, 4.0, 9.0, 15.0]).astype(np.float64).reshape((2, 3))\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_2d_axis_1\")" + }, + { + "summary": "cumsum_2d_negative_axis", + "code": "node = onnx.helper.make_node(\n \"CumSum\",\n inputs=[\"x\", \"axis\"],\n outputs=[\"y\"],\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float64).reshape((2, 3))\naxis = np.int32(-1)\ny = 
np.array([1.0, 3.0, 6.0, 4.0, 9.0, 15.0]).astype(np.float64).reshape((2, 3))\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_2d_negative_axis\")" + } + ] + }, + { + "name": "CumSum", + "module": "ai.onnx", + "version": 14, + "description": "Performs cumulative sum of the input elements along the given axis.\nBy default, it will do the sum inclusively meaning the first element is copied as is.\nThrough an `exclusive` attribute, this behavior can change to exclude the first element.\nIt can also perform summation in the opposite direction of the axis. For that, set `reverse` attribute to 1.\n\nExample:\n```\ninput_x = [1, 2, 3]\naxis=0\noutput = [1, 3, 6]\nexclusive=1\noutput = [0, 1, 3]\nexclusive=0\nreverse=1\noutput = [6, 5, 3]\nexclusive=1\nreverse=1\noutput = [5, 3, 0]\n```\n ", + "attributes": [ + { + "name": "exclusive", + "type": "int64", + "required": false, + "description": "If set to 1 will return exclusive sum in which the top element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements." + }, + { + "name": "reverse", + "type": "int64", + "required": false, + "description": "If set to 1 will perform the sums in reverse direction." + } + ], + "inputs": [ + { + "name": "x", + "type": "T", + "description": "An input tensor that is to be processed." + }, + { + "name": "axis", + "type": "T2", + "description": "A 0-D tensor. Must be in the range [-rank(x), rank(x)-1]. Negative value means counting dimensions from the back." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "y", + "type": "T", + "description": "Output tensor of the same type as 'x' with cumulative sums of the x's elements" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "axis tensor can be int32 or int64 only", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "cumsum_1d", + "code": "node = onnx.helper.make_node(\"CumSum\", inputs=[\"x\", \"axis\"], outputs=[\"y\"])\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64)\naxis = np.int32(0)\ny = np.array([1.0, 3.0, 6.0, 10.0, 15.0]).astype(np.float64)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_1d\")" + }, + { + "summary": "cumsum_1d_exclusive", + "code": "node = onnx.helper.make_node(\n \"CumSum\", inputs=[\"x\", \"axis\"], outputs=[\"y\"], exclusive=1\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64)\naxis = np.int32(0)\ny = np.array([0.0, 1.0, 3.0, 6.0, 10.0]).astype(np.float64)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_1d_exclusive\")" + }, + { + "summary": "cumsum_1d_reverse", + "code": "node = onnx.helper.make_node(\n \"CumSum\", inputs=[\"x\", \"axis\"], outputs=[\"y\"], reverse=1\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64)\naxis = np.int32(0)\ny = np.array([15.0, 14.0, 12.0, 9.0, 5.0]).astype(np.float64)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_1d_reverse\")" + }, + { + "summary": "cumsum_1d_reverse_exclusive", + "code": "node = onnx.helper.make_node(\n \"CumSum\", 
inputs=[\"x\", \"axis\"], outputs=[\"y\"], reverse=1, exclusive=1\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64)\naxis = np.int32(0)\ny = np.array([14.0, 12.0, 9.0, 5.0, 0.0]).astype(np.float64)\nexpect(\n node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_1d_reverse_exclusive\"\n)" + }, + { + "summary": "cumsum_2d_axis_0", + "code": "node = onnx.helper.make_node(\n \"CumSum\",\n inputs=[\"x\", \"axis\"],\n outputs=[\"y\"],\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float64).reshape((2, 3))\naxis = np.int32(0)\ny = np.array([1.0, 2.0, 3.0, 5.0, 7.0, 9.0]).astype(np.float64).reshape((2, 3))\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_2d_axis_0\")" + }, + { + "summary": "cumsum_2d_axis_1", + "code": "node = onnx.helper.make_node(\n \"CumSum\",\n inputs=[\"x\", \"axis\"],\n outputs=[\"y\"],\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float64).reshape((2, 3))\naxis = np.int32(1)\ny = np.array([1.0, 3.0, 6.0, 4.0, 9.0, 15.0]).astype(np.float64).reshape((2, 3))\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_2d_axis_1\")" + }, + { + "summary": "cumsum_2d_negative_axis", + "code": "node = onnx.helper.make_node(\n \"CumSum\",\n inputs=[\"x\", \"axis\"],\n outputs=[\"y\"],\n)\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float64).reshape((2, 3))\naxis = np.int32(-1)\ny = np.array([1.0, 3.0, 6.0, 4.0, 9.0, 15.0]).astype(np.float64).reshape((2, 3))\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_cumsum_2d_negative_axis\")" + } + ] + }, + { + "name": "DFT", + "module": "ai.onnx", + "version": 17, + "description": "Computes the discrete Fourier transform of input.", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "The axis on which to perform the DFT. By default this value is set to 1, which corresponds to the first dimension after the batch index. Negative value means counting dimensions from the back. Accepted range is $[-r, -2] \\cup [0, r-2]$ where `r = rank(input)`. The last dimension is for representing complex numbers and thus is an invalid axis." + }, + { + "name": "inverse", + "type": "int64", + "required": false, + "description": "Whether to perform the inverse discrete fourier transform. By default this value is set to 0, which corresponds to false." + }, + { + "name": "onesided", + "type": "int64", + "required": false, + "description": "If onesided is 1, only values for w in [0, 1, 2, ..., floor(n_fft/2) + 1] are returned because the real-to-complex Fourier transform satisfies the conjugate symmetry, i.e., X[m, w] = X[m, n_fft-w]*. Note if the input or window tensors are complex, then onesided output is not possible. Enabling onesided with real inputs performs a Real-valued fast Fourier transform (RFFT). When invoked with real or complex valued input, the default value is 0. Values can be 0 or 1." + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "For real input, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][1]. For complex input, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][2]. The first dimension is the batch dimension. The following N dimensions correspond to the signal's dimensions. The final dimension represents the real and imaginary parts of the value in that order." + }, + { + "name": "dft_length", + "type": "T2", + "option": "optional", + "description": "The length of the signal as a scalar. 
If greater than the axis dimension, the signal will be zero-padded up to dft_length. If less than the axis dimension, only the first dft_length values will be used as the signal. It's an optional value. " + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T1", + "description": "The Fourier Transform of the input vector. If onesided is 0, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][2]. If axis=1 and onesided is 1, the following shape is expected: [batch_idx][floor(signal_dim1/2)+1][signal_dim2]...[signal_dimN][2]. If axis=2 and onesided is 1, the following shape is expected: [batch_idx][signal_dim1][floor(signal_dim2/2)+1]...[signal_dimN][2]. If axis=N and onesided is 1, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]...[floor(signal_dimN/2)+1][2]. The signal_dim at the specified axis is equal to the dft_length." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain scalar length types to int64_t.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "dft", + "code": "node = onnx.helper.make_node(\"DFT\", inputs=[\"x\", \"\", \"axis\"], outputs=[\"y\"])\nx = np.arange(0, 100).reshape(10, 10).astype(np.float32)\naxis = np.array(1, dtype=np.int64)\ny = np.fft.fft(x, axis=0)\n\nx = x.reshape(1, 10, 10, 1)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_dft\")\n\nnode = onnx.helper.make_node(\"DFT\", inputs=[\"x\", \"\", \"axis\"], outputs=[\"y\"])\nx = np.arange(0, 100).reshape(10, 10).astype(np.float32)\naxis = np.array(2, dtype=np.int64)\ny = np.fft.fft(x, axis=1)\n\nx = x.reshape(1, 10, 10, 1)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_dft_axis\")\n\nnode = onnx.helper.make_node(\n \"DFT\", inputs=[\"x\", \"\", \"axis\"], outputs=[\"y\"], inverse=1\n)\nx = np.arange(0, 100, dtype=np.complex64).reshape(10, 10)\naxis = np.array(1, dtype=np.int64)\ny = np.fft.ifft(x, axis=0)\n\nx = np.stack((x.real, x.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_dft_inverse\")" + }, + { + "summary": "opset19", + "code": "node = onnx.helper.make_node(\"DFT\", inputs=[\"x\"], outputs=[\"y\"], axis=1)\nx = np.arange(0, 100).reshape(10, 10).astype(np.float32)\ny = np.fft.fft(x, axis=0)\n\nx = x.reshape(1, 10, 10, 1)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dft_opset19\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 19)],\n)\n\nnode = onnx.helper.make_node(\"DFT\", inputs=[\"x\"], outputs=[\"y\"], axis=2)\nx = np.arange(0, 100).reshape(10, 10).astype(np.float32)\ny = np.fft.fft(x, axis=1)\n\nx = x.reshape(1, 10, 10, 1)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dft_axis_opset19\",\n 
opset_imports=[onnx.helper.make_opsetid(\"\", 19)],\n)\n\nnode = onnx.helper.make_node(\n \"DFT\", inputs=[\"x\"], outputs=[\"y\"], inverse=1, axis=1\n)\nx = np.arange(0, 100, dtype=np.complex64).reshape(\n 10,\n 10,\n)\ny = np.fft.ifft(x, axis=0)\n\nx = np.stack((x.real, x.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dft_inverse_opset19\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 19)],\n)" + } + ] + }, + { + "name": "DFT", + "module": "ai.onnx", + "version": 20, + "description": "Computes the discrete Fourier Transform (DFT) of the input.\n\nAssuming the input has shape `[M, N]`, where `N` is the dimension over which the\nDFT is computed and `M` denotes the conceptual \"all other dimensions,\"\nthe DFT `y[m, k]` of shape `[M, N]` is defined as\n\n$$y[m, k] = \\sum_{n=0}^{N-1} e^{-2 \\pi j \\frac{k n}{N} } x[m, n] ,$$\n\nand the inverse transform is defined as\n\n$$x[m, n] = \\frac{1}{N} \\sum_{k=0}^{N-1} e^{2 \\pi j \\frac{k n}{N} } y[m, k] ,$$\n\nwhere $j$ is the imaginary unit.\n\nThe actual shape of the output is specified in the \"output\" section.\n\nReference: https://docs.scipy.org/doc/scipy/tutorial/fft.html\n", + "attributes": [ + { + "name": "inverse", + "type": "int64", + "required": false, + "description": "Whether to perform the inverse discrete Fourier Transform. Default is 0, which corresponds to `false`." + }, + { + "name": "onesided", + "type": "int64", + "required": false, + "description": "If `onesided` is `1` and input is real, only values for `k` in `[0, 1, 2, ..., floor(n_fft/2) + 1]` are returned because the real-to-complex Fourier transform satisfies the conjugate symmetry, i.e., `X[m, k] = X[m, n_fft-k]*`, where `m` denotes \"all other dimensions\" DFT was not applied on. If the input tensor is complex, onesided output is not possible. Value can be `0` or `1`. Default is `0`." + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "For real input, the following shape is expected: `[signal_dim0][signal_dim1][signal_dim2]...[signal_dimN][1]`. For complex input, the following shape is expected: `[signal_dim0][signal_dim1][signal_dim2]...[signal_dimN][2]`. The final dimension represents the real and imaginary parts of the value in that order." + }, + { + "name": "dft_length", + "type": "T2", + "option": "optional", + "description": "The length of the signal as a scalar. If greater than the axis dimension, the signal will be zero-padded up to `dft_length`. If less than the axis dimension, only the first `dft_length` values will be used as the signal. " + }, + { + "name": "axis", + "type": "tensor(int64)", + "option": "optional", + "description": "The axis as a scalar on which to perform the DFT. Default is `-2` (last signal axis). Negative value means counting dimensions from the back. Accepted range is $[-r, -2] \\cup [0, r-2]$ where `r = rank(input)`. The last dimension is for representing complex numbers and thus is an invalid axis." + } + ], + "min_input": 1, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T1", + "description": "The Fourier Transform of the input vector. If `onesided` is `0`, the following shape is expected: `[signal_dim0][signal_dim1][signal_dim2]...[signal_dimN][2]`. If `axis=0` and `onesided` is `1`, the following shape is expected: `[floor(signal_dim0/2)+1][signal_dim1][signal_dim2]...[signal_dimN][2]`. 
If `axis=1` and `onesided` is `1`, the following shape is expected: `[signal_dim0][floor(signal_dim1/2)+1][signal_dim2]...[signal_dimN][2]`. If `axis=N` and `onesided` is `1`, the following shape is expected: `[signal_dim0][signal_dim1][signal_dim2]...[floor(signal_dimN/2)+1][2]`. The `signal_dim` at the specified `axis` is equal to the `dft_length`." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain scalar length types to integers.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "dft", + "code": "node = onnx.helper.make_node(\"DFT\", inputs=[\"x\", \"\", \"axis\"], outputs=[\"y\"])\nx = np.arange(0, 100).reshape(10, 10).astype(np.float32)\naxis = np.array(1, dtype=np.int64)\ny = np.fft.fft(x, axis=0)\n\nx = x.reshape(1, 10, 10, 1)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_dft\")\n\nnode = onnx.helper.make_node(\"DFT\", inputs=[\"x\", \"\", \"axis\"], outputs=[\"y\"])\nx = np.arange(0, 100).reshape(10, 10).astype(np.float32)\naxis = np.array(2, dtype=np.int64)\ny = np.fft.fft(x, axis=1)\n\nx = x.reshape(1, 10, 10, 1)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_dft_axis\")\n\nnode = onnx.helper.make_node(\n \"DFT\", inputs=[\"x\", \"\", \"axis\"], outputs=[\"y\"], inverse=1\n)\nx = np.arange(0, 100, dtype=np.complex64).reshape(10, 10)\naxis = np.array(1, dtype=np.int64)\ny = np.fft.ifft(x, axis=0)\n\nx = np.stack((x.real, x.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(node, inputs=[x, axis], outputs=[y], name=\"test_dft_inverse\")" + }, + { + "summary": "opset19", + "code": "node = onnx.helper.make_node(\"DFT\", inputs=[\"x\"], outputs=[\"y\"], axis=1)\nx = np.arange(0, 100).reshape(10, 10).astype(np.float32)\ny = np.fft.fft(x, axis=0)\n\nx = x.reshape(1, 10, 10, 1)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dft_opset19\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 19)],\n)\n\nnode = onnx.helper.make_node(\"DFT\", inputs=[\"x\"], outputs=[\"y\"], axis=2)\nx = np.arange(0, 100).reshape(10, 10).astype(np.float32)\ny = np.fft.fft(x, axis=1)\n\nx = x.reshape(1, 10, 10, 1)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dft_axis_opset19\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 19)],\n)\n\nnode = onnx.helper.make_node(\n \"DFT\", inputs=[\"x\"], outputs=[\"y\"], inverse=1, axis=1\n)\nx = np.arange(0, 100, dtype=np.complex64).reshape(\n 10,\n 10,\n)\ny = np.fft.ifft(x, axis=0)\n\nx = np.stack((x.real, x.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\ny = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dft_inverse_opset19\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 19)],\n)" + } + ] + }, + { + "name": "DeformConv", + "module": 
"ai.onnx", + "version": 19, + "description": "Performs deformable convolution as described in https://arxiv.org/abs/1703.06211 and https://arxiv.org/abs/1811.11168.\nThis operator specification supports the general N-D case. Note that most common use cases have 2D or 3D data.\n", + "attributes": [ + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "Dilation value along each spatial axis of the kernel. Default is 1 along each axis." + }, + { + "name": "group", + "type": "int64", + "required": false, + "default": 1, + "description": "Number of groups the input and output channels, C and oC, are divided into. C and oC must both be divisible by group. Default is 1." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": false, + "description": "Shape of the convolution kernel. If not present, it is inferred from the shape of input W." + }, + { + "name": "offset_group", + "type": "int64", + "required": false, + "default": 1, + "description": "Number of groups of offset. C must be divisible by offset_group. Default is 1." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and end along each spatial axis. The values represent the number of pixels added to the beginning and end of the corresponding axis and can take any nonnegative value. The format should be as follows: [x1_begin, x2_begin, ..., x1_end, x2_end, ...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. Default is 0 along each axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. Default is 1 along each axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor. For 2D image data, it has shape (N, C, H, W) where N is the batch size, C is the number of input channels, and H and W are the height and width. In general, the shape is (N, C, D1, D2, ... , Dn) for n-dimensional data, where D1 to Dn are the spatial dimension sizes. Most common use cases have n = 2 or 3." + }, + { + "name": "W", + "type": "T", + "description": "Weight tensor that will be used in the convolutions. It has shape (oC, C/group, kH, kW), where oC is the number of output channels and kH and kW are the kernel height and width. For more than 2 dimensions, it has shape (oC, C/group, k1, k2, ... , kn)." + }, + { + "name": "offset", + "type": "T", + "description": "Offset tensor denoting the offset for the sampling locations in the convolution kernel. It has shape (N, offset_group * kH * kW * 2, oH, oW) for 2D data or (N, offset_group * k1 * k2 * ... * kn * n, o1, o2, ... , on) for nD data. Use linear interpolationfor fractional offset values. Sampling locations outside of the padded input tensor gives zero." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "Optional 1D bias of length oC to be added to the convolution. Default is a tensor of zeros." + }, + { + "name": "mask", + "type": "T", + "option": "optional", + "description": "The mask tensor to be applied to each position in the convolution kernel. It has shape (N, offset_group * kH * kW, oH, oW) for 2D data or (N, offset_group * k1 * k2 * ... * kn * n, o1, o2, ... , on) for nD data. Default is a tensor of ones." 
+ } + ], + "min_input": 3, + "max_input": 5, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor that contains the result of convolution. It has shape (N, oC, oH, oW) for 2D data or (N, oC, o1, o2, ..., on) for nD data" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "3 - 5", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "deformconv", + "code": "X = np.arange(9).astype(np.float32)\nX.shape = (1, 1, 3, 3)\nW = np.ones((1, 1, 2, 2), dtype=np.float32)\n\n# Convolution with padding\noffset_with_padding = np.zeros((1, 8, 4, 4), dtype=np.float32)\noffset_with_padding[\n 0, 0, 0, 0\n] = 0.5 # h-coord of [0, 0] element of kernel, at output position [0, 0]\noffset_with_padding[\n 0, 5, 1, 2\n] = -0.1 # w-coord of [1, 0] element of kernel, at output position [1, 2]\n\nnode_with_padding = onnx.helper.make_node(\n \"DeformConv\",\n inputs=[\"X\", \"W\", \"offset_with_padding\"],\n outputs=[\"Y_with_padding\"],\n kernel_shape=[2, 2],\n pads=[1, 1, 1, 1],\n)\nY_with_padding = np.array(\n [\n [\n [\n [0.0, 1.0, 3.0, 2.0], # (1, 1, 4, 4) output tensor\n [3.0, 8.0, 11.9, 7.0],\n [9.0, 20.0, 24.0, 13.0],\n [6.0, 13.0, 15.0, 8.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_with_padding,\n inputs=[X, W, offset_with_padding],\n outputs=[Y_with_padding],\n name=\"test_basic_deform_conv_with_padding\",\n)\n\n# Convolution without padding\noffset_without_padding = np.zeros((1, 8, 2, 2), dtype=np.float32)\noffset_without_padding[\n 0, 0, 0, 0\n] = 0.5 # h-coord of [0, 0] element of kernel, at output position [0, 0]\noffset_without_padding[\n 0, 5, 0, 1\n] = -0.1 # w-coord of [1, 0] element of kernel, at output position [0, 1]\n\nnode_without_padding = onnx.helper.make_node(\n \"DeformConv\",\n inputs=[\"X\", \"W\", \"offset_without_padding\"],\n outputs=[\"Y_without_padding\"],\n kernel_shape=[2, 2],\n pads=[0, 0, 0, 0],\n)\nY_without_padding = np.array(\n [\n [\n [\n [9.5, 11.9], # (1, 1, 2, 2) output tensor\n [20.0, 24.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node_without_padding,\n inputs=[X, W, offset_without_padding],\n outputs=[Y_without_padding],\n name=\"test_basic_deform_conv_without_padding\",\n)" + }, + { + "summary": "deformconv_with_mask_bias", + "code": "X = np.arange(9).astype(np.float32)\nX.shape = (1, 1, 3, 3)\nW = np.ones((1, 1, 2, 2), dtype=np.float32)\nB = np.ones((1,), dtype=np.float32)\n\noffset = np.zeros((1, 8, 2, 2), dtype=np.float32)\noffset[\n 0, 0, 0, 0\n] = 0.5 # h-coord of [0, 0] element of kernel, at output position [0, 0]\noffset[\n 0, 5, 0, 1\n] = -0.1 # w-coord of [1, 0] element of kernel, at output position [0, 1]\n\nmask = np.ones((1, 4, 2, 2), dtype=np.float32)\nmask[0, 2, 1, 1] = 0.2 # [1, 0] element of kernel at output position [1, 1]\n\nnode = onnx.helper.make_node(\n \"DeformConv\",\n inputs=[\"X\", \"W\", \"offset\", \"B\", \"mask\"],\n outputs=[\"Y\"],\n kernel_shape=[2, 2],\n pads=[0, 0, 0, 0],\n)\nY = np.array(\n [\n [\n [\n [10.5, 12.9], # (1, 1, 2, 2) output tensor\n [21.0, 19.4],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node,\n inputs=[X, W, offset, B, mask],\n outputs=[Y],\n name=\"test_deform_conv_with_mask_bias\",\n)" + }, + { + "summary": "deformconv_with_multiple_offset_groups", + "code": "X = np.zeros((1, 2, 3, 3), dtype=np.float32)\nX[0, 0] = np.reshape(np.arange(9).astype(np.float32), 
(3, 3))\nX[0, 1] = np.reshape(np.arange(8, -1, -1).astype(np.float32), (3, 3))\nX.shape = (1, 2, 3, 3)\nW = np.ones((1, 2, 2, 2), dtype=np.float32)\n\noffset = np.zeros((1, 16, 2, 2), dtype=np.float32)\noffset[\n 0, 0, 0, 0\n] = 0.5 # h-coord of [0, 0] element of kernel in channel 0, at output position [0, 0]\noffset[\n 0, 13, 0, 1\n] = (\n -0.1\n) # w-coord of [1, 0] element of kernel in channel 1, at output position [0, 1]\n\nnode = onnx.helper.make_node(\n \"DeformConv\",\n inputs=[\"X\", \"W\", \"offset\"],\n outputs=[\"Y\"],\n kernel_shape=[2, 2],\n pads=[0, 0, 0, 0],\n offset_group=2,\n)\nY = np.array(\n [\n [\n [\n [33.5, 32.1], # (1, 1, 2, 2) output tensor\n [32.0, 32.0],\n ]\n ]\n ]\n).astype(np.float32)\nexpect(\n node,\n inputs=[X, W, offset],\n outputs=[Y],\n name=\"test_deform_conv_with_multiple_offset_groups\",\n)" + } + ] + }, + { + "name": "DepthToSpace", + "module": "ai.onnx", + "version": 1, + "description": "DepthToSpace rearranges (permutes) data from depth into blocks of spatial data.\nThis is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of\nthe input tensor where values from the depth dimension are moved in spatial blocks to the height\nand width dimensions.\n", + "attributes": [ + { + "name": "blocksize", + "type": "int64", + "required": true, + "description": "Blocks of [blocksize, blocksize] are moved." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "crd_mode_example", + "code": "node = onnx.helper.make_node(\n \"DepthToSpace\", inputs=[\"x\"], outputs=[\"y\"], blocksize=2, mode=\"CRD\"\n)\n\n# (1, 8, 2, 3) input tensor\nx = np.array(\n [\n [\n [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],\n [[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]],\n [[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]],\n [[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]],\n [[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]],\n [[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]],\n [[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]],\n [[63.0, 64.0, 65.0], [66.0, 67.0, 68.0]],\n ]\n ]\n).astype(np.float32)\n\n# (1, 2, 4, 6) output tensor\ny = np.array(\n [\n [\n [\n [0.0, 9.0, 1.0, 10.0, 2.0, 11.0],\n [18.0, 27.0, 19.0, 28.0, 20.0, 29.0],\n [3.0, 12.0, 4.0, 13.0, 5.0, 14.0],\n [21.0, 30.0, 22.0, 31.0, 23.0, 32.0],\n ],\n [\n [36.0, 45.0, 37.0, 46.0, 38.0, 47.0],\n [54.0, 63.0, 55.0, 64.0, 56.0, 65.0],\n [39.0, 48.0, 40.0, 49.0, 41.0, 50.0],\n [57.0, 66.0, 58.0, 67.0, 59.0, 68.0],\n ],\n ]\n ]\n).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_depthtospace_crd_mode_example\")" + }, + { + "summary": "default_mode_example", + "code": "node = onnx.helper.make_node(\n \"DepthToSpace\", inputs=[\"x\"], outputs=[\"y\"], blocksize=2, mode=\"DCR\"\n)\n\n# (1, 8, 2, 3) input tensor\nx = np.array(\n [\n [\n [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],\n [[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]],\n [[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]],\n [[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]],\n [[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]],\n [[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]],\n [[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]],\n [[63.0, 64.0, 65.0], [66.0, 67.0, 68.0]],\n ]\n ]\n).astype(np.float32)\n\n# (1, 2, 4, 6) output tensor\ny = np.array(\n [\n [\n [\n [0.0, 18.0, 1.0, 19.0, 2.0, 20.0],\n [36.0, 54.0, 37.0, 55.0, 38.0, 56.0],\n [3.0, 21.0, 4.0, 22.0, 5.0, 23.0],\n [39.0, 57.0, 40.0, 58.0, 41.0, 59.0],\n ],\n [\n [9.0, 27.0, 10.0, 28.0, 11.0, 29.0],\n [45.0, 63.0, 46.0, 64.0, 47.0, 65.0],\n [12.0, 30.0, 13.0, 31.0, 14.0, 32.0],\n [48.0, 66.0, 49.0, 67.0, 50.0, 68.0],\n ],\n ]\n ]\n).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_depthtospace_example\")" + } + ] + }, + { + "name": "DepthToSpace", + "module": "ai.onnx", + "version": 11, + "description": "DepthToSpace rearranges (permutes) data from depth into blocks of spatial data.\nThis is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of\nthe input tensor where values from the depth dimension are moved in spatial blocks to the height\nand width dimensions. By default, `mode` = `DCR`.\nIn the DCR mode, elements along the depth dimension from the input tensor are rearranged in the\nfollowing order: depth, column, and then row. 
The output y is computed from the input x as below:\n\nb, c, h, w = x.shape\n\ntmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])\n\ntmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])\n\ny = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])\n\n\nIn the CRD mode, elements along the depth dimension from the input tensor are rearranged in the\nfollowing order: column, row, and the depth. The output y is computed from the input x as below:\n\nb, c, h, w = x.shape\n\ntmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])\n\ntmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])\n\ny = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])\n\n", + "attributes": [ + { + "name": "blocksize", + "type": "int64", + "required": true, + "description": "Blocks of [blocksize, blocksize] are moved." + }, + { + "name": "mode", + "type": "string", + "required": false, + "default": "DCR", + "description": "DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "crd_mode_example", + "code": "node = onnx.helper.make_node(\n \"DepthToSpace\", inputs=[\"x\"], outputs=[\"y\"], blocksize=2, mode=\"CRD\"\n)\n\n# (1, 8, 2, 3) input tensor\nx = np.array(\n [\n [\n [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],\n [[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]],\n [[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]],\n [[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]],\n [[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]],\n [[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]],\n [[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]],\n [[63.0, 64.0, 65.0], [66.0, 67.0, 68.0]],\n ]\n ]\n).astype(np.float32)\n\n# (1, 2, 4, 6) output tensor\ny = np.array(\n [\n [\n [\n [0.0, 9.0, 1.0, 10.0, 2.0, 11.0],\n [18.0, 27.0, 19.0, 28.0, 20.0, 29.0],\n [3.0, 12.0, 4.0, 13.0, 5.0, 14.0],\n [21.0, 30.0, 22.0, 31.0, 23.0, 32.0],\n ],\n [\n [36.0, 45.0, 37.0, 46.0, 38.0, 47.0],\n [54.0, 63.0, 55.0, 64.0, 56.0, 65.0],\n [39.0, 48.0, 40.0, 49.0, 41.0, 50.0],\n [57.0, 66.0, 58.0, 67.0, 59.0, 68.0],\n ],\n ]\n ]\n).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_depthtospace_crd_mode_example\")" + }, + { + "summary": "default_mode_example", + "code": "node = onnx.helper.make_node(\n \"DepthToSpace\", inputs=[\"x\"], outputs=[\"y\"], blocksize=2, mode=\"DCR\"\n)\n\n# (1, 8, 2, 3) input tensor\nx = np.array(\n [\n [\n [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],\n [[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]],\n [[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]],\n [[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]],\n [[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]],\n [[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]],\n 
[[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]],\n [[63.0, 64.0, 65.0], [66.0, 67.0, 68.0]],\n ]\n ]\n).astype(np.float32)\n\n# (1, 2, 4, 6) output tensor\ny = np.array(\n [\n [\n [\n [0.0, 18.0, 1.0, 19.0, 2.0, 20.0],\n [36.0, 54.0, 37.0, 55.0, 38.0, 56.0],\n [3.0, 21.0, 4.0, 22.0, 5.0, 23.0],\n [39.0, 57.0, 40.0, 58.0, 41.0, 59.0],\n ],\n [\n [9.0, 27.0, 10.0, 28.0, 11.0, 29.0],\n [45.0, 63.0, 46.0, 64.0, 47.0, 65.0],\n [12.0, 30.0, 13.0, 31.0, 14.0, 32.0],\n [48.0, 66.0, 49.0, 67.0, 50.0, 68.0],\n ],\n ]\n ]\n).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_depthtospace_example\")" + } + ] + }, + { + "name": "DepthToSpace", + "module": "ai.onnx", + "version": 13, + "description": "DepthToSpace rearranges (permutes) data from depth into blocks of spatial data.\nThis is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of\nthe input tensor where values from the depth dimension are moved in spatial blocks to the height\nand width dimensions. By default, `mode` = `DCR`.\nIn the DCR mode, elements along the depth dimension from the input tensor are rearranged in the\nfollowing order: depth, column, and then row. The output y is computed from the input x as below:\n\n```\nb, c, h, w = x.shape\ntmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])\ntmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])\ny = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])\n```\n\nIn the CRD mode, elements along the depth dimension from the input tensor are rearranged in the\nfollowing order: column, row, and the depth. The output y is computed from the input x as below:\n\n```\nb, c, h, w = x.shape\ntmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])\ntmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])\ny = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])\n```\n", + "attributes": [ + { + "name": "blocksize", + "type": "int64", + "required": true, + "description": "Blocks of [blocksize, blocksize] are moved." + }, + { + "name": "mode", + "type": "string", + "required": false, + "default": "DCR", + "description": "DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "crd_mode_example", + "code": "node = onnx.helper.make_node(\n \"DepthToSpace\", inputs=[\"x\"], outputs=[\"y\"], blocksize=2, mode=\"CRD\"\n)\n\n# (1, 8, 2, 3) input tensor\nx = np.array(\n [\n [\n [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],\n [[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]],\n [[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]],\n [[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]],\n [[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]],\n [[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]],\n [[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]],\n [[63.0, 64.0, 65.0], [66.0, 67.0, 68.0]],\n ]\n ]\n).astype(np.float32)\n\n# (1, 2, 4, 6) output tensor\ny = np.array(\n [\n [\n [\n [0.0, 9.0, 1.0, 10.0, 2.0, 11.0],\n [18.0, 27.0, 19.0, 28.0, 20.0, 29.0],\n [3.0, 12.0, 4.0, 13.0, 5.0, 14.0],\n [21.0, 30.0, 22.0, 31.0, 23.0, 32.0],\n ],\n [\n [36.0, 45.0, 37.0, 46.0, 38.0, 47.0],\n [54.0, 63.0, 55.0, 64.0, 56.0, 65.0],\n [39.0, 48.0, 40.0, 49.0, 41.0, 50.0],\n [57.0, 66.0, 58.0, 67.0, 59.0, 68.0],\n ],\n ]\n ]\n).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_depthtospace_crd_mode_example\")" + }, + { + "summary": "default_mode_example", + "code": "node = onnx.helper.make_node(\n \"DepthToSpace\", inputs=[\"x\"], outputs=[\"y\"], blocksize=2, mode=\"DCR\"\n)\n\n# (1, 8, 2, 3) input tensor\nx = np.array(\n [\n [\n [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],\n [[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]],\n [[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]],\n [[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]],\n [[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]],\n [[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]],\n [[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]],\n [[63.0, 64.0, 65.0], [66.0, 67.0, 68.0]],\n ]\n ]\n).astype(np.float32)\n\n# (1, 2, 4, 6) output tensor\ny = np.array(\n [\n [\n [\n [0.0, 18.0, 1.0, 19.0, 2.0, 20.0],\n [36.0, 54.0, 37.0, 55.0, 38.0, 56.0],\n [3.0, 21.0, 4.0, 22.0, 5.0, 23.0],\n [39.0, 57.0, 40.0, 58.0, 41.0, 59.0],\n ],\n [\n [9.0, 27.0, 10.0, 28.0, 11.0, 29.0],\n [45.0, 63.0, 46.0, 64.0, 47.0, 65.0],\n [12.0, 30.0, 13.0, 31.0, 14.0, 32.0],\n [48.0, 66.0, 49.0, 67.0, 50.0, 68.0],\n ],\n ]\n ]\n).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_depthtospace_example\")" + } + ] + }, + { + "name": "DequantizeLinear", + "module": "ai.onnx", + "version": 10, + "description": "The linear dequantization operator. It consumes a quantized tensor, a scale, a zero point to compute the full precision tensor.\nThe dequantization formula is y = (x - x_zero_point) * x_scale. 'x_scale' and 'x_zero_point' are both scalars.\n'x_zero_point' and 'x' must have same type. 'x' and 'y' must have same shape. In the case of dequantizing int32,\nthere's no zero point (zero point is supposed to be 0).\n", + "inputs": [ + { + "name": "x", + "type": "T", + "description": "N-D quantized input tensor to be de-quantized." + }, + { + "name": "x_scale", + "type": "tensor(float)", + "description": "Scale for input 'x'. It's a scalar, which means a per-tensor/layer quantization." 
+ }, + { + "name": "x_zero_point", + "type": "T", + "option": "optional", + "description": "Zero point for input 'x'. It's a scalar, which means a per-tensor/layer quantization. It's optional. 0 is the default value when it's not specified." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "y", + "type": "tensor(float)", + "description": "N-D full precision output tensor. It has same shape as input 'x'." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain 'x_zero_point' and 'x' to 8-bit/32-bit integer tensor.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)", + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "axis", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\n# 1-D tensor zero point and scale of size equal to axis 1 of the input tensor\nx = np.array(\n [\n [\n [[3, 89], [34, 200], [74, 59]],\n [[5, 24], [24, 87], [32, 13]],\n [[245, 99], [4, 142], [121, 102]],\n ],\n ],\n dtype=np.uint8,\n)\nx_scale = np.array([2, 4, 5], dtype=np.float32)\nx_zero_point = np.array([84, 24, 196], dtype=np.uint8)\ny = (\n x.astype(np.float32) - x_zero_point.reshape(1, 3, 1, 1).astype(np.float32)\n) * x_scale.reshape(1, 3, 1, 1)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_axis\",\n)" + }, + { + "summary": "dequantizelinear", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\n# scalar zero point and scale\nx = np.array([0, 3, 128, 255]).astype(np.uint8)\nx_scale = np.float32(2)\nx_zero_point = np.uint8(128)\ny = np.array([-256, -250, 0, 254], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear\",\n)" + }, + { + "summary": "e4m3fn", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn\",\n)" + }, + { + "summary": "e4m3fn_float16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nx_scale = np.float16(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float16)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn_float16\",\n)" + }, + { + "summary": "e4m3fn_zero_point", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nzero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E4M3FN, [1], [0])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn_zero_point\",\n)" + }, + { + "summary": "e5m2", + "code": "node = 
onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, -96])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 98304.0, -192.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e5m2\",\n)" + }, + { + "summary": "int16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-300, -30, -1025, 1270]).astype(np.int16)\nx_scale = np.float32(2)\nx_zero_point = np.int16(-1024)\ny = np.array([1448.0, 1988.0, -2.0, 4588.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_int16\",\n)" + }, + { + "summary": "int4", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.INT4, [5], [0, 1, 7, -4, -8])\nx_scale = np.float32(2)\nx_zero_point = make_tensor(\"zero_point\", TensorProto.INT4, (1,), [1])\ny = np.array([-2, 0, 12, -10, -18], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_int4\",\n)" + }, + { + "summary": "uint16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([30000, 31000, 32768, 33000]).astype(np.uint16)\nx_scale = np.float32(2)\nx_zero_point = np.uint16(32767)\ny = np.array([-5534.0, -3534.0, 2.0, 466.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_uint16\",\n)" + }, + { + "summary": "uint4", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.UINT4, [5], [0, 1, 7, 10, 15])\nx_scale = np.float32(2)\nx_zero_point = make_tensor(\"zero_point\", TensorProto.UINT4, (1,), [1])\ny = np.array([-2, 0, 12, 18, 28], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_uint4\",\n)" + } + ] + }, + { + "name": "DequantizeLinear", + "module": "ai.onnx", + "version": 13, + "description": "The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor.\nThe dequantization formula is `y = (x - x_zero_point) * x_scale`. `x_scale` and `x_zero_point` must have same shape, and can be either a scalar\nfor per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization.\n`x_zero_point` and `x` must have same type. `x` and `y` must have same shape. In the case of dequantizing int32,\nthere's no zero point (zero point is supposed to be 0).\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) The axis of the dequantizing dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + } + ], + "inputs": [ + { + "name": "x", + "type": "T", + "description": "N-D quantized input tensor to be de-quantized." 
+ }, + { + "name": "x_scale", + "type": "tensor(float)", + "description": "Scale for input 'x'. It can be a scalar, which means a per-tensor/layer dequantization, or a 1-D tensor for per-axis dequantization." + }, + { + "name": "x_zero_point", + "type": "T", + "option": "optional", + "description": "Zero point for input 'x'. Shape must match x_scale. It's optional. Zero point is 0 when it's not specified." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "y", + "type": "tensor(float)", + "description": "N-D full precision output tensor. It has same shape as input 'x'." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain 'x_zero_point' and 'x' to 8-bit/32-bit integer tensor.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)", + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "axis", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\n# 1-D tensor zero point and scale of size equal to axis 1 of the input tensor\nx = np.array(\n [\n [\n [[3, 89], [34, 200], [74, 59]],\n [[5, 24], [24, 87], [32, 13]],\n [[245, 99], [4, 142], [121, 102]],\n ],\n ],\n dtype=np.uint8,\n)\nx_scale = np.array([2, 4, 5], dtype=np.float32)\nx_zero_point = np.array([84, 24, 196], dtype=np.uint8)\ny = (\n x.astype(np.float32) - x_zero_point.reshape(1, 3, 1, 1).astype(np.float32)\n) * x_scale.reshape(1, 3, 1, 1)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_axis\",\n)" + }, + { + "summary": "dequantizelinear", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\n# scalar zero point and scale\nx = np.array([0, 3, 128, 255]).astype(np.uint8)\nx_scale = np.float32(2)\nx_zero_point = np.uint8(128)\ny = np.array([-256, -250, 0, 254], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear\",\n)" + }, + { + "summary": "e4m3fn", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn\",\n)" + }, + { + "summary": "e4m3fn_float16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nx_scale = np.float16(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float16)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn_float16\",\n)" + }, + { + "summary": "e4m3fn_zero_point", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nzero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E4M3FN, [1], [0])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0], 
dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn_zero_point\",\n)" + }, + { + "summary": "e5m2", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, -96])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 98304.0, -192.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e5m2\",\n)" + }, + { + "summary": "int16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-300, -30, -1025, 1270]).astype(np.int16)\nx_scale = np.float32(2)\nx_zero_point = np.int16(-1024)\ny = np.array([1448.0, 1988.0, -2.0, 4588.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_int16\",\n)" + }, + { + "summary": "int4", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.INT4, [5], [0, 1, 7, -4, -8])\nx_scale = np.float32(2)\nx_zero_point = make_tensor(\"zero_point\", TensorProto.INT4, (1,), [1])\ny = np.array([-2, 0, 12, -10, -18], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_int4\",\n)" + }, + { + "summary": "uint16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([30000, 31000, 32768, 33000]).astype(np.uint16)\nx_scale = np.float32(2)\nx_zero_point = np.uint16(32767)\ny = np.array([-5534.0, -3534.0, 2.0, 466.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_uint16\",\n)" + }, + { + "summary": "uint4", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.UINT4, [5], [0, 1, 7, 10, 15])\nx_scale = np.float32(2)\nx_zero_point = make_tensor(\"zero_point\", TensorProto.UINT4, (1,), [1])\ny = np.array([-2, 0, 12, 18, 28], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_uint4\",\n)" + } + ] + }, + { + "name": "DequantizeLinear", + "module": "ai.onnx", + "version": 19, + "description": "The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor.\nThe dequantization formula is `y = (x - x_zero_point) * x_scale`. `x_scale` and `x_zero_point` must have same shape, and can be either a scalar\nfor per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization.\n`x_zero_point` and `x` must have same type. `x` and `y` must have same shape. 
In the case of dequantizing int32,\nthere's no zero point (zero point is supposed to be 0).\n`zero-point` is usually not used in the case of float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz quantization,\nbut the dequantization formula remains the same for consistency and 'x_scale' still determines the output type.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) The axis of the dequantizing dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + } + ], + "inputs": [ + { + "name": "x", + "type": "T1", + "description": "N-D quantized input tensor to be de-quantized." + }, + { + "name": "x_scale", + "type": "T2", + "description": "Scale for input 'x'. It can be a scalar, which means a per-tensor/layer dequantization, or a 1-D tensor for per-axis dequantization." + }, + { + "name": "x_zero_point", + "type": "T1", + "option": "optional", + "description": "Zero point for input 'x'. Shape must match x_scale. It's optional. Zero point is 0 when it's not specified." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "y", + "type": "T2", + "description": "N-D full precision output tensor. It has same shape as input 'x'." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain 'x_zero_point' and 'x' to 8-bit integer or float, or 32-bit integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)", + "tensor(int32)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + }, + { + "description": "'x_scale' determines the output type.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float)", + "tensor(float16)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "axis", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\n# 1-D tensor zero point and scale of size equal to axis 1 of the input tensor\nx = np.array(\n [\n [\n [[3, 89], [34, 200], [74, 59]],\n [[5, 24], [24, 87], [32, 13]],\n [[245, 99], [4, 142], [121, 102]],\n ],\n ],\n dtype=np.uint8,\n)\nx_scale = np.array([2, 4, 5], dtype=np.float32)\nx_zero_point = np.array([84, 24, 196], dtype=np.uint8)\ny = (\n x.astype(np.float32) - x_zero_point.reshape(1, 3, 1, 1).astype(np.float32)\n) * x_scale.reshape(1, 3, 1, 1)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_axis\",\n)" + }, + { + "summary": "dequantizelinear", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\n# scalar zero point and scale\nx = np.array([0, 3, 128, 255]).astype(np.uint8)\nx_scale = np.float32(2)\nx_zero_point = np.uint8(128)\ny = np.array([-256, -250, 0, 254], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear\",\n)" + }, + { + "summary": "e4m3fn", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0],
dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn\",\n)" + }, + { + "summary": "e4m3fn_float16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nx_scale = np.float16(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float16)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn_float16\",\n)" + }, + { + "summary": "e4m3fn_zero_point", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nzero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E4M3FN, [1], [0])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn_zero_point\",\n)" + }, + { + "summary": "e5m2", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, -96])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 98304.0, -192.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e5m2\",\n)" + }, + { + "summary": "int16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-300, -30, -1025, 1270]).astype(np.int16)\nx_scale = np.float32(2)\nx_zero_point = np.int16(-1024)\ny = np.array([1448.0, 1988.0, -2.0, 4588.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_int16\",\n)" + }, + { + "summary": "int4", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.INT4, [5], [0, 1, 7, -4, -8])\nx_scale = np.float32(2)\nx_zero_point = make_tensor(\"zero_point\", TensorProto.INT4, (1,), [1])\ny = np.array([-2, 0, 12, -10, -18], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_int4\",\n)" + }, + { + "summary": "uint16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([30000, 31000, 32768, 33000]).astype(np.uint16)\nx_scale = np.float32(2)\nx_zero_point = np.uint16(32767)\ny = np.array([-5534.0, -3534.0, 2.0, 466.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_uint16\",\n)" + }, + { + "summary": "uint4", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.UINT4, [5], [0, 1, 7, 10, 15])\nx_scale = np.float32(2)\nx_zero_point = make_tensor(\"zero_point\", TensorProto.UINT4, (1,), [1])\ny = np.array([-2, 0, 12, 18, 28], 
dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_uint4\",\n)" + } + ] + }, + { + "name": "DequantizeLinear", + "module": "ai.onnx", + "version": 21, + "description": "The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor.\nThe dequantization formula is `y = (x - x_zero_point) * x_scale`. `x_scale` and `x_zero_point` must have same shape, and can be either a scalar\nfor per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization.\n`x_zero_point` and `x` must have same type. `x` and `y` must have same shape. In the case of dequantizing int32,\nthere's no zero point (zero point is supposed to be 0).\n`zero-point` is usually not used in the case of float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz quantization,\nbut the dequantization formula remains the same for consistency and 'x_scale' still determines the output type.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) The axis of the dequantizing dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + } + ], + "inputs": [ + { + "name": "x", + "type": "T1", + "description": "N-D quantized input tensor to be de-quantized." + }, + { + "name": "x_scale", + "type": "T2", + "description": "Scale for input 'x'. It can be a scalar, which means a per-tensor/layer dequantization, or a 1-D tensor for per-axis dequantization." + }, + { + "name": "x_zero_point", + "type": "T1", + "option": "optional", + "description": "Zero point for input 'x'. Shape must match x_scale. It's optional. Zero point is 0 when it's not specified." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "y", + "type": "T2", + "description": "N-D full precision output tensor. It has same shape as input 'x'." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "The type of the inputs 'x_zero_point' and 'x'.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)", + "tensor(int16)", + "tensor(uint16)", + "tensor(int32)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + }, + { + "description": "'x_scale' determines the output type.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float)", + "tensor(float16)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "axis", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\n# 1-D tensor zero point and scale of size equal to axis 1 of the input tensor\nx = np.array(\n [\n [\n [[3, 89], [34, 200], [74, 59]],\n [[5, 24], [24, 87], [32, 13]],\n [[245, 99], [4, 142], [121, 102]],\n ],\n ],\n dtype=np.uint8,\n)\nx_scale = np.array([2, 4, 5], dtype=np.float32)\nx_zero_point = np.array([84, 24, 196], dtype=np.uint8)\ny = (\n x.astype(np.float32) - x_zero_point.reshape(1, 3, 1, 1).astype(np.float32)\n) * x_scale.reshape(1, 3, 1, 1)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_axis\",\n)" + }, + { + "summary": "dequantizelinear", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\n# scalar zero point and scale\nx = np.array([0, 3, 128, 255]).astype(np.uint8)\nx_scale = np.float32(2)\nx_zero_point = np.uint8(128)\ny = np.array([-256, -250, 0, 254], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear\",\n)" + }, + { + "summary": "e4m3fn", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn\",\n)" + }, + { + "summary": "e4m3fn_float16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nx_scale = np.float16(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float16)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn_float16\",\n)" + }, + { + "summary": "e4m3fn_zero_point", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104])\nzero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E4M3FN, [1], [0])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_e4m3fn_zero_point\",\n)" + }, + { + "summary": "e5m2", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\"],\n 
outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, -96])\nx_scale = np.float32(2)\ny = np.array([0.0, 1.0, 2.0, 98304.0, -192.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale],\n outputs=[y],\n name=\"test_dequantizelinear_e5m2\",\n)" + }, + { + "summary": "int16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-300, -30, -1025, 1270]).astype(np.int16)\nx_scale = np.float32(2)\nx_zero_point = np.int16(-1024)\ny = np.array([1448.0, 1988.0, -2.0, 4588.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_int16\",\n)" + }, + { + "summary": "int4", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.INT4, [5], [0, 1, 7, -4, -8])\nx_scale = np.float32(2)\nx_zero_point = make_tensor(\"zero_point\", TensorProto.INT4, (1,), [1])\ny = np.array([-2, 0, 12, -10, -18], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_int4\",\n)" + }, + { + "summary": "uint16", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([30000, 31000, 32768, 33000]).astype(np.uint16)\nx_scale = np.float32(2)\nx_zero_point = np.uint16(32767)\ny = np.array([-5534.0, -3534.0, 2.0, 466.0], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_uint16\",\n)" + }, + { + "summary": "uint4", + "code": "node = onnx.helper.make_node(\n \"DequantizeLinear\",\n inputs=[\"x\", \"x_scale\", \"x_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\n# scalar zero point and scale\nx = make_tensor(\"x\", TensorProto.UINT4, [5], [0, 1, 7, 10, 15])\nx_scale = np.float32(2)\nx_zero_point = make_tensor(\"zero_point\", TensorProto.UINT4, (1,), [1])\ny = np.array([-2, 0, 12, 18, 28], dtype=np.float32)\n\nexpect(\n node,\n inputs=[x, x_scale, x_zero_point],\n outputs=[y],\n name=\"test_dequantizelinear_uint4\",\n)" + } + ] + }, + { + "name": "Det", + "module": "ai.onnx", + "version": 11, + "description": "Det calculates determinant of a square matrix or batches of square matrices.\nDet takes one input tensor of shape `[*, M, M]`, where `*` is zero or more batch dimensions,\nand the inner-most 2 dimensions form square matrices.\nThe output is a tensor of shape `[*]`, containing the determinants of all input submatrices.\ne.g., When the input is 2-D, the output is a scalar(shape is empty: `[]`).\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to floating-point tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "2d", + "code": "node = onnx.helper.make_node(\n \"Det\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.arange(4).reshape(2, 2).astype(np.float32)\ny = np.linalg.det(x) # expect 
-2\nexpect(node, inputs=[x], outputs=[y], name=\"test_det_2d\")" + }, + { + "summary": "nd", + "code": "node = onnx.helper.make_node(\n \"Det\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]).astype(\n np.float32\n)\ny = np.linalg.det(x) # expect array([-2., -3., -8.])\nexpect(node, inputs=[x], outputs=[y], name=\"test_det_nd\")" + } + ] + }, + { + "name": "DictVectorizer", + "module": "ai.onnx.ml", + "version": 1, + "description": "Uses an index mapping to convert a dictionary to an array.
\n Given a dictionary, each key is looked up in the vocabulary attribute corresponding to\n the key type. The index into the vocabulary array at which the key is found is then\n used to index the output 1-D tensor 'Y' and insert into it the value found in the dictionary 'X'.
\n The key type of the input map must correspond to the element type of the defined vocabulary attribute.\n Therefore, the output array will be equal in length to the index mapping vector parameter.\n All keys in the input dictionary must be present in the index mapping vector.\n For each item in the input dictionary, insert its value in the output array.\n Any keys not present in the input dictionary will be zero in the output array.
\n For example: if the ``string_vocabulary`` parameter is set to ``[\"a\", \"c\", \"b\", \"z\"]``,\n then an input of ``{\"a\": 4, \"c\": 8}`` will produce an output of ``[4, 8, 0, 0]``.\n ", + "attributes": [ + { + "name": "int64_vocabulary", + "type": "int64[]", + "required": false, + "description": "An integer vocabulary array.
One and only one of the vocabularies must be defined." + }, + { + "name": "string_vocabulary", + "type": "string[]", + "required": false, + "description": "A string vocabulary array.
One and only one of the vocabularies must be defined." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "A dictionary." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "A 1-D tensor holding values from the input dictionary." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input must be a map from strings or integers to either strings or a numeric type. The key and value types cannot be the same.", + "type_param_str": "T1", + "allowed_type_strs": [ + "map(string, int64)", + "map(int64, string)", + "map(int64, float)", + "map(int64, double)", + "map(string, float)", + "map(string, double)" + ] + }, + { + "description": "The output will be a tensor of the value type of the input map. Its shape will be [1,C], where C is the length of the vocabulary.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int64)", + "tensor(float)", + "tensor(double)", + "tensor(string)" + ] + } + ],
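+ "examples": [ + { + "summary": "dictvectorizer", + "code": "# Illustrative sketch only: the source metadata ships no reference\n# example for this ai.onnx.ml operator, so the node below and the\n# hand-computed expectation are hypothetical, derived from the\n# documented semantics.\nnode = onnx.helper.make_node(\n \"DictVectorizer\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n domain=\"ai.onnx.ml\",\n string_vocabulary=[\"a\", \"c\", \"b\", \"z\"],\n)\n\n# Each vocabulary entry owns one output slot; vocabulary keys absent\n# from the input map contribute 0, so {\"a\": 4, \"c\": 8} -> [4, 8, 0, 0].\nvocabulary = [\"a\", \"c\", \"b\", \"z\"]\nx = {\"a\": 4, \"c\": 8}\ny = np.array([x.get(k, 0) for k in vocabulary], dtype=np.int64)\nassert y.tolist() == [4, 8, 0, 0]" + } + ] + }, + { + "name": "Div", + "module": "ai.onnx", + "version": 1, + "description": "Performs element-wise binary division (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of the left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or have its\nshape as a contiguous subset of the first tensor's shape. The start of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is a 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions. See doc for details." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Pass 1 to enable broadcasting" + }, + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand, should share the type with the second operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size."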
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same dimensions and type as A" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "div", + "code": "node = onnx.helper.make_node(\n \"Div\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([3, 4]).astype(np.float32)\ny = np.array([1, 2]).astype(np.float32)\nz = x / y # expected output [3., 2.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(3, 4, 5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div\")\n\nx = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8) + 1\nz = x // y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_uint8\")" + }, + { + "summary": "div_broadcast", + "code": "node = onnx.helper.make_node(\n \"Div\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_bcast\")" + } + ] + }, + { + "name": "Div", + "module": "ai.onnx", + "version": 6, + "description": "Performs element-wise binary division (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions. See doc for details." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Pass 1 to enable broadcasting" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand, should share the type with the second operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same dimensions and type as A" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "div", + "code": "node = onnx.helper.make_node(\n \"Div\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([3, 4]).astype(np.float32)\ny = np.array([1, 2]).astype(np.float32)\nz = x / y # expected output [3., 2.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(3, 4, 5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div\")\n\nx = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8) + 1\nz = x // y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_uint8\")" + }, + { + "summary": "div_broadcast", + "code": "node = onnx.helper.make_node(\n \"Div\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_bcast\")" + } + ] + }, + { + "name": "Div", + "module": "ai.onnx", + "version": 7, + "description": "Performs element-wise binary division (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "div", + "code": "node = onnx.helper.make_node(\n \"Div\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([3, 4]).astype(np.float32)\ny = np.array([1, 2]).astype(np.float32)\nz = x / y # expected output [3., 2.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(3, 4, 5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div\")\n\nx = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8) + 1\nz = x // y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_uint8\")" + }, + { + "summary": "div_broadcast", + "code": "node = onnx.helper.make_node(\n \"Div\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_bcast\")" + } + ] + }, + { + "name": "Div", + "module": "ai.onnx", + "version": 13, + "description": "Performs element-wise binary division (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "div", + "code": "node = onnx.helper.make_node(\n \"Div\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([3, 4]).astype(np.float32)\ny = np.array([1, 2]).astype(np.float32)\nz = x / y # expected output [3., 2.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(3, 4, 5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div\")\n\nx = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8) + 1\nz = x // y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_uint8\")" + }, + { + "summary": "div_broadcast", + "code": "node = onnx.helper.make_node(\n \"Div\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_bcast\")" + } + ] + }, + { + "name": "Div", + "module": "ai.onnx", + "version": 14, + "description": "Performs element-wise binary division (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n\n(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "div", + "code": "node = onnx.helper.make_node(\n \"Div\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([3, 4]).astype(np.float32)\ny = np.array([1, 2]).astype(np.float32)\nz = x / y # expected output [3., 2.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(3, 4, 5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div\")\n\nx = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8) + 1\nz = x // y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_uint8\")" + }, + { + "summary": "div_broadcast", + "code": "node = onnx.helper.make_node(\n \"Div\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_div_bcast\")" + } + ] + }, + { + "name": "Dropout", + "module": "ai.onnx", + "version": 1, + "description": "Dropout takes one input data (Tensor) and produces two Tensor outputs,\noutput (Tensor) and mask (Tensor). Depending on whether it is in\ntest mode or not, the output Y will either be a random dropout, or a simple\ncopy of the input. Note that our implementation of Dropout does scaling in\nthe training phase, so during testing nothing needs to be done.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + }, + { + "name": "is_test", + "type": "int64", + "required": false, + "description": "(int, default 0) if nonzero, run dropout in test mode where the output is simply Y = X." + }, + { + "name": "ratio", + "type": "float32", + "required": false, + "default": 0.5, + "description": "(float, default 0.5) the ratio of random dropout" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "The input data as Tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output." + }, + { + "name": "mask", + "type": "T", + "option": "optional", + "description": "The output mask. If is_test is nonzero, this output is not filled." 
+ } + ], + "min_output": 1, + "max_output": 2, + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\"Dropout\", inputs=[\"x\"], outputs=[\"y\"], seed=seed)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_dropout_default\")" + }, + { + "summary": "default_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, return_mask=True)\nexpect(node, inputs=[x], outputs=[y, z], name=\"test_dropout_default_mask\")" + }, + { + "summary": "default_mask_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, r, return_mask=True)\nexpect(\n node, inputs=[x, r], outputs=[y, z], name=\"test_dropout_default_mask_ratio\"\n)" + }, + { + "summary": "default_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_default_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "default_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x, r)\nexpect(node, inputs=[x, r], outputs=[y], name=\"test_dropout_default_ratio\")" + }, + { + "summary": "random_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n ratio=0.2,\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_random_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "training", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout\")" + }, + { + "summary": "training_default", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_default\"\n)" + }, + { + "summary": "training_default_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_default_mask\",\n)" + }, + { + 
"summary": "training_default_zero_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_zero_ratio\"\n)" + }, + { + "summary": "training_default_zero_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_zero_ratio_mask\",\n)" + }, + { + "summary": "training_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node, inputs=[x, r, t], outputs=[y, z], name=\"test_training_dropout_mask\"\n)" + } + ], + "category": "Dropout" + }, + { + "name": "Dropout", + "module": "ai.onnx", + "version": 6, + "description": "Dropout takes one input data (Tensor) and produces two Tensor outputs,\noutput (Tensor) and mask (Tensor). Depending on whether it is in\ntest mode or not, the output Y will either be a random dropout, or a simple\ncopy of the input. Note that our implementation of Dropout does scaling in\nthe training phase, so during testing nothing needs to be done.\n", + "attributes": [ + { + "name": "is_test", + "type": "int64", + "required": false, + "description": "(int, default 0) if nonzero, run dropout in test mode where the output is simply Y = X." + }, + { + "name": "ratio", + "type": "float32", + "required": false, + "default": 0.5, + "description": "(float, default 0.5) the ratio of random dropout" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "The input data as Tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output." + }, + { + "name": "mask", + "type": "T", + "option": "optional", + "description": "The output mask. If is_test is nonzero, this output is not filled." 
+ } + ], + "min_output": 1, + "max_output": 2, + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\"Dropout\", inputs=[\"x\"], outputs=[\"y\"], seed=seed)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_dropout_default\")" + }, + { + "summary": "default_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, return_mask=True)\nexpect(node, inputs=[x], outputs=[y, z], name=\"test_dropout_default_mask\")" + }, + { + "summary": "default_mask_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, r, return_mask=True)\nexpect(\n node, inputs=[x, r], outputs=[y, z], name=\"test_dropout_default_mask_ratio\"\n)" + }, + { + "summary": "default_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_default_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "default_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x, r)\nexpect(node, inputs=[x, r], outputs=[y], name=\"test_dropout_default_ratio\")" + }, + { + "summary": "random_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n ratio=0.2,\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_random_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "training", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout\")" + }, + { + "summary": "training_default", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_default\"\n)" + }, + { + "summary": "training_default_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_default_mask\",\n)" + }, + { + 
"summary": "training_default_zero_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_zero_ratio\"\n)" + }, + { + "summary": "training_default_zero_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_zero_ratio_mask\",\n)" + }, + { + "summary": "training_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node, inputs=[x, r, t], outputs=[y, z], name=\"test_training_dropout_mask\"\n)" + } + ], + "category": "Dropout" + }, + { + "name": "Dropout", + "module": "ai.onnx", + "version": 7, + "description": "Dropout takes one input data (Tensor) and produces two Tensor outputs,\noutput (Tensor) and mask (Tensor). Depending on whether it is in\ntest mode or not, the output Y will either be a random dropout, or a simple\ncopy of the input. Note that our implementation of Dropout does scaling in\nthe training phase, so during testing nothing needs to be done.\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "ratio", + "type": "float32", + "required": false, + "default": 0.5, + "description": "The ratio of random dropout" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "The input data as Tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output." + }, + { + "name": "mask", + "type": "T", + "option": "optional", + "description": "The output mask." 
+ } + ], + "min_output": 1, + "max_output": 2, + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\"Dropout\", inputs=[\"x\"], outputs=[\"y\"], seed=seed)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_dropout_default\")" + }, + { + "summary": "default_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, return_mask=True)\nexpect(node, inputs=[x], outputs=[y, z], name=\"test_dropout_default_mask\")" + }, + { + "summary": "default_mask_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, r, return_mask=True)\nexpect(\n node, inputs=[x, r], outputs=[y, z], name=\"test_dropout_default_mask_ratio\"\n)" + }, + { + "summary": "default_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_default_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "default_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x, r)\nexpect(node, inputs=[x, r], outputs=[y], name=\"test_dropout_default_ratio\")" + }, + { + "summary": "random_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n ratio=0.2,\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_random_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "training", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout\")" + }, + { + "summary": "training_default", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_default\"\n)" + }, + { + "summary": "training_default_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_default_mask\",\n)" + }, + { + 
"summary": "training_default_zero_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_zero_ratio\"\n)" + }, + { + "summary": "training_default_zero_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_zero_ratio_mask\",\n)" + }, + { + "summary": "training_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node, inputs=[x, r, t], outputs=[y, z], name=\"test_training_dropout_mask\"\n)" + } + ], + "category": "Dropout" + }, + { + "name": "Dropout", + "module": "ai.onnx", + "version": 10, + "description": "Dropout takes one input floating tensor and produces two tensor outputs,\noutput (floating tensor) and mask (`Tensor`). Depending on whether it is\nin test mode or not, the output Y will either be a random dropout, or a simple\ncopy of the input. Note that our implementation of Dropout does scaling in\nthe training phase, so during testing nothing needs to be done.\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "ratio", + "type": "float32", + "required": false, + "default": 0.5, + "description": "The ratio of random dropout" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "The input data as Tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output." + }, + { + "name": "mask", + "type": "T1", + "option": "optional", + "description": "The output mask." 
+ } + ], + "min_output": 1, + "max_output": 2, + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output mask types to boolean tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "default", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\"Dropout\", inputs=[\"x\"], outputs=[\"y\"], seed=seed)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_dropout_default\")" + }, + { + "summary": "default_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, return_mask=True)\nexpect(node, inputs=[x], outputs=[y, z], name=\"test_dropout_default_mask\")" + }, + { + "summary": "default_mask_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, r, return_mask=True)\nexpect(\n node, inputs=[x, r], outputs=[y, z], name=\"test_dropout_default_mask_ratio\"\n)" + }, + { + "summary": "default_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_default_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "default_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x, r)\nexpect(node, inputs=[x, r], outputs=[y], name=\"test_dropout_default_ratio\")" + }, + { + "summary": "random_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n ratio=0.2,\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_random_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "training", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout\")" + }, + { + "summary": "training_default", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_default\"\n)" + }, + { + "summary": "training_default_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny, z = dropout(x, r, 
training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_default_mask\",\n)" + }, + { + "summary": "training_default_zero_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_zero_ratio\"\n)" + }, + { + "summary": "training_default_zero_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_zero_ratio_mask\",\n)" + }, + { + "summary": "training_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node, inputs=[x, r, t], outputs=[y, z], name=\"test_training_dropout_mask\"\n)" + } + ], + "category": "Dropout" + }, + { + "name": "Dropout", + "module": "ai.onnx", + "version": 12, + "description": "Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs,\noutput (floating-point tensor) and mask (optional `Tensor<bool>`). If `training_mode` is true then the output Y will be a random dropout;\nNote that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode,\nthe user can simply not pass `training_mode` input or set it to false.\n```\noutput = scale * data * mask,\n```\nwhere\n```\nscale = 1. / (1. - ratio).\n```\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "seed", + "type": "int64", + "required": false, + "description": "(Optional) Seed to the random generator, if not specified we will auto generate one." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "The input data as Tensor." + }, + { + "name": "ratio", + "type": "T1", + "option": "optional", + "description": "The ratio of random dropout, with value in [0, 1). If this input was not set, or if it was set to 0, the output would be a simple copy of the input. If it's non-zero, output will be a random dropout of the scaled input, which is typically the case during training. It is an optional value, if not specified it will default to 0.5." + }, + { + "name": "training_mode", + "type": "T2", + "option": "optional", + "description": "If set to true then it indicates dropout is being used for training. It is an optional value hence unless specified explicitly, it is false. 
If it is false, ratio is ignored and the operation mimics inference mode where nothing will be dropped from the input data and if mask is requested as output it will contain all ones." + } + ], + "min_input": 1, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output." + }, + { + "name": "mask", + "type": "T2", + "option": "optional", + "description": "The output mask." + } + ], + "min_output": 1, + "max_output": 2, + "inputs_range": "1 - 3", + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain input 'ratio' types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output 'mask' types to boolean tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "default", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\"Dropout\", inputs=[\"x\"], outputs=[\"y\"], seed=seed)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_dropout_default\")" + }, + { + "summary": "default_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, return_mask=True)\nexpect(node, inputs=[x], outputs=[y, z], name=\"test_dropout_default_mask\")" + }, + { + "summary": "default_mask_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, r, return_mask=True)\nexpect(\n node, inputs=[x, r], outputs=[y, z], name=\"test_dropout_default_mask_ratio\"\n)" + }, + { + "summary": "default_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_default_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "default_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x, r)\nexpect(node, inputs=[x, r], outputs=[y], name=\"test_dropout_default_ratio\")" + }, + { + "summary": "random_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n ratio=0.2,\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_random_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "training", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout\")" + }, + { + "summary": "training_default", + "code": "seed = 
np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_default\"\n)" + }, + { + "summary": "training_default_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_default_mask\",\n)" + }, + { + "summary": "training_default_zero_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_zero_ratio\"\n)" + }, + { + "summary": "training_default_zero_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_zero_ratio_mask\",\n)" + }, + { + "summary": "training_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node, inputs=[x, r, t], outputs=[y, z], name=\"test_training_dropout_mask\"\n)" + } + ], + "category": "Dropout" + }, + { + "name": "Dropout", + "module": "ai.onnx", + "version": 13, + "description": "Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs,\noutput (floating-point tensor) and mask (optional `Tensor<bool>`). If `training_mode` is true then the output Y will be a random dropout;\nNote that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode,\nthe user can simply not pass `training_mode` input or set it to false.\n```\noutput = scale * data * mask,\n```\nwhere\n```\nscale = 1. / (1. - ratio).\n```\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "seed", + "type": "int64", + "required": false, + "description": "(Optional) Seed to the random generator, if not specified we will auto generate one." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "The input data as Tensor."
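The Dropout examples also rely on a `dropout(...)` reference helper that is never defined here. A minimal NumPy sketch consistent with the documented semantics above (`output = scale * data * mask` with `scale = 1. / (1. - ratio)`, identity in inference mode); the name, signature, and RNG choice are assumptions, not the official ONNX test helper:

```python
import numpy as np

# Sketch of a dropout() reference helper matching Dropout-12/13 semantics.
def dropout(x, ratio=0.5, training_mode=False, return_mask=False, seed=0):
    if (not training_mode) or ratio == 0.0:
        # Inference mode (or zero ratio): identity; mask, if requested, is all ones.
        mask = np.ones(x.shape, dtype=bool)
        return (x, mask) if return_mask else x
    rng = np.random.default_rng(seed)
    mask = rng.uniform(0.0, 1.0, x.shape) >= ratio  # keep with probability 1 - ratio
    scale = 1.0 / (1.0 - ratio)                     # rescale surviving activations
    y = (mask * x * scale).astype(x.dtype)
    return (y, mask) if return_mask else y
```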
+ }, + { + "name": "ratio", + "type": "T1", + "option": "optional", + "description": "The ratio of random dropout, with value in [0, 1). If this input was not set, or if it was set to 0, the output would be a simple copy of the input. If it's non-zero, output will be a random dropout of the scaled input, which is typically the case during training. It is an optional value, if not specified it will default to 0.5." + }, + { + "name": "training_mode", + "type": "T2", + "option": "optional", + "description": "If set to true then it indicates dropout is being used for training. It is an optional value hence unless specified explicitly, it is false. If it is false, ratio is ignored and the operation mimics inference mode where nothing will be dropped from the input data and if mask is requested as output it will contain all ones." + } + ], + "min_input": 1, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output." + }, + { + "name": "mask", + "type": "T2", + "option": "optional", + "description": "The output mask." + } + ], + "min_output": 1, + "max_output": 2, + "inputs_range": "1 - 3", + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain input 'ratio' types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output 'mask' types to boolean tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "default", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\"Dropout\", inputs=[\"x\"], outputs=[\"y\"], seed=seed)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_dropout_default\")" + }, + { + "summary": "default_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, return_mask=True)\nexpect(node, inputs=[x], outputs=[y, z], name=\"test_dropout_default_mask\")" + }, + { + "summary": "default_mask_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, r, return_mask=True)\nexpect(\n node, inputs=[x, r], outputs=[y, z], name=\"test_dropout_default_mask_ratio\"\n)" + }, + { + "summary": "default_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_default_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "default_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\"], outputs=[\"y\"], seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x, r)\nexpect(node, inputs=[x, r], outputs=[y], name=\"test_dropout_default_ratio\")" + }, + { + "summary": "random_old", + "code": "node = onnx.helper.make_node(\n \"Dropout\",\n inputs=[\"x\"],\n 
outputs=[\"y\"],\n ratio=0.2,\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_dropout_random_old\",\n opset_imports=[helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "training", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout\")" + }, + { + "summary": "training_default", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_default\"\n)" + }, + { + "summary": "training_default_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_default_mask\",\n)" + }, + { + "summary": "training_default_zero_ratio", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(\n node, inputs=[x, r, t], outputs=[y], name=\"test_training_dropout_zero_ratio\"\n)" + }, + { + "summary": "training_default_zero_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node,\n inputs=[x, r, t],\n outputs=[y, z],\n name=\"test_training_dropout_zero_ratio_mask\",\n)" + }, + { + "summary": "training_ratio_mask", + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n \"Dropout\", inputs=[\"x\", \"r\", \"t\"], outputs=[\"y\", \"z\"], seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(\n node, inputs=[x, r, t], outputs=[y, z], name=\"test_training_dropout_mask\"\n)" + } + ], + "category": "Dropout" + }, + { + "name": "DynamicQuantizeLinear", + "module": "ai.onnx", + "version": 11, + "description": "A Function to fuse calculation for Scale, Zero Point and FP32->8Bit conversion of FP32 Input data.\nOutputs Scale, ZeroPoint and Quantized Input for a given FP32 Input.\nScale is calculated as:\n```\ny_scale = (maximum(0, max(x)) - minimum(0, min(x))) / (qmax - qmin)\n```\n\n* where qmax and qmin are max and min values for quantization range i.e. 
[0, 255] in case of uint8\n* data range is adjusted to include 0.\n\nZero point is calculated as:\n```\nintermediate_zero_point = qmin - min(x)/y_scale\ny_zero_point = cast(round(saturate(intermediate_zero_point)))\n```\n\n* where qmax and qmin are max and min values for quantization range i.e. [0, 255] in case of uint8\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n* rounding to nearest ties to even.\n\nData quantization formula is:\n```\ny = saturate (round (x / y_scale) + y_zero_point)\n```\n\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n* rounding to nearest ties to even.\n", + "inputs": [ + { + "name": "x", + "type": "T1", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "y", + "type": "T2", + "description": "Quantized output tensor" + }, + { + "name": "y_scale", + "type": "tensor(float)", + "description": "Output scale. It's a scalar, which means a per-tensor/layer quantization." + }, + { + "name": "y_zero_point", + "type": "T2", + "description": "Output zero point. It's a scalar, which means a per-tensor/layer quantization." + } + ], + "min_output": 3, + "max_output": 3, + "type_constraints": [ + { + "description": "Constrain 'x' to float tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)" + ] + }, + { + "description": "Constrain 'y_zero_point' and 'y' to 8-bit unsigned integer tensor.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(uint8)" + ] + } + ], + "examples": [ + { + "summary": "dynamicquantizelinear", + "code": "node = onnx.helper.make_node(\n \"DynamicQuantizeLinear\",\n inputs=[\"x\"],\n outputs=[\"y\", \"y_scale\", \"y_zero_point\"],\n)\n\n# expected scale 0.0196078438 and zero point 153\nX = np.array([0, 2, -3, -2.5, 1.34, 0.5]).astype(np.float32)\nx_min = np.minimum(0, np.min(X))\nx_max = np.maximum(0, np.max(X))\nY_Scale = np.float32((x_max - x_min) / (255 - 0)) # uint8 -> [0, 255]\nY_ZeroPoint = np.clip(round((0 - x_min) / Y_Scale), 0, 255).astype(np.uint8)\nY = np.clip(np.round(X / Y_Scale) + Y_ZeroPoint, 0, 255).astype(np.uint8)\n\nexpect(\n node,\n inputs=[X],\n outputs=[Y, Y_Scale, Y_ZeroPoint],\n name=\"test_dynamicquantizelinear\",\n)\n\n# expected scale 0.0156862754 and zero point 255\nX = np.array([-1.0, -2.1, -1.3, -2.5, -3.34, -4.0]).astype(np.float32)\nx_min = np.minimum(0, np.min(X))\nx_max = np.maximum(0, np.max(X))\nY_Scale = np.float32((x_max - x_min) / (255 - 0)) # uint8 -> [0, 255]\nY_ZeroPoint = np.clip(round((0 - x_min) / Y_Scale), 0, 255).astype(np.uint8)\nY = np.clip(np.round(X / Y_Scale) + Y_ZeroPoint, 0, 255).astype(np.uint8)\n\nexpect(\n node,\n inputs=[X],\n outputs=[Y, Y_Scale, Y_ZeroPoint],\n name=\"test_dynamicquantizelinear_max_adjusted\",\n)\n\nX = (\n np.array([1, 2.1, 1.3, 2.5, 3.34, 4.0, 1.5, 2.6, 3.9, 4.0, 3.0, 2.345])\n .astype(np.float32)\n .reshape((3, 4))\n)\n\n# expected scale 0.0156862754 and zero point 0\nx_min = np.minimum(0, np.min(X))\nx_max = np.maximum(0, np.max(X))\nY_Scale = np.float32((x_max - x_min) / (255 - 0)) # uint8 -> [0, 255]\nY_ZeroPoint = np.clip(round((0 - x_min) / Y_Scale), 0, 255).astype(np.uint8)\nY = np.clip(np.round(X / Y_Scale) + Y_ZeroPoint, 0, 255).astype(np.uint8)\n\nexpect(\n node,\n inputs=[X],\n outputs=[Y, Y_Scale, Y_ZeroPoint],\n name=\"test_dynamicquantizelinear_min_adjusted\",\n)" + } + ] + }, + { + "name": "Einsum", + "module": "ai.onnx", +
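The scale and zero-point formulas above condense to a few lines of NumPy. A self-contained sketch of the uint8-only case described in this entry; the helper name is hypothetical, `np.rint` stands in for the round-half-to-even rule, and a nonzero data range is assumed:

```python
import numpy as np

# Hypothetical helper condensing the DynamicQuantizeLinear uint8 formulas.
def dynamic_quantize_linear(x: np.ndarray):
    qmin, qmax = 0, 255                    # uint8 quantization range
    x_min = min(0.0, float(x.min()))       # data range is adjusted to include 0
    x_max = max(0.0, float(x.max()))
    y_scale = np.float32((x_max - x_min) / (qmax - qmin))
    # Round half to even, then saturate the zero point into [qmin, qmax].
    zp = np.clip(np.rint((qmin - x_min) / y_scale), qmin, qmax)
    y_zero_point = np.uint8(zp)
    # y = saturate(round(x / y_scale) + y_zero_point)
    y = np.clip(np.rint(x / y_scale) + y_zero_point, qmin, qmax).astype(np.uint8)
    return y, y_scale, y_zero_point
```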
"version": 12, + "description": "An einsum of the form `term1, term2 -> output-term` produces an output tensor using the following equation\n\n```\noutput[output-term] = reduce-sum( input1[term1] * input2[term2] )\n```\n\nwhere the reduce-sum performs a summation over all the indices occurring in the input terms (term1, term2)\nthat do not occur in the output-term.\n\nThe Einsum operator evaluates algebraic tensor operations on a sequence of tensors, using the Einstein summation\nconvention. The equation string contains a comma-separated sequence of lower case letters. Each term corresponds to\nan operand tensor, and the characters within the terms correspond to operands dimensions.\n\nThis sequence may be followed by \"->\" to separate the left and right hand side of the equation.\nIf the equation contains \"->\" followed by the right-hand side, the explicit (not classical) form of the Einstein\nsummation is performed, and the right-hand side indices indicate output tensor dimensions. In other cases,\noutput indices are (implicitly) set to the alphabetically sorted sequence of indices appearing exactly once in the\nequation.\n\nWhen a dimension character is repeated in the left-hand side, it represents summation along the dimension.\n\nThe equation may contain ellipsis (\"...\") to enable broadcasting. Ellipsis must indicate a fixed number of dimensions.\nSpecifically, every occurrence of ellipsis in the equation must represent the same number of dimensions.\nThe right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the\nbeginning of the output. The equation string may contain space (U+0020) character.\n", + "attributes": [ + { + "name": "equation", + "type": "string", + "required": true, + "description": "Einsum expression string." 
+ } + ], + "inputs": [ + { + "name": "Inputs", + "type": "T", + "list": true, + "description": "Operands" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "Output", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to all numerical tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "einsum_batch_diagonal", + "code": "Eqn = \"...ii ->...i\"\nnode = onnx.helper.make_node(\n \"Einsum\", inputs=[\"x\"], outputs=[\"y\"], equation=Eqn\n)\n\nX = np.random.randn(3, 5, 5)\nZ = einsum_reference_implementation(Eqn, (X,))\n\nexpect(node, inputs=[X], outputs=[Z], name=\"test_einsum_batch_diagonal\")" + }, + { + "summary": "einsum_batch_matmul", + "code": "Eqn = \"bij, bjk -> bik\"\nnode = onnx.helper.make_node(\n \"Einsum\", inputs=[\"x\", \"y\"], outputs=[\"z\"], equation=Eqn\n)\n\nX = np.random.randn(5, 2, 3)\nY = np.random.randn(5, 3, 4)\nZ = einsum_reference_implementation(Eqn, (X, Y))\n\nexpect(node, inputs=[X, Y], outputs=[Z], name=\"test_einsum_batch_matmul\")" + }, + { + "summary": "einsum_inner_prod", + "code": "Eqn = \"i,i\"\nnode = onnx.helper.make_node(\n \"Einsum\", inputs=[\"x\", \"y\"], outputs=[\"z\"], equation=Eqn\n)\n\nX = np.random.randn(5)\nY = np.random.randn(5)\nZ = einsum_reference_implementation(Eqn, (X, Y))\n\nexpect(node, inputs=[X, Y], outputs=[Z], name=\"test_einsum_inner_prod\")" + }, + { + "summary": "einsum_sum", + "code": "Eqn = \"ij->i\"\nnode = onnx.helper.make_node(\n \"Einsum\", inputs=[\"x\"], outputs=[\"y\"], equation=Eqn\n)\n\nX = np.random.randn(3, 4)\nZ = einsum_reference_implementation(Eqn, (X,))\n\nexpect(node, inputs=[X], outputs=[Z], name=\"test_einsum_sum\")" + }, + { + "summary": "einsum_transpose", + "code": "Eqn = \"ij->ji\"\nnode = onnx.helper.make_node(\n \"Einsum\", inputs=[\"x\"], outputs=[\"y\"], equation=Eqn\n)\n\nX = np.random.randn(3, 4)\nY = einsum_reference_implementation(Eqn, (X,))\n\nexpect(node, inputs=[X], outputs=[Y], name=\"test_einsum_transpose\")" + } + ] + }, + { + "name": "Elu", + "module": "ai.onnx", + "version": 1, + "description": "Elu takes one input data (Tensor<T>) and produces one output data\n(Tensor<T>) where the function `f(x) = alpha * (exp(x) - 1.) for x <\n0`, `f(x) = x for x >= 0`, is applied to the tensor elementwise.\n\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Coefficient of ELU, defaults to 1.0." + }, + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute."
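The implicit-mode rule above (output indices are the alphabetically sorted letters occurring exactly once in the equation) can be checked directly with `np.einsum`, which follows the same convention as the `einsum_reference_implementation` these examples call:

```python
import numpy as np

# Explicit vs. implicit Einsum mode, using np.einsum as a stand-in.
a = np.random.randn(2, 3)
b = np.random.randn(3, 4)

# Explicit form: "->" names the output indices.
explicit = np.einsum("ij,jk->ik", a, b)

# Implicit form: the output indices are the alphabetically sorted letters
# appearing exactly once ("i" and "k" here), so this is the same matmul.
implicit = np.einsum("ij,jk", a, b)
assert np.allclose(explicit, implicit)

# A letter repeated within one term sums along that dimension:
# "ii" with no "->" reduces a square matrix to its trace.
m = np.random.randn(5, 5)
assert np.isclose(np.einsum("ii", m), np.trace(m))
```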
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "1D input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "1D output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "elu", + "code": "node = onnx.helper.make_node(\"Elu\", inputs=[\"x\"], outputs=[\"y\"], alpha=2.0)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-1.2642411, 0., 1.]\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0\nexpect(node, inputs=[x], outputs=[y], name=\"test_elu_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0\nexpect(node, inputs=[x], outputs=[y], name=\"test_elu\")" + }, + { + "summary": "elu_default", + "code": "default_alpha = 1.0\nnode = onnx.helper.make_node(\n \"Elu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha\nexpect(node, inputs=[x], outputs=[y], name=\"test_elu_default\")" + } + ], + "category": "Activation" + }, + { + "name": "Elu", + "module": "ai.onnx", + "version": 6, + "description": "Elu takes one input data (Tensor<T>) and produces one output data\n(Tensor<T>) where the function `f(x) = alpha * (exp(x) - 1.) for x <\n0`, `f(x) = x for x >= 0`, is applied to the tensor elementwise.\n\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Coefficient of ELU."
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "1D input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "1D output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "elu", + "code": "node = onnx.helper.make_node(\"Elu\", inputs=[\"x\"], outputs=[\"y\"], alpha=2.0)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-1.2642411, 0., 1.]\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0\nexpect(node, inputs=[x], outputs=[y], name=\"test_elu_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0\nexpect(node, inputs=[x], outputs=[y], name=\"test_elu\")" + }, + { + "summary": "elu_default", + "code": "default_alpha = 1.0\nnode = onnx.helper.make_node(\n \"Elu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha\nexpect(node, inputs=[x], outputs=[y], name=\"test_elu_default\")" + } + ], + "category": "Activation" + }, + { + "name": "Equal", + "module": "ai.onnx", + "version": 1, + "description": "Returns the tensor resulting from performing the `equal` logical operation\nelementwise on the input tensors `A` and `B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Enable broadcasting" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Left input tensor for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Right input tensor for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to integral tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)", + "tensor(int32)", + "tensor(int64)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "equal", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal\")" + }, + { + "summary": "equal_broadcast", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_bcast\")" + }, + { + "summary": "equal_string", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([\"string1\", \"string2\"], dtype=np.dtype(object))\ny = np.array([\"string1\", \"string3\"], dtype=np.dtype(object))\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_string\")" + }, + { + "summary": "equal_string_broadcast", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([\"string1\", \"string2\"], dtype=np.dtype(object))\ny = np.array([\"string1\"], dtype=np.dtype(object))\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_string_broadcast\")" + } + ] + }, + { + "name": "Equal", + "module": "ai.onnx", + "version": 7, + "description": "Returns the tensor resulting from performing the `equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to integral tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)", + "tensor(int32)", + "tensor(int64)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "equal", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal\")" + }, + { + "summary": "equal_broadcast", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_bcast\")" + }, + { + "summary": "equal_string", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([\"string1\", \"string2\"], dtype=np.dtype(object))\ny = np.array([\"string1\", \"string3\"], dtype=np.dtype(object))\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_string\")" + }, + { + "summary": "equal_string_broadcast", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([\"string1\", \"string2\"], dtype=np.dtype(object))\ny = np.array([\"string1\"], dtype=np.dtype(object))\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_string_broadcast\")" + } + ] + }, + { + "name": "Equal", + "module": "ai.onnx", + "version": 11, + "description": "Returns the tensor resulting from performing the `equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "equal", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal\")" + }, + { + "summary": "equal_broadcast", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_bcast\")" + }, + { + "summary": "equal_string", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([\"string1\", \"string2\"], dtype=np.dtype(object))\ny = np.array([\"string1\", \"string3\"], dtype=np.dtype(object))\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_string\")" + }, + { + "summary": "equal_string_broadcast", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([\"string1\", \"string2\"], dtype=np.dtype(object))\ny = np.array([\"string1\"], dtype=np.dtype(object))\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_string_broadcast\")" + } + ] + }, + { + "name": "Equal", + "module": "ai.onnx", + "version": 13, + "description": "Returns the tensor resulting from performing the `equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "equal", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal\")" + }, + { + "summary": "equal_broadcast", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_bcast\")" + }, + { + "summary": "equal_string", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([\"string1\", \"string2\"], dtype=np.dtype(object))\ny = np.array([\"string1\", \"string3\"], dtype=np.dtype(object))\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_string\")" + }, + { + "summary": "equal_string_broadcast", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([\"string1\", \"string2\"], dtype=np.dtype(object))\ny = np.array([\"string1\"], dtype=np.dtype(object))\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_string_broadcast\")" + } + ] + }, + { + "name": "Equal", + "module": "ai.onnx", + "version": 19, + "description": "Returns the tensor resulting from performing the `equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to all (non-complex) tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)", + "tensor(string)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "equal", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal\")" + }, + { + "summary": "equal_broadcast", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_bcast\")" + }, + { + "summary": "equal_string", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([\"string1\", \"string2\"], dtype=np.dtype(object))\ny = np.array([\"string1\", \"string3\"], dtype=np.dtype(object))\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_string\")" + }, + { + "summary": "equal_string_broadcast", + "code": "node = onnx.helper.make_node(\n \"Equal\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([\"string1\", \"string2\"], dtype=np.dtype(object))\ny = np.array([\"string1\"], dtype=np.dtype(object))\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_equal_string_broadcast\")" + } + ] + }, + { + "name": "Erf", + "module": "ai.onnx", + "version": 9, + "description": "Computes the error function of the given input tensor element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The error function of the input tensor computed element-wise. It has the same shape and type of the input." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "erf", + "code": "node = onnx.helper.make_node(\n \"Erf\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\ny = np.vectorize(math.erf)(x).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_erf\")" + } + ] + }, + { + "name": "Erf", + "module": "ai.onnx", + "version": 13, + "description": "Computes the error function of the given input tensor element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The error function of the input tensor computed element-wise. It has the same shape and type of the input." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "erf", + "code": "node = onnx.helper.make_node(\n \"Erf\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\ny = np.vectorize(math.erf)(x).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_erf\")" + } + ] + }, + { + "name": "Exp", + "module": "ai.onnx", + "version": 1, + "description": "Calculates the exponential of the given input tensor, element-wise.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." 
+ } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The exponential of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "exp", + "code": "node = onnx.helper.make_node(\n \"Exp\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.exp(x) # expected output [0.36787945, 1., 2.71828175]\nexpect(node, inputs=[x], outputs=[y], name=\"test_exp_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.exp(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_exp\")" + } + ] + }, + { + "name": "Exp", + "module": "ai.onnx", + "version": 6, + "description": "Calculates the exponential of the given input tensor, element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The exponential of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "exp", + "code": "node = onnx.helper.make_node(\n \"Exp\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.exp(x) # expected output [0.36787945, 1., 2.71828175]\nexpect(node, inputs=[x], outputs=[y], name=\"test_exp_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.exp(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_exp\")" + } + ] + }, + { + "name": "Exp", + "module": "ai.onnx", + "version": 13, + "description": "Calculates the exponential of the given input tensor, element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The exponential of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "exp", + "code": "node = onnx.helper.make_node(\n \"Exp\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.exp(x) # expected output [0.36787945, 1., 2.71828175]\nexpect(node, inputs=[x], outputs=[y], name=\"test_exp_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.exp(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_exp\")" + } + ] + }, + { + "name": "Expand", + "module": "ai.onnx", + "version": 8, + "description": "Broadcast the input tensor following the given shape and the broadcast rule.\nThe broadcast rule is similar to numpy.array(input) * numpy.ones(shape):\nDimensions are right-aligned;\nTwo corresponding dimensions must
have the same value, or one of them is equal to 1.\nAlso, this operator is similar to numpy.broadcast_to(input, shape),\nbut the major difference is numpy.broadcast_to() does not allow shape to be smaller than input.size().\nIt is possible that the output.shape is not equal to shape, when some dimensions in shape are equal to 1,\nor the shape.ndim < input.shape.ndim.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + }, + { + "name": "shape", + "type": "tensor(int64)", + "description": "A 1-D tensor that indicates the shape you want to expand to, following the broadcast rule" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "dim_changed", + "code": "node = onnx.helper.make_node(\n \"Expand\",\n inputs=[\"data\", \"new_shape\"],\n outputs=[\"expanded\"],\n)\nshape = [3, 1]\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[1.], [2.], [3.]]\nnew_shape = [2, 1, 6]\nexpanded = data * np.ones(new_shape, dtype=np.float32)\n# print(expanded)\n# [[[1., 1., 1., 1., 1., 1.],\n# [2., 2., 2., 2., 2., 2.],\n# [3., 3., 3., 3., 3., 3.]],\n#\n# [[1., 1., 1., 1., 1., 1.],\n# [2., 2., 2., 2., 2., 2.],\n# [3., 3., 3., 3., 3., 3.]]]\nnew_shape = np.array(new_shape, dtype=np.int64)\nexpect(\n node,\n inputs=[data, new_shape],\n outputs=[expanded],\n name=\"test_expand_dim_changed\",\n)" + }, + { + "summary": "dim_unchanged", + "code": "node = onnx.helper.make_node(\n \"Expand\",\n inputs=[\"data\", \"new_shape\"],\n outputs=[\"expanded\"],\n)\nshape = [3, 1]\nnew_shape = [3, 4]\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[1.], [2.], [3.]]\nexpanded = np.tile(data, 4)\n# print(expanded)\n# [[1., 1., 1., 1.],\n# [2., 2., 2., 2.],\n# [3., 3., 3., 3.]]\nnew_shape = np.array(new_shape, dtype=np.int64)\nexpect(\n node,\n inputs=[data, new_shape],\n outputs=[expanded],\n name=\"test_expand_dim_unchanged\",\n)" + } + ] + }, + { + "name": "Expand", + "module": "ai.onnx", + "version": 13, + "description": "Broadcast the input tensor following the given shape and the broadcast rule.\nThe broadcast rule is similar to numpy.array(input) * numpy.ones(shape):\nDimensions are right-aligned;\nTwo corresponding dimensions must have the same value, or one of them is equal to 1.\nAlso, this operator is similar to numpy.broadcast_to(input, shape),\nbut the major difference is numpy.broadcast_to() does not allow shape to be smaller than input.size().\nIt is possible that the output.shape is not equal to shape, when some dimensions in shape are equal to 1,\nor the shape.ndim < input.shape.ndim.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + }, + { + "name": "shape", + "type": "tensor(int64)", + "description": "A 1-D tensor that indicates the shape you want to expand to, following the broadcast rule" + } + ], + "min_input": 2, + "max_input": 2, +
"outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "dim_changed", + "code": "node = onnx.helper.make_node(\n \"Expand\",\n inputs=[\"data\", \"new_shape\"],\n outputs=[\"expanded\"],\n)\nshape = [3, 1]\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[1.], [2.], [3.]]\nnew_shape = [2, 1, 6]\nexpanded = data * np.ones(new_shape, dtype=np.float32)\n# print(expanded)\n# [[[1., 1., 1., 1., 1., 1.],\n# [2., 2., 2., 2., 2., 2.],\n# [3., 3., 3., 3., 3., 3.]],\n#\n# [[1., 1., 1., 1., 1., 1.],\n# [2., 2., 2., 2., 2., 2.],\n# [3., 3., 3., 3., 3., 3.]]]\nnew_shape = np.array(new_shape, dtype=np.int64)\nexpect(\n node,\n inputs=[data, new_shape],\n outputs=[expanded],\n name=\"test_expand_dim_changed\",\n)" + }, + { + "summary": "dim_unchanged", + "code": "node = onnx.helper.make_node(\n \"Expand\",\n inputs=[\"data\", \"new_shape\"],\n outputs=[\"expanded\"],\n)\nshape = [3, 1]\nnew_shape = [3, 4]\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[1.], [2.], [3.]]\nexpanded = np.tile(data, 4)\n# print(expanded)\n# [[1., 1., 1., 1.],\n# [2., 2., 2., 2.],\n# [3., 3., 3., 3.]]\nnew_shape = np.array(new_shape, dtype=np.int64)\nexpect(\n node,\n inputs=[data, new_shape],\n outputs=[expanded],\n name=\"test_expand_dim_unchanged\",\n)" + } + ] + }, + { + "name": "EyeLike", + "module": "ai.onnx", + "version": 9, + "description": "Generate a 2D tensor (matrix) with ones on the diagonal and zeros everywhere else. Only 2D\ntensors are supported, i.e. input T1 must be of rank 2. The shape of the output tensor is the\nsame as the input tensor. The data type can be specified by the 'dtype' argument. If\n'dtype' is not specified, then the type of input tensor is used. By default, the main diagonal\nis populated with ones, but attribute 'k' can be used to populate upper or lower diagonals.\nThe 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the\nTensorProto message and be valid as an output type.\n", + "attributes": [ + { + "name": "dtype", + "type": "DataType", + "required": false, + "description": "(Optional) The data type for the elements of the output tensor. If not specified,the data type of the input tensor T1 is used. If input tensor T1 is also notspecified, then type defaults to 'float'." + }, + { + "name": "k", + "type": "int64", + "required": false, + "description": "(Optional) Index of the diagonal to be populated with ones. Default is 0. If T2 is the output, this op sets T2[i, i+k] = 1. k = 0 populates the main diagonal, k > 0 populates an upper diagonal, and k < 0 populates a lower diagonal." + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "2D input tensor to copy shape, and optionally, type information from." 
+ } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor, same shape as input tensor T1." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types. Strings and complex are not supported.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ] + }, + { + "description": "Constrain output types. Strings and complex are not supported.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "populate_off_main_diagonal", + "code": "shape = (4, 5)\noff_diagonal_offset = 1\nnode = onnx.helper.make_node(\n \"EyeLike\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n k=off_diagonal_offset,\n dtype=onnx.TensorProto.FLOAT,\n)\n\nx = np.random.randint(0, 100, size=shape, dtype=np.int32)\ny = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_eyelike_populate_off_main_diagonal\",\n)" + }, + { + "summary": "with_dtype", + "code": "shape = (3, 4)\nnode = onnx.helper.make_node(\n \"EyeLike\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n dtype=onnx.TensorProto.DOUBLE,\n)\n\nx = np.random.randint(0, 100, size=shape, dtype=np.int32)\ny = np.eye(shape[0], shape[1], dtype=np.float64)\nexpect(node, inputs=[x], outputs=[y], name=\"test_eyelike_with_dtype\")" + }, + { + "summary": "without_dtype", + "code": "shape = (4, 4)\nnode = onnx.helper.make_node(\n \"EyeLike\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randint(0, 100, size=shape, dtype=np.int32)\ny = np.eye(shape[0], shape[1], dtype=np.int32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_eyelike_without_dtype\")" + } + ] + }, + { + "name": "FeatureVectorizer", + "module": "ai.onnx.ml", + "version": 1, + "description": "Concatenates input tensors into one continuous output.
\n All input shapes are 2-D and are concatenated along the second dimension. 1-D tensors are treated as [1,C].\n Inputs are copied to the output maintaining the order of the input arguments.
\n All inputs must be integers or floats, while the output will be all floating point values.\n", + "attributes": [ + { + "name": "inputdimensions", + "type": "int64[]", + "required": false, + "description": "The size of each input in the input list" + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "list": true, + "description": "An ordered collection of tensors, all with the same element type." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "Y", + "type": "tensor(float)", + "description": "The output array, elements ordered as the inputs." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "The input type must be a tensor of a numeric type.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)", + "tensor(float)", + "tensor(double)" + ] + } + ] + }, + { + "name": "Flatten", + "module": "ai.onnx", + "version": 1, + "description": "Flattens the input tensor into a 2D matrix. If input tensor has shape\n(d_0, d_1, ... d_n) then the output will have shape\n(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn).\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [0, R], where R is the rank of the input tensor. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input tensor is (d_0, d_1, ... d_n). " + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "A tensor of rank >= axis." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "flatten", + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(len(shape)):\n node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"],\n axis=i,\n )\n\n new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b], name=\"test_flatten_axis\" + str(i))" + }, + { + "summary": "flatten_negative_axis", + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(-len(shape), 0):\n node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"],\n axis=i,\n )\n\n new_shape = (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(\n node,\n inputs=[a],\n outputs=[b],\n name=\"test_flatten_negative_axis\" + str(abs(i)),\n )" + }, + { + "summary": "flatten_with_default_axis", + "code": "node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"], # Default value for axis: axis=1\n)\n\nshape = (5, 4, 3, 2)\na = np.random.random_sample(shape).astype(np.float32)\nnew_shape = (5, 24)\nb = np.reshape(a, new_shape)\nexpect(node, inputs=[a], outputs=[b], name=\"test_flatten_default_axis\")" + } + ], + "category": "Shape" + }, + { + "name": "Flatten", + "module": "ai.onnx", + "version": 9, + "description": "Flattens the input tensor into a 2D matrix. If input tensor has shape\n(d_0, d_1, ... d_n) then the output will have shape\n(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn).\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [0, R], where R is the rank of the input tensor. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input tensor is (d_0, d_1, ... d_n). " + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "A tensor of rank >= axis." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "flatten", + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(len(shape)):\n node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"],\n axis=i,\n )\n\n new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b], name=\"test_flatten_axis\" + str(i))" + }, + { + "summary": "flatten_negative_axis", + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(-len(shape), 0):\n node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"],\n axis=i,\n )\n\n new_shape = (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(\n node,\n inputs=[a],\n outputs=[b],\n name=\"test_flatten_negative_axis\" + str(abs(i)),\n )" + }, + { + "summary": "flatten_with_default_axis", + "code": "node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"], # Default value for axis: axis=1\n)\n\nshape = (5, 4, 3, 2)\na = np.random.random_sample(shape).astype(np.float32)\nnew_shape = (5, 24)\nb = np.reshape(a, new_shape)\nexpect(node, inputs=[a], outputs=[b], name=\"test_flatten_default_axis\")" + } + ], + "category": "Shape" + }, + { + "name": "Flatten", + "module": "ai.onnx", + "version": 11, + "description": "Flattens the input tensor into a 2D matrix. If input tensor has shape\n(d_0, d_1, ... d_n) then the output will have shape\n(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn).\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor. Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input tensor is (d_0, d_1, ... d_n). " + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "A tensor of rank >= axis." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "flatten", + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(len(shape)):\n node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"],\n axis=i,\n )\n\n new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b], name=\"test_flatten_axis\" + str(i))" + }, + { + "summary": "flatten_negative_axis", + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(-len(shape), 0):\n node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"],\n axis=i,\n )\n\n new_shape = (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(\n node,\n inputs=[a],\n outputs=[b],\n name=\"test_flatten_negative_axis\" + str(abs(i)),\n )" + }, + { + "summary": "flatten_with_default_axis", + "code": "node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"], # Default value for axis: axis=1\n)\n\nshape = (5, 4, 3, 2)\na = np.random.random_sample(shape).astype(np.float32)\nnew_shape = (5, 24)\nb = np.reshape(a, new_shape)\nexpect(node, inputs=[a], outputs=[b], name=\"test_flatten_default_axis\")" + } + ], + "category": "Shape" + }, + { + "name": "Flatten", + "module": "ai.onnx", + "version": 13, + "description": "Flattens the input tensor into a 2D matrix. If input tensor has shape\n(d_0, d_1, ... d_n) then the output will have shape\n(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn).\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor. Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input tensor is (d_0, d_1, ... d_n). " + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "A tensor of rank >= axis." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "flatten", + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(len(shape)):\n node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"],\n axis=i,\n )\n\n new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b], name=\"test_flatten_axis\" + str(i))" + }, + { + "summary": "flatten_negative_axis", + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(-len(shape), 0):\n node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"],\n axis=i,\n )\n\n new_shape = (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(\n node,\n inputs=[a],\n outputs=[b],\n name=\"test_flatten_negative_axis\" + str(abs(i)),\n )" + }, + { + "summary": "flatten_with_default_axis", + "code": "node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"], # Default value for axis: axis=1\n)\n\nshape = (5, 4, 3, 2)\na = np.random.random_sample(shape).astype(np.float32)\nnew_shape = (5, 24)\nb = np.reshape(a, new_shape)\nexpect(node, inputs=[a], outputs=[b], name=\"test_flatten_default_axis\")" + } + ], + "category": "Shape" + }, + { + "name": "Flatten", + "module": "ai.onnx", + "version": 21, + "description": "Flattens the input tensor into a 2D matrix. If input tensor has shape\n(d_0, d_1, ... d_n) then the output will have shape\n(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn).\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor. Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input tensor is (d_0, d_1, ... d_n). " + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "A tensor of rank >= axis." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output to all tensor types up to IRv10.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + } + ], + "examples": [ + { + "summary": "flatten", + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(len(shape)):\n node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"],\n axis=i,\n )\n\n new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b], name=\"test_flatten_axis\" + str(i))" + }, + { + "summary": "flatten_negative_axis", + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(-len(shape), 0):\n node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"],\n axis=i,\n )\n\n new_shape = (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(\n node,\n inputs=[a],\n outputs=[b],\n name=\"test_flatten_negative_axis\" + str(abs(i)),\n )" + }, + { + "summary": "flatten_with_default_axis", + "code": "node = onnx.helper.make_node(\n \"Flatten\",\n inputs=[\"a\"],\n outputs=[\"b\"], # Default value for axis: axis=1\n)\n\nshape = (5, 4, 3, 2)\na = np.random.random_sample(shape).astype(np.float32)\nnew_shape = (5, 24)\nb = np.reshape(a, new_shape)\nexpect(node, inputs=[a], outputs=[b], name=\"test_flatten_default_axis\")" + } + ], + "category": "Shape" + }, + { + "name": "Floor", + "module": "ai.onnx", + "version": 1, + "description": "Floor takes one input data (Tensor) and produces one output data\n(Tensor) where the floor function, y = floor(x), is applied to\nthe tensor elementwise.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "floor", + "code": "node = onnx.helper.make_node(\n \"Floor\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.5, 1.2, 2]).astype(np.float32)\ny = np.floor(x) # expected output [-2., 1., 2.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_floor_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.floor(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_floor\")" + } + ] + }, + { + "name": "Floor", + "module": "ai.onnx", + "version": 6, + "description": "Floor takes one input data (Tensor) and produces one output data\n(Tensor) where the floor function, y = floor(x), is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "floor", + "code": "node = onnx.helper.make_node(\n \"Floor\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.5, 1.2, 2]).astype(np.float32)\ny = np.floor(x) # expected output [-2., 1., 2.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_floor_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.floor(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_floor\")" + } + ] + }, + { + "name": "Floor", + "module": "ai.onnx", + "version": 13, + "description": "Floor takes one input data (Tensor) and produces one output data\n(Tensor) where the floor function, y = floor(x), is applied to\nthe tensor elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is returned.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "floor", + "code": "node = onnx.helper.make_node(\n \"Floor\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.5, 1.2, 2]).astype(np.float32)\ny = np.floor(x) # expected output [-2., 1., 2.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_floor_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.floor(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_floor\")" + } + ] + }, + { + "name": "GRU", + "module": "ai.onnx", + "version": 1, + "description": "Computes a one-layer GRU. 
This operator is usually supported via some custom\nimplementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`z` - update gate\n\n`r` - reset gate\n\n`h` - hidden gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[zrh]` - W parameter weight matrix for update, reset, and hidden gates\n\n`R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates\n\n`Wb[zrh]` - W bias vectors for update, reset, and hidden gates\n\n`Rb[zrh]` - R bias vectors for update, reset, and hidden gates\n\n`WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates\n\n`RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates\n\n`WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates\n\n`RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh):\n\n - zt = f(Xt*(Wz^T) + Ht-1*Rz + Wbz + Rbz)\n\n - rt = f(Xt*(Wr^T) + Ht-1*Rr + Wbr + Rbr)\n\n - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*Rh + Rbh + Wbh) # default, when linear_before_reset = 0\n\n - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*Rh + Rbh)) + Wbh) # when linear_before_reset != 0\n\n - Ht = (1 - zt) (.) ht + zt (.) Ht-1\n", + "attributes": [ + { + "name": "activation_alpha", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM." + }, + { + "name": "activation_beta", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM." + }, + { + "name": "activations", + "type": "string[]", + "required": false, + "description": "A list of 2 (or 4 if bidirectional) activation functions for update, reset, and hidden gates. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified." + }, + { + "name": "clip", + "type": "float32", + "required": false, + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified." + }, + { + "name": "direction", + "type": "string", + "required": false, + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional." + }, + { + "name": "hidden_size", + "type": "int64", + "required": false, + "description": "Number of neurons in the hidden layer" + }, + { + "name": "output_sequence", + "type": "int64", + "required": false, + "description": "The sequence output for the hidden is optional if 0. Default 0." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor for the gates. Concatenation of `W[zrh]` and `WB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, input_size]`." + }, + { + "name": "R", + "type": "T", + "description": "The recurrence weight tensor. Concatenation of `R[zrh]` and `RB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, hidden_size]`." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "The bias tensor for the gates. Concatenation of `[Wb[zrh], Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 6*hidden_size]`. Optional: If not specified - assumed to be 0" + }, + { + "name": "sequence_lens", + "type": "T1", + "option": "optional", + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`." + }, + { + "name": "initial_h", + "type": "T", + "option": "optional", + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + } + ], + "min_input": 3, + "max_input": 6, + "outputs": [ + { + "name": "Y", + "type": "T", + "option": "optional", + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. It is optional if `output_sequence` is 0." + }, + { + "name": "Y_h", + "type": "T", + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`." 
+ } + ], + "min_output": 2, + "max_output": 2, + "inputs_range": "3 - 6", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "batchwise", + "code": "input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 6\nnumber_of_gates = 3\nweight_scale = 0.2\nlayout = 1\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\"],\n outputs=[\"Y\", \"Y_h\"],\n hidden_size=hidden_size,\n layout=layout,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\ngru = GRUHelper(X=input, W=W, R=R, layout=layout)\nY, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],\n name=\"test_gru_batchwise\",\n)" + }, + { + "summary": "defaults", + "code": "input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 5\nweight_scale = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\", inputs=[\"X\", \"W\", \"R\"], outputs=[\"\", \"Y_h\"], hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\ngru = GRUHelper(X=input, W=W, R=R)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_defaults\",\n)" + }, + { + "summary": "initial_bias", + "code": "input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(\n np.float32\n)\n\ninput_size = 3\nhidden_size = 3\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(\n np.float32\n)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRUHelper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_with_initial_bias\",\n)" + }, + { + "summary": "seq_length", + "code": "input = np.array(\n [\n [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],\n [[10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],\n ]\n).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = np.random.randn(1, number_of_gates * hidden_size, input_size).astype(\n np.float32\n)\nR = np.random.randn(1, number_of_gates * hidden_size, hidden_size).astype(\n np.float32\n)\n\n# Adding custom bias\nW_B = 
np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nR_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRUHelper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_seq_length\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "GRU", + "module": "ai.onnx", + "version": 3, + "description": "Computes a one-layer GRU. This operator is usually supported via some custom\nimplementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`z` - update gate\n\n`r` - reset gate\n\n`h` - hidden gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[zrh]` - W parameter weight matrix for update, reset, and hidden gates\n\n`R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates\n\n`Wb[zrh]` - W bias vectors for update, reset, and hidden gates\n\n`Rb[zrh]` - R bias vectors for update, reset, and hidden gates\n\n`WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates\n\n`RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates\n\n`WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates\n\n`RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh):\n\n - zt = f(Xt*(Wz^T) + Ht-1*Rz + Wbz + Rbz)\n\n - rt = f(Xt*(Wr^T) + Ht-1*Rr + Wbr + Rbr)\n\n - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*Rh + Rbh + Wbh) # default, when linear_before_reset = 0\n\n - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*Rh + Rbh)) + Wbh) # when linear_before_reset != 0\n\n - Ht = (1 - zt) (.) ht + zt (.) Ht-1\n", + "attributes": [ + { + "name": "activation_alpha", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators. For example with LeakyRelu, the default alpha is 0.01." + }, + { + "name": "activation_beta", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators." + }, + { + "name": "activations", + "type": "string[]", + "required": false, + "description": "A list of 2 (or 4 if bidirectional) activation functions for update, reset, and hidden gates. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified." + }, + { + "name": "clip", + "type": "float32", + "required": false, + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. 
No clip if not specified." + }, + { + "name": "direction", + "type": "string", + "required": false, + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional." + }, + { + "name": "hidden_size", + "type": "int64", + "required": false, + "description": "Number of neurons in the hidden layer" + }, + { + "name": "linear_before_reset", + "type": "int64", + "required": false, + "description": "When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate." + }, + { + "name": "output_sequence", + "type": "int64", + "required": false, + "description": "The sequence output for the hidden is optional if 0. Default 0." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor for the gates. Concatenation of `W[zrh]` and `WB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, input_size]`." + }, + { + "name": "R", + "type": "T", + "description": "The recurrence weight tensor. Concatenation of `R[zrh]` and `RB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, hidden_size]`." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "The bias tensor for the gates. Concatenation of `[Wb[zrh], Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 6*hidden_size]`. Optional: If not specified - assumed to be 0" + }, + { + "name": "sequence_lens", + "type": "T1", + "option": "optional", + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`." + }, + { + "name": "initial_h", + "type": "T", + "option": "optional", + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + } + ], + "min_input": 3, + "max_input": 6, + "outputs": [ + { + "name": "Y", + "type": "T", + "option": "optional", + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. It is optional if `output_sequence` is 0." + }, + { + "name": "Y_h", + "type": "T", + "option": "optional", + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`." 
+ } + ], + "min_output": 0, + "max_output": 2, + "inputs_range": "3 - 6", + "outputs_range": "0 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "batchwise", + "code": "input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 6\nnumber_of_gates = 3\nweight_scale = 0.2\nlayout = 1\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\"],\n outputs=[\"Y\", \"Y_h\"],\n hidden_size=hidden_size,\n layout=layout,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\ngru = GRUHelper(X=input, W=W, R=R, layout=layout)\nY, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],\n name=\"test_gru_batchwise\",\n)" + }, + { + "summary": "defaults", + "code": "input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 5\nweight_scale = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\", inputs=[\"X\", \"W\", \"R\"], outputs=[\"\", \"Y_h\"], hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\ngru = GRUHelper(X=input, W=W, R=R)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_defaults\",\n)" + }, + { + "summary": "initial_bias", + "code": "input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(\n np.float32\n)\n\ninput_size = 3\nhidden_size = 3\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(\n np.float32\n)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRUHelper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_with_initial_bias\",\n)" + }, + { + "summary": "seq_length", + "code": "input = np.array(\n [\n [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],\n [[10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],\n ]\n).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = np.random.randn(1, number_of_gates * hidden_size, input_size).astype(\n np.float32\n)\nR = np.random.randn(1, number_of_gates * hidden_size, hidden_size).astype(\n np.float32\n)\n\n# Adding custom 
bias\nW_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nR_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRUHelper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_seq_length\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "GRU", + "module": "ai.onnx", + "version": 7, + "description": "Computes a one-layer GRU. This operator is usually supported via some custom\nimplementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`z` - update gate\n\n`r` - reset gate\n\n`h` - hidden gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[zrh]` - W parameter weight matrix for update, reset, and hidden gates\n\n`R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates\n\n`Wb[zrh]` - W bias vectors for update, reset, and hidden gates\n\n`Rb[zrh]` - R bias vectors for update, reset, and hidden gates\n\n`WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates\n\n`RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates\n\n`WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates\n\n`RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh):\n\n - zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)\n\n - rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)\n\n - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0\n\n - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0\n\n - Ht = (1 - zt) (.) ht + zt (.) Ht-1\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "activation_alpha", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators. For example with LeakyRelu, the default alpha is 0.01." + }, + { + "name": "activation_beta", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators." 
+ }, + { + "name": "activations", + "type": "string[]", + "required": false, + "description": "A list of 2 (or 4 if bidirectional) activation functions for update, reset, and hidden gates. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified." + }, + { + "name": "clip", + "type": "float32", + "required": false, + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified." + }, + { + "name": "direction", + "type": "string", + "required": false, + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional." + }, + { + "name": "hidden_size", + "type": "int64", + "required": false, + "description": "Number of neurons in the hidden layer" + }, + { + "name": "linear_before_reset", + "type": "int64", + "required": false, + "description": "When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor for the gates. Concatenation of `W[zrh]` and `WB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, input_size]`." + }, + { + "name": "R", + "type": "T", + "description": "The recurrence weight tensor. Concatenation of `R[zrh]` and `RB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, hidden_size]`." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "The bias tensor for the gates. Concatenation of `[Wb[zrh], Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 6*hidden_size]`. Optional: If not specified - assumed to be 0" + }, + { + "name": "sequence_lens", + "type": "T1", + "option": "optional", + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`." + }, + { + "name": "initial_h", + "type": "T", + "option": "optional", + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + } + ], + "min_input": 3, + "max_input": 6, + "outputs": [ + { + "name": "Y", + "type": "T", + "option": "optional", + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. " + }, + { + "name": "Y_h", + "type": "T", + "option": "optional", + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`." 
+ } + ], + "min_output": 0, + "max_output": 2, + "inputs_range": "3 - 6", + "outputs_range": "0 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "batchwise", + "code": "input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 6\nnumber_of_gates = 3\nweight_scale = 0.2\nlayout = 1\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\"],\n outputs=[\"Y\", \"Y_h\"],\n hidden_size=hidden_size,\n layout=layout,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\ngru = GRUHelper(X=input, W=W, R=R, layout=layout)\nY, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],\n name=\"test_gru_batchwise\",\n)" + }, + { + "summary": "defaults", + "code": "input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 5\nweight_scale = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\", inputs=[\"X\", \"W\", \"R\"], outputs=[\"\", \"Y_h\"], hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\ngru = GRUHelper(X=input, W=W, R=R)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_defaults\",\n)" + }, + { + "summary": "initial_bias", + "code": "input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(\n np.float32\n)\n\ninput_size = 3\nhidden_size = 3\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(\n np.float32\n)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRUHelper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_with_initial_bias\",\n)" + }, + { + "summary": "seq_length", + "code": "input = np.array(\n [\n [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],\n [[10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],\n ]\n).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = np.random.randn(1, number_of_gates * hidden_size, input_size).astype(\n np.float32\n)\nR = np.random.randn(1, number_of_gates * hidden_size, hidden_size).astype(\n np.float32\n)\n\n# Adding custom 
bias\nW_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nR_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRUHelper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_seq_length\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "GRU", + "module": "ai.onnx", + "version": 14, + "description": "Computes a one-layer GRU. This operator is usually supported via some custom\nimplementation such as CuDNN.\n\nNotations:\n\n* `X` - input tensor\n* `z` - update gate\n* `r` - reset gate\n* `h` - hidden gate\n* `t` - time step (t-1 means previous time step)\n* `W[zrh]` - W parameter weight matrix for update, reset, and hidden gates\n* `R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates\n* `Wb[zrh]` - W bias vectors for update, reset, and hidden gates\n* `Rb[zrh]` - R bias vectors for update, reset, and hidden gates\n* `WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates\n* `RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates\n* `WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates\n* `RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates\n* `H` - Hidden state\n* `num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n* Relu(x) - max(0, x)\n* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n* Sigmoid(x) - 1/(1 + e^{-x})\n\nNOTE:\n Below are optional\n\n* Affine(x) - alpha * x + beta\n* LeakyRelu(x) - x if x >= 0 else alpha * x\n* ThresholdedRelu(x) - x if x >= alpha else 0\n* ScaledTanh(x) - alpha * Tanh(beta * x)\n* HardSigmoid(x) - min(max(alpha * x + beta, 0), 1)\n* Elu(x) - x if x >= 0 else alpha * (e^x - 1)\n* Softsign(x) - x/(1 + |x|)\n* Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh):\n\n* zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)\n* rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)\n* ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0\n* ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0\n* Ht = (1 - zt) (.) ht + zt (.) Ht-1\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "activation_alpha", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators. For example with LeakyRelu, the default alpha is 0.01." + }, + { + "name": "activation_beta", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators." 
+ }, + { + "name": "activations", + "type": "string[]", + "required": false, + "description": "A list of 2 (or 4 if bidirectional) activation functions for update, reset, and hidden gates. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified." + }, + { + "name": "clip", + "type": "float32", + "required": false, + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified." + }, + { + "name": "direction", + "type": "string", + "required": false, + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional." + }, + { + "name": "hidden_size", + "type": "int64", + "required": false, + "description": "Number of neurons in the hidden layer" + }, + { + "name": "layout", + "type": "int64", + "required": false, + "description": "The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]." + }, + { + "name": "linear_before_reset", + "type": "int64", + "required": false, + "description": "When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor for the gates. Concatenation of `W[zrh]` and `WB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, input_size]`." + }, + { + "name": "R", + "type": "T", + "description": "The recurrence weight tensor. Concatenation of `R[zrh]` and `RB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, hidden_size]`." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "The bias tensor for the gates. Concatenation of `[Wb[zrh], Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 6*hidden_size]`. Optional: If not specified - assumed to be 0" + }, + { + "name": "sequence_lens", + "type": "T1", + "option": "optional", + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`." + }, + { + "name": "initial_h", + "type": "T", + "option": "optional", + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + } + ], + "min_input": 3, + "max_input": 6, + "outputs": [ + { + "name": "Y", + "type": "T", + "option": "optional", + "description": "A tensor that concats all the intermediate output values of the hidden. 
It has shape `[seq_length, num_directions, batch_size, hidden_size]`. " + }, + { + "name": "Y_h", + "type": "T", + "option": "optional", + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`." + } + ], + "min_output": 0, + "max_output": 2, + "inputs_range": "3 - 6", + "outputs_range": "0 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "batchwise", + "code": "input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 6\nnumber_of_gates = 3\nweight_scale = 0.2\nlayout = 1\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\"],\n outputs=[\"Y\", \"Y_h\"],\n hidden_size=hidden_size,\n layout=layout,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\ngru = GRUHelper(X=input, W=W, R=R, layout=layout)\nY, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],\n name=\"test_gru_batchwise\",\n)" + }, + { + "summary": "defaults", + "code": "input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 5\nweight_scale = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\", inputs=[\"X\", \"W\", \"R\"], outputs=[\"\", \"Y_h\"], hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\ngru = GRUHelper(X=input, W=W, R=R)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_defaults\",\n)" + }, + { + "summary": "initial_bias", + "code": "input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(\n np.float32\n)\n\ninput_size = 3\nhidden_size = 3\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(\n np.float32\n)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRUHelper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_with_initial_bias\",\n)" + }, + { + "summary": "seq_length", + "code": "input = np.array(\n [\n [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],\n [[10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],\n ]\n).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n \"GRU\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n 
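# the empty output name below omits the optional Y; only Y_h is produced\n 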
outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = np.random.randn(1, number_of_gates * hidden_size, input_size).astype(\n np.float32\n)\nR = np.random.randn(1, number_of_gates * hidden_size, hidden_size).astype(\n np.float32\n)\n\n# Adding custom bias\nW_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nR_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRUHelper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_gru_seq_length\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "Gather", + "module": "ai.onnx", + "version": 1, + "description": "Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather\nentries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates\nthem in an output tensor of rank q + (r - 1).\nExample 1:\n```\n data = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n indices = [\n [0, 1],\n [1, 2],\n ]\n output = [\n [\n [1.0, 1.2],\n [2.3, 3.4],\n ],\n [\n [2.3, 3.4],\n [4.5, 5.7],\n ],\n ]\n```\nExample 2:\n```\n data = [\n [1.0, 1.2, 1.9],\n [2.3, 3.4, 3.9],\n [4.5, 5.7, 5.9],\n ]\n indices = [\n [0, 2],\n ]\n axis = 1,\n output = [\n [[1.0, 1.9]],\n [[2.3, 3.9]],\n [[4.5, 5.9]],\n ]\n```\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1]" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "Tind", + "description": "Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds. It is an error if any of the index values are out of bounds." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank q + (r - 1)." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "gather_0", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=0,\n)\ndata = np.random.randn(5, 4, 3, 2).astype(np.float32)\nindices = np.array([0, 1, 3])\ny = np.take(data, indices, axis=0)\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_0\",\n)" + }, + { + "summary": "gather_1", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=1,\n)\ndata = np.random.randn(5, 4, 3, 2).astype(np.float32)\nindices = np.array([0, 1, 3])\ny = np.take(data, indices, axis=1)\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_1\",\n)" + }, + { + "summary": "gather_2d_indices", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=1,\n)\ndata = np.random.randn(3, 3).astype(np.float32)\nindices = np.array([[0, 2]])\ny = np.take(data, indices, axis=1)\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_2d_indices\",\n)" + }, + { + "summary": "gather_negative_indices", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=0,\n)\ndata = np.arange(10).astype(np.float32)\nindices = np.array([0, -9, -10])\ny = np.take(data, indices, axis=0)\n\n# print(y)\n# [0. 1. 0.]\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_negative_indices\",\n)" + } + ], + "category": "Transform" + }, + { + "name": "Gather", + "module": "ai.onnx", + "version": 11, + "description": "Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather\nentries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates\nthem in an output tensor of rank q + (r - 1).\n\naxis = 0 :\n\nLet\nk = indices[i_{0}, ..., i_{q-1}]\nThen\noutput[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]\n\n```\n data = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n indices = [\n [0, 1],\n [1, 2],\n ]\n output = [\n [\n [1.0, 1.2],\n [2.3, 3.4],\n ],\n [\n [2.3, 3.4],\n [4.5, 5.7],\n ],\n ]\n```\naxis = 1 :\n\nLet\nk = indices[i_{0}, ..., i_{q-1}]\nThen\noutput[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]\n\n```\n data = [\n [1.0, 1.2, 1.9],\n [2.3, 3.4, 3.9],\n [4.5, 5.7, 5.9],\n ]\n indices = [\n [0, 2],\n ]\n axis = 1,\n output = [\n [[1.0, 1.9]],\n [[2.3, 3.9]],\n [[4.5, 5.9]],\n ]\n```\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)." 
+ } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "Tind", + "description": "Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank q + (r - 1)." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "gather_0", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=0,\n)\ndata = np.random.randn(5, 4, 3, 2).astype(np.float32)\nindices = np.array([0, 1, 3])\ny = np.take(data, indices, axis=0)\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_0\",\n)" + }, + { + "summary": "gather_1", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=1,\n)\ndata = np.random.randn(5, 4, 3, 2).astype(np.float32)\nindices = np.array([0, 1, 3])\ny = np.take(data, indices, axis=1)\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_1\",\n)" + }, + { + "summary": "gather_2d_indices", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=1,\n)\ndata = np.random.randn(3, 3).astype(np.float32)\nindices = np.array([[0, 2]])\ny = np.take(data, indices, axis=1)\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_2d_indices\",\n)" + }, + { + "summary": "gather_negative_indices", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=0,\n)\ndata = np.arange(10).astype(np.float32)\nindices = np.array([0, -9, -10])\ny = np.take(data, indices, axis=0)\n\n# print(y)\n# [0. 1. 
0.]\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_negative_indices\",\n)" + } + ], + "category": "Transform" + }, + { + "name": "Gather", + "module": "ai.onnx", + "version": 13, + "description": "Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, this operator gathers\nentries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates\nthem in an output tensor of rank q + (r - 1).\n\nIf `axis = 0`, let `k = indices[i_{0}, ..., i_{q-1}]`\nthen `output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]`:\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\nindices = [\n [0, 1],\n [1, 2],\n]\noutput = [\n [\n [1.0, 1.2],\n [2.3, 3.4],\n ],\n [\n [2.3, 3.4],\n [4.5, 5.7],\n ],\n]\n```\n\nIf `axis = 1`, let `k = indices[i_{0}, ..., i_{q-1}]`\nthen `output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]`:\n\n```\ndata = [\n [1.0, 1.2, 1.9],\n [2.3, 3.4, 3.9],\n [4.5, 5.7, 5.9],\n]\nindices = [\n [0, 2],\n]\naxis = 1,\noutput = [\n [[1.0, 1.9]],\n [[2.3, 3.9]],\n [[4.5, 5.9]],\n]\n```\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "gather_0", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=0,\n)\ndata = np.random.randn(5, 4, 3, 2).astype(np.float32)\nindices = np.array([0, 1, 3])\ny = np.take(data, indices, axis=0)\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_0\",\n)" + }, + { + "summary": "gather_1", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=1,\n)\ndata = np.random.randn(5, 4, 3, 2).astype(np.float32)\nindices = np.array([0, 1, 3])\ny = np.take(data, indices, axis=1)\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_1\",\n)" + }, + { + "summary": "gather_2d_indices", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=1,\n)\ndata = np.random.randn(3, 3).astype(np.float32)\nindices = np.array([[0, 2]])\ny = np.take(data, indices, axis=1)\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_2d_indices\",\n)" + }, + { + "summary": "gather_negative_indices", + "code": "node = onnx.helper.make_node(\n \"Gather\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=0,\n)\ndata = np.arange(10).astype(np.float32)\nindices = np.array([0, -9, -10])\ny = np.take(data, indices, axis=0)\n\n# print(y)\n# [0. 1. 0.]\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_negative_indices\",\n)" + } + ], + "category": "Transform" + }, + { + "name": "GatherElements", + "module": "ai.onnx", + "version": 11, + "description": "GatherElements takes two inputs `data` and `indices` of the same rank r >= 1\nand an optional attribute `axis` that identifies an axis of `data`\n(by default, the outer-most axis, that is axis 0). It is an indexing operation\nthat produces its output by indexing into the input data tensor at index\npositions determined by elements of the `indices` tensor.\nIts output shape is the same as the shape of `indices` and consists of one value\n(gathered from the `data`) for each element in `indices`.\n\nFor instance, in the 3-D case (r = 3), the output produced is determined\nby the following equations:\n```\n out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,\n out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,\n out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,\n```\n\nThis operator is also the inverse of ScatterElements. 
It is similar to Torch's gather operation.\n\nExample 1:\n```\n data = [\n [1, 2],\n [3, 4],\n ]\n indices = [\n [0, 0],\n [1, 0],\n ]\n axis = 1\n output = [\n [1, 1],\n [4, 3],\n ]\n```\nExample 2:\n```\n data = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n ]\n indices = [\n [1, 2, 0],\n [2, 0, 0],\n ]\n axis = 0\n output = [\n [4, 8, 3],\n [7, 2, 3],\n ]\n```\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "Tind", + "description": "Tensor of int32/int64 indices, with the same rank r as the input. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of the same shape as indices." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "gather_elements_0", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"GatherElements\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1, 2], [3, 4]], dtype=np.float32)\nindices = np.array([[0, 0], [1, 0]], dtype=np.int32)\n\ny = gather_elements(data, indices, axis)\n# print(y) produces\n# [[1, 1],\n# [4, 3]]\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_elements_0\",\n)" + }, + { + "summary": "gather_elements_1", + "code": "axis = 0\nnode = onnx.helper.make_node(\n \"GatherElements\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)\nindices = np.array([[1, 2, 0], [2, 0, 0]], dtype=np.int32)\n\ny = gather_elements(data, indices, axis)\n# print(y) produces\n# [[4, 8, 3],\n# [7, 2, 3]]\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_elements_1\",\n)" + }, + { + "summary": "gather_elements_negative_indices", + "code": "axis = 0\nnode = onnx.helper.make_node(\n \"GatherElements\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)\nindices = np.array([[-1, -2, 0], [-2, 0, 0]], dtype=np.int32)\n\ny = gather_elements(data, indices, axis)\n# print(y) produces\n# [[7, 5, 3],\n# [4, 2, 3]]\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_elements_negative_indices\",\n)" + } + ] + }, + { + "name": "GatherElements", + "module": "ai.onnx", + "version": 13, + "description": "GatherElements takes two inputs `data` and 
`indices` of the same rank r >= 1\nand an optional attribute `axis` that identifies an axis of `data`\n(by default, the outer-most axis, that is axis 0). It is an indexing operation\nthat produces its output by indexing into the input data tensor at index\npositions determined by elements of the `indices` tensor.\nIts output shape is the same as the shape of `indices` and consists of one value\n(gathered from the `data`) for each element in `indices`.\n\nFor instance, in the 3-D case (r = 3), the output produced is determined\nby the following equations:\n```\nout[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,\nout[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,\nout[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,\n```\n\nThis operator is also the inverse of ScatterElements. It is similar to Torch's gather operation.\n\nExample 1:\n```\ndata = [\n [1, 2],\n [3, 4],\n]\nindices = [\n [0, 0],\n [1, 0],\n]\naxis = 1\noutput = [\n [1, 1],\n [4, 3],\n]\n```\nExample 2:\n```\ndata = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n]\nindices = [\n [1, 2, 0],\n [2, 0, 0],\n]\naxis = 0\noutput = [\n [4, 8, 3],\n [7, 2, 3],\n]\n```\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "Tind", + "description": "Tensor of int32/int64 indices, with the same rank r as the input. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of the same shape as indices." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "gather_elements_0", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"GatherElements\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1, 2], [3, 4]], dtype=np.float32)\nindices = np.array([[0, 0], [1, 0]], dtype=np.int32)\n\ny = gather_elements(data, indices, axis)\n# print(y) produces\n# [[1, 1],\n# [4, 3]]\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_elements_0\",\n)" + }, + { + "summary": "gather_elements_1", + "code": "axis = 0\nnode = onnx.helper.make_node(\n \"GatherElements\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)\nindices = np.array([[1, 2, 0], [2, 0, 0]], dtype=np.int32)\n\ny = gather_elements(data, indices, axis)\n# print(y) produces\n# [[4, 8, 3],\n# [7, 2, 3]]\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_elements_1\",\n)" + }, + { + "summary": "gather_elements_negative_indices", + "code": "axis = 0\nnode = onnx.helper.make_node(\n \"GatherElements\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)\nindices = np.array([[-1, -2, 0], [-2, 0, 0]], dtype=np.int32)\n\ny = gather_elements(data, indices, axis)\n# print(y) produces\n# [[7, 5, 3],\n# [4, 2, 3]]\n\nexpect(\n node,\n inputs=[data, indices.astype(np.int64)],\n outputs=[y],\n name=\"test_gather_elements_negative_indices\",\n)" + } + ] + }, + { + "name": "GatherND", + "module": "ai.onnx", + "version": 11, + "description": "Given `data` tensor of rank `r` >= 1, and `indices` tensor of rank `q` >= 1, this operator gathers\nslices of `data` into an output tensor of rank `q + r - indices_shape[-1] - 1`.\n\n`indices` is an q-dimensional integer tensor, best thought of as a `(q-1)`-dimensional tensor of index-tuples into `data`,\nwhere each element defines a slice of `data`\n\nSome salient points about the inputs' rank and shape:\n\n1) r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks `r` and `q`\n\n2) The `indices_shape[-1]` should have a value between 1 (inclusive) and rank `r` (inclusive)\n\n3) All values in `indices` are expected to be within bounds [-s, s-1] along axis of size `s` (i.e.) 
`-data_shape[i] <= indices[...,i] <= data_shape[i] - 1`.\n It is an error if any of the index values are out of bounds.\n\nThe output is computed as follows:\n\nThe output tensor is obtained by mapping each index-tuple in the `indices` tensor to the corresponding slice of the input `data`.\n\n1) If `indices_shape[-1] > r` => error condition\n\n2) If `indices_shape[-1] == r`, since the rank of `indices` is `q`, `indices` can be thought of as a `(q-1)`-dimensional tensor\n containing 1-D tensors of dimension `r`. Let us think of each such `r` ranked tensor as `indices_slice`.\n Each *scalar value* corresponding to `data[indices_slice]` is filled into the corresponding location of the `(q-1)`-dimensional tensor\n to form the `output` tensor (Example 1 below)\n\n3) If `indices_shape[-1] < r`, since the rank of `indices` is `q`, `indices` can be thought of as a `(q-1)`-dimensional tensor\n containing 1-D tensors of dimension `< r`. Let us think of each such tensors as `indices_slice`.\n Each *tensor slice* corresponding to `data[indices_slice , :]` is filled into the corresponding location of the `(q-1)`-dimensional tensor\n to form the `output` tensor (Examples 2, 3, and 4 below)\n\nThis operator is the inverse of `ScatterND`.\n\n`Example 1`\n\n data = [[0,1],[2,3]] # data_shape = [2, 2]\n\n indices = [[0,0],[1,1]] # indices_shape = [2, 2]\n\n output = [0,3] # output_shape = [2]\n\n`Example 2`\n\n data = [[0,1],[2,3]] # data_shape = [2, 2]\n\n indices = [[1],[0]] # indices_shape = [2, 1]\n\n output = [[2,3],[0,1]] # output_shape = [2, 2]\n\n`Example 3`\n\n data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\n\n indices = [[0,1],[1,0]] # indices_shape = [2, 2]\n\n output = [[2,3],[4,5]] # output_shape = [2, 2]\n\n`Example 4`\n\n data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\n\n indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]\n\n output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2]\n\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "tensor(int64)", + "description": "Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank q + r - indices_shape[-1] - 1." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "float32", + "code": "node = onnx.helper.make_node(\n \"GatherND\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"output\"],\n)\n\ndata = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.float32)\nindices = np.array([[[0, 1]], [[1, 0]]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 0)\nexpected_output = np.array([[[2, 3]], [[4, 5]]], dtype=np.float32)\nassert np.array_equal(output, expected_output)\nexpect(\n node,\n inputs=[data, indices],\n outputs=[output],\n name=\"test_gathernd_example_float32\",\n)" + }, + { + "summary": "int32", + "code": "node = onnx.helper.make_node(\n \"GatherND\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"output\"],\n)\n\ndata = np.array([[0, 1], [2, 3]], dtype=np.int32)\nindices = np.array([[0, 0], [1, 1]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 0)\nexpected_output = np.array([0, 3], dtype=np.int32)\nassert np.array_equal(output, expected_output)\nexpect(\n node,\n inputs=[data, indices],\n outputs=[output],\n name=\"test_gathernd_example_int32\",\n)" + }, + { + "summary": "int32_batchdim_1", + "code": "node = onnx.helper.make_node(\n \"GatherND\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"output\"],\n batch_dims=1,\n)\n\ndata = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.int32)\nindices = np.array([[1], [0]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 1)\nexpected_output = np.array([[2, 3], [4, 5]], dtype=np.int32)\nassert np.array_equal(output, expected_output)\nexpect(\n node,\n inputs=[data, indices],\n outputs=[output],\n name=\"test_gathernd_example_int32_batch_dim1\",\n)" + } + ] + }, + { + "name": "GatherND", + "module": "ai.onnx", + "version": 12, + "description": "Given `data` tensor of rank `r` >= 1, `indices` tensor of rank `q` >= 1, and `batch_dims` integer `b`, this operator gathers\nslices of `data` into an output tensor of rank `q + r - indices_shape[-1] - 1 - b`.\n\n`indices` is an q-dimensional integer tensor, best thought of as a `(q-1)`-dimensional tensor of index-tuples into `data`,\nwhere each element defines a slice of `data`\n\n`batch_dims` (denoted as `b`) is an integer indicating the number of batch dimensions, i.e the leading `b` number of dimensions of\n`data` tensor and `indices` are representing the batches, and the gather starts from the `b+1` dimension.\n\nSome salient points about the inputs' rank and shape:\n\n1) r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks `r` and `q`\n\n2) The first `b` dimensions of the shape of `indices` tensor and `data` tensor must be equal.\n\n3) b < min(q, r) is to be honored.\n\n4) The `indices_shape[-1]` should have a value between 1 (inclusive) and rank `r-b` (inclusive)\n\n5) All values in `indices` are expected to be within bounds [-s, s-1] along axis of size `s` (i.e.) 
`-data_shape[i] <= indices[...,i] <= data_shape[i] - 1`.\n It is an error if any of the index values are out of bounds.\n\nThe output is computed as follows:\n\nThe output tensor is obtained by mapping each index-tuple in the `indices` tensor to the corresponding slice of the input `data`.\n\n1) If `indices_shape[-1] > r-b` => error condition\n\n2) If `indices_shape[-1] == r-b`, since the rank of `indices` is `q`, `indices` can be thought of as `N` `(q-b-1)`-dimensional tensors\n containing 1-D tensors of dimension `r-b`, where `N` is an integer equals to the product of 1 and all the elements in the batch dimensions\n of the indices_shape. Let us think of each such `r-b` ranked tensor as `indices_slice`. Each *scalar value* corresponding to `data[0:b-1,indices_slice]`\n is filled into the corresponding location of the `(q-b-1)`-dimensional tensor to form the `output` tensor (Example 1 below)\n\n3) If `indices_shape[-1] < r-b`, since the rank of `indices` is `q`, `indices` can be thought of as `N` `(q-b-1)`-dimensional tensor\n containing 1-D tensors of dimension `< r-b`. Let us think of each such tensors as `indices_slice`. Each *tensor slice* corresponding\n to `data[0:b-1, indices_slice , :]` is filled into the corresponding location of the `(q-b-1)`-dimensional tensor\n to form the `output` tensor (Examples 2, 3, 4 and 5 below)\n\nThis operator is the inverse of `ScatterND`.\n\n`Example 1`\n\n batch_dims = 0\n\n data = [[0,1],[2,3]] # data_shape = [2, 2]\n\n indices = [[0,0],[1,1]] # indices_shape = [2, 2]\n\n output = [0,3] # output_shape = [2]\n\n`Example 2`\n\n batch_dims = 0\n\n data = [[0,1],[2,3]] # data_shape = [2, 2]\n\n indices = [[1],[0]] # indices_shape = [2, 1]\n\n output = [[2,3],[0,1]] # output_shape = [2, 2]\n\n`Example 3`\n\n batch_dims = 0\n\n data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\n\n indices = [[0,1],[1,0]] # indices_shape = [2, 2]\n\n output = [[2,3],[4,5]] # output_shape = [2, 2]\n\n`Example 4`\n\n batch_dims = 0\n\n data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\n\n indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]\n\n output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2]\n\n`Example 5`\n\n batch_dims = 1\n\n data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\n\n indices = [[1],[0]] # indices_shape = [2, 1]\n\n output = [[2,3],[4,5]] # output_shape = [2, 2]\n\n\n", + "attributes": [ + { + "name": "batch_dims", + "type": "int64", + "required": false, + "description": "The number of batch dimensions. The gather of indexing starts from dimension of data[batch_dims:]" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "tensor(int64)", + "description": "Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank q + r - indices_shape[-1] - 1." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "float32", + "code": "node = onnx.helper.make_node(\n \"GatherND\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"output\"],\n)\n\ndata = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.float32)\nindices = np.array([[[0, 1]], [[1, 0]]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 0)\nexpected_output = np.array([[[2, 3]], [[4, 5]]], dtype=np.float32)\nassert np.array_equal(output, expected_output)\nexpect(\n node,\n inputs=[data, indices],\n outputs=[output],\n name=\"test_gathernd_example_float32\",\n)" + }, + { + "summary": "int32", + "code": "node = onnx.helper.make_node(\n \"GatherND\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"output\"],\n)\n\ndata = np.array([[0, 1], [2, 3]], dtype=np.int32)\nindices = np.array([[0, 0], [1, 1]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 0)\nexpected_output = np.array([0, 3], dtype=np.int32)\nassert np.array_equal(output, expected_output)\nexpect(\n node,\n inputs=[data, indices],\n outputs=[output],\n name=\"test_gathernd_example_int32\",\n)" + }, + { + "summary": "int32_batchdim_1", + "code": "node = onnx.helper.make_node(\n \"GatherND\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"output\"],\n batch_dims=1,\n)\n\ndata = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.int32)\nindices = np.array([[1], [0]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 1)\nexpected_output = np.array([[2, 3], [4, 5]], dtype=np.int32)\nassert np.array_equal(output, expected_output)\nexpect(\n node,\n inputs=[data, indices],\n outputs=[output],\n name=\"test_gathernd_example_int32_batch_dim1\",\n)" + } + ] + }, + { + "name": "GatherND", + "module": "ai.onnx", + "version": 13, + "description": "Given `data` tensor of rank `r` >= 1, `indices` tensor of rank `q` >= 1, and `batch_dims` integer `b`, this operator gathers\nslices of `data` into an output tensor of rank `q + r - indices_shape[-1] - 1 - b`.\n\n`indices` is an q-dimensional integer tensor, best thought of as a `(q-1)`-dimensional tensor of index-tuples into `data`,\nwhere each element defines a slice of `data`\n\n`batch_dims` (denoted as `b`) is an integer indicating the number of batch dimensions, i.e the leading `b` number of dimensions of\n`data` tensor and `indices` are representing the batches, and the gather starts from the `b+1` dimension.\n\nSome salient points about the inputs' rank and shape:\n\n1) r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks `r` and `q`\n\n2) The first `b` dimensions of the shape of `indices` tensor and `data` tensor must be equal.\n\n3) b < min(q, r) is to be honored.\n\n4) The `indices_shape[-1]` should have a value between 1 (inclusive) and rank `r-b` (inclusive)\n\n5) All values in `indices` are expected to be within bounds [-s, s-1] along axis of size `s` (i.e.) 
`-data_shape[i] <= indices[...,i] <= data_shape[i] - 1`.\n It is an error if any of the index values are out of bounds.\n\nThe output is computed as follows:\n\nThe output tensor is obtained by mapping each index-tuple in the `indices` tensor to the corresponding slice of the input `data`.\n\n1) If `indices_shape[-1] > r-b` => error condition\n\n2) If `indices_shape[-1] == r-b`, since the rank of `indices` is `q`, `indices` can be thought of as `N` `(q-b-1)`-dimensional tensors\n containing 1-D tensors of dimension `r-b`, where `N` is an integer equals to the product of 1 and all the elements in the batch dimensions\n of the indices_shape. Let us think of each such `r-b` ranked tensor as `indices_slice`. Each *scalar value* corresponding to `data[0:b-1,indices_slice]`\n is filled into the corresponding location of the `(q-b-1)`-dimensional tensor to form the `output` tensor (Example 1 below)\n\n3) If `indices_shape[-1] < r-b`, since the rank of `indices` is `q`, `indices` can be thought of as `N` `(q-b-1)`-dimensional tensor\n containing 1-D tensors of dimension `< r-b`. Let us think of each such tensors as `indices_slice`. Each *tensor slice* corresponding\n to `data[0:b-1, indices_slice , :]` is filled into the corresponding location of the `(q-b-1)`-dimensional tensor\n to form the `output` tensor (Examples 2, 3, 4 and 5 below)\n\nThis operator is the inverse of `ScatterND`.\n\n**Example 1**\n\n```\nbatch_dims = 0\ndata = [[0,1],[2,3]] # data_shape = [2, 2]\nindices = [[0,0],[1,1]] # indices_shape = [2, 2]\noutput = [0,3] # output_shape = [2]\n```\n\n**Example 2**\n\n```\nbatch_dims = 0\ndata = [[0,1],[2,3]] # data_shape = [2, 2]\nindices = [[1],[0]] # indices_shape = [2, 1]\noutput = [[2,3],[0,1]] # output_shape = [2, 2]\n```\n\n**Example 3**\n\n```\nbatch_dims = 0\ndata = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\nindices = [[0,1],[1,0]] # indices_shape = [2, 2]\noutput = [[2,3],[4,5]] # output_shape = [2, 2]\n```\n\n**Example 4**\n\n```\nbatch_dims = 0\ndata = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\nindices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]\noutput = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2]\n```\n\n**Example 5**\n\n```\nbatch_dims = 1\ndata = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\nindices = [[1],[0]] # indices_shape = [2, 1]\noutput = [[2,3],[4,5]] # output_shape = [2, 2]\n```\n", + "attributes": [ + { + "name": "batch_dims", + "type": "int64", + "required": false, + "description": "The number of batch dimensions. The gather of indexing starts from dimension of data[batch_dims:]" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "tensor(int64)", + "description": "Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank q + r - indices_shape[-1] - 1." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "float32", + "code": "node = onnx.helper.make_node(\n \"GatherND\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"output\"],\n)\n\ndata = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.float32)\nindices = np.array([[[0, 1]], [[1, 0]]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 0)\nexpected_output = np.array([[[2, 3]], [[4, 5]]], dtype=np.float32)\nassert np.array_equal(output, expected_output)\nexpect(\n node,\n inputs=[data, indices],\n outputs=[output],\n name=\"test_gathernd_example_float32\",\n)" + }, + { + "summary": "int32", + "code": "node = onnx.helper.make_node(\n \"GatherND\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"output\"],\n)\n\ndata = np.array([[0, 1], [2, 3]], dtype=np.int32)\nindices = np.array([[0, 0], [1, 1]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 0)\nexpected_output = np.array([0, 3], dtype=np.int32)\nassert np.array_equal(output, expected_output)\nexpect(\n node,\n inputs=[data, indices],\n outputs=[output],\n name=\"test_gathernd_example_int32\",\n)" + }, + { + "summary": "int32_batchdim_1", + "code": "node = onnx.helper.make_node(\n \"GatherND\",\n inputs=[\"data\", \"indices\"],\n outputs=[\"output\"],\n batch_dims=1,\n)\n\ndata = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.int32)\nindices = np.array([[1], [0]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 1)\nexpected_output = np.array([[2, 3], [4, 5]], dtype=np.int32)\nassert np.array_equal(output, expected_output)\nexpect(\n node,\n inputs=[data, indices],\n outputs=[output],\n name=\"test_gathernd_example_int32_batch_dim1\",\n)" + } + ] + }, + { + "name": "Gelu", + "module": "ai.onnx", + "version": 20, + "description": "Gelu takes one input data (Tensor) and produces one\noutput data (Tensor) where the gaussian error linear units function,\n$y = 0.5 * x * (1 + erf(x/sqrt(2)))$ is applied to the tensor elementwise.\nIf the attribute \"approximate\" is set to \"tanh\", the function estimation,\n$y = 0.5 * x * (1 + Tanh(sqrt(2/\\pi) * (x + 0.044715 * x^3)))$ is used and applied\nto the tensor elementwise.\n\n", + "attributes": [ + { + "name": "approximate", + "type": "string", + "required": false, + "default": "none", + "description": "Gelu approximation algorithm: `\"tanh\"`, `\"none\"`(default).`\"none\"`: do not use approximation.`\"tanh\"`: use tanh approximation." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "gelu_default", + "code": "node = onnx.helper.make_node(\"Gelu\", inputs=[\"x\"], outputs=[\"y\"])\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-0.15865526, 0., 0.84134474]\ny = (0.5 * x * (1 + np.vectorize(math.erf)(x / np.sqrt(2)))).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_gelu_default_1\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\n# expected output [2.99595031, 3.99987331, 4.99999857]\ny = (0.5 * x * (1 + np.vectorize(math.erf)(x / np.sqrt(2)))).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_gelu_default_2\")" + }, + { + "summary": "gelu_tanh", + "code": "node = onnx.helper.make_node(\n \"Gelu\", inputs=[\"x\"], outputs=[\"y\"], approximate=\"tanh\"\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-0.158808, 0., 0.841192]\ny = (\n 0.5\n * x\n * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))\n).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_gelu_tanh_1\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\n# expected output [2.9963627, 3.99993, 4.9999995]\ny = (\n 0.5\n * x\n * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))\n).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_gelu_tanh_2\")" + } + ] + }, + { + "name": "Gemm", + "module": "ai.onnx", + "version": 1, + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\nCompute Y = alpha * A * B + beta * C, where input tensor A has\ndimension (M X K), input tensor B has dimension (K X N), input tensor C and\noutput tensor Y have dimension (M X N).\nIf attribute broadcast is non-zero, input tensor C will be broadcasted to match\nthe dimension requirement. A will be transposed before doing the computation\nif attribute transA is non-zero, same for B and transB.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for the product of input tensors A * B, the default value is 1.0." + }, + { + "name": "beta", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for input tensor C, the default value is 1.0." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Whether C should be broadcasted" + }, + { + "name": "transA", + "type": "int64", + "required": false, + "description": "Whether A should be transposed" + }, + { + "name": "transB", + "type": "int64", + "required": false, + "description": "Whether B should be transposed" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Input tensor A" + }, + { + "name": "B", + "type": "T", + "description": "Input tensor B" + }, + { + "name": "C", + "type": "T", + "description": "Input tensor C, can be inplace." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "all_attributes", + "code": "node = onnx.helper.make_node(\n \"Gemm\",\n inputs=[\"a\", \"b\", \"c\"],\n outputs=[\"y\"],\n alpha=0.25,\n beta=0.35,\n transA=1,\n transB=1,\n)\na = np.random.ranf([4, 3]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.random.ranf([1, 5]).astype(np.float32)\ny = gemm_reference_implementation(\n a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35\n)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_all_attributes\")" + }, + { + "summary": "alpha", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], alpha=0.5\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, alpha=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_alpha\")" + }, + { + "summary": "beta", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], beta=0.5\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, beta=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_beta\")" + }, + { + "summary": "default_matrix_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.random.ranf([3, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_matrix_bias\"\n)" + }, + { + "summary": "default_no_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\"], outputs=[\"y\"])\na = np.random.ranf([2, 10]).astype(np.float32)\nb = np.random.ranf([10, 3]).astype(np.float32)\ny = gemm_reference_implementation(a, b)\nexpect(node, inputs=[a, b], outputs=[y], name=\"test_gemm_default_no_bias\")" + }, + { + "summary": "default_scalar_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 3]).astype(np.float32)\nb = np.random.ranf([3, 4]).astype(np.float32)\nc = np.array(3.14).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_scalar_bias\"\n)" + }, + { + "summary": "default_single_elem_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 7]).astype(np.float32)\nb = np.random.ranf([7, 3]).astype(np.float32)\nc = np.random.ranf([1]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node,\n inputs=[a, b, c],\n outputs=[y],\n name=\"test_gemm_default_single_elem_vector_bias\",\n)" + }, + { + "summary": "default_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, 
c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_vector_bias\"\n)" + }, + { + "summary": "default_zero_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_zero_bias\")" + }, + { + "summary": "transposeA", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transA=1\n)\na = np.random.ranf([6, 3]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeA\")" + }, + { + "summary": "transposeB", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transB=1\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([4, 6]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transB=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeB\")" + } + ], + "category": "Layer" + }, + { + "name": "Gemm", + "module": "ai.onnx", + "version": 6, + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\nCompute Y = alpha * A * B + beta * C, where input tensor A has\ndimension (M X K), input tensor B has dimension (K X N), input tensor C and\noutput tensor Y have dimension (M X N).\nIf attribute broadcast is non-zero, input tensor C will be broadcasted to match\nthe dimension requirement. A will be transposed before doing the computation\nif attribute transA is non-zero, same for B and transB.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for the product of input tensors A * B, the default value is 1.0." + }, + { + "name": "beta", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for input tensor C, the default value is 1.0." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Whether C should be broadcasted" + }, + { + "name": "transA", + "type": "int64", + "required": false, + "description": "Whether A should be transposed" + }, + { + "name": "transB", + "type": "int64", + "required": false, + "description": "Whether B should be transposed" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Input tensor A" + }, + { + "name": "B", + "type": "T", + "description": "Input tensor B" + }, + { + "name": "C", + "type": "T", + "description": "Input tensor C" + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "all_attributes", + "code": "node = onnx.helper.make_node(\n \"Gemm\",\n inputs=[\"a\", \"b\", \"c\"],\n outputs=[\"y\"],\n alpha=0.25,\n beta=0.35,\n transA=1,\n transB=1,\n)\na = np.random.ranf([4, 3]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.random.ranf([1, 5]).astype(np.float32)\ny = gemm_reference_implementation(\n a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35\n)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_all_attributes\")" + }, + { + "summary": "alpha", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], alpha=0.5\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, alpha=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_alpha\")" + }, + { + "summary": "beta", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], beta=0.5\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, beta=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_beta\")" + }, + { + "summary": "default_matrix_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.random.ranf([3, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_matrix_bias\"\n)" + }, + { + "summary": "default_no_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\"], outputs=[\"y\"])\na = np.random.ranf([2, 10]).astype(np.float32)\nb = np.random.ranf([10, 3]).astype(np.float32)\ny = gemm_reference_implementation(a, b)\nexpect(node, inputs=[a, b], outputs=[y], name=\"test_gemm_default_no_bias\")" + }, + { + "summary": "default_scalar_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 3]).astype(np.float32)\nb = np.random.ranf([3, 4]).astype(np.float32)\nc = np.array(3.14).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_scalar_bias\"\n)" + }, + { + "summary": "default_single_elem_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 7]).astype(np.float32)\nb = np.random.ranf([7, 3]).astype(np.float32)\nc = np.random.ranf([1]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node,\n inputs=[a, b, c],\n outputs=[y],\n name=\"test_gemm_default_single_elem_vector_bias\",\n)" + }, + { + "summary": "default_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, 
c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_vector_bias\"\n)" + }, + { + "summary": "default_zero_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_zero_bias\")" + }, + { + "summary": "transposeA", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transA=1\n)\na = np.random.ranf([6, 3]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeA\")" + }, + { + "summary": "transposeB", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transB=1\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([4, 6]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transB=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeB\")" + } + ], + "category": "Layer" + }, + { + "name": "Gemm", + "module": "ai.onnx", + "version": 7, + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\n\nA' = transpose(A) if transA else A\n\nB' = transpose(B) if transB else B\n\nCompute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),\ninput tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N),\nand output tensor Y has shape (M, N). A will be transposed before doing the\ncomputation if attribute transA is non-zero, same for B and transB.\nThis operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for the product of input tensors A * B." + }, + { + "name": "beta", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for input tensor C." + }, + { + "name": "transA", + "type": "int64", + "required": false, + "description": "Whether A should be transposed" + }, + { + "name": "transB", + "type": "int64", + "required": false, + "description": "Whether B should be transposed" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero." + }, + { + "name": "B", + "type": "T", + "description": "Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero." + }, + { + "name": "C", + "type": "T", + "description": "Input tensor C. The shape of C should be unidirectional broadcastable to (M, N)." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor of shape (M, N)." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "all_attributes", + "code": "node = onnx.helper.make_node(\n \"Gemm\",\n inputs=[\"a\", \"b\", \"c\"],\n outputs=[\"y\"],\n alpha=0.25,\n beta=0.35,\n transA=1,\n transB=1,\n)\na = np.random.ranf([4, 3]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.random.ranf([1, 5]).astype(np.float32)\ny = gemm_reference_implementation(\n a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35\n)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_all_attributes\")" + }, + { + "summary": "alpha", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], alpha=0.5\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, alpha=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_alpha\")" + }, + { + "summary": "beta", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], beta=0.5\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, beta=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_beta\")" + }, + { + "summary": "default_matrix_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.random.ranf([3, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_matrix_bias\"\n)" + }, + { + "summary": "default_no_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\"], outputs=[\"y\"])\na = np.random.ranf([2, 10]).astype(np.float32)\nb = np.random.ranf([10, 3]).astype(np.float32)\ny = gemm_reference_implementation(a, b)\nexpect(node, inputs=[a, b], outputs=[y], name=\"test_gemm_default_no_bias\")" + }, + { + "summary": "default_scalar_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 3]).astype(np.float32)\nb = np.random.ranf([3, 4]).astype(np.float32)\nc = np.array(3.14).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_scalar_bias\"\n)" + }, + { + "summary": "default_single_elem_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 7]).astype(np.float32)\nb = np.random.ranf([7, 3]).astype(np.float32)\nc = np.random.ranf([1]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node,\n inputs=[a, b, c],\n outputs=[y],\n name=\"test_gemm_default_single_elem_vector_bias\",\n)" + }, + { + "summary": "default_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, 
c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_vector_bias\"\n)" + }, + { + "summary": "default_zero_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_zero_bias\")" + }, + { + "summary": "transposeA", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transA=1\n)\na = np.random.ranf([6, 3]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeA\")" + }, + { + "summary": "transposeB", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transB=1\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([4, 6]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transB=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeB\")" + } + ], + "category": "Layer" + }, + { + "name": "Gemm", + "module": "ai.onnx", + "version": 9, + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\n\nA' = transpose(A) if transA else A\n\nB' = transpose(B) if transB else B\n\nCompute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),\ninput tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N),\nand output tensor Y has shape (M, N). A will be transposed before doing the\ncomputation if attribute transA is non-zero, same for B and transB.\nThis operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for the product of input tensors A * B." + }, + { + "name": "beta", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for input tensor C." + }, + { + "name": "transA", + "type": "int64", + "required": false, + "description": "Whether A should be transposed" + }, + { + "name": "transB", + "type": "int64", + "required": false, + "description": "Whether B should be transposed" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero." + }, + { + "name": "B", + "type": "T", + "description": "Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero." + }, + { + "name": "C", + "type": "T", + "description": "Input tensor C. The shape of C should be unidirectional broadcastable to (M, N)." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor of shape (M, N)." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float/int tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "all_attributes", + "code": "node = onnx.helper.make_node(\n \"Gemm\",\n inputs=[\"a\", \"b\", \"c\"],\n outputs=[\"y\"],\n alpha=0.25,\n beta=0.35,\n transA=1,\n transB=1,\n)\na = np.random.ranf([4, 3]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.random.ranf([1, 5]).astype(np.float32)\ny = gemm_reference_implementation(\n a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35\n)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_all_attributes\")" + }, + { + "summary": "alpha", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], alpha=0.5\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, alpha=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_alpha\")" + }, + { + "summary": "beta", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], beta=0.5\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, beta=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_beta\")" + }, + { + "summary": "default_matrix_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.random.ranf([3, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_matrix_bias\"\n)" + }, + { + "summary": "default_no_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\"], outputs=[\"y\"])\na = np.random.ranf([2, 10]).astype(np.float32)\nb = np.random.ranf([10, 3]).astype(np.float32)\ny = gemm_reference_implementation(a, b)\nexpect(node, inputs=[a, b], outputs=[y], name=\"test_gemm_default_no_bias\")" + }, + { + "summary": "default_scalar_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 3]).astype(np.float32)\nb = np.random.ranf([3, 4]).astype(np.float32)\nc = np.array(3.14).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_scalar_bias\"\n)" + }, + { + "summary": "default_single_elem_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 7]).astype(np.float32)\nb = np.random.ranf([7, 3]).astype(np.float32)\nc = np.random.ranf([1]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node,\n inputs=[a, b, c],\n outputs=[y],\n name=\"test_gemm_default_single_elem_vector_bias\",\n)" + }, + { + "summary": "default_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = 
np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_vector_bias\"\n)" + }, + { + "summary": "default_zero_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_zero_bias\")" + }, + { + "summary": "transposeA", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transA=1\n)\na = np.random.ranf([6, 3]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeA\")" + }, + { + "summary": "transposeB", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transB=1\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([4, 6]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transB=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeB\")" + } + ], + "category": "Layer" + }, + { + "name": "Gemm", + "module": "ai.onnx", + "version": 11, + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\n\nA' = transpose(A) if transA else A\n\nB' = transpose(B) if transB else B\n\nCompute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),\ninput tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N),\nand output tensor Y has shape (M, N). A will be transposed before doing the\ncomputation if attribute transA is non-zero, same for B and transB.\nThis operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for the product of input tensors A * B." + }, + { + "name": "beta", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for input tensor C." + }, + { + "name": "transA", + "type": "int64", + "required": false, + "description": "Whether A should be transposed" + }, + { + "name": "transB", + "type": "int64", + "required": false, + "description": "Whether B should be transposed" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero." + }, + { + "name": "B", + "type": "T", + "description": "Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero." 
+ }, + { + "name": "C", + "type": "T", + "option": "optional", + "description": "Optional input tensor C. If not specified, the computation is done as if C is a scalar 0. The shape of C should be unidirectional broadcastable to (M, N)." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor of shape (M, N)." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float/int tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "all_attributes", + "code": "node = onnx.helper.make_node(\n \"Gemm\",\n inputs=[\"a\", \"b\", \"c\"],\n outputs=[\"y\"],\n alpha=0.25,\n beta=0.35,\n transA=1,\n transB=1,\n)\na = np.random.ranf([4, 3]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.random.ranf([1, 5]).astype(np.float32)\ny = gemm_reference_implementation(\n a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35\n)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_all_attributes\")" + }, + { + "summary": "alpha", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], alpha=0.5\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, alpha=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_alpha\")" + }, + { + "summary": "beta", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], beta=0.5\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, beta=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_beta\")" + }, + { + "summary": "default_matrix_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.random.ranf([3, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_matrix_bias\"\n)" + }, + { + "summary": "default_no_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\"], outputs=[\"y\"])\na = np.random.ranf([2, 10]).astype(np.float32)\nb = np.random.ranf([10, 3]).astype(np.float32)\ny = gemm_reference_implementation(a, b)\nexpect(node, inputs=[a, b], outputs=[y], name=\"test_gemm_default_no_bias\")" + }, + { + "summary": "default_scalar_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 3]).astype(np.float32)\nb = np.random.ranf([3, 4]).astype(np.float32)\nc = np.array(3.14).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_scalar_bias\"\n)" + }, + { + "summary": "default_single_elem_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 7]).astype(np.float32)\nb = np.random.ranf([7, 3]).astype(np.float32)\nc = 
np.random.ranf([1]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node,\n inputs=[a, b, c],\n outputs=[y],\n name=\"test_gemm_default_single_elem_vector_bias\",\n)" + }, + { + "summary": "default_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_vector_bias\"\n)" + }, + { + "summary": "default_zero_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_zero_bias\")" + }, + { + "summary": "transposeA", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transA=1\n)\na = np.random.ranf([6, 3]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeA\")" + }, + { + "summary": "transposeB", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transB=1\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([4, 6]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transB=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeB\")" + } + ], + "category": "Layer" + }, + { + "name": "Gemm", + "module": "ai.onnx", + "version": 13, + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\n\n* A' = transpose(A) if transA else A\n* B' = transpose(B) if transB else B\n\nCompute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),\ninput tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N),\nand output tensor Y has shape (M, N). A will be transposed before doing the\ncomputation if attribute transA is non-zero, same for B and transB.\nThis operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for the product of input tensors A * B." + }, + { + "name": "beta", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Scalar multiplier for input tensor C." 
+ }, + { + "name": "transA", + "type": "int64", + "required": false, + "description": "Whether A should be transposed" + }, + { + "name": "transB", + "type": "int64", + "required": false, + "description": "Whether B should be transposed" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero." + }, + { + "name": "B", + "type": "T", + "description": "Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero." + }, + { + "name": "C", + "type": "T", + "option": "optional", + "description": "Optional input tensor C. If not specified, the computation is done as if C is a scalar 0. The shape of C should be unidirectional broadcastable to (M, N)." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor of shape (M, N)." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float/int tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "all_attributes", + "code": "node = onnx.helper.make_node(\n \"Gemm\",\n inputs=[\"a\", \"b\", \"c\"],\n outputs=[\"y\"],\n alpha=0.25,\n beta=0.35,\n transA=1,\n transB=1,\n)\na = np.random.ranf([4, 3]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.random.ranf([1, 5]).astype(np.float32)\ny = gemm_reference_implementation(\n a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35\n)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_all_attributes\")" + }, + { + "summary": "alpha", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], alpha=0.5\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, alpha=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_alpha\")" + }, + { + "summary": "beta", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], beta=0.5\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, beta=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_beta\")" + }, + { + "summary": "default_matrix_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.random.ranf([3, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_matrix_bias\"\n)" + }, + { + "summary": "default_no_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\"], outputs=[\"y\"])\na = np.random.ranf([2, 10]).astype(np.float32)\nb = np.random.ranf([10, 3]).astype(np.float32)\ny = gemm_reference_implementation(a, b)\nexpect(node, inputs=[a, b], outputs=[y], name=\"test_gemm_default_no_bias\")" + }, + { + "summary": "default_scalar_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", 
inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 3]).astype(np.float32)\nb = np.random.ranf([3, 4]).astype(np.float32)\nc = np.array(3.14).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_scalar_bias\"\n)" + }, + { + "summary": "default_single_elem_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 7]).astype(np.float32)\nb = np.random.ranf([7, 3]).astype(np.float32)\nc = np.random.ranf([1]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node,\n inputs=[a, b, c],\n outputs=[y],\n name=\"test_gemm_default_single_elem_vector_bias\",\n)" + }, + { + "summary": "default_vector_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(\n node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_vector_bias\"\n)" + }, + { + "summary": "default_zero_bias", + "code": "node = onnx.helper.make_node(\"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"])\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_default_zero_bias\")" + }, + { + "summary": "transposeA", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transA=1\n)\na = np.random.ranf([6, 3]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeA\")" + }, + { + "summary": "transposeB", + "code": "node = onnx.helper.make_node(\n \"Gemm\", inputs=[\"a\", \"b\", \"c\"], outputs=[\"y\"], transB=1\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([4, 6]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transB=1)\nexpect(node, inputs=[a, b, c], outputs=[y], name=\"test_gemm_transposeB\")" + } + ], + "category": "Layer" + }, + { + "name": "GlobalAveragePool", + "module": "ai.onnx", + "version": 1, + "description": "GlobalAveragePool consumes an input tensor X and applies average pooling across\n the values in the same channel. This is equivalent to AveragePool with kernel size\n equal to the spatial dimension of input tensor.", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "globalaveragepool", + "code": "node = onnx.helper.make_node(\n \"GlobalAveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 5, 5).astype(np.float32)\ny = np.mean(x, axis=tuple(range(2, np.ndim(x))), keepdims=True)\nexpect(node, inputs=[x], outputs=[y], name=\"test_globalaveragepool\")" + }, + { + "summary": "globalaveragepool_precomputed", + "code": "node = onnx.helper.make_node(\n \"GlobalAveragePool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[5]]]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_globalaveragepool_precomputed\")" + } + ], + "category": "Pool" + }, + { + "name": "GlobalLpPool", + "module": "ai.onnx", + "version": 1, + "description": "GlobalLpPool consumes an input tensor X and applies lp pool pooling across the\n the values in the same channel. This is equivalent to LpPool with kernel size\n equal to the spatial dimension of input tensor.", + "attributes": [ + { + "name": "p", + "type": "float32", + "required": false, + "default": 2.0, + "description": "p value of the Lp norm used to pool over the input data, default is 2.0." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimension are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from pooling across the input tensor. Dimensions will be N x C x 1 x 1" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "category": "Pool" + }, + { + "name": "GlobalLpPool", + "module": "ai.onnx", + "version": 2, + "description": "GlobalLpPool consumes an input tensor X and applies lp pool pooling across\n the values in the same channel. This is equivalent to LpPool with kernel size\n equal to the spatial dimension of input tensor.", + "attributes": [ + { + "name": "p", + "type": "int64", + "required": false, + "default": 2, + "description": "p value of the Lp norm used to pool over the input data." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. 
The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "category": "Pool" + }, + { + "name": "GlobalMaxPool", + "module": "ai.onnx", + "version": 1, + "description": "GlobalMaxPool consumes an input tensor X and applies max pooling across\n the values in the same channel. This is equivalent to MaxPool with kernel size\n equal to the spatial dimension of input tensor.", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "globalmaxpool", + "code": "node = onnx.helper.make_node(\n \"GlobalMaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 5, 5).astype(np.float32)\ny = np.max(x, axis=tuple(range(2, np.ndim(x))), keepdims=True)\nexpect(node, inputs=[x], outputs=[y], name=\"test_globalmaxpool\")" + }, + { + "summary": "globalmaxpool_precomputed", + "code": "node = onnx.helper.make_node(\n \"GlobalMaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[9]]]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_globalmaxpool_precomputed\")" + } + ], + "category": "Pool" + }, + { + "name": "Gradient", + "module": "ai.onnx.preview.training", + "version": 1, + "description": "Gradient operator computes the partial derivatives of a specific tensor w.r.t.\nsome other tensors. This operator is widely used in gradient-based training\nalgorithms. To illustrate its use, let's consider a computation graph,\n\n```\nX -----.\n |\n v\nW --> Conv --> H --> Gemm --> Y\n ^\n |\n Z\n```\n\n, where W and Z are trainable tensors. Note that operators' attributes are\nomitted for the sake of simplicity. Let dY/dW (dY/dZ) be the gradient of\nY with respect to W (Z). 
The user can compute gradient by inserting Gradient\noperator to form another graph shown below.\n\n```\nW --> Conv --> H --> Gemm --> Y\n| ^ ^\n| | |\n| X Z\n| | |\n| | .----------'\n| | | (W/Z/X is the 1st/2nd/3rd input of Gradient as shown in\n| | | \"xs\" followed by \"zs\")\n| v v\n'---> Gradient(xs=[\"W\", \"Z\"], zs=[\"X\"], y=\"Y\")\n | |\n | '-----------------------------------> dY/dW (1st output of Gradient)\n |\n '---------------------------------------> dY/dZ (2nd output of Gradient)\n```\n\nBy definition, the tensor \"y\" is a function of independent variables in \"xs\"\nand \"zs\". Since we only compute the gradient of \"y\" w.r.t. the differentiable\nvariables in \"xs\", this Gradient only outputs dY/dW and dY/dZ. Note that \"H\"\ncannot appear in \"xs\" and \"zs\". The reason is that \"H\" can be determined by\ntensors \"W\" and \"X\" and therefore \"H\" is not an independent variable.\n\nAll outputs are optional. If needed, for example, user can assign an empty\nstring to the 1st output name of that Gradient to skip the generation of dY/dW.\nNote that the concept of optional outputs can also be found in ONNX's RNN, GRU,\nand LSTM.\n\nGradient operator can compute derivative against intermediate tensors. For\nexample, the gradient of Y with respect to H can be done via\n\n```\nW --> Conv --> H --> Gemm --> Y\n ^ | ^\n | | |\n X | Z\n .-------' |\n | .----------'\n | | (H/Z is the 1st/2nd input of Gradient as shown in \"xs\")\n v v\n Gradient(xs=[\"H\", \"Z\"], y=\"Y\")\n | |\n | '-----------------------------------> dY/dH (1st output of Gradient)\n |\n '---------------------------------------> dY/dZ (2nd output of Gradient)\n```\n\nIt is possible to represent high-order differentiation using Gradient operators.\nFor example, given the following linear model:\n\n```\nW --> Gemm --> Y --> Loss --> O\n ^ ^\n | |\n X L\n```\n\nTo compute the 2nd order derivative of O with respect to W (denoted by\nd^2O/dW^2), one can do\n\n```\nW --> Gemm --> Y --> Loss --> O\n| ^ ^\n| | |\n| X .------------L\n| | | |\n| | | v\n+------+-+> Gradient(xs=[\"X\", \"W\"], zs=[\"L\"], y=\"O\") ---> dO/dX (1st output of Gradient)\n| | | |\n| | | '---> dO/dW (2nd output of Gradient)\n| v v\n'---> Gradient(xs=[\"X\", \"W\"], zs=[\"L\"], y=\"dO/dW\") ---> d(dO/dW)dX (1st output of\n | Gradient)\n |\n |\n '---> d^2O/dW^2 (2nd output of Gradient)\n```\n\nThe tensors named in attributes \"xs\", \"zs\", and \"y\" define the differentiated\ncomputation graph, and the inputs to Gradient node define the values at\nwhich the gradient is computed. We can feed different tensors to the identified\ngraph. For example, one can compute the gradient of Y with respect to H at\na specific value of H, H_1, by providing that value as an input to the Gradient\nnode.\n\n```\nW --> Conv --> H --> Gemm --> Y\n ^ ^\n | |\n X Z\n\n Z_1 (2nd input of Gradient)\n |\n v\nH_1 --> Gradient(xs=[\"H\", \"Z\"], y=\"Y\") ---> dY/dH when H = H_1 and Y = Y_1.\n |\n '------------------------------> dY/dZ (2nd output of Gradient)\n```\n\nWhen the inputs of Gradient are the tensors named in \"xs\" and \"zs\", the\ncomputation can be optimized. More specifically, intermediate variables in\nforward pass can be reused if the gradient is computed via reverse-mode\nauto-differentiation.\n\n", + "attributes": [ + { + "name": "xs", + "type": "string[]", + "required": true, + "description": "Input tensor names of the differentiated sub-graph. It contains only the necessary differentiated inputs of a (sub-)graph. 
Variables (usually called intermediate variables) that can be generated from inputs cannot be included in this attribute." + }, + { + "name": "y", + "type": "string", + "required": true, + "description": "The targeted tensor. It can be viewed as the output of the differentiated function. The attribute \"xs\" and attribute \"zs\" are the minimal independent variable set that determines the value of \"y\"." + }, + { + "name": "zs", + "type": "string[]", + "required": false, + "description": "Input tensor names of the differentiated sub-graph. It contains only the necessary non-differentiated inputs of a (sub-)graph. Variables (usually called intermediate variables) that can be generated from inputs cannot be included in this attribute." + } + ], + "inputs": [ + { + "name": "Inputs", + "type": "T1", + "list": true, + "description": "The values fed into graph identified by the attributes. The i-th input is the value of the i-th tensor specified in the concatenated list of the attribute \"xs\" and the attribute \"zs\". For example, if xs=[\"A\", \"B\"] and zs=[\"C\"], the first input is used as the value of symbol \"A\" and the 3rd input is substituted for all the occurrences of \"C\"." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "Outputs", + "type": "T2", + "list": true, + "description": "The gradient of the tensor specified by the attribute \"y\" with respect to each of tensors specified in the attribute \"xs\". The i-th output is the gradient of \"y\" with respect to the i-th tensor specified in the attribute \"xs\"." + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "1 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Allow inputs to be any kind of tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Allow outputs to be any kind of floating-point tensor.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "gradient_scalar_add", + "code": "add_node = onnx.helper.make_node(\"Add\", [\"a\", \"b\"], [\"c\"], name=\"my_add\")\ngradient_node = onnx.helper.make_node(\n \"Gradient\",\n [\"a\", \"b\"],\n [\"dc_da\", \"dc_db\"],\n name=\"my_gradient\",\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,\n xs=[\"a\", \"b\"],\n y=\"c\",\n)\n\na = np.array(1.0).astype(np.float32)\nb = np.array(2.0).astype(np.float32)\nc = a + b\n# dc / da = d(a+b) / da = 1\ndc_da = np.array(1).astype(np.float32)\n# dc / db = d(a+b) / db = 1\ndc_db = np.array(1).astype(np.float32)\n\ngraph = onnx.helper.make_graph(\n nodes=[add_node, gradient_node],\n name=\"GradientOfAdd\",\n inputs=[\n onnx.helper.make_tensor_value_info(\"a\", onnx.TensorProto.FLOAT, []),\n onnx.helper.make_tensor_value_info(\"b\", onnx.TensorProto.FLOAT, []),\n ],\n outputs=[\n onnx.helper.make_tensor_value_info(\"c\", onnx.TensorProto.FLOAT, []),\n onnx.helper.make_tensor_value_info(\"dc_da\", onnx.TensorProto.FLOAT, []),\n onnx.helper.make_tensor_value_info(\"dc_db\", onnx.TensorProto.FLOAT, []),\n ],\n)\nopsets = [\n onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),\n onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 
1),\n]\nmodel = onnx.helper.make_model_gen_version(\n graph, producer_name=\"backend-test\", opset_imports=opsets\n)\nexpect(\n model, inputs=[a, b], outputs=[c, dc_da, dc_db], name=\"test_gradient_of_add\"\n)" + }, + { + "summary": "gradient_scalar_add_and_mul", + "code": "add_node = onnx.helper.make_node(\"Add\", [\"a\", \"b\"], [\"c\"], name=\"my_add\")\nmul_node = onnx.helper.make_node(\"Mul\", [\"c\", \"a\"], [\"d\"], name=\"my_mul\")\ngradient_node = onnx.helper.make_node(\n \"Gradient\",\n [\"a\", \"b\"],\n [\"dd_da\", \"dd_db\"],\n name=\"my_gradient\",\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,\n xs=[\"a\", \"b\"],\n y=\"d\",\n)\n\na = np.array(1.0).astype(np.float32)\nb = np.array(2.0).astype(np.float32)\nc = a + b\n# d = a * c = a * (a + b)\nd = a * c\n# dd / da = d(a*a+a*b) / da = 2 * a + b\ndd_da = (2 * a + b).astype(np.float32)\n# dd / db = d(a*a+a*b) / db = a\ndd_db = a\n\ngraph = onnx.helper.make_graph(\n nodes=[add_node, mul_node, gradient_node],\n name=\"GradientOfTwoOperators\",\n inputs=[\n onnx.helper.make_tensor_value_info(\"a\", onnx.TensorProto.FLOAT, []),\n onnx.helper.make_tensor_value_info(\"b\", onnx.TensorProto.FLOAT, []),\n ],\n outputs=[\n onnx.helper.make_tensor_value_info(\"d\", onnx.TensorProto.FLOAT, []),\n onnx.helper.make_tensor_value_info(\"dd_da\", onnx.TensorProto.FLOAT, []),\n onnx.helper.make_tensor_value_info(\"dd_db\", onnx.TensorProto.FLOAT, []),\n ],\n)\n\nopsets = [\n onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),\n onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1),\n]\nmodel = onnx.helper.make_model_gen_version(\n graph, producer_name=\"backend-test\", opset_imports=opsets\n)\nexpect(\n model,\n inputs=[a, b],\n outputs=[d, dd_da, dd_db],\n name=\"test_gradient_of_add_and_mul\",\n)" + } + ] + }, + { + "name": "Greater", + "module": "ai.onnx", + "version": 1, + "description": "Returns the tensor resulted from performing the `greater` logical operation\nelementwise on the input tensors `A` and `B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Enable broadcasting" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Left input tensor for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Right input tensor for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
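The Gradient examples above reference ONNX_DOMAIN and AI_ONNX_PREVIEW_TRAINING_DOMAIN without defining them. Judging from the Gradient entry's own "module" field, they presumably hold the standard ONNX domain strings:

```
# Assumed values: the default ai.onnx domain is denoted by the empty string,
# and the training preview domain matches the Gradient entry's "module" field.
ONNX_DOMAIN = ""
AI_ONNX_PREVIEW_TRAINING_DOMAIN = "ai.onnx.preview.training"
```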
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "greater", + "code": "node = onnx.helper.make_node(\n \"Greater\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater\")" + }, + { + "summary": "greater", + "code": "node = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal\")" + }, + { + "summary": "greater_broadcast", + "code": "node = onnx.helper.make_node(\n \"Greater\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_bcast\")" + }, + { + "summary": "greater_broadcast", + "code": "node = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal_bcast\")" + } + ] + }, + { + "name": "Greater", + "module": "ai.onnx", + "version": 7, + "description": "Returns the tensor resulted from performing the `greater` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "greater", + "code": "node = onnx.helper.make_node(\n \"Greater\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater\")" + }, + { + "summary": "greater", + "code": "node = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal\")" + }, + { + "summary": "greater_broadcast", + "code": "node = onnx.helper.make_node(\n \"Greater\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_bcast\")" + }, + { + "summary": "greater_broadcast", + "code": "node = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal_bcast\")" + } + ] + }, + { + "name": "Greater", + "module": "ai.onnx", + "version": 9, + "description": "Returns the tensor resulted from performing the `greater` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "greater", + "code": "node = onnx.helper.make_node(\n \"Greater\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater\")" + }, + { + "summary": "greater", + "code": "node = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal\")" + }, + { + "summary": "greater_broadcast", + "code": "node = onnx.helper.make_node(\n \"Greater\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_bcast\")" + }, + { + "summary": "greater_broadcast", + "code": "node = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal_bcast\")" + } + ] + }, + { + "name": "Greater", + "module": "ai.onnx", + "version": 13, + "description": "Returns the tensor resulted from performing the `greater` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "greater", + "code": "node = onnx.helper.make_node(\n \"Greater\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater\")" + }, + { + "summary": "greater_or_equal", + "code": "node = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal\")" + }, + { + "summary": "greater_broadcast", + "code": "node = onnx.helper.make_node(\n \"Greater\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_bcast\")" + }, + { + "summary": "greater_or_equal_broadcast", + "code": "node = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal_bcast\")" + } + ] + }, + { + "name": "GreaterOrEqual", + "module": "ai.onnx", + "version": 12, + "description": "Returns the tensor resulting from performing the `greater_equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "greater_or_equal", + "code": "# Mirrored from the greater_equal example shown under the Greater entries above.\nnode = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal\")" + }, + { + "summary": "greater_or_equal_broadcast", + "code": "# Mirrored from the greater_equal broadcast example shown under the Greater entries above.\nnode = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal_bcast\")" + } + ] + }, + { + "name": "GreaterOrEqual", + "module": "ai.onnx", + "version": 16, + "description": "Returns the tensor resulting from performing the `greater_equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "greater_or_equal", + "code": "# Mirrored from the greater_equal example shown under the Greater entries above.\nnode = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal\")" + }, + { + "summary": "greater_or_equal_broadcast", + "code": "# Mirrored from the greater_equal broadcast example shown under the Greater entries above.\nnode = onnx.helper.make_node(\n \"GreaterOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"greater_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_greater_equal_bcast\")" + } + ] + }, + { + "name": "GridSample", + "module": "ai.onnx", + "version": 16, + "description": "Given an input `X` and a flow-field `grid`, computes the output `Y` using `X` values and pixel locations from `grid`.\nCurrently, only spatial (4-D) inputs are supported. For input `X` with shape (N, C, H, W) and `grid` with shape (N, H_out, W_out, 2),\nthe output `Y` will have shape (N, C, H_out, W_out).\n\nThe tensor `X` contains values at centers of square pixels in a H by W 2-dimensional image.\nThe tensor `grid` describes normalized positions where the output `Y` is to be computed\nusing a specified interpolation method (the mode) and a padding mode (for grid positions falling outside the 2-dimensional image).\n\nElements in `grid[N, H_out, W_out]` are size-2 vectors specifying positions in the 2-dimensional space of `X`.\nThey are used to interpolate output values of `Y[N, C, H_out, W_out]`.\n\nThe GridSample operator is often used in doing grid generator and sampler in the [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025).\nSee also in [torch.nn.functional.grid_sample](https://pytorch.org/docs/master/generated/torch.nn.functional.grid_sample.html#torch-nn-functional-grid-sample).\n", + "attributes": [ + { + "name": "align_corners", + "type": "int64", + "required": false, + "description": "If align_corners=1, the extrema (-1 and 1) are considered as referring to the center points of the input's corner pixels. If align_corners=0, they are instead considered as referring to the corner points of the input's corner pixels, making the sampling more resolution agnostic." 
+ }, + { + "name": "mode", + "type": "string", + "required": false, + "default": "bilinear", + "description": "Three interpolation modes: bilinear (default), nearest and bicubic." + }, + { + "name": "padding_mode", + "type": "string", + "required": false, + "default": "zeros", + "description": "Support padding modes for outside grid values: `zeros`(default), `border`, `reflection`. zeros: use 0 for out-of-bound grid locations, border: use border values for out-of-bound grid locations, reflection: use values at locations reflected by the border for out-of-bound grid locations. If index 0 represents the margin pixel, the reflected value at index -1 will be the same as the value at index 1. For location far away from the border, it will keep being reflected until becoming in bound. If pixel location x = -3.5 reflects by border -1 and becomes x' = 1.5, then reflects by border 1 and becomes x'' = 0.5." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "4-D tensor of shape (N, C, H, W), where N is the batch size, C is the number of channels, H and W are the height and width of the input data." + }, + { + "name": "grid", + "type": "T2", + "description": "Input offset, 4-D tensor of shape (N, H_out, W_out, 2), where H_out and W_out are the height and width of grid and output. Grid specifies the sampling pixel locations normalized by the input spatial dimensions. Therefore, it should have most values in the range of [-1, 1]. If grid has values outside the range of [-1, 1], the corresponding outputs will be handled as defined by padding_mode." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T1", + "description": "4-D tensor of shape (N, C, H_out, W_out) of sampled values. For integer input types, intermediate values are computed as floating point and cast to integer at the end." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input `X` and output `Y` types to all tensor types.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain grid types to float tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "gridsample", + "code": "node = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n padding_mode=\"zeros\",\n align_corners=0,\n)\n# X shape, [N, C, H, W] - [1, 1, 4, 4]\nX = np.array(\n [\n [\n [\n [0.0, 1.0, 2.0, 3.0],\n [4.0, 5.0, 6.0, 7.0],\n [8.0, 9.0, 10.0, 11.0],\n [12.0, 13.0, 14.0, 15.0],\n ]\n ]\n ],\n dtype=np.float32,\n)\n# Grid shape, [N, H_out, W_out, 2] - [1, 6, 6, 2]\nGrid = np.array(\n [\n [\n [\n [-1.0000, -1.0000],\n [-0.6000, -1.0000],\n [-0.2000, -1.0000],\n [0.2000, -1.0000],\n [0.6000, -1.0000],\n [1.0000, -1.0000],\n ],\n [\n [-1.0000, -0.6000],\n [-0.6000, -0.6000],\n [-0.2000, -0.6000],\n [0.2000, -0.6000],\n [0.6000, -0.6000],\n [1.0000, -0.6000],\n ],\n [\n [-1.0000, -0.2000],\n [-0.6000, -0.2000],\n [-0.2000, -0.2000],\n [0.2000, -0.2000],\n [0.6000, -0.2000],\n [1.0000, -0.2000],\n ],\n [\n [-1.0000, 0.2000],\n [-0.6000, 0.2000],\n [-0.2000, 0.2000],\n [0.2000, 0.2000],\n [0.6000, 0.2000],\n [1.0000, 0.2000],\n ],\n [\n [-1.0000, 0.6000],\n [-0.6000, 0.6000],\n [-0.2000, 0.6000],\n [0.2000, 0.6000],\n [0.6000, 0.6000],\n [1.0000, 0.6000],\n ],\n [\n [-1.0000, 1.0000],\n [-0.6000, 1.0000],\n [-0.2000, 1.0000],\n [0.2000, 1.0000],\n [0.6000, 1.0000],\n [1.0000, 1.0000],\n ],\n ]\n ],\n dtype=np.float32,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 6, 6]\nY = np.array(\n [\n [\n [\n [0.0000, 0.1500, 0.5500, 0.9500, 1.3500, 0.7500],\n [0.6000, 1.5000, 2.3000, 3.1000, 3.9000, 2.1000],\n [2.2000, 4.7000, 5.5000, 6.3000, 7.1000, 3.7000],\n [3.8000, 7.9000, 8.7000, 9.5000, 10.3000, 5.3000],\n [5.4000, 11.1000, 11.9000, 12.7000, 13.5000, 6.9000],\n [3.0000, 6.1500, 6.5500, 6.9500, 7.3500, 3.7500],\n ]\n ]\n ],\n dtype=np.float32,\n)\nexpect(node, inputs=[X, Grid], outputs=[Y], name=\"test_gridsample\")" + }, + { + "summary": "gridsample_mode_aligncorners", + "code": "# X shape, [N, C, H, W] - [1, 1, 3, 2]\nX = np.array(\n [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],\n dtype=np.float32,\n)\n# Grid shape, [N, H_out, W_out, 2] - [1, 2, 4, 2]\nGrid = np.array(\n [\n [\n [\n [-1.0000, -1.0000],\n [-0.5000, -0.5000],\n [-0.2000, -0.2000],\n [0.0000, 0.0000],\n ],\n [\n [0.0000, 0.0000],\n [-0.2000, -0.2000],\n [0.5000, 0.5000],\n [1.0000, 1.0000],\n ],\n ]\n ],\n dtype=np.float32,\n)\n\n# setting mode = 'bilinear', default align_corners = 0\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bilinear = np.array(\n [[[[0.0000, 0.5000, 1.7000, 2.5000], [2.5000, 1.7000, 4.5000, 1.2500]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bilinear],\n name=\"test_gridsample_bilinear\",\n)\n\n# setting mode = 'bilinear', align_corners = 1\nnode = 
onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n align_corners=1,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_align_corners = np.array(\n [[[[0.0000, 1.2500, 2.0000, 2.5000], [2.5000, 2.0000, 3.7500, 5.0000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_align_corners],\n name=\"test_gridsample_aligncorners_true\",\n)\n\n# setting mode = 'nearest'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_nearest = np.array(\n [[[[0.0, 0.0, 2.0, 2.0], [2.0, 2.0, 5.0, 0.0]]]],\n dtype=np.float32,\n)\n\nexpect(\n node, inputs=[X, Grid], outputs=[Y_nearest], name=\"test_gridsample_nearest\"\n)\n\n# setting mode = 'bicubic'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bicubic = np.array(\n [[[[-0.1406, 0.3828, 1.7556, 2.9688], [2.9688, 1.7556, 5.1445, 1.3906]]]],\n dtype=np.float32,\n)\n\nexpect(\n node, inputs=[X, Grid], outputs=[Y_bicubic], name=\"test_gridsample_bicubic\"\n)\n\n# ============================================================================\n# Additional tests\n# The reference output tensors were generated using PyTorch 2.0.\nGrid = np.array(\n [\n [\n [[-1.0, -0.8], [-0.6, -0.5], [-0.1, -0.2], [0.7, 0.0]],\n [[0.0, 0.4], [0.2, -0.2], [-0.3, 0.5], [-1.0, 1.0]],\n ]\n ],\n dtype=np.float32,\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n align_corners=0,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_nearest = np.array(\n [[[[0.0, 0.0, 2.0, 3.0], [4.0, 3.0, 4.0, 4.0]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_nearest],\n name=\"test_gridsample_nearest_align_corners_0_additional_1\",\n)\n\n# setting mode = 'nearest'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n align_corners=1,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_nearest = np.array(\n [[[[0.0, 0.0, 2.0, 3.0], [2.0, 3.0, 4.0, 4.0]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_nearest],\n name=\"test_gridsample_nearest_align_corners_1_additional_1\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n align_corners=0,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bilinear = np.array(\n [[[[0.0000, 0.4500, 1.8000, 2.4000], [3.7000, 2.1000, 3.7000, 1.0000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bilinear],\n name=\"test_gridsample_bilinear_align_corners_0_additional_1\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n align_corners=1,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bilinear = np.array(\n [[[[0.4000, 1.2000, 2.0500, 2.8500], [3.3000, 2.2000, 3.3500, 4.0000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bilinear],\n name=\"test_gridsample_bilinear_align_corners_1_additional_1\",\n)\n\n# These two new bicubic tests produce slightly higher error ~5e-5\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n align_corners=0,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 
2, 4]\nY_bicubic = np.array(\n [\n [\n [\n [-0.173250, 0.284265, 1.923106, 2.568000],\n [5.170375, 2.284414, 4.744844, 1.046875],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bicubic],\n name=\"test_gridsample_bicubic_align_corners_0_additional_1\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n align_corners=1,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bicubic = np.array(\n [\n [\n [\n [0.304001, 1.128750, 2.266270, 3.144844],\n [4.531500, 2.455360, 4.599819, 4.000000],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bicubic],\n name=\"test_gridsample_bicubic_align_corners_1_additional_1\",\n)" + }, + { + "summary": "gridsample_paddingmode", + "code": "# X shape, [N, C, H, W] - [1, 1, 3, 2]\nX = np.array(\n [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],\n dtype=np.float32,\n)\n# Grid shape, [N, H_out, W_out, 2] - [1, 2, 4, 2]\nGrid = np.array(\n [\n [\n [\n [-10.0000, -10.0000],\n [-5.0000, -5.0000],\n [-0.2000, -0.2000],\n [10.0000, 10.0000],\n ],\n [\n [10.0000, 10.0000],\n [-0.2000, -0.2000],\n [5.0000, 5.0000],\n [10.0000, 10.0000],\n ],\n ]\n ],\n dtype=np.float32,\n)\n\n# setting padding_mode = 'zeros'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n padding_mode=\"zeros\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_zeros = np.array(\n [[[[0.0000, 0.0000, 1.7000, 0.0000], [0.0000, 1.7000, 0.0000, 0.0000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_zeros],\n name=\"test_gridsample_zeros_padding\",\n)\n\n# setting padding_mode = 'border'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n padding_mode=\"border\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_border = np.array(\n [[[[0.0000, 0.0000, 1.7000, 5.0000], [5.0000, 1.7000, 5.0000, 5.0000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_border],\n name=\"test_gridsample_border_padding\",\n)\n\n# setting padding_mode = 'reflection'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n padding_mode=\"reflection\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_reflection = np.array(\n [[[[2.5000, 0.0000, 1.7000, 2.5000], [2.5000, 1.7000, 5.0000, 2.5000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_reflection],\n name=\"test_gridsample_reflection_padding\",\n)" + }, + { + "summary": "volumetric_gridsample_mode_aligncorners", + "code": "X = np.array(\n [\n [\n [\n [[1.0, 2.0], [3.0, 4.0]],\n [[5.0, 6.0], [7.0, 8.0]],\n [[9.0, 10.0], [11.0, 12.0]],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nGrid = np.array(\n [\n [\n [\n [[-1.0, -1.0, -1.0], [-1.0, -0.5, 0.3]],\n [[-0.5, -0.5, -0.5], [1.0, -0.6, -1.0]],\n [[-0.2, -0.2, -0.2], [0.4, 0.2, 0.6]],\n [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]],\n ],\n [\n [[0.0, 0.0, 0.0], [-1.0, 1.0, 0.0]],\n [[-0.2, -0.2, -0.2], [1.0, 0.4, -0.2]],\n [[0.5, 0.5, 0.5], [-1.0, -0.8, 0.8]],\n [[1.0, 1.0, 1.0], [0.4, 0.6, -0.3]],\n ],\n ]\n ],\n dtype=np.float32,\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n align_corners=0,\n)\n# Y shape, [N, C, D_out, H_out, W_out] - [1, 1, 2, 4, 2]\nY_nearest = np.array(\n [\n [\n [\n [[1.0, 5.0], [1.0, 0.0], [5.0, 12.0], [5.0, 5.0]],\n [[5.0, 0.0], [5.0, 0.0], [12.0, 9.0], [0.0, 8.0]],\n ]\n 
]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_nearest],\n name=\"test_gridsample_volumetric_nearest_align_corners_0\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n align_corners=1,\n)\n# Y shape, [N, C, D_out, H_out, W_out] - [1, 1, 2, 4, 2]\nY_nearest = np.array(\n [\n [\n [\n [[1.0, 5.0], [1.0, 2.0], [5.0, 12.0], [5.0, 5.0]],\n [[5.0, 7.0], [5.0, 8.0], [12.0, 9.0], [12.0, 8.0]],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_nearest],\n name=\"test_gridsample_volumetric_nearest_align_corners_1\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n align_corners=0,\n)\n# Y shape, [N, C, D_out, H_out, W_out] - [1, 1, 2, 4, 2]\nY_bilinear = np.array(\n [\n [\n [\n [\n [0.1250, 3.4000],\n [2.0000, 0.4500],\n [4.7000, 10.9000],\n [6.5000, 3.0000],\n ],\n [\n [6.5000, 1.7500],\n [4.7000, 3.3000],\n [11.0000, 2.5200],\n [1.5000, 5.4900],\n ],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bilinear],\n name=\"test_gridsample_volumetric_bilinear_align_corners_0\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n align_corners=1,\n)\n# Y shape, [N, C, D_out, H_out, W_out] - [1, 1, 2, 4, 2]\nY_bilinear = np.array(\n [\n [\n [\n [\n [1.0000, 6.7000],\n [3.7500, 2.4000],\n [5.4000, 9.3000],\n [6.5000, 6.0000],\n ],\n [\n [6.5000, 7.0000],\n [5.4000, 6.6000],\n [9.2500, 8.4000],\n [12.0000, 6.1000],\n ],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bilinear],\n name=\"test_gridsample_volumetric_bilinear_align_corners_1\",\n)" + } + ] + }, + { + "name": "GridSample", + "module": "ai.onnx", + "version": 20, + "description": "Given an input `X` and a flow-field `grid`, computes the output `Y` using `X` values and pixel locations from the `grid`.\nFor spatial input `X` with shape (N, C, H, W), the `grid` will have shape (N, H_out, W_out, 2),\nthe output `Y` will have shape (N, C, H_out, W_out). 
For volumetric input `X` with shape (N, C, D, H, W),\nthe `grid` will have shape (N, D_out, H_out, W_out, 3), the output `Y` will have shape (N, C, D_out, H_out, W_out).\nMore generally, for an input `X` of rank r+2 with shape (N, C, d1, d2, ..., dr),\nthe `grid` will have shape (N, D1_out, D2_out, ..., Dr_out, r), the output `Y` will have shape (N, C, D1_out, D2_out, ..., Dr_out).\n\nThe tensor `X` contains values at centers of square pixels (voxels, etc) locations such as (n, c, d1_in, d2_in, ..., dr_in).\nThe (n, d1_out, d2_out, ..., dr_out, :) values from the tensor `grid` are the normalized positions for interpolating the values\nat the (n, c, d1_out, d2_out, ..., dr_out) locations from the output tensor `Y` using a specified interpolation method (the mode)\nand a padding mode (for `grid` positions falling outside the 2-dimensional image).\n\nFor example, the values in `grid[n, h_out, w_out, :]` are size-2 vectors specifying normalized positions in the 2-dimensional space of `X`.\nThey are used to interpolate output values of `Y[n, c, h_out, w_out]`.\n\nThe GridSample operator is often used in doing grid generator and sampler in the\n[Spatial Transformer Networks](https://arxiv.org/abs/1506.02025).\nSee also in [torch.nn.functional.grid_sample](https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html).\n", + "attributes": [ + { + "name": "align_corners", + "type": "int64", + "required": false, + "description": "If align_corners=1, the extrema (-1 and 1) are considered as referring to the center points of the input's corner pixels (voxels, etc.). If align_corners=0, they are instead considered as referring to the corner points of the input's corner pixels (voxels, etc.), making the sampling more resolution agnostic." + }, + { + "name": "mode", + "type": "string", + "required": false, + "default": "linear", + "description": "Three interpolation modes: linear (default), nearest and cubic. The \"linear\" mode includes linear and N-linear interpolation modes depending on the number of spatial dimensions of the input tensor (i.e. linear for 1 spatial dimension, bilinear for 2 spatial dimensions, etc.). The \"cubic\" mode also includes N-cubic interpolation modes following the same rules. The \"nearest\" mode rounds to the nearest even index when the sampling point falls halfway between two indices." + }, + { + "name": "padding_mode", + "type": "string", + "required": false, + "default": "zeros", + "description": "Support padding modes for outside grid values: `zeros`(default), `border`, `reflection`. zeros: use 0 for out-of-bound grid locations, border: use border values for out-of-bound grid locations, reflection: use values at locations reflected by the border for out-of-bound grid locations. If index 0 represents the margin pixel, the reflected value at index -1 will be the same as the value at index 1. For location far away from the border, it will keep being reflected until becoming in bound. If pixel location x = -3.5 reflects by border -1 and becomes x' = 1.5, then reflects by border 1 and becomes x'' = 0.5." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input tensor of rank r+2 that has shape (N, C, D1, D2, ..., Dr), where N is the batch size, C is the number of channels, D1, D2, ..., Dr are the spatial dimensions." 
+ }, + { + "name": "grid", + "type": "T2", + "description": "Input offset of shape (N, D1_out, D2_out, ..., Dr_out, r), where D1_out, D2_out, ..., Dr_out are the spatial dimensions of the grid and output, and r is the number of spatial dimensions. Grid specifies the sampling locations normalized by the input spatial dimensions. Therefore, it should have most values in the range of [-1, 1]. If the grid has values outside the range of [-1, 1], the corresponding outputs will be handled as defined by padding_mode. Following computer vision convention, the coordinates in the length-r location vector are listed from the innermost tensor dimension to the outermost, the opposite of regular tensor indexing." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T1", + "description": "Output tensor of rank r+2 that has shape (N, C, D1_out, D2_out, ..., Dr_out) of the sampled values. For integer input types, intermediate values are computed as floating point and cast to integer at the end." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input `X` and output `Y` types to all tensor types.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain grid types to float tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "gridsample", + "code": "node = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n padding_mode=\"zeros\",\n align_corners=0,\n)\n# X shape, [N, C, H, W] - [1, 1, 4, 4]\nX = np.array(\n [\n [\n [\n [0.0, 1.0, 2.0, 3.0],\n [4.0, 5.0, 6.0, 7.0],\n [8.0, 9.0, 10.0, 11.0],\n [12.0, 13.0, 14.0, 15.0],\n ]\n ]\n ],\n dtype=np.float32,\n)\n# Grid shape, [N, H_out, W_out, 2] - [1, 6, 6, 2]\nGrid = np.array(\n [\n [\n [\n [-1.0000, -1.0000],\n [-0.6000, -1.0000],\n [-0.2000, -1.0000],\n [0.2000, -1.0000],\n [0.6000, -1.0000],\n [1.0000, -1.0000],\n ],\n [\n [-1.0000, -0.6000],\n [-0.6000, -0.6000],\n [-0.2000, -0.6000],\n [0.2000, -0.6000],\n [0.6000, -0.6000],\n [1.0000, -0.6000],\n ],\n [\n [-1.0000, -0.2000],\n [-0.6000, -0.2000],\n [-0.2000, -0.2000],\n [0.2000, -0.2000],\n [0.6000, -0.2000],\n [1.0000, -0.2000],\n ],\n [\n [-1.0000, 0.2000],\n [-0.6000, 0.2000],\n [-0.2000, 0.2000],\n [0.2000, 0.2000],\n [0.6000, 0.2000],\n [1.0000, 0.2000],\n ],\n [\n [-1.0000, 0.6000],\n [-0.6000, 0.6000],\n [-0.2000, 0.6000],\n [0.2000, 0.6000],\n [0.6000, 0.6000],\n [1.0000, 0.6000],\n ],\n [\n [-1.0000, 1.0000],\n [-0.6000, 1.0000],\n [-0.2000, 1.0000],\n [0.2000, 1.0000],\n [0.6000, 1.0000],\n [1.0000, 1.0000],\n ],\n ]\n ],\n dtype=np.float32,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 6, 6]\nY = np.array(\n [\n [\n [\n [0.0000, 0.1500, 0.5500, 0.9500, 1.3500, 0.7500],\n [0.6000, 1.5000, 2.3000, 3.1000, 3.9000, 2.1000],\n [2.2000, 4.7000, 5.5000, 6.3000, 7.1000, 3.7000],\n [3.8000, 7.9000, 8.7000, 9.5000, 10.3000, 5.3000],\n [5.4000, 11.1000, 11.9000, 12.7000, 13.5000, 6.9000],\n [3.0000, 6.1500, 6.5500, 6.9500, 7.3500, 3.7500],\n ]\n ]\n ],\n dtype=np.float32,\n)\nexpect(node, inputs=[X, Grid], outputs=[Y], 
name=\"test_gridsample\")" + }, + { + "summary": "gridsample_mode_aligncorners", + "code": "# X shape, [N, C, H, W] - [1, 1, 3, 2]\nX = np.array(\n [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],\n dtype=np.float32,\n)\n# Grid shape, [N, H_out, W_out, 2] - [1, 2, 4, 2]\nGrid = np.array(\n [\n [\n [\n [-1.0000, -1.0000],\n [-0.5000, -0.5000],\n [-0.2000, -0.2000],\n [0.0000, 0.0000],\n ],\n [\n [0.0000, 0.0000],\n [-0.2000, -0.2000],\n [0.5000, 0.5000],\n [1.0000, 1.0000],\n ],\n ]\n ],\n dtype=np.float32,\n)\n\n# setting mode = 'bilinear', default align_corners = 0\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bilinear = np.array(\n [[[[0.0000, 0.5000, 1.7000, 2.5000], [2.5000, 1.7000, 4.5000, 1.2500]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bilinear],\n name=\"test_gridsample_bilinear\",\n)\n\n# setting mode = 'bilinear', align_corners = 1\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n align_corners=1,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_align_corners = np.array(\n [[[[0.0000, 1.2500, 2.0000, 2.5000], [2.5000, 2.0000, 3.7500, 5.0000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_align_corners],\n name=\"test_gridsample_aligncorners_true\",\n)\n\n# setting mode = 'nearest'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_nearest = np.array(\n [[[[0.0, 0.0, 2.0, 2.0], [2.0, 2.0, 5.0, 0.0]]]],\n dtype=np.float32,\n)\n\nexpect(\n node, inputs=[X, Grid], outputs=[Y_nearest], name=\"test_gridsample_nearest\"\n)\n\n# setting mode = 'bicubic'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bicubic = np.array(\n [[[[-0.1406, 0.3828, 1.7556, 2.9688], [2.9688, 1.7556, 5.1445, 1.3906]]]],\n dtype=np.float32,\n)\n\nexpect(\n node, inputs=[X, Grid], outputs=[Y_bicubic], name=\"test_gridsample_bicubic\"\n)\n\n# ============================================================================\n# Additional tests\n# The reference output tensors were generated using PyTorch 2.0.\nGrid = np.array(\n [\n [\n [[-1.0, -0.8], [-0.6, -0.5], [-0.1, -0.2], [0.7, 0.0]],\n [[0.0, 0.4], [0.2, -0.2], [-0.3, 0.5], [-1.0, 1.0]],\n ]\n ],\n dtype=np.float32,\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n align_corners=0,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_nearest = np.array(\n [[[[0.0, 0.0, 2.0, 3.0], [4.0, 3.0, 4.0, 4.0]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_nearest],\n name=\"test_gridsample_nearest_align_corners_0_additional_1\",\n)\n\n# setting mode = 'nearest'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n align_corners=1,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_nearest = np.array(\n [[[[0.0, 0.0, 2.0, 3.0], [2.0, 3.0, 4.0, 4.0]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_nearest],\n name=\"test_gridsample_nearest_align_corners_1_additional_1\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n 
mode=\"linear\",\n align_corners=0,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bilinear = np.array(\n [[[[0.0000, 0.4500, 1.8000, 2.4000], [3.7000, 2.1000, 3.7000, 1.0000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bilinear],\n name=\"test_gridsample_bilinear_align_corners_0_additional_1\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n align_corners=1,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bilinear = np.array(\n [[[[0.4000, 1.2000, 2.0500, 2.8500], [3.3000, 2.2000, 3.3500, 4.0000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bilinear],\n name=\"test_gridsample_bilinear_align_corners_1_additional_1\",\n)\n\n# These two new bicubic tests produces slightly higher error ~5e-5\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n align_corners=0,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bicubic = np.array(\n [\n [\n [\n [-0.173250, 0.284265, 1.923106, 2.568000],\n [5.170375, 2.284414, 4.744844, 1.046875],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bicubic],\n name=\"test_gridsample_bicubic_align_corners_0_additional_1\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n align_corners=1,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bicubic = np.array(\n [\n [\n [\n [0.304001, 1.128750, 2.266270, 3.144844],\n [4.531500, 2.455360, 4.599819, 4.000000],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bicubic],\n name=\"test_gridsample_bicubic_align_corners_1_additional_1\",\n)" + }, + { + "summary": "gridsample_paddingmode", + "code": "# X shape, [N, C, H, W] - [1, 1, 3, 2]\nX = np.array(\n [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],\n dtype=np.float32,\n)\n# Grid shape, [N, H_out, W_out, 2] - [1, 2, 4, 2]\nGrid = np.array(\n [\n [\n [\n [-10.0000, -10.0000],\n [-5.0000, -5.0000],\n [-0.2000, -0.2000],\n [10.0000, 10.0000],\n ],\n [\n [10.0000, 10.0000],\n [-0.2000, -0.2000],\n [5.0000, 5.0000],\n [10.0000, 10.0000],\n ],\n ]\n ],\n dtype=np.float32,\n)\n\n# setting padding_mode = 'zeros'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n padding_mode=\"zeros\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_zeros = np.array(\n [[[[0.0000, 0.0000, 1.7000, 0.0000], [0.0000, 1.7000, 0.0000, 0.0000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_zeros],\n name=\"test_gridsample_zeros_padding\",\n)\n\n# setting padding_mode = 'border'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n padding_mode=\"border\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_border = np.array(\n [[[[0.0000, 0.0000, 1.7000, 5.0000], [5.0000, 1.7000, 5.0000, 5.0000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_border],\n name=\"test_gridsample_border_padding\",\n)\n\n# setting padding_mode = 'reflection'\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n padding_mode=\"reflection\",\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_reflection = np.array(\n [[[[2.5000, 0.0000, 1.7000, 2.5000], [2.5000, 1.7000, 5.0000, 2.5000]]]],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n 
outputs=[Y_reflection],\n name=\"test_gridsample_reflection_padding\",\n)" + }, + { + "summary": "volumeetric_gridsample_mode_aligncorners", + "code": "X = np.array(\n [\n [\n [\n [[1.0, 2.0], [3.0, 4.0]],\n [[5.0, 6.0], [7.0, 8.0]],\n [[9.0, 10.0], [11.0, 12.0]],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nGrid = np.array(\n [\n [\n [\n [[-1.0, -1.0, -1.0], [-1.0, -0.5, 0.3]],\n [[-0.5, -0.5, -0.5], [1.0, -0.6, -1.0]],\n [[-0.2, -0.2, -0.2], [0.4, 0.2, 0.6]],\n [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]],\n ],\n [\n [[0.0, 0.0, 0.0], [-1.0, 1.0, 0.0]],\n [[-0.2, -0.2, -0.2], [1.0, 0.4, -0.2]],\n [[0.5, 0.5, 0.5], [-1.0, -0.8, 0.8]],\n [[1.0, 1.0, 1.0], [0.4, 0.6, -0.3]],\n ],\n ]\n ],\n dtype=np.float32,\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n align_corners=0,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_nearest = np.array(\n [\n [\n [\n [[1.0, 5.0], [1.0, 0.0], [5.0, 12.0], [5.0, 5.0]],\n [[5.0, 0.0], [5.0, 0.0], [12.0, 9.0], [0.0, 8.0]],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_nearest],\n name=\"test_gridsample_volumetric_nearest_align_corners_0\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n align_corners=1,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_nearest = np.array(\n [\n [\n [\n [[1.0, 5.0], [1.0, 2.0], [5.0, 12.0], [5.0, 5.0]],\n [[5.0, 7.0], [5.0, 8.0], [12.0, 9.0], [12.0, 8.0]],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_nearest],\n name=\"test_gridsample_volumetric_nearest_align_corners_1\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n align_corners=0,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bilinear = np.array(\n [\n [\n [\n [\n [0.1250, 3.4000],\n [2.0000, 0.4500],\n [4.7000, 10.9000],\n [6.5000, 3.0000],\n ],\n [\n [6.5000, 1.7500],\n [4.7000, 3.3000],\n [11.0000, 2.5200],\n [1.5000, 5.4900],\n ],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bilinear],\n name=\"test_gridsample_volumetric_bilinear_align_corners_0\",\n)\n\nnode = onnx.helper.make_node(\n \"GridSample\",\n inputs=[\"X\", \"Grid\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n align_corners=1,\n)\n# Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]\nY_bilinear = np.array(\n [\n [\n [\n [\n [1.0000, 6.7000],\n [3.7500, 2.4000],\n [5.4000, 9.3000],\n [6.5000, 6.0000],\n ],\n [\n [6.5000, 7.0000],\n [5.4000, 6.6000],\n [9.2500, 8.4000],\n [12.0000, 6.1000],\n ],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, Grid],\n outputs=[Y_bilinear],\n name=\"test_gridsample_volumetric_bilinear_align_corners_1\",\n)" + } + ] + }, + { + "name": "GroupNormalization", + "module": "ai.onnx", + "version": 18, + "description": "A GroupNormalization function. Carries out group normalization as described in\nthe paper https://arxiv.org/abs/1803.08494\n\nThis operator transforms input according to\n```\ny = scale * (x - mean) / sqrt(variance + epsilon) + bias,\n```\nwhere the mean and variance are computed per instance per group of channels, and\n`scale` and `bias` should be specified for each group of channels. 
The number of\nchannels `C` should be divisible by the number of groups `num_groups` so that there are\nan equal number of channels per group.\n\nWhen the number of groups is the same as the number of channels, this operator is\nequivalent to InstanceNormalization. When there is only one group, this operator\nis equivalent to LayerNormalization.\n", + "attributes": [ + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero." + }, + { + "name": "num_groups", + "type": "int64", + "required": true, + "description": "The number of groups of channels. It should be a divisor of the number of channels `C`." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor. Dimensions for image cases are `(N x C x H x W)`, where `N` is the batch size, `C` is the number of channels, and `H` and `W` are the height and width of the data. Statistics are computed for every group of channels over `C`, `H`, and `W`. For non-image cases, the dimensions are in the form of `(N x C x D1 x D2 ... Dn)`." + }, + { + "name": "scale", + "type": "T", + "description": "Scale tensor of shape `(num_groups)`." + }, + { + "name": "bias", + "type": "T", + "description": "Bias tensor of shape `(num_groups)`." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "The output tensor of the same shape as `X`." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "epsilon", + "code": "c = 4\nnum_groups = 2\nx = np.random.randn(3, c, 2, 2).astype(np.float32)\nscale = np.random.randn(c).astype(np.float32)\nbias = np.random.randn(c).astype(np.float32)\nepsilon = 1e-2\ny = _group_normalization(x, num_groups, scale, bias, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"GroupNormalization\",\n inputs=[\"x\", \"scale\", \"bias\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n num_groups=num_groups,\n)\n\nexpect(\n node,\n inputs=[x, scale, bias],\n outputs=[y],\n name=\"test_group_normalization_epsilon\",\n)" + }, + { + "summary": "groupnormalization", + "code": "c = 4\nnum_groups = 2\nx = np.random.randn(3, c, 2, 2).astype(np.float32)\nscale = np.random.randn(c).astype(np.float32)\nbias = np.random.randn(c).astype(np.float32)\ny = _group_normalization(x, num_groups, scale, bias).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"GroupNormalization\",\n inputs=[\"x\", \"scale\", \"bias\"],\n outputs=[\"y\"],\n num_groups=num_groups,\n)\n\nexpect(\n node,\n inputs=[x, scale, bias],\n outputs=[y],\n name=\"test_group_normalization_example\",\n)" + } + ] + }, + { + "name": "GroupNormalization", + "module": "ai.onnx", + "version": 21, + "description": "A GroupNormalization function. Carries out group normalization as described in\nthe paper https://arxiv.org/abs/1803.08494\n\nThis operator transforms input according to\n```\ny = scale * (x - mean) / sqrt(variance + epsilon) + bias,\n```\nwhere the mean and variance are computed per instance per group of channels, and\n`scale` and `bias` should be specified for each group of channels. 
The number of\nchannels `C` should be divisible by the number of groups `num_groups` so that there are\nan equal number of channels per group.\n\nThe overall computation has two stages: the first stage normalizes the elements to\nhave zero mean and unit variance for each instance in each group, and the second\nstage scales and shifts the results of the first stage. The floating-point precision\nused in the first stage is determined by the `stash_type` attribute. For example,\nif `stash_type` is 1, the operator casts all input variables to 32-bit float,\nperforms the computation, and finally casts the normalized results back to the\noriginal type of `X`. The second stage does not depend on `stash_type`.\n\nWhen the number of groups is the same as the number of channels, this operator is\nequivalent to InstanceNormalization. When there is only one group, this operator\nis equivalent to LayerNormalization.\n", + "attributes": [ + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero." + }, + { + "name": "num_groups", + "type": "int64", + "required": true, + "description": "The number of groups of channels. It should be a divisor of the number of channels `C`." + }, + { + "name": "stash_type", + "type": "int64", + "required": false, + "default": 1, + "description": "The floating-point precision used in stage one of the computation." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor. Dimensions for image cases are `(N x C x H x W)`, where `N` is the batch size, `C` is the number of channels, and `H` and `W` are the height and width of the data. Statistics are computed for every group of channels over `C`, `H`, and `W`. For non-image cases, the dimensions are in the form of `(N x C x D1 x D2 ... Dn)`." + }, + { + "name": "scale", + "type": "T", + "description": "Scale tensor of shape `(C)`." + }, + { + "name": "bias", + "type": "T", + "description": "Bias tensor of shape `(C)`." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "The output tensor of the same shape as `X`." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "epsilon", + "code": "c = 4\nnum_groups = 2\nx = np.random.randn(3, c, 2, 2).astype(np.float32)\nscale = np.random.randn(c).astype(np.float32)\nbias = np.random.randn(c).astype(np.float32)\nepsilon = 1e-2\ny = _group_normalization(x, num_groups, scale, bias, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"GroupNormalization\",\n inputs=[\"x\", \"scale\", \"bias\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n num_groups=num_groups,\n)\n\nexpect(\n node,\n inputs=[x, scale, bias],\n outputs=[y],\n name=\"test_group_normalization_epsilon\",\n)" + }, + { + "summary": "groupnormalization", + "code": "c = 4\nnum_groups = 2\nx = np.random.randn(3, c, 2, 2).astype(np.float32)\nscale = np.random.randn(c).astype(np.float32)\nbias = np.random.randn(c).astype(np.float32)\ny = _group_normalization(x, num_groups, scale, bias).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"GroupNormalization\",\n inputs=[\"x\", \"scale\", \"bias\"],\n outputs=[\"y\"],\n num_groups=num_groups,\n)\n\nexpect(\n node,\n inputs=[x, scale, bias],\n outputs=[y],\n name=\"test_group_normalization_example\",\n)" + } + ] + }, + { + "name": "HammingWindow", + "module": "ai.onnx", + "version": 17, + "description": "Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106.\n", + "attributes": [ + { + "name": "output_datatype", + "type": "int64", + "required": false, + "default": 1, + "description": "The data type of the output tensor. Strictly must be one of the values from DataType enum in TensorProto whose values correspond to T2. The default value is 1 = FLOAT. " + }, + { + "name": "periodic", + "type": "int64", + "required": false, + "default": 1, + "description": "If 1, returns a window to be used as a periodic function. If 0, returns a symmetric window. When 'periodic' is specified, the operator computes a window of length size + 1 and returns the first size points. The default value is 1. " + } + ], + "inputs": [ + { + "name": "size", + "type": "T1", + "description": "A scalar value indicating the length of the window." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "A Hamming window with length: size. The output has the shape: [size]." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain the input size to int32 or int64 tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + }, + { + "description": "Constrain output types to numeric tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "hammingwindow", + "code": "# Test periodic window\nnode = onnx.helper.make_node(\n \"HammingWindow\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nsize = np.int32(10)\na0 = 25 / 46\na1 = 1 - a0\ny = a0 - a1 * np.cos(2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size)\nexpect(node, inputs=[size], outputs=[y], name=\"test_hammingwindow\")\n\n# Test symmetric window\nnode = onnx.helper.make_node(\n \"HammingWindow\", inputs=[\"x\"], outputs=[\"y\"], periodic=0\n)\nsize = np.int32(10)\na0 = 25 / 46\na1 = 1 - a0\ny = a0 - a1 * np.cos(\n 2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1)\n)\nexpect(node, inputs=[size], outputs=[y], name=\"test_hammingwindow_symmetric\")" + } + ] + }, + { + "name": "HannWindow", + "module": "ai.onnx", + "version": 17, + "description": "Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106.\n", + "attributes": [ + { + "name": "output_datatype", + "type": "int64", + "required": false, + "default": 1, + "description": "The data type of the output tensor. Strictly must be one of the values from DataType enum in TensorProto whose values correspond to T2. The default value is 1 = FLOAT. " + }, + { + "name": "periodic", + "type": "int64", + "required": false, + "default": 1, + "description": "If 1, returns a window to be used as a periodic function. If 0, returns a symmetric window. When 'periodic' is specified, hann computes a window of length size + 1 and returns the first size points. The default value is 1. " + } + ], + "inputs": [ + { + "name": "size", + "type": "T1", + "description": "A scalar value indicating the length of the window." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "A Hann window with length: size. The output has the shape: [size]." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain the input size to int32 or int64 tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + }, + { + "description": "Constrain output types to numeric tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "hannwindow", + "code": "# Test periodic window\nnode = onnx.helper.make_node(\n \"HannWindow\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nsize = np.int32(10)\na0 = 0.5\na1 = 0.5\ny = a0 - a1 * np.cos(2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size)\nexpect(node, inputs=[size], outputs=[y], name=\"test_hannwindow\")\n\n# Test symmetric window\nnode = onnx.helper.make_node(\n \"HannWindow\", inputs=[\"x\"], outputs=[\"y\"], periodic=0\n)\nsize = np.int32(10)\na0 = 0.5\na1 = 0.5\ny = a0 - a1 * np.cos(\n 2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1)\n)\nexpect(node, inputs=[size], outputs=[y], name=\"test_hannwindow_symmetric\")" + } + ] + }, + { + "name": "HardSigmoid", + "module": "ai.onnx", + "version": 1, + "description": "HardSigmoid takes one input data (Tensor) and produces one output data\n(Tensor) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)),\nis applied to the tensor elementwise.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 0.20000000298023224, + "description": "Value of alpha; defaults to 0.2" + }, + { + "name": "beta", + "type": "float32", + "required": false, + "default": 0.5, + "description": "Value of beta; defaults to 0.5" + }, + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "hardsigmoid", + "code": "node = onnx.helper.make_node(\n \"HardSigmoid\", inputs=[\"x\"], outputs=[\"y\"], alpha=0.5, beta=0.6\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.clip(x * 0.5 + 0.6, 0, 1) # expected output [0.1, 0.6, 1.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardsigmoid_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x * 0.5 + 0.6, 0, 1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardsigmoid\")" + }, + { + "summary": "hardsigmoid_default", + "code": "default_alpha = 0.2\ndefault_beta = 0.5\nnode = onnx.helper.make_node(\n \"HardSigmoid\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x * default_alpha + default_beta, 0, 1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardsigmoid_default\")" + } + ], + "category": "Activation" + }, + { + "name": "HardSigmoid", + "module": "ai.onnx", + "version": 6, + "description": "HardSigmoid takes one input data (Tensor) and produces one output data\n(Tensor) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)),\nis applied to the tensor elementwise.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 0.20000000298023224, + "description": "Value of alpha." + }, + { + "name": "beta", + "type": "float32", + "required": false, + "default": 0.5, + "description": "Value of beta." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "hardsigmoid", + "code": "node = onnx.helper.make_node(\n \"HardSigmoid\", inputs=[\"x\"], outputs=[\"y\"], alpha=0.5, beta=0.6\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.clip(x * 0.5 + 0.6, 0, 1) # expected output [0.1, 0.6, 1.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardsigmoid_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x * 0.5 + 0.6, 0, 1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardsigmoid\")" + }, + { + "summary": "hardsigmoid_default", + "code": "default_alpha = 0.2\ndefault_beta = 0.5\nnode = onnx.helper.make_node(\n \"HardSigmoid\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x * default_alpha + default_beta, 0, 1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardsigmoid_default\")" + } + ], + "category": "Activation" + }, + { + "name": "HardSwish", + "module": "ai.onnx", + "version": 14, + "description": "HardSwish takes one input data (Tensor) and produces one output data (Tensor) where\nthe HardSwish function, y = x * max(0, min(1, alpha * x + beta)) = x * HardSigmoid(x),\nwhere alpha = 1/6 and beta = 0.5, is applied to the tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "hardswish", + "code": "node = onnx.helper.make_node(\n \"HardSwish\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = hardswish(x)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardswish\")" + } + ] + }, + { + "name": "Hardmax", + "module": "ai.onnx", + "version": 1, + "description": "The operator computes the hardmax (1 for the first maximum value, and 0 for all others) values for each layer in the batch\n of the given input. The input is a 2-D tensor (Tensor) of size\n(batch_size x input_feature_dimensions). The output tensor has the same shape\nand contains the hardmax values of the corresponding input.\n\nInput does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... 
* a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size" + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output values with the same shape as input tensor (the original size without coercion)." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "hardmax", + "code": "node = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([[3, 0, 1, 2], [2, 5, 1, 0], [0, 1, 3, 2], [0, 1, 2, 3]]).astype(\n np.float32\n)\n# expect result:\n# [[1. 0. 0. 0.]\n# [0. 1. 0. 0.]\n# [0. 0. 1. 0.]\n# [0. 0. 0. 1.]]\ny = hardmax(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_example\")\n\n# For multiple occurrences of the maximal values, the first occurrence is selected for one-hot output\nx = np.array([[3, 3, 3, 1]]).astype(np.float32)\n# expect result:\n# [[1, 0, 0, 0]]\ny = hardmax(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_one_hot\")" + }, + { + "summary": "hardmax_axis", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=0,\n)\ny = hardmax(x, axis=0)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_axis_0\")\n\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=1,\n)\ny = hardmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_axis_1\")\n\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=2,\n)\ny = hardmax(x, axis=2)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_axis_2\")\n\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=-1,\n)\ny = hardmax(x, axis=-1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_negative_axis\")\n\n# default axis is -1\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_default_axis\")" + } + ] + }, + { + "name": "Hardmax", + "module": "ai.onnx", + "version": 11, + "description": "The operator computes the hardmax (1 for the first maximum value, and 0 for all others) values for each layer in the batch\n of the given input.\n\nThe input does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... 
* a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors. The output tensor has the same shape\nand contains the hardmax values of the corresponding input.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output values with the same shape as input tensor (the original size without coercion)." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "hardmax", + "code": "node = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([[3, 0, 1, 2], [2, 5, 1, 0], [0, 1, 3, 2], [0, 1, 2, 3]]).astype(\n np.float32\n)\n# expect result:\n# [[1. 0. 0. 0.]\n# [0. 1. 0. 0.]\n# [0. 0. 1. 0.]\n# [0. 0. 0. 1.]]\ny = hardmax(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_example\")\n\n# For multiple occurrences of the maximal values, the first occurrence is selected for one-hot output\nx = np.array([[3, 3, 3, 1]]).astype(np.float32)\n# expect result:\n# [[1, 0, 0, 0]]\ny = hardmax(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_one_hot\")" + }, + { + "summary": "hardmax_axis", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=0,\n)\ny = hardmax(x, axis=0)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_axis_0\")\n\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=1,\n)\ny = hardmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_axis_1\")\n\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=2,\n)\ny = hardmax(x, axis=2)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_axis_2\")\n\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=-1,\n)\ny = hardmax(x, axis=-1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_negative_axis\")\n\n# default axis is -1\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_default_axis\")" + } + ] + }, + { + "name": "Hardmax", + "module": "ai.onnx", + "version": 13, + "description": "The operator computes the hardmax values for the given input:\n\n Hardmax(element in input, axis) = 1 if the element is the first maximum value along the specified axis, 0 otherwise\n\nThe \"axis\" attribute indicates the dimension along which Hardmax\nwill be performed. 
The output tensor has the same shape\nand contains the Hardmax values of the corresponding input.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": -1, + "description": "\nDescribes the dimension Hardmax will be performed on.\nNegative value means counting dimensions\nfrom the back. Accepted range is [-r, r-1] where r = rank(input).\n" + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The input tensor of rank >= axis." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output values with the same shape as the input tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "hardmax", + "code": "node = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([[3, 0, 1, 2], [2, 5, 1, 0], [0, 1, 3, 2], [0, 1, 2, 3]]).astype(\n np.float32\n)\n# expect result:\n# [[1. 0. 0. 0.]\n# [0. 1. 0. 0.]\n# [0. 0. 1. 0.]\n# [0. 0. 0. 1.]]\ny = hardmax(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_example\")\n\n# For multiple occurrences of the maximal values, the first occurrence is selected for one-hot output\nx = np.array([[3, 3, 3, 1]]).astype(np.float32)\n# expect result:\n# [[1, 0, 0, 0]]\ny = hardmax(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_one_hot\")" + }, + { + "summary": "hardmax_axis", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=0,\n)\ny = hardmax(x, axis=0)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_axis_0\")\n\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=1,\n)\ny = hardmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_axis_1\")\n\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=2,\n)\ny = hardmax(x, axis=2)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_axis_2\")\n\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=-1,\n)\ny = hardmax(x, axis=-1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_negative_axis\")\n\n# default axis is -1\nnode = onnx.helper.make_node(\n \"Hardmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_hardmax_default_axis\")" + } + ] + }, + { + "name": "Identity", + "module": "ai.onnx", + "version": 1, + "description": "Identity operator", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor to copy input into." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "identity", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity\")" + }, + { + "summary": "identity_opt", + "code": "ten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"opt_in\"], outputs=[\"opt_out\"]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\n\nexpect(\n identity_node,\n inputs=[x],\n outputs=[x],\n name=\"test_identity_opt\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[opt_in_tp],\n output_type_protos=[opt_in_tp],\n)" + }, + { + "summary": "sequence", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = [\n np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n np.array(\n [\n [\n [\n [2, 3],\n [1, 5],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n]\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity_sequence\")" + } + ] + }, + { + "name": "Identity", + "module": "ai.onnx", + "version": 13, + "description": "Identity operator", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor to copy input into." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "identity", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity\")" + }, + { + "summary": "identity_opt", + "code": "ten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"opt_in\"], outputs=[\"opt_out\"]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\n\nexpect(\n identity_node,\n inputs=[x],\n outputs=[x],\n name=\"test_identity_opt\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[opt_in_tp],\n output_type_protos=[opt_in_tp],\n)" + }, + { + "summary": "sequence", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = [\n np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n np.array(\n [\n [\n [\n [2, 3],\n [1, 5],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n]\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity_sequence\")" + } + ] + }, + { + "name": "Identity", + "module": "ai.onnx", + "version": 14, + "description": "Identity operator", + "inputs": [ + { + "name": "input", + "type": "V", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "V", + "description": "Tensor to copy input into." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor and sequence types.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + } + ], + "examples": [ + { + "summary": "identity", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity\")" + }, + { + "summary": "identity_opt", + "code": "ten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"opt_in\"], outputs=[\"opt_out\"]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\n\nexpect(\n identity_node,\n inputs=[x],\n outputs=[x],\n name=\"test_identity_opt\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[opt_in_tp],\n output_type_protos=[opt_in_tp],\n)" + }, + { + "summary": "sequence", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = [\n np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n np.array(\n [\n [\n [\n [2, 3],\n [1, 5],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n]\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity_sequence\")" + } + ] + }, + { + "name": "Identity", + "module": "ai.onnx", + "version": 16, + "description": "Identity operator", + "inputs": [ + { + "name": "input", + "type": "V", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "V", + "description": "Tensor to copy input into." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor, sequence, and optional types.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))", + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))" + ] + } + ], + "examples": [ + { + "summary": "identity", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity\")" + }, + { + "summary": "identity_opt", + "code": "ten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"opt_in\"], outputs=[\"opt_out\"]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\n\nexpect(\n identity_node,\n inputs=[x],\n outputs=[x],\n name=\"test_identity_opt\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[opt_in_tp],\n output_type_protos=[opt_in_tp],\n)" + }, + { + "summary": "sequence", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = [\n np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n np.array(\n [\n [\n [\n [2, 3],\n [1, 5],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n]\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity_sequence\")" + } + ] + }, + { + "name": "Identity", + "module": "ai.onnx", + "version": 19, + "description": "Identity operator", + "inputs": [ + { + "name": "input", + "type": "V", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "V", + "description": "Tensor to 
copy input into." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor, sequence, and optional types.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))", + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))" + ] + } + ], + "examples": [ + { + "summary": "identity", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity\")" + }, + { + "summary": "identity_opt", + "code": "ten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"opt_in\"], outputs=[\"opt_out\"]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\n\nexpect(\n identity_node,\n inputs=[x],\n outputs=[x],\n name=\"test_identity_opt\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[opt_in_tp],\n output_type_protos=[opt_in_tp],\n)" + }, + { + "summary": "sequence", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = [\n np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n np.array(\n [\n [\n [\n [2, 3],\n [1, 5],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n]\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity_sequence\")" + } + ] + }, + { + "name": "Identity", + "module": "ai.onnx", + "version": 21, + "description": "Identity operator", + "inputs": [ + { + "name": "input", + "type": "V", + "description": "Input tensor" + } + 
], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "V", + "description": "Tensor to copy input into." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor, sequence, and optional types.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))", + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))" + ] + } + ], + "examples": [ + { + "summary": "identity", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity\")" + }, + { + "summary": "identity_opt", + "code": "ten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"opt_in\"], outputs=[\"opt_out\"]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\n\nexpect(\n identity_node,\n inputs=[x],\n outputs=[x],\n name=\"test_identity_opt\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[opt_in_tp],\n output_type_protos=[opt_in_tp],\n)" + }, + { + "summary": "sequence", + "code": "node = onnx.helper.make_node(\n \"Identity\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\ndata = [\n np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n np.array(\n [\n [\n [\n [2, 3],\n [1, 5],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n]\n\nexpect(node, inputs=[data], outputs=[data], name=\"test_identity_sequence\")" + } + ] + }, + { + "name": "If", + "module": 
"ai.onnx", + "version": 1, + "description": "If conditional", + "attributes": [ + { + "name": "else_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is false. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the then_branch." + }, + { + "name": "then_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is true. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the else_branch." + } + ], + "inputs": [ + { + "name": "cond", + "type": "B", + "description": "Condition for the if. The tensor must contain a single element." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "outputs", + "type": "V", + "list": true, + "description": "Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same shape and same data type." + } + ], + "min_output": 1, + "max_output": 2147483647, + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor types", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Only bool", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "if", + "code": "# Given a bool scalar input cond.\n# return constant tensor x if cond is True, otherwise return constant tensor y.\n\nthen_out = onnx.helper.make_tensor_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, [5]\n)\nelse_out = onnx.helper.make_tensor_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, [5]\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([5, 4, 3, 2, 1]).astype(np.float32)\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"then_out\"],\n value=onnx.numpy_helper.from_array(x),\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"else_out\"],\n value=onnx.numpy_helper.from_array(y),\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node], \"then_body\", [], [then_out]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "if_optional", + "code": "# Given a bool scalar input cond, return an empty optional sequence of\n# tensor if True, return an optional sequence with value x\n# (the input optional sequence) otherwise.\n\nten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\n\nthen_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nthen_out_seq_tp = onnx.helper.make_sequence_type_proto(then_out_tensor_tp)\nthen_out_opt_tp = 
onnx.helper.make_optional_type_proto(then_out_seq_tp)\nthen_out = onnx.helper.make_value_info(\"optional_empty\", then_out_opt_tp)\n\nelse_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out_seq_tp = onnx.helper.make_sequence_type_proto(else_out_tensor_tp)\nelse_out_opt_tp = onnx.helper.make_optional_type_proto(else_out_seq_tp)\nelse_out = onnx.helper.make_value_info(\"else_opt\", else_out_opt_tp)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ncond = np.array(0).astype(bool)\nres = compute_if_outputs(x, cond)\n\nopt_empty_in = onnx.helper.make_node(\n \"Optional\", inputs=[], outputs=[\"optional_empty\"], type=seq_in_tp\n)\n\nthen_body = onnx.helper.make_graph([opt_empty_in], \"then_body\", [], [then_out])\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"x\"], outputs=[\"else_seq\"]\n)\n\nelse_optional_seq_node = onnx.helper.make_node(\n \"Optional\", inputs=[\"else_seq\"], outputs=[\"else_opt\"]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node, else_seq_node, else_optional_seq_node],\n \"else_body\",\n [],\n [else_out],\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_opt\",\n output_type_protos=[else_out_opt_tp],\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n)" + }, + { + "summary": "if_seq", + "code": "# Given a bool scalar input cond.\n# return constant sequence x if cond is True, otherwise return constant sequence y.\n\nthen_out = onnx.helper.make_tensor_sequence_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out = onnx.helper.make_tensor_sequence_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ny = [np.array([5, 4, 3, 2, 1]).astype(np.float32)]\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nthen_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"x\"], outputs=[\"then_out\"]\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"y\"],\n value=onnx.numpy_helper.from_array(y[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"y\"], outputs=[\"else_out\"]\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node, then_seq_node], \"then_body\", [], [then_out]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node, else_seq_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + } + ] + }, + { + "name": "If", + "module": "ai.onnx", + "version": 11, + "description": "If conditional", + "attributes": [ + { + "name": "else_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is false. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the then_branch." 
+ }, + { + "name": "then_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is true. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the else_branch." + } + ], + "inputs": [ + { + "name": "cond", + "type": "B", + "description": "Condition for the if. The tensor must contain a single element." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "outputs", + "type": "V", + "list": true, + "description": "Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the first output of `then_branch` is typed float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible." + } + ], + "min_output": 1, + "max_output": 2147483647, + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor types", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Only bool", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "if", + "code": "# Given a bool scalar input cond.\n# return constant tensor x if cond is True, otherwise return constant tensor y.\n\nthen_out = onnx.helper.make_tensor_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, [5]\n)\nelse_out = onnx.helper.make_tensor_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, [5]\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([5, 4, 3, 2, 1]).astype(np.float32)\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"then_out\"],\n value=onnx.numpy_helper.from_array(x),\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"else_out\"],\n value=onnx.numpy_helper.from_array(y),\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node], \"then_body\", [], [then_out]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "if_optional", + "code": "# Given a bool scalar input cond, return an empty optional sequence of\n# tensor if True, return an optional sequence 
with value x\n# (the input optional sequence) otherwise.\n\nten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\n\nthen_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nthen_out_seq_tp = onnx.helper.make_sequence_type_proto(then_out_tensor_tp)\nthen_out_opt_tp = onnx.helper.make_optional_type_proto(then_out_seq_tp)\nthen_out = onnx.helper.make_value_info(\"optional_empty\", then_out_opt_tp)\n\nelse_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out_seq_tp = onnx.helper.make_sequence_type_proto(else_out_tensor_tp)\nelse_out_opt_tp = onnx.helper.make_optional_type_proto(else_out_seq_tp)\nelse_out = onnx.helper.make_value_info(\"else_opt\", else_out_opt_tp)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ncond = np.array(0).astype(bool)\nres = compute_if_outputs(x, cond)\n\nopt_empty_in = onnx.helper.make_node(\n \"Optional\", inputs=[], outputs=[\"optional_empty\"], type=seq_in_tp\n)\n\nthen_body = onnx.helper.make_graph([opt_empty_in], \"then_body\", [], [then_out])\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"x\"], outputs=[\"else_seq\"]\n)\n\nelse_optional_seq_node = onnx.helper.make_node(\n \"Optional\", inputs=[\"else_seq\"], outputs=[\"else_opt\"]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node, else_seq_node, else_optional_seq_node],\n \"else_body\",\n [],\n [else_out],\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_opt\",\n output_type_protos=[else_out_opt_tp],\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n)" + }, + { + "summary": "if_seq", + "code": "# Given a bool scalar input cond.\n# return constant sequence x if cond is True, otherwise return constant sequence y.\n\nthen_out = onnx.helper.make_tensor_sequence_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out = onnx.helper.make_tensor_sequence_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ny = [np.array([5, 4, 3, 2, 1]).astype(np.float32)]\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nthen_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"x\"], outputs=[\"then_out\"]\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"y\"],\n value=onnx.numpy_helper.from_array(y[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"y\"], outputs=[\"else_out\"]\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node, then_seq_node], \"then_body\", [], [then_out]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node, else_seq_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + } + ] + }, + 
{ + "name": "If", + "module": "ai.onnx", + "version": 13, + "description": "If conditional", + "attributes": [ + { + "name": "else_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is false. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the then_branch." + }, + { + "name": "then_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is true. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the else_branch." + } + ], + "inputs": [ + { + "name": "cond", + "type": "B", + "description": "Condition for the if. The tensor must contain a single element." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "outputs", + "type": "V", + "list": true, + "description": "Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the first output of `then_branch` is typed float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible." 
+ } + ], + "min_output": 1, + "max_output": 2147483647, + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor and Sequence types", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + }, + { + "description": "Only bool", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "if", + "code": "# Given a bool scalar input cond.\n# return constant tensor x if cond is True, otherwise return constant tensor y.\n\nthen_out = onnx.helper.make_tensor_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, [5]\n)\nelse_out = onnx.helper.make_tensor_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, [5]\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([5, 4, 3, 2, 1]).astype(np.float32)\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"then_out\"],\n value=onnx.numpy_helper.from_array(x),\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"else_out\"],\n value=onnx.numpy_helper.from_array(y),\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node], \"then_body\", [], [then_out]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "if_optional", + "code": "# Given a bool scalar input cond, return an empty optional sequence of\n# tensor if True, return an optional sequence with value x\n# (the input optional sequence) otherwise.\n\nten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\n\nthen_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nthen_out_seq_tp = onnx.helper.make_sequence_type_proto(then_out_tensor_tp)\nthen_out_opt_tp = onnx.helper.make_optional_type_proto(then_out_seq_tp)\nthen_out = onnx.helper.make_value_info(\"optional_empty\", then_out_opt_tp)\n\nelse_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out_seq_tp = onnx.helper.make_sequence_type_proto(else_out_tensor_tp)\nelse_out_opt_tp = onnx.helper.make_optional_type_proto(else_out_seq_tp)\nelse_out = onnx.helper.make_value_info(\"else_opt\", else_out_opt_tp)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ncond = np.array(0).astype(bool)\nres = compute_if_outputs(x, cond)\n\nopt_empty_in = onnx.helper.make_node(\n \"Optional\", inputs=[], outputs=[\"optional_empty\"], type=seq_in_tp\n)\n\nthen_body = 
onnx.helper.make_graph([opt_empty_in], \"then_body\", [], [then_out])\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"x\"], outputs=[\"else_seq\"]\n)\n\nelse_optional_seq_node = onnx.helper.make_node(\n \"Optional\", inputs=[\"else_seq\"], outputs=[\"else_opt\"]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node, else_seq_node, else_optional_seq_node],\n \"else_body\",\n [],\n [else_out],\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_opt\",\n output_type_protos=[else_out_opt_tp],\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n)" + }, + { + "summary": "if_seq", + "code": "# Given a bool scalar input cond.\n# return constant sequence x if cond is True, otherwise return constant sequence y.\n\nthen_out = onnx.helper.make_tensor_sequence_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out = onnx.helper.make_tensor_sequence_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ny = [np.array([5, 4, 3, 2, 1]).astype(np.float32)]\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nthen_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"x\"], outputs=[\"then_out\"]\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"y\"],\n value=onnx.numpy_helper.from_array(y[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"y\"], outputs=[\"else_out\"]\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node, then_seq_node], \"then_body\", [], [then_out]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node, else_seq_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + } + ] + }, + { + "name": "If", + "module": "ai.onnx", + "version": 16, + "description": "If conditional", + "attributes": [ + { + "name": "else_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is false. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the then_branch." + }, + { + "name": "then_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is true. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the else_branch." + } + ], + "inputs": [ + { + "name": "cond", + "type": "B", + "description": "Condition for the if. The tensor must contain a single element." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "outputs", + "type": "V", + "list": true, + "description": "Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. 
The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes. For example, if in a model file, the first output of `then_branch` is typed float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible." + } + ], + "min_output": 1, + "max_output": 2147483647, + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types up to IRv4.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(bfloat16))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))", + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(bfloat16)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(bfloat16))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))" + ] + }, + { + "description": "Only bool", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "if", + "code": "# Given a bool scalar input cond.\n# return constant tensor x if cond is True, otherwise return constant tensor y.\n\nthen_out = onnx.helper.make_tensor_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, [5]\n)\nelse_out = onnx.helper.make_tensor_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, [5]\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([5, 4, 3, 2, 1]).astype(np.float32)\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"then_out\"],\n
value=onnx.numpy_helper.from_array(x),\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"else_out\"],\n value=onnx.numpy_helper.from_array(y),\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node], \"then_body\", [], [then_out]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "if_optional", + "code": "# Given a bool scalar input cond, return an empty optional sequence of\n# tensor if True, return an optional sequence with value x\n# (the input optional sequence) otherwise.\n\nten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\n\nthen_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nthen_out_seq_tp = onnx.helper.make_sequence_type_proto(then_out_tensor_tp)\nthen_out_opt_tp = onnx.helper.make_optional_type_proto(then_out_seq_tp)\nthen_out = onnx.helper.make_value_info(\"optional_empty\", then_out_opt_tp)\n\nelse_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out_seq_tp = onnx.helper.make_sequence_type_proto(else_out_tensor_tp)\nelse_out_opt_tp = onnx.helper.make_optional_type_proto(else_out_seq_tp)\nelse_out = onnx.helper.make_value_info(\"else_opt\", else_out_opt_tp)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ncond = np.array(0).astype(bool)\nres = compute_if_outputs(x, cond)\n\nopt_empty_in = onnx.helper.make_node(\n \"Optional\", inputs=[], outputs=[\"optional_empty\"], type=seq_in_tp\n)\n\nthen_body = onnx.helper.make_graph([opt_empty_in], \"then_body\", [], [then_out])\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"x\"], outputs=[\"else_seq\"]\n)\n\nelse_optional_seq_node = onnx.helper.make_node(\n \"Optional\", inputs=[\"else_seq\"], outputs=[\"else_opt\"]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node, else_seq_node, else_optional_seq_node],\n \"else_body\",\n [],\n [else_out],\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_opt\",\n output_type_protos=[else_out_opt_tp],\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n)" + }, + { + "summary": "if_seq", + "code": "# Given a bool scalar input cond.\n# return constant sequence x if cond is True, otherwise return constant sequence y.\n\nthen_out = onnx.helper.make_tensor_sequence_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out = onnx.helper.make_tensor_sequence_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ny = [np.array([5, 4, 3, 2, 1]).astype(np.float32)]\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nthen_seq_node = onnx.helper.make_node(\n 
\"SequenceConstruct\", inputs=[\"x\"], outputs=[\"then_out\"]\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"y\"],\n value=onnx.numpy_helper.from_array(y[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"y\"], outputs=[\"else_out\"]\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node, then_seq_node], \"then_body\", [], [then_out]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node, else_seq_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + } + ] + }, + { + "name": "If", + "module": "ai.onnx", + "version": 19, + "description": "If conditional", + "attributes": [ + { + "name": "else_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is false. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the then_branch." + }, + { + "name": "then_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is true. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the else_branch." + } + ], + "inputs": [ + { + "name": "cond", + "type": "B", + "description": "Condition for the if. The tensor must contain a single element." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "outputs", + "type": "V", + "list": true, + "description": "Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the first output of `then_branch` is typed float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible." 
+ } + ], + "min_output": 1, + "max_output": 2147483647, + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types up to IRv9.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(bfloat16))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))", + "seq(tensor(float8e4m3fn))", + "seq(tensor(float8e4m3fnuz))", + "seq(tensor(float8e5m2))", + "seq(tensor(float8e5m2fnuz))", + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(bfloat16)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(bfloat16))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))", + "optional(tensor(float8e4m3fn))", + "optional(tensor(float8e4m3fnuz))", + "optional(tensor(float8e5m2))", + "optional(tensor(float8e5m2fnuz))" + ] + }, + { + "description": "Only bool", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "if", + "code": "# Given a bool scalar input cond.\n# return constant tensor x if cond is True, otherwise return constant tensor y.\n\nthen_out = onnx.helper.make_tensor_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, [5]\n)\nelse_out = onnx.helper.make_tensor_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, [5]\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([5, 4, 3, 2, 1]).astype(np.float32)\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"then_out\"],\n value=onnx.numpy_helper.from_array(x),\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"else_out\"],\n value=onnx.numpy_helper.from_array(y),\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node], \"then_body\", [], [then_out]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n 
outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "if_optional", + "code": "# Given a bool scalar input cond, return an empty optional sequence of\n# tensor if True, return an optional sequence with value x\n# (the input optional sequence) otherwise.\n\nten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\n\nthen_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nthen_out_seq_tp = onnx.helper.make_sequence_type_proto(then_out_tensor_tp)\nthen_out_opt_tp = onnx.helper.make_optional_type_proto(then_out_seq_tp)\nthen_out = onnx.helper.make_value_info(\"optional_empty\", then_out_opt_tp)\n\nelse_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out_seq_tp = onnx.helper.make_sequence_type_proto(else_out_tensor_tp)\nelse_out_opt_tp = onnx.helper.make_optional_type_proto(else_out_seq_tp)\nelse_out = onnx.helper.make_value_info(\"else_opt\", else_out_opt_tp)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ncond = np.array(0).astype(bool)\nres = compute_if_outputs(x, cond)\n\nopt_empty_in = onnx.helper.make_node(\n \"Optional\", inputs=[], outputs=[\"optional_empty\"], type=seq_in_tp\n)\n\nthen_body = onnx.helper.make_graph([opt_empty_in], \"then_body\", [], [then_out])\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"x\"], outputs=[\"else_seq\"]\n)\n\nelse_optional_seq_node = onnx.helper.make_node(\n \"Optional\", inputs=[\"else_seq\"], outputs=[\"else_opt\"]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node, else_seq_node, else_optional_seq_node],\n \"else_body\",\n [],\n [else_out],\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_opt\",\n output_type_protos=[else_out_opt_tp],\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n)" + }, + { + "summary": "if_seq", + "code": "# Given a bool scalar input cond.\n# return constant sequence x if cond is True, otherwise return constant sequence y.\n\nthen_out = onnx.helper.make_tensor_sequence_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out = onnx.helper.make_tensor_sequence_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ny = [np.array([5, 4, 3, 2, 1]).astype(np.float32)]\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nthen_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"x\"], outputs=[\"then_out\"]\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"y\"],\n value=onnx.numpy_helper.from_array(y[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"y\"], outputs=[\"else_out\"]\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node, then_seq_node], \"then_body\", [], [then_out]\n)\n\nelse_body = 
onnx.helper.make_graph(\n [else_const_node, else_seq_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + } + ] + }, + { + "name": "If", + "module": "ai.onnx", + "version": 21, + "description": "If conditional", + "attributes": [ + { + "name": "else_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is false. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the then_branch." + }, + { + "name": "then_branch", + "type": "graph", + "required": true, + "description": "Graph to run if condition is true. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the else_branch." + } + ], + "inputs": [ + { + "name": "cond", + "type": "B", + "description": "Condition for the if. The tensor must contain a single element." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "outputs", + "type": "V", + "list": true, + "description": "Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes. For example, if in a model file the first output of `then_branch` is typed as a float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible."
+ } + ], + "min_output": 1, + "max_output": 2147483647, + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types up to IRv10.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(bfloat16))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))", + "seq(tensor(float8e4m3fn))", + "seq(tensor(float8e4m3fnuz))", + "seq(tensor(float8e5m2))", + "seq(tensor(float8e5m2fnuz))", + "seq(tensor(uint4))", + "seq(tensor(int4))", + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(bfloat16)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(bfloat16))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))", + "optional(tensor(float8e4m3fn))", + "optional(tensor(float8e4m3fnuz))", + "optional(tensor(float8e5m2))", + "optional(tensor(float8e5m2fnuz))", + "optional(tensor(uint4))", + "optional(tensor(int4))" + ] + }, + { + "description": "Only bool", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "if", + "code": "# Given a bool scalar input cond.\n# return constant tensor x if cond is True, otherwise return constant tensor y.\n\nthen_out = onnx.helper.make_tensor_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, [5]\n)\nelse_out = onnx.helper.make_tensor_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, [5]\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([5, 4, 3, 2, 1]).astype(np.float32)\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"then_out\"],\n value=onnx.numpy_helper.from_array(x),\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"else_out\"],\n value=onnx.numpy_helper.from_array(y),\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node], \"then_body\", [], [then_out]\n)\n\nelse_body = 
onnx.helper.make_graph(\n [else_const_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "if_optional", + "code": "# Given a bool scalar input cond, return an empty optional sequence of\n# tensor if True, return an optional sequence with value x\n# (the input optional sequence) otherwise.\n\nten_in_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\n\nthen_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nthen_out_seq_tp = onnx.helper.make_sequence_type_proto(then_out_tensor_tp)\nthen_out_opt_tp = onnx.helper.make_optional_type_proto(then_out_seq_tp)\nthen_out = onnx.helper.make_value_info(\"optional_empty\", then_out_opt_tp)\n\nelse_out_tensor_tp = onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out_seq_tp = onnx.helper.make_sequence_type_proto(else_out_tensor_tp)\nelse_out_opt_tp = onnx.helper.make_optional_type_proto(else_out_seq_tp)\nelse_out = onnx.helper.make_value_info(\"else_opt\", else_out_opt_tp)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ncond = np.array(0).astype(bool)\nres = compute_if_outputs(x, cond)\n\nopt_empty_in = onnx.helper.make_node(\n \"Optional\", inputs=[], outputs=[\"optional_empty\"], type=seq_in_tp\n)\n\nthen_body = onnx.helper.make_graph([opt_empty_in], \"then_body\", [], [then_out])\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"x\"], outputs=[\"else_seq\"]\n)\n\nelse_optional_seq_node = onnx.helper.make_node(\n \"Optional\", inputs=[\"else_seq\"], outputs=[\"else_opt\"]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node, else_seq_node, else_optional_seq_node],\n \"else_body\",\n [],\n [else_out],\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_opt\",\n output_type_protos=[else_out_opt_tp],\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n)" + }, + { + "summary": "if_seq", + "code": "# Given a bool scalar input cond.\n# return constant sequence x if cond is True, otherwise return constant sequence y.\n\nthen_out = onnx.helper.make_tensor_sequence_value_info(\n \"then_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\nelse_out = onnx.helper.make_tensor_sequence_value_info(\n \"else_out\", onnx.TensorProto.FLOAT, shape=[5]\n)\n\nx = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]\ny = [np.array([5, 4, 3, 2, 1]).astype(np.float32)]\n\nthen_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.numpy_helper.from_array(x[0]),\n)\n\nthen_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"x\"], outputs=[\"then_out\"]\n)\n\nelse_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"y\"],\n value=onnx.numpy_helper.from_array(y[0]),\n)\n\nelse_seq_node = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"y\"], 
outputs=[\"else_out\"]\n)\n\nthen_body = onnx.helper.make_graph(\n [then_const_node, then_seq_node], \"then_body\", [], [then_out]\n)\n\nelse_body = onnx.helper.make_graph(\n [else_const_node, else_seq_node], \"else_body\", [], [else_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"cond\"],\n outputs=[\"res\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\ncond = np.array(1).astype(bool)\nres = x if cond else y\nexpect(\n if_node,\n inputs=[cond],\n outputs=[res],\n name=\"test_if_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + } + ] + }, + { + "name": "ImageDecoder", + "module": "ai.onnx", + "version": 20, + "description": "Loads and decodes and image from a file. If it can't decode for any reason (e.g. corrupted encoded\nstream, invalid format, it will return an empty matrix).\nThe following image formats are supported:\n* BMP\n* JPEG (note: Lossless JPEG support is optional)\n* JPEG2000\n* TIFF\n* PNG\n* WebP\n* Portable image format (PBM, PGM, PPM, PXM, PNM)\nDecoded images follow a channel-last layout: (Height, Width, Channels).\n**JPEG chroma upsampling method:**\nWhen upsampling the chroma components by a factor of 2, the pixels are linearly interpolated so that the\ncenters of the output pixels are 1/4 and 3/4 of the way between input pixel centers.\nWhen rounding, 0.5 is rounded down and up at alternative pixels locations to prevent bias towards\nlarger values (ordered dither pattern).\nConsidering adjacent input pixels A, B, and C, B is upsampled to pixels B0 and B1 so that\n```\nB0 = round_half_down((1/4) * A + (3/4) * B)\nB1 = round_half_up((3/4) * B + (1/4) * C)\n```\nThis method, is the default chroma upsampling method in the well-established libjpeg-turbo library,\nalso referred as \"smooth\" or \"fancy\" upsampling.\n", + "attributes": [ + { + "name": "pixel_format", + "type": "string", + "required": false, + "default": "RGB", + "description": "Pixel format. Can be one of \"RGB\", \"BGR\", or \"Grayscale\"." 
+ } + ], + "inputs": [ + { + "name": "encoded_stream", + "type": "T1", + "description": "Encoded stream" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "image", + "type": "T2", + "description": "Decoded image" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to 8-bit unsigned integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)" + ] + }, + { + "description": "Constrain output types to 8-bit unsigned integer tensor.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(uint8)" + ] + } + ], + "examples": [ + { + "summary": "image_decoder_decode_bmp_rgb", + "code": "node = onnx.helper.make_node(\n \"ImageDecoder\",\n inputs=[\"data\"],\n outputs=[\"output\"],\n pixel_format=\"RGB\",\n)\n\ndata, output = _generate_test_data(\n \"bmp\", _image_decoder_data.image_decoder_decode_bmp_rgb, \"RGB\"\n)\nexpect(\n node,\n inputs=[data],\n outputs=[output],\n name=\"test_image_decoder_decode_bmp_rgb\",\n)" + }, + { + "summary": "image_decoder_decode_jpeg2k_rgb", + "code": "node = onnx.helper.make_node(\n \"ImageDecoder\",\n inputs=[\"data\"],\n outputs=[\"output\"],\n pixel_format=\"RGB\",\n)\n\ndata, output = _generate_test_data(\n \"jpeg2000\", _image_decoder_data.image_decoder_decode_jpeg2k_rgb, \"RGB\"\n)\nexpect(\n node,\n inputs=[data],\n outputs=[output],\n name=\"test_image_decoder_decode_jpeg2k_rgb\",\n)" + }, + { + "summary": "image_decoder_decode_jpeg_bgr", + "code": "node = onnx.helper.make_node(\n \"ImageDecoder\",\n inputs=[\"data\"],\n outputs=[\"output\"],\n pixel_format=\"BGR\",\n)\n\ndata, output = _generate_test_data(\n \"jpeg\", _image_decoder_data.image_decoder_decode_jpeg_bgr, \"BGR\"\n)\nexpect(\n node,\n inputs=[data],\n outputs=[output],\n name=\"test_image_decoder_decode_jpeg_bgr\",\n)" + }, + { + "summary": "image_decoder_decode_jpeg_grayscale", + "code": "node = onnx.helper.make_node(\n \"ImageDecoder\",\n inputs=[\"data\"],\n outputs=[\"output\"],\n pixel_format=\"Grayscale\",\n)\n\ndata, output = _generate_test_data(\n \"jpeg\", _image_decoder_data.image_decoder_decode_jpeg_grayscale, \"Grayscale\"\n)\nexpect(\n node,\n inputs=[data],\n outputs=[output],\n name=\"test_image_decoder_decode_jpeg_grayscale\",\n)" + }, + { + "summary": "image_decoder_decode_jpeg_rgb", + "code": "node = onnx.helper.make_node(\n \"ImageDecoder\",\n inputs=[\"data\"],\n outputs=[\"output\"],\n pixel_format=\"RGB\",\n)\n\ndata, output = _generate_test_data(\n \"jpeg\", _image_decoder_data.image_decoder_decode_jpeg_rgb, \"RGB\"\n)\nexpect(\n node,\n inputs=[data],\n outputs=[output],\n name=\"test_image_decoder_decode_jpeg_rgb\",\n)" + }, + { + "summary": "image_decoder_decode_png_rgb", + "code": "node = onnx.helper.make_node(\n \"ImageDecoder\",\n inputs=[\"data\"],\n outputs=[\"output\"],\n pixel_format=\"RGB\",\n)\n\ndata, output = _generate_test_data(\n \"png\", _image_decoder_data.image_decoder_decode_png_rgb, \"RGB\"\n)\nexpect(\n node,\n inputs=[data],\n outputs=[output],\n name=\"test_image_decoder_decode_png_rgb\",\n)" + }, + { + "summary": "image_decoder_decode_pnm_rgb", + "code": "node = onnx.helper.make_node(\n \"ImageDecoder\",\n inputs=[\"data\"],\n outputs=[\"output\"],\n pixel_format=\"RGB\",\n)\n\ndata, output = _generate_test_data(\n \"ppm\", _image_decoder_data.image_decoder_decode_pnm_rgb, \"RGB\"\n)\nexpect(\n node,\n inputs=[data],\n outputs=[output],\n name=\"test_image_decoder_decode_pnm_rgb\",\n)" + }, + { + "summary": 
"image_decoder_decode_tiff_rgb", + "code": "node = onnx.helper.make_node(\n \"ImageDecoder\",\n inputs=[\"data\"],\n outputs=[\"output\"],\n pixel_format=\"RGB\",\n)\n\ndata, output = _generate_test_data(\n \"tiff\", _image_decoder_data.image_decoder_decode_tiff_rgb, \"RGB\"\n)\nexpect(\n node,\n inputs=[data],\n outputs=[output],\n name=\"test_image_decoder_decode_tiff_rgb\",\n)" + }, + { + "summary": "image_decoder_decode_webp_rgb", + "code": "node = onnx.helper.make_node(\n \"ImageDecoder\",\n inputs=[\"data\"],\n outputs=[\"output\"],\n pixel_format=\"RGB\",\n)\n\ndata, output = _generate_test_data(\n \"webp\", _image_decoder_data.image_decoder_decode_webp_rgb, \"RGB\"\n)\nexpect(\n node,\n inputs=[data],\n outputs=[output],\n name=\"test_image_decoder_decode_webp_rgb\",\n)" + } + ] + }, + { + "name": "Imputer", + "module": "ai.onnx.ml", + "version": 1, + "description": "Replaces inputs that equal one value with another, leaving all other elements alone.
\n This operator is typically used to replace missing values in situations where they have a canonical\n representation, such as -1, 0, NaN, or some extreme value.
\n One and only one of imputed_value_floats or imputed_value_int64s should be defined -- floats if the input tensor\n holds floats, integers if the input tensor holds integers. The imputed values must all fit within the\n width of the tensor element type. One and only one of replaced_value_float or replaced_value_int64 should be defined;\n which one depends on whether floats or integers are being processed.
\n The imputed_value attribute can contain a single element, or one element per input feature.
In other words, if the input tensor has the shape [*,F], then the length of the attribute array may be 1 or F. If it is 1, then it is broadcast along the last dimension and applied to each feature.\n", + "attributes": [ + { + "name": "imputed_value_floats", + "type": "float32[]", + "required": false, + "description": "Value(s) to change to" + }, + { + "name": "imputed_value_int64s", + "type": "int64[]", + "required": false, + "description": "Value(s) to change to." + }, + { + "name": "replaced_value_float", + "type": "float32", + "required": false, + "description": "A value that needs replacing." + }, + { + "name": "replaced_value_int64", + "type": "int64", + "required": false, + "description": "A value that needs replacing." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Data to be processed." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Imputed output data" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input type must be a tensor of a numeric type, either [N,C] or [C]. The output type will be of the same tensor type and shape.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + } + ] + }, + { + "name": "InstanceNormalization", + "module": "ai.onnx", + "version": 1, + "description": "Carries out instance normalization as described in the paper\nhttps://arxiv.org/abs/1607.08022.\n\ny = scale * (x - mean) / sqrt(variance + epsilon) + B,\nwhere mean and variance are computed per instance per channel.\n\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + }, + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero, default is 1e-5f." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The input 4-dimensional tensor of shape NCHW." + }, + { + "name": "scale", + "type": "T", + "description": "The input 1-dimensional scale tensor of size C." + }, + { + "name": "B", + "type": "T", + "description": "The input 1-dimensional bias tensor of size C." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output 4-dimensional tensor of the same shape as input." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "instancenormalization", + "code": "def _instancenorm_test_mode(x, s, bias, epsilon=1e-5): # type: ignore\n dims_x = len(x.shape)\n axis = tuple(range(2, dims_x))\n mean = np.mean(x, axis=axis, keepdims=True)\n var = np.var(x, axis=axis, keepdims=True)\n dim_ones = (1,) * (dims_x - 2)\n s = s.reshape(-1, *dim_ones)\n bias = bias.reshape(-1, *dim_ones)\n return s * (x - mean) / np.sqrt(var + epsilon) + bias\n\n# input size: (1, 2, 1, 3)\nx = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)\ns = np.array([1.0, 1.5]).astype(np.float32)\nbias = np.array([0, 1]).astype(np.float32)\ny = _instancenorm_test_mode(x, s, bias).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"InstanceNormalization\",\n inputs=[\"x\", \"s\", \"bias\"],\n outputs=[\"y\"],\n)\n\n# output size: (1, 2, 1, 3)\nexpect(node, inputs=[x, s, bias], outputs=[y], name=\"test_instancenorm_example\")\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nepsilon = 1e-2\ny = _instancenorm_test_mode(x, s, bias, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"InstanceNormalization\",\n inputs=[\"x\", \"s\", \"bias\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(node, inputs=[x, s, bias], outputs=[y], name=\"test_instancenorm_epsilon\")" + } + ], + "category": "Normalization" + }, + { + "name": "InstanceNormalization", + "module": "ai.onnx", + "version": 6, + "description": "Carries out instance normalization as described in the paper\nhttps://arxiv.org/abs/1607.08022.\n\ny = scale * (x - mean) / sqrt(variance + epsilon) + B,\nwhere mean and variance are computed per instance per channel.\n\n", + "attributes": [ + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size." + }, + { + "name": "scale", + "type": "T", + "description": "The input 1-dimensional scale tensor of size C." + }, + { + "name": "B", + "type": "T", + "description": "The input 1-dimensional bias tensor of size C." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output tensor of the same shape as input." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "instancenormalization", + "code": "def _instancenorm_test_mode(x, s, bias, epsilon=1e-5): # type: ignore\n dims_x = len(x.shape)\n axis = tuple(range(2, dims_x))\n mean = np.mean(x, axis=axis, keepdims=True)\n var = np.var(x, axis=axis, keepdims=True)\n dim_ones = (1,) * (dims_x - 2)\n s = s.reshape(-1, *dim_ones)\n bias = bias.reshape(-1, *dim_ones)\n return s * (x - mean) / np.sqrt(var + epsilon) + bias\n\n# input size: (1, 2, 1, 3)\nx = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)\ns = np.array([1.0, 1.5]).astype(np.float32)\nbias = np.array([0, 1]).astype(np.float32)\ny = _instancenorm_test_mode(x, s, bias).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"InstanceNormalization\",\n inputs=[\"x\", \"s\", \"bias\"],\n outputs=[\"y\"],\n)\n\n# output size: (1, 2, 1, 3)\nexpect(node, inputs=[x, s, bias], outputs=[y], name=\"test_instancenorm_example\")\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nepsilon = 1e-2\ny = _instancenorm_test_mode(x, s, bias, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"InstanceNormalization\",\n inputs=[\"x\", \"s\", \"bias\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(node, inputs=[x, s, bias], outputs=[y], name=\"test_instancenorm_epsilon\")" + } + ], + "category": "Normalization" + }, + { + "name": "IsInf", + "module": "ai.onnx", + "version": 10, + "description": "Map infinity to true and other values to false.", + "attributes": [ + { + "name": "detect_negative", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) Whether map negative infinity to true. Default to 1 so that negative infinity induces true. Set this attribute to 0 if negative infinity should be mapped to false." + }, + { + "name": "detect_positive", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) Whether map positive infinity to true. Default to 1 so that positive infinity induces true. Set this attribute to 0 if positive infinity should be mapped to false." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "input" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "output" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output types to boolean tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "infinity", + "code": "node = onnx.helper.make_node(\n \"IsInf\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float32)\ny = np.isinf(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isinf\")" + }, + { + "summary": "infinity_float16", + "code": "node = onnx.helper.make_node(\n \"IsInf\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float16)\ny = np.isinf(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isinf_float16\")" + }, + { + "summary": "negative_infinity_only", + "code": "node = onnx.helper.make_node(\n \"IsInf\", inputs=[\"x\"], outputs=[\"y\"], detect_positive=0\n)\n\nx = np.array([-1.7, np.nan, np.inf, -3.6, np.NINF, np.inf], dtype=np.float32)\ny = np.isneginf(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isinf_negative\")" + }, + { + "summary": "positive_infinity_only", + "code": "node = onnx.helper.make_node(\n \"IsInf\", inputs=[\"x\"], outputs=[\"y\"], detect_negative=0\n)\n\nx = np.array([-1.7, np.nan, np.inf, 3.6, np.NINF, np.inf], dtype=np.float32)\ny = np.isposinf(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isinf_positive\")" + } + ] + }, + { + "name": "IsInf", + "module": "ai.onnx", + "version": 20, + "description": "Map infinity to true and other values to false.", + "attributes": [ + { + "name": "detect_negative", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) Whether map negative infinity to true. Default to 1 so that negative infinity induces true. Set this attribute to 0 if negative infinity should be mapped to false." + }, + { + "name": "detect_positive", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) Whether map positive infinity to true. Default to 1 so that positive infinity induces true. Set this attribute to 0 if positive infinity should be mapped to false." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "input" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "output" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + }, + { + "description": "Constrain output types to boolean tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "infinity", + "code": "node = onnx.helper.make_node(\n \"IsInf\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float32)\ny = np.isinf(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isinf\")" + }, + { + "summary": "infinity_float16", + "code": "node = onnx.helper.make_node(\n \"IsInf\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float16)\ny = np.isinf(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isinf_float16\")" + }, + { + "summary": "negative_infinity_only", + "code": "node = onnx.helper.make_node(\n \"IsInf\", inputs=[\"x\"], outputs=[\"y\"], detect_positive=0\n)\n\nx = np.array([-1.7, np.nan, np.inf, -3.6, np.NINF, np.inf], dtype=np.float32)\ny = np.isneginf(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isinf_negative\")" + }, + { + "summary": "positive_infinity_only", + "code": "node = onnx.helper.make_node(\n \"IsInf\", inputs=[\"x\"], outputs=[\"y\"], detect_negative=0\n)\n\nx = np.array([-1.7, np.nan, np.inf, 3.6, np.NINF, np.inf], dtype=np.float32)\ny = np.isposinf(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isinf_positive\")" + } + ] + }, + { + "name": "IsNaN", + "module": "ai.onnx", + "version": 9, + "description": "Returns which elements of the input are NaN.", + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "input" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "output" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output types to boolean tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "float16", + "code": "node = onnx.helper.make_node(\n \"IsNaN\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float16)\ny = np.isnan(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isnan_float16\")" + }, + { + "summary": "isnan", + "code": "node = onnx.helper.make_node(\n \"IsNaN\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float32)\ny = np.isnan(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isnan\")" + } + ] + }, + { + "name": "IsNaN", + "module": "ai.onnx", + "version": 13, + "description": "Returns which elements of the input are NaN.", + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "input" + } + ], + "min_input": 1, + 
"max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "output" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain output types to boolean tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "float16", + "code": "node = onnx.helper.make_node(\n \"IsNaN\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float16)\ny = np.isnan(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isnan_float16\")" + }, + { + "summary": "isnan", + "code": "node = onnx.helper.make_node(\n \"IsNaN\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float32)\ny = np.isnan(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isnan\")" + } + ] + }, + { + "name": "IsNaN", + "module": "ai.onnx", + "version": 20, + "description": "Returns which elements of the input are NaN.", + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "input" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "output" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + }, + { + "description": "Constrain output types to boolean tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "float16", + "code": "node = onnx.helper.make_node(\n \"IsNaN\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float16)\ny = np.isnan(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isnan_float16\")" + }, + { + "summary": "isnan", + "code": "node = onnx.helper.make_node(\n \"IsNaN\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float32)\ny = np.isnan(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_isnan\")" + } + ] + }, + { + "name": "LRN", + "module": "ai.onnx", + "version": 1, + "description": "Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf).\nIt normalizes over local input regions.\nThe local region is defined across the channels. For an element X[n, c, d1, ..., dk] in a tensor\nof shape (N x C x D1 x D2, ..., Dk), its region is\n{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}.\n\nsquare_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2),\nwhere max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)).\n\nY[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 9.999999747378752e-05, + "description": "Scaling parameter." 
+ }, + { + "name": "beta", + "type": "float32", + "required": false, + "default": 0.75, + "description": "The exponent." + }, + { + "name": "bias", + "type": "float32", + "required": false, + "default": 1.0, + "description": "" + }, + { + "name": "size", + "type": "int64", + "required": true, + "description": "The number of channels to sum over" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor, which has the shape and type as input tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default", + "code": "alpha = 0.0001\nbeta = 0.75\nbias = 1.0\nnsize = 3\nnode = onnx.helper.make_node(\"LRN\", inputs=[\"x\"], outputs=[\"y\"], size=3)\nx = np.random.randn(5, 5, 5, 5).astype(np.float32)\nsquare_sum = np.zeros((5, 5, 5, 5)).astype(np.float32)\nfor n, c, h, w in np.ndindex(x.shape):\n square_sum[n, c, h, w] = sum(\n x[\n n,\n max(0, c - int(math.floor((nsize - 1) / 2))) : min(\n 5, c + int(math.ceil((nsize - 1) / 2)) + 1\n ),\n h,\n w,\n ]\n ** 2\n )\ny = x / ((bias + (alpha / nsize) * square_sum) ** beta)\nexpect(node, inputs=[x], outputs=[y], name=\"test_lrn_default\")" + }, + { + "summary": "lrn", + "code": "alpha = 0.0002\nbeta = 0.5\nbias = 2.0\nnsize = 3\nnode = onnx.helper.make_node(\n \"LRN\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n alpha=alpha,\n beta=beta,\n bias=bias,\n size=nsize,\n)\nx = np.random.randn(5, 5, 5, 5).astype(np.float32)\nsquare_sum = np.zeros((5, 5, 5, 5)).astype(np.float32)\nfor n, c, h, w in np.ndindex(x.shape):\n square_sum[n, c, h, w] = sum(\n x[\n n,\n max(0, c - int(math.floor((nsize - 1) / 2))) : min(\n 5, c + int(math.ceil((nsize - 1) / 2)) + 1\n ),\n h,\n w,\n ]\n ** 2\n )\ny = x / ((bias + (alpha / nsize) * square_sum) ** beta)\nexpect(node, inputs=[x], outputs=[y], name=\"test_lrn\")" + } + ], + "category": "Normalization" + }, + { + "name": "LRN", + "module": "ai.onnx", + "version": 13, + "description": "Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf).\nIt normalizes over local input regions.\nThe local region is defined across the channels. 
For an element `X[n, c, d1, ..., dk]` in a tensor\nof shape `(N x C x D1 x D2, ..., Dk)`, its region is\n`{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}`.\n\n`square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2)`,\nwhere `max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))`.\n\n`Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta`\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 9.999999747378752e-05, + "description": "Scaling parameter." + }, + { + "name": "beta", + "type": "float32", + "required": false, + "default": 0.75, + "description": "The exponent." + }, + { + "name": "bias", + "type": "float32", + "required": false, + "default": 1.0, + "description": "" + }, + { + "name": "size", + "type": "int64", + "required": true, + "description": "The number of channels to sum over" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor, which has the shape and type as input tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default", + "code": "alpha = 0.0001\nbeta = 0.75\nbias = 1.0\nnsize = 3\nnode = onnx.helper.make_node(\"LRN\", inputs=[\"x\"], outputs=[\"y\"], size=3)\nx = np.random.randn(5, 5, 5, 5).astype(np.float32)\nsquare_sum = np.zeros((5, 5, 5, 5)).astype(np.float32)\nfor n, c, h, w in np.ndindex(x.shape):\n square_sum[n, c, h, w] = sum(\n x[\n n,\n max(0, c - int(math.floor((nsize - 1) / 2))) : min(\n 5, c + int(math.ceil((nsize - 1) / 2)) + 1\n ),\n h,\n w,\n ]\n ** 2\n )\ny = x / ((bias + (alpha / nsize) * square_sum) ** beta)\nexpect(node, inputs=[x], outputs=[y], name=\"test_lrn_default\")" + }, + { + "summary": "lrn", + "code": "alpha = 0.0002\nbeta = 0.5\nbias = 2.0\nnsize = 3\nnode = onnx.helper.make_node(\n \"LRN\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n alpha=alpha,\n beta=beta,\n bias=bias,\n size=nsize,\n)\nx = np.random.randn(5, 5, 5, 5).astype(np.float32)\nsquare_sum = np.zeros((5, 5, 5, 5)).astype(np.float32)\nfor n, c, h, w in np.ndindex(x.shape):\n square_sum[n, c, h, w] = sum(\n x[\n n,\n max(0, c - int(math.floor((nsize - 1) / 2))) : min(\n 5, c + int(math.ceil((nsize - 1) / 2)) + 1\n ),\n h,\n w,\n ]\n ** 2\n )\ny = x / ((bias + (alpha / nsize) * square_sum) ** beta)\nexpect(node, inputs=[x], outputs=[y], name=\"test_lrn\")" + } + ], + "category": "Normalization" + }, + { + "name": "LSTM", + "module": "ai.onnx", + "version": 1, + "description": "Computes a one-layer LSTM. 
This operator is usually supported via some\ncustom implementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`i` - input gate\n\n`o` - output gate\n\n`f` - forget gate\n\n`c` - cell gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates\n\n`R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates\n\n`Wb[iofc]` - W bias vectors for input, output, forget, and cell gates\n\n`Rb[iofc]` - R bias vectors for input, output, forget, and cell gates\n\n`P[iof]` - P peephole weight vector for input, output, and forget gates\n\n`WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates\n\n`RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates\n\n`WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates\n\n`RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates\n\n`PB[iof]` - P peephole weight vector for backward input, output, and forget gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh, h=Tanh):\n\n - it = f(Xt*(Wi^T) + Ht-1*Ri + Pi (.) Ct-1 + Wbi + Rbi)\n\n - ft = f(Xt*(Wf^T) + Ht-1*Rf + Pf (.) Ct-1 + Wbf + Rbf)\n\n - ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc + Rbc)\n\n - Ct = ft (.) Ct-1 + it (.) ct\n\n - ot = f(Xt*(Wo^T) + Ht-1*Ro + Po (.) Ct + Wbo + Rbo)\n\n - Ht = ot (.) h(Ct)\n", + "attributes": [ + { + "name": "activation_alpha", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as those of the corresponding ONNX operators. For example, with LeakyRelu the default alpha is 0.01." + }, + { + "name": "activation_beta", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as those of the corresponding ONNX operators." + }, + { + "name": "activations", + "type": "string[]", + "required": false, + "description": "A list of 3 (or 6 if bidirectional) activation functions for input, output, forget, cell, and hidden. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified." + }, + { + "name": "clip", + "type": "float32", + "required": false, + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified." + }, + { + "name": "direction", + "type": "string", + "required": false, + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional."
+ }, + { + "name": "hidden_size", + "type": "int64", + "required": false, + "description": "Number of neurons in the hidden layer" + }, + { + "name": "input_forget", + "type": "int64", + "required": false, + "description": "Couple the input and forget gates if 1, default 0." + }, + { + "name": "output_sequence", + "type": "int64", + "required": false, + "description": "The sequence output for the hidden is optional if 0. Default 0." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor for the gates. Concatenation of `W[iofc]` and `WB[iofc]` (if bidirectional) along dimension 0. The tensor has shape `[num_directions, 4*hidden_size, input_size]`." + }, + { + "name": "R", + "type": "T", + "description": "The recurrence weight tensor. Concatenation of `R[iofc]` and `RB[iofc]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 4*hidden_size, hidden_size]`." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "The bias tensor for input gate. Concatenation of `[Wb[iofc], Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 8*hidden_size]`. Optional: If not specified - assumed to be 0." + }, + { + "name": "sequence_lens", + "type": "T1", + "option": "optional", + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`." + }, + { + "name": "initial_h", + "type": "T", + "option": "optional", + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + }, + { + "name": "initial_c", + "type": "T", + "option": "optional", + "description": "Optional initial value of the cell. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + }, + { + "name": "P", + "type": "T", + "option": "optional", + "description": "The weight tensor for peepholes. Concatenation of `P[iof]` and `PB[iof]` (if bidirectional) along dimension 0. It has shape `[num_directions, 3*hidde_size]`. Optional: If not specified - assumed to be 0." + } + ], + "min_input": 3, + "max_input": 8, + "outputs": [ + { + "name": "Y", + "type": "T", + "option": "optional", + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. It is optional if `output_sequence` is 0." + }, + { + "name": "Y_h", + "type": "T", + "option": "optional", + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`." + }, + { + "name": "Y_c", + "type": "T", + "option": "optional", + "description": "The last output value of the cell. It has shape `[num_directions, batch_size, hidden_size]`." 
+ } + ], + "min_output": 0, + "max_output": 3, + "inputs_range": "3 - 8", + "outputs_range": "0 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "batchwise", + "code": "input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 7\nweight_scale = 0.3\nnumber_of_gates = 4\nlayout = 1\n\nnode = onnx.helper.make_node(\n \"LSTM\",\n inputs=[\"X\", \"W\", \"R\"],\n outputs=[\"Y\", \"Y_h\"],\n hidden_size=hidden_size,\n layout=layout,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\nlstm = LSTMHelper(X=input, W=W, R=R, layout=layout)\nY, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],\n name=\"test_lstm_batchwise\",\n)" + }, + { + "summary": "defaults", + "code": "input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 3\nweight_scale = 0.1\nnumber_of_gates = 4\n\nnode = onnx.helper.make_node(\n \"LSTM\", inputs=[\"X\", \"W\", \"R\"], outputs=[\"\", \"Y_h\"], hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\nlstm = LSTMHelper(X=input, W=W, R=R)\n_, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_lstm_defaults\",\n)" + }, + { + "summary": "initial_bias", + "code": "input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(\n np.float32\n)\n\ninput_size = 3\nhidden_size = 4\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 4\n\nnode = onnx.helper.make_node(\n \"LSTM\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(\n np.float32\n)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), 1)\n\nlstm = LSTMHelper(X=input, W=W, R=R, B=B)\n_, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_lstm_with_initial_bias\",\n)" + }, + { + "summary": "peepholes", + "code": "input = np.array([[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]).astype(\n np.float32\n)\n\ninput_size = 4\nhidden_size = 3\nweight_scale = 0.1\nnumber_of_gates = 4\nnumber_of_peepholes = 3\n\nnode = onnx.helper.make_node(\n \"LSTM\",\n inputs=[\"X\", \"W\", \"R\", \"B\", \"sequence_lens\", \"initial_h\", \"initial_c\", \"P\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\n# Initializing Inputs\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, 
number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\nB = np.zeros((1, 2 * number_of_gates * hidden_size)).astype(np.float32)\nseq_lens = np.repeat(input.shape[0], input.shape[1]).astype(np.int32)\ninit_h = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\ninit_c = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\nP = weight_scale * np.ones((1, number_of_peepholes * hidden_size)).astype(\n np.float32\n)\n\nlstm = LSTMHelper(\n X=input, W=W, R=R, B=B, P=P, initial_c=init_c, initial_h=init_h\n)\n_, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R, B, seq_lens, init_h, init_c, P],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_lstm_with_peepholes\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "LSTM", + "module": "ai.onnx", + "version": 7, + "description": "Computes a one-layer LSTM. This operator is usually supported via some\ncustom implementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`i` - input gate\n\n`o` - output gate\n\n`f` - forget gate\n\n`c` - cell gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates\n\n`R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates\n\n`Wb[iofc]` - W bias vectors for input, output, forget, and cell gates\n\n`Rb[iofc]` - R bias vectors for input, output, forget, and cell gates\n\n`P[iof]` - P peephole weight vector for input, output, and forget gates\n\n`WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates\n\n`RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates\n\n`WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates\n\n`RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates\n\n`PB[iof]` - P peephole weight vector for backward input, output, and forget gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh, h=Tanh):\n\n - it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)\n\n - ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)\n\n - ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)\n\n - Ct = ft (.) Ct-1 + it (.) ct\n\n - ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)\n\n - Ht = ot (.) h(Ct)\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "activation_alpha", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. 
Default values are the same as the corresponding ONNX operators. For example with LeakyRelu, the default alpha is 0.01." + }, + { + "name": "activation_beta", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as the corresponding ONNX operators." + }, + { + "name": "activations", + "type": "string[]", + "required": false, + "description": "A list of 3 (or 6 if bidirectional) activation functions for input, output, forget, cell, and hidden. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified." + }, + { + "name": "clip", + "type": "float32", + "required": false, + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified." + }, + { + "name": "direction", + "type": "string", + "required": false, + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional." + }, + { + "name": "hidden_size", + "type": "int64", + "required": false, + "description": "Number of neurons in the hidden layer" + }, + { + "name": "input_forget", + "type": "int64", + "required": false, + "description": "Couple the input and forget gates if 1." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor for the gates. Concatenation of `W[iofc]` and `WB[iofc]` (if bidirectional) along dimension 0. The tensor has shape `[num_directions, 4*hidden_size, input_size]`." + }, + { + "name": "R", + "type": "T", + "description": "The recurrence weight tensor. Concatenation of `R[iofc]` and `RB[iofc]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 4*hidden_size, hidden_size]`." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "The bias tensor for input gate. Concatenation of `[Wb[iofc], Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 8*hidden_size]`. Optional: If not specified - assumed to be 0." + }, + { + "name": "sequence_lens", + "type": "T1", + "option": "optional", + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`." + }, + { + "name": "initial_h", + "type": "T", + "option": "optional", + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + }, + { + "name": "initial_c", + "type": "T", + "option": "optional", + "description": "Optional initial value of the cell. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + }, + { + "name": "P", + "type": "T", + "option": "optional", + "description": "The weight tensor for peepholes. Concatenation of `P[iof]` and `PB[iof]` (if bidirectional) along dimension 0. It has shape `[num_directions, 3*hidden_size]`. Optional: If not specified - assumed to be 0." 
+ } + ], + "min_input": 3, + "max_input": 8, + "outputs": [ + { + "name": "Y", + "type": "T", + "option": "optional", + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. " + }, + { + "name": "Y_h", + "type": "T", + "option": "optional", + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`." + }, + { + "name": "Y_c", + "type": "T", + "option": "optional", + "description": "The last output value of the cell. It has shape `[num_directions, batch_size, hidden_size]`." + } + ], + "min_output": 0, + "max_output": 3, + "inputs_range": "3 - 8", + "outputs_range": "0 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "batchwise", + "code": "input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 7\nweight_scale = 0.3\nnumber_of_gates = 4\nlayout = 1\n\nnode = onnx.helper.make_node(\n \"LSTM\",\n inputs=[\"X\", \"W\", \"R\"],\n outputs=[\"Y\", \"Y_h\"],\n hidden_size=hidden_size,\n layout=layout,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\nlstm = LSTMHelper(X=input, W=W, R=R, layout=layout)\nY, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],\n name=\"test_lstm_batchwise\",\n)" + }, + { + "summary": "defaults", + "code": "input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 3\nweight_scale = 0.1\nnumber_of_gates = 4\n\nnode = onnx.helper.make_node(\n \"LSTM\", inputs=[\"X\", \"W\", \"R\"], outputs=[\"\", \"Y_h\"], hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\nlstm = LSTMHelper(X=input, W=W, R=R)\n_, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_lstm_defaults\",\n)" + }, + { + "summary": "initial_bias", + "code": "input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(\n np.float32\n)\n\ninput_size = 3\nhidden_size = 4\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 4\n\nnode = onnx.helper.make_node(\n \"LSTM\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(\n np.float32\n)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), 1)\n\nlstm = LSTMHelper(X=input, W=W, R=R, B=B)\n_, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n 
name=\"test_lstm_with_initial_bias\",\n)" + }, + { + "summary": "peepholes", + "code": "input = np.array([[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]).astype(\n np.float32\n)\n\ninput_size = 4\nhidden_size = 3\nweight_scale = 0.1\nnumber_of_gates = 4\nnumber_of_peepholes = 3\n\nnode = onnx.helper.make_node(\n \"LSTM\",\n inputs=[\"X\", \"W\", \"R\", \"B\", \"sequence_lens\", \"initial_h\", \"initial_c\", \"P\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\n# Initializing Inputs\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\nB = np.zeros((1, 2 * number_of_gates * hidden_size)).astype(np.float32)\nseq_lens = np.repeat(input.shape[0], input.shape[1]).astype(np.int32)\ninit_h = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\ninit_c = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\nP = weight_scale * np.ones((1, number_of_peepholes * hidden_size)).astype(\n np.float32\n)\n\nlstm = LSTMHelper(\n X=input, W=W, R=R, B=B, P=P, initial_c=init_c, initial_h=init_h\n)\n_, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R, B, seq_lens, init_h, init_c, P],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_lstm_with_peepholes\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "LSTM", + "module": "ai.onnx", + "version": 14, + "description": "Computes an one-layer LSTM. This operator is usually supported via some\ncustom implementation such as CuDNN.\n\nNotations:\n\n* `X` - input tensor\n* `i` - input gate\n* `o` - output gate\n* `f` - forget gate\n* `c` - cell gate\n* `t` - time step (t-1 means previous time step)\n* `W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates\n* `R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates\n* `Wb[iofc]` - W bias vectors for input, output, forget, and cell gates\n* `Rb[iofc]` - R bias vectors for input, output, forget, and cell gates\n* `P[iof]` - P peephole weight vector for input, output, and forget gates\n* `WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates\n* `RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates\n* `WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates\n* `RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates\n* `PB[iof]` - P peephole weight vector for backward input, output, and forget gates\n* `H` - Hidden state\n* `num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n* Relu(x) - max(0, x)\n* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n* Sigmoid(x) - 1/(1 + e^{-x})\n\nNOTE: Below are optional\n\n* Affine(x) - alpha*x + beta\n* LeakyRelu(x) - x if x >= 0 else alpha * x\n* ThresholdedRelu(x) - x if x >= alpha else 0\n* ScaledTanh(x) - alpha*Tanh(beta*x)\n* HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n* Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n* Softsign(x) - x/(1 + |x|)\n* Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh, h=Tanh):\n\n* it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)\n* ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)\n* ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)\n* Ct = ft (.) Ct-1 + it (.) ct\n* ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)\n* Ht = ot (.) h(Ct)\nThis operator has **optional** inputs/outputs. 
See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "activation_alpha", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as the corresponding ONNX operators. For example with LeakyRelu, the default alpha is 0.01." + }, + { + "name": "activation_beta", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as the corresponding ONNX operators." + }, + { + "name": "activations", + "type": "string[]", + "required": false, + "description": "A list of 3 (or 6 if bidirectional) activation functions for input, output, forget, cell, and hidden. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified." + }, + { + "name": "clip", + "type": "float32", + "required": false, + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified." + }, + { + "name": "direction", + "type": "string", + "required": false, + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional." + }, + { + "name": "hidden_size", + "type": "int64", + "required": false, + "description": "Number of neurons in the hidden layer" + }, + { + "name": "input_forget", + "type": "int64", + "required": false, + "description": "Couple the input and forget gates if 1." + }, + { + "name": "layout", + "type": "int64", + "required": false, + "description": "The shape format of inputs X, initial_h, initial_c and outputs Y, Y_h, Y_c. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [batch_size, num_directions, hidden_size]." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor for the gates. Concatenation of `W[iofc]` and `WB[iofc]` (if bidirectional) along dimension 0. The tensor has shape `[num_directions, 4*hidden_size, input_size]`." + }, + { + "name": "R", + "type": "T", + "description": "The recurrence weight tensor. Concatenation of `R[iofc]` and `RB[iofc]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 4*hidden_size, hidden_size]`." 
+ }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "The bias tensor for input gate. Concatenation of `[Wb[iofc], Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 8*hidden_size]`. Optional: If not specified - assumed to be 0." + }, + { + "name": "sequence_lens", + "type": "T1", + "option": "optional", + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`." + }, + { + "name": "initial_h", + "type": "T", + "option": "optional", + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + }, + { + "name": "initial_c", + "type": "T", + "option": "optional", + "description": "Optional initial value of the cell. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + }, + { + "name": "P", + "type": "T", + "option": "optional", + "description": "The weight tensor for peepholes. Concatenation of `P[iof]` and `PB[iof]` (if bidirectional) along dimension 0. It has shape `[num_directions, 3*hidde_size]`. Optional: If not specified - assumed to be 0." + } + ], + "min_input": 3, + "max_input": 8, + "outputs": [ + { + "name": "Y", + "type": "T", + "option": "optional", + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. " + }, + { + "name": "Y_h", + "type": "T", + "option": "optional", + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`." + }, + { + "name": "Y_c", + "type": "T", + "option": "optional", + "description": "The last output value of the cell. It has shape `[num_directions, batch_size, hidden_size]`." 
+ } + ], + "min_output": 0, + "max_output": 3, + "inputs_range": "3 - 8", + "outputs_range": "0 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "batchwise", + "code": "input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 7\nweight_scale = 0.3\nnumber_of_gates = 4\nlayout = 1\n\nnode = onnx.helper.make_node(\n \"LSTM\",\n inputs=[\"X\", \"W\", \"R\"],\n outputs=[\"Y\", \"Y_h\"],\n hidden_size=hidden_size,\n layout=layout,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\nlstm = LSTMHelper(X=input, W=W, R=R, layout=layout)\nY, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],\n name=\"test_lstm_batchwise\",\n)" + }, + { + "summary": "defaults", + "code": "input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 3\nweight_scale = 0.1\nnumber_of_gates = 4\n\nnode = onnx.helper.make_node(\n \"LSTM\", inputs=[\"X\", \"W\", \"R\"], outputs=[\"\", \"Y_h\"], hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\nlstm = LSTMHelper(X=input, W=W, R=R)\n_, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_lstm_defaults\",\n)" + }, + { + "summary": "initial_bias", + "code": "input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(\n np.float32\n)\n\ninput_size = 3\nhidden_size = 4\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 4\n\nnode = onnx.helper.make_node(\n \"LSTM\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(\n np.float32\n)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), 1)\n\nlstm = LSTMHelper(X=input, W=W, R=R, B=B)\n_, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_lstm_with_initial_bias\",\n)" + }, + { + "summary": "peepholes", + "code": "input = np.array([[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]).astype(\n np.float32\n)\n\ninput_size = 4\nhidden_size = 3\nweight_scale = 0.1\nnumber_of_gates = 4\nnumber_of_peepholes = 3\n\nnode = onnx.helper.make_node(\n \"LSTM\",\n inputs=[\"X\", \"W\", \"R\", \"B\", \"sequence_lens\", \"initial_h\", \"initial_c\", \"P\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\n# Initializing Inputs\nW = weight_scale * np.ones(\n (1, number_of_gates * hidden_size, input_size)\n).astype(np.float32)\nR = weight_scale * np.ones(\n (1, 
number_of_gates * hidden_size, hidden_size)\n).astype(np.float32)\nB = np.zeros((1, 2 * number_of_gates * hidden_size)).astype(np.float32)\nseq_lens = np.repeat(input.shape[0], input.shape[1]).astype(np.int32)\ninit_h = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\ninit_c = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\nP = weight_scale * np.ones((1, number_of_peepholes * hidden_size)).astype(\n np.float32\n)\n\nlstm = LSTMHelper(\n X=input, W=W, R=R, B=B, P=P, initial_c=init_c, initial_h=init_h\n)\n_, Y_h = lstm.step()\nexpect(\n node,\n inputs=[input, W, R, B, seq_lens, init_h, init_c, P],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_lstm_with_peepholes\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "LabelEncoder", + "module": "ai.onnx.ml", + "version": 1, + "description": "Converts strings to integers and vice versa.
\n If the string default value is set, it will convert integers to strings.\n If the int default value is set, it will convert strings to integers.
\n Each operator converts either integers to strings or strings to integers, depending\n on which default value attribute is provided. Only one default value attribute\n should be defined.
\n When converting from integers to strings, the string is fetched from the\n 'classes_strings' list, by simple indexing.
\n When converting from strings to integers, the string is looked up in the list\n and the index at which it is found is used as the converted value.\n", + "attributes": [ + { + "name": "classes_strings", + "type": "string[]", + "required": false, + "description": "A list of labels." + }, + { + "name": "default_int64", + "type": "int64", + "required": false, + "default": -1, + "description": "An integer to use when an input string value is not found in the map.
One and only one of the 'default_*' attributes must be defined." + }, + { + "name": "default_string", + "type": "string", + "required": false, + "default": "_Unused", + "description": "A string to use when an input integer value is not found in the map.
One and only one of the 'default_*' attributes must be defined." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input data." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "Output data. If strings are input, the output values are integers, and vice versa." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input type must be a tensor of integers or strings, of any shape.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ] + }, + { + "description": "The output type will be a tensor of strings or integers, and will have the same shape as the input.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "string_int_label_encoder", + "code": "node = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n default_int64=42,\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int\",\n)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, -1, 2, -1]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int_no_default\",\n)" + }, + { + "summary": "tensor_based_label_encoder", + "code": "tensor_keys = make_tensor(\n \"keys_tensor\", onnx.TensorProto.STRING, (3,), [\"a\", \"b\", \"c\"]\n)\nrepeated_string_keys = [\"a\", \"b\", \"c\"]\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int16)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_tensor=tensor_keys,\n values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n ),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_mapping\",\n)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=repeated_string_keys,\n values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n ),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_value_only_mapping\",\n)" + } + ] + }, + { + "name": "LabelEncoder", + "module": "ai.onnx.ml", + "version": 2, + "description": "Maps each element in the input tensor to another value.
\n The mapping is determined by the two parallel attributes, 'keys_*' and\n 'values_*'. The i-th value in the specified 'keys_*' attribute\n is mapped to the i-th value in the specified 'values_*' attribute. This\n implies that the input's element type and the element type of the specified\n 'keys_*' must be identical, while the output type is identical to the\n specified 'values_*' attribute. If an input element cannot be found in the\n specified 'keys_*' attribute, the 'default_*' that matches the specified\n 'values_*' attribute is used as its output value.
\n Let's consider an example that maps a string tensor to an integer tensor.\n Assume 'keys_strings' is [\"Amy\", \"Sally\"], 'values_int64s' is [5, 6],\n and 'default_int64' is '-1'. The input [\"Dori\", \"Amy\", \"Amy\", \"Sally\",\n \"Sally\"] would be mapped to [-1, 5, 5, 6, 6].
\n Since this operator is a one-to-one mapping, its input and output shapes\n are the same. Notice that only one of 'keys_*'/'values_*' can be set.
\n For key look-up, bit-wise comparison is used, so even a float NaN can be\n mapped to a value in the 'values_*' attribute.
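As a rough sketch of these semantics (an illustration, not the runtime implementation), the 'Amy'/'Sally' example above reduces to a dictionary lookup with a default:

import numpy as np

keys_strings = ["Amy", "Sally"]
values_int64s = [5, 6]
default_int64 = -1

table = dict(zip(keys_strings, values_int64s))
x = np.array(["Dori", "Amy", "Amy", "Sally", "Sally"], dtype=object)
y = np.array([table.get(v, default_int64) for v in x], dtype=np.int64)
# y == [-1, 5, 5, 6, 6]; the output shape always matches the input shape.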
\n", + "attributes": [ + { + "name": "default_float", + "type": "float32", + "required": false, + "description": "A float." + }, + { + "name": "default_int64", + "type": "int64", + "required": false, + "default": -1, + "description": "An integer." + }, + { + "name": "default_string", + "type": "string", + "required": false, + "default": "_Unused", + "description": "A string." + }, + { + "name": "keys_floats", + "type": "float32[]", + "required": false, + "description": "A list of floats." + }, + { + "name": "keys_int64s", + "type": "int64[]", + "required": false, + "description": "A list of ints." + }, + { + "name": "keys_strings", + "type": "string[]", + "required": false, + "description": "A list of strings. One and only one of 'keys_*'s should be set." + }, + { + "name": "values_floats", + "type": "float32[]", + "required": false, + "description": "A list of floats." + }, + { + "name": "values_int64s", + "type": "int64[]", + "required": false, + "description": "A list of ints." + }, + { + "name": "values_strings", + "type": "string[]", + "required": false, + "description": "A list of strings. One and only one of 'value_*'s should be set." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input data. It can be either tensor or scalar." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "Output data." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input type is a tensor of any shape.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)", + "tensor(float)" + ] + }, + { + "description": "Output type is determined by the specified 'values_*' attribute.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)", + "tensor(float)" + ] + } + ], + "examples": [ + { + "summary": "string_int_label_encoder", + "code": "node = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n default_int64=42,\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int\",\n)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, -1, 2, -1]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int_no_default\",\n)" + }, + { + "summary": "tensor_based_label_encoder", + "code": "tensor_keys = make_tensor(\n \"keys_tensor\", onnx.TensorProto.STRING, (3,), [\"a\", \"b\", \"c\"]\n)\nrepeated_string_keys = [\"a\", \"b\", \"c\"]\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int16)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_tensor=tensor_keys,\n values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n ),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_mapping\",\n)\n\nnode = 
onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=repeated_string_keys,\n values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n ),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_value_only_mapping\",\n)" + } + ] + }, + { + "name": "LabelEncoder", + "module": "ai.onnx.ml", + "version": 4, + "description": "Maps each element in the input tensor to another value.
\n The mapping is determined by the two parallel attributes, 'keys_*' and\n 'values_*'. The i-th value in the specified 'keys_*' attribute\n is mapped to the i-th value in the specified 'values_*' attribute. This\n implies that the input's element type and the element type of the specified\n 'keys_*' must be identical, while the output type is identical to the\n specified 'values_*' attribute. Note that the 'keys_*' and 'values_*' attributes\n must have the same length. If an input element cannot be found in the\n specified 'keys_*' attribute, the 'default_*' that matches the specified\n 'values_*' attribute is used as its output value. The type of the 'default_*'\n attribute must match the 'values_*' attribute chosen.
\n Let's consider an example that maps a string tensor to an integer tensor.\n Assume 'keys_strings' is [\"Amy\", \"Sally\"], 'values_int64s' is [5, 6],\n and 'default_int64' is '-1'. The input [\"Dori\", \"Amy\", \"Amy\", \"Sally\",\n \"Sally\"] would be mapped to [-1, 5, 5, 6, 6].
\n Since this operator is a one-to-one mapping, its input and output shapes\n are the same. Notice that only one of 'keys_*'/'values_*' can be set.
\n Float keys with value 'NaN' match any input 'NaN' value regardless of bit\n value. If a key is repeated, the last key takes precedence.\n", + "attributes": [ + { + "name": "default_float", + "type": "float32", + "required": false, + "description": "A float." + }, + { + "name": "default_int64", + "type": "int64", + "required": false, + "default": -1, + "description": "An integer." + }, + { + "name": "default_string", + "type": "string", + "required": false, + "default": "_Unused", + "description": "A string." + }, + { + "name": "default_tensor", + "type": "tensor", + "required": false, + "description": "A default tensor. {\"_Unused\"} if values_* has string type, {-1} if values_* has integral type, and {-0.f} if values_* has float type." + }, + { + "name": "keys_floats", + "type": "float32[]", + "required": false, + "description": "A list of floats." + }, + { + "name": "keys_int64s", + "type": "int64[]", + "required": false, + "description": "A list of ints." + }, + { + "name": "keys_strings", + "type": "string[]", + "required": false, + "description": "A list of strings." + }, + { + "name": "keys_tensor", + "type": "tensor", + "required": false, + "description": "Keys encoded as a 1D tensor. One and only one of 'keys_*'s should be set." + }, + { + "name": "values_floats", + "type": "float32[]", + "required": false, + "description": "A list of floats." + }, + { + "name": "values_int64s", + "type": "int64[]", + "required": false, + "description": "A list of ints." + }, + { + "name": "values_strings", + "type": "string[]", + "required": false, + "description": "A list of strings." + }, + { + "name": "values_tensor", + "type": "tensor", + "required": false, + "description": "Values encoded as a 1D tensor. One and only one of 'values_*'s should be set." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input data. It must have the same element type as the keys_* attribute set." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "Output data. This tensor's element type is based on the values_* attribute set." 
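The two version-4 look-up rules above can be sketched as follows (hypothetical lookup helper for illustration only; dict insertion order makes the last repeated key win, and NaN needs an explicit check because NaN != NaN):

import math

keys_floats = [1.0, float("nan"), 1.0]  # 1.0 is repeated; the last entry wins
values_int64s = [10, 20, 30]
default_int64 = -1

# Later duplicates overwrite earlier ones: {1.0: 30, nan: 20}.
table = dict(zip(keys_floats, values_int64s))

def lookup(v):
    if math.isnan(v):  # any NaN input matches a NaN key, regardless of bit pattern
        return next((out for k, out in table.items() if math.isnan(k)), default_int64)
    return table.get(v, default_int64)

print([lookup(v) for v in (1.0, float("nan"), 2.0)])  # [30, 20, -1]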
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input type is a tensor of any shape.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)", + "tensor(float)", + "tensor(int32)", + "tensor(int16)", + "tensor(double)" + ] + }, + { + "description": "Output type is determined by the specified 'values_*' attribute.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)", + "tensor(float)", + "tensor(int32)", + "tensor(int16)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "string_int_label_encoder", + "code": "node = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n default_int64=42,\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int\",\n)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, -1, 2, -1]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int_no_default\",\n)" + }, + { + "summary": "tensor_based_label_encoder", + "code": "tensor_keys = make_tensor(\n \"keys_tensor\", onnx.TensorProto.STRING, (3,), [\"a\", \"b\", \"c\"]\n)\nrepeated_string_keys = [\"a\", \"b\", \"c\"]\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int16)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_tensor=tensor_keys,\n values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n ),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_mapping\",\n)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=repeated_string_keys,\n values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n ),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_value_only_mapping\",\n)" + } + ] + }, + { + "name": "LayerNormalization", + "module": "ai.onnx", + "version": 17, + "description": "This is layer normalization defined in ONNX as function.\n The overall computation can be split into two stages.\n The first stage is standardization, which makes the\n normalized elements have zero mean and unit variances.\n The computation required by standardization can be\n described by the following equations.\n ```\n Mean = ReduceMean(X)\n D = Sub(X, Mean)\n DD = Mul(D, D)\n Var = ReduceMean(DD)\n VarEps = Add(Var, epsilon)\n StdDev = Sqrt(VarEps)\n InvStdDev = Reciprocal(StdDev)\n Normalized = Mul(D, InvStdDev)\n ```\n where `normalized_axes` is `[axis, ..., rank of X - 1]`.\n The variables `Var` and `StdDev` stand for variance and\n standard deviation, respectively. 
The second output is\n `Mean` and the last one is `InvStdDev`.\n Depending on the `stash_type` attribute, the actual computation\n must happen in different floating-point precision.\n For example, if `stash_type` is 1, this operator casts\n all input variables to 32-bit float, performs the computation, and\n finally casts `Normalized` back to the original type of `X`.\n The second stage then scales and shifts the outcome of the\n first stage using\n ```\n NormalizedScaled = Mul(Normalized, Scale)\n Y = Add(NormalizedScaled, B)\n ```\n The second stage doesn't depend on `stash_type`.\n All equations are in [this syntax](https://github.com/onnx/onnx/blob/main/docs/Syntax.md).\n The same variable (i.e., input, output, and attribute) uses\n the same name in the equations above and this operator's definition.\n Let `d[i]` indicate the i-th dimension of `X`.\n If `X`'s shape is `[d[0], ..., d[axis-1], d[axis], ..., d[rank-1]]`,\n the shape of `Mean` and `InvStdDev` is `[d[0], ..., d[axis-1], 1, ..., 1]`.\n `Y` and `X` have the same shape. This operator supports unidirectional broadcasting\n (tensors `Scale` and `B` should be unidirectional broadcastable to tensor `X`);\n for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": -1, + "description": "The first normalization dimension. If rank(X) is r, axis' allowed range is [-r, r). Negative value means counting dimensions from the back." + }, + { + "name": "epsilon", + "type": "float32", + "required": false, + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero." + }, + { + "name": "stash_type", + "type": "int64", + "required": false, + "default": 1, + "description": "Type of Mean and InvStdDev. This also specifies stage one's computation precision." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Tensor to be normalized." + }, + { + "name": "Scale", + "type": "T", + "description": "Scale tensor." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "Bias tensor." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Normalized tensor." + }, + { + "name": "Mean", + "type": "U", + "option": "optional", + "description": "Saved mean used during training to speed up gradient computation." + }, + { + "name": "InvStdDev", + "type": "U", + "option": "optional", + "description": "Saved inverse standard deviation used during training to speed up gradient computation." 
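The examples below rely on a _layer_normalization reference helper that is not reproduced in this file; a minimal NumPy sketch consistent with the two-stage equations above (with the stash_type casting omitted) could look like:

import numpy as np

def layer_normalization(X, Scale, B=None, axis=-1, epsilon=1e-5):
    # Stage 1: standardize over the normalized axes [axis, ..., rank - 1].
    norm_axes = tuple(range(axis % X.ndim, X.ndim))
    mean = X.mean(axis=norm_axes, keepdims=True)           # Mean
    D = X - mean
    var = (D * D).mean(axis=norm_axes, keepdims=True)      # Var
    inv_std_dev = 1.0 / np.sqrt(var + epsilon)             # InvStdDev
    normalized = D * inv_std_dev
    # Stage 2: scale and shift; Scale and B broadcast against X.
    Y = normalized * Scale
    if B is not None:
        Y = Y + B
    return Y, mean, inv_std_dev

The keepdims=True reductions give Mean and InvStdDev the documented shape [d[0], ..., d[axis-1], 1, ..., 1].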
+ } + ], + "min_output": 1, + "max_output": 3, + "inputs_range": "2 - 3", + "outputs_range": "1 - 3", + "type_constraints": [ + { + "description": "Constrain input types and output Y type to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Type of Mean and InvStdDev tensors.", + "type_param_str": "U", + "allowed_type_strs": [ + "tensor(float)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "d", + "code": "X = np.random.randn(3, 4).astype(np.float32)\n\ndef case(axis: int) -> None:\n normalized_shape = calculate_normalized_shape(X.shape, axis)\n W = np.random.randn(*normalized_shape).astype(np.float32)\n B = np.random.randn(*normalized_shape).astype(np.float32)\n Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis=axis)\n\n node = onnx.helper.make_node(\n \"LayerNormalization\",\n inputs=[\"X\", \"W\", \"B\"],\n outputs=[\"Y\", \"Mean\", \"InvStdDev\"],\n axis=axis,\n )\n\n if axis < 0:\n name = f\"test_layer_normalization_2d_axis_negative_{-axis}\"\n else:\n name = f\"test_layer_normalization_2d_axis{axis}\"\n\n expect(node, inputs=[X, W, B], outputs=[Y, mean, inv_std_dev], name=name)\n\nfor i in range(len(X.shape)):\n case(i)\n case(i - len(X.shape))" + }, + { + "summary": "d_epsilon", + "code": "epsilon = 1e-1\nX = np.random.randn(2, 3, 5).astype(np.float32)\n\ndef case(axis: int) -> None:\n normalized_shape = calculate_normalized_shape(X.shape, axis)\n W = np.random.randn(*normalized_shape).astype(np.float32)\n B = np.random.randn(*normalized_shape).astype(np.float32)\n Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis, epsilon)\n node = onnx.helper.make_node(\n \"LayerNormalization\",\n inputs=[\"X\", \"W\", \"B\"],\n outputs=[\"Y\", \"Mean\", \"InvStdDev\"],\n axis=axis,\n epsilon=epsilon,\n )\n\n if axis < 0:\n name = f\"test_layer_normalization_3d_axis_negative_{-axis}_epsilon\"\n else:\n name = f\"test_layer_normalization_3d_axis{axis}_epsilon\"\n\n expect(node, inputs=[X, W, B], outputs=[Y, mean, inv_std_dev], name=name)\n\nfor i in range(len(X.shape)):\n case(i)\n case(i - len(X.shape))" + }, + { + "summary": "default_axis", + "code": "X = np.random.randn(2, 3, 4, 5).astype(np.float32)\n\n# Default axis in LayerNormalization is -1.\nnormalized_shape = calculate_normalized_shape(X.shape, -1)\nW = np.random.randn(*normalized_shape).astype(np.float32)\nB = np.random.randn(*normalized_shape).astype(np.float32)\n# Axis is default to -1 in the reference implementation.\nY, mean, inv_std_dev = _layer_normalization(X, W, B)\n\n# Not specifying axis attribute means -1.\nnode = onnx.helper.make_node(\n \"LayerNormalization\",\n inputs=[\"X\", \"W\", \"B\"],\n outputs=[\"Y\", \"Mean\", \"InvStdDev\"],\n)\n\nexpect(\n node,\n inputs=[X, W, B],\n outputs=[Y, mean, inv_std_dev],\n name=\"test_layer_normalization_default_axis\",\n)" + }, + { + "summary": "layernormalization", + "code": "X = np.random.randn(2, 3, 4, 5).astype(np.float32)\n\ndef case(axis: int) -> None:\n normalized_shape = calculate_normalized_shape(X.shape, axis)\n W = np.random.randn(*normalized_shape).astype(np.float32)\n B = np.random.randn(*normalized_shape).astype(np.float32)\n Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis)\n\n node = onnx.helper.make_node(\n \"LayerNormalization\",\n inputs=[\"X\", \"W\", \"B\"],\n outputs=[\"Y\", \"Mean\", \"InvStdDev\"],\n axis=axis,\n )\n\n if axis < 0:\n name = 
f\"test_layer_normalization_4d_axis_negative_{-axis}\"\n else:\n name = f\"test_layer_normalization_4d_axis{axis}\"\n\n expect(node, inputs=[X, W, B], outputs=[Y, mean, inv_std_dev], name=name)\n\nfor i in range(len(X.shape)):\n case(i)\n case(i - len(X.shape))" + } + ] + }, + { + "name": "LeakyRelu", + "module": "ai.onnx", + "version": 1, + "description": "LeakyRelu takes input data (Tensor) and an argument alpha, and produces one\noutput data (Tensor) where the function `f(x) = alpha * x for x < 0`,\n`f(x) = x for x >= 0`, is applied to the data tensor elementwise.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 0.009999999776482582, + "description": "Coefficient of leakage default to 0.01." + }, + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "leakyrelu", + "code": "node = onnx.helper.make_node(\n \"LeakyRelu\", inputs=[\"x\"], outputs=[\"y\"], alpha=0.1\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-0.1, 0., 1.]\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1\nexpect(node, inputs=[x], outputs=[y], name=\"test_leakyrelu_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1\nexpect(node, inputs=[x], outputs=[y], name=\"test_leakyrelu\")" + }, + { + "summary": "leakyrelu_default", + "code": "default_alpha = 0.01\nnode = onnx.helper.make_node(\n \"LeakyRelu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * default_alpha\nexpect(node, inputs=[x], outputs=[y], name=\"test_leakyrelu_default\")" + } + ], + "category": "Activation" + }, + { + "name": "LeakyRelu", + "module": "ai.onnx", + "version": 6, + "description": "LeakyRelu takes input data (Tensor) and an argument alpha, and produces one\noutput data (Tensor) where the function `f(x) = alpha * x for x < 0`,\n`f(x) = x for x >= 0`, is applied to the data tensor elementwise.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 0.009999999776482582, + "description": "Coefficient of leakage." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "leakyrelu", + "code": "node = onnx.helper.make_node(\n \"LeakyRelu\", inputs=[\"x\"], outputs=[\"y\"], alpha=0.1\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-0.1, 0., 1.]\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1\nexpect(node, inputs=[x], outputs=[y], name=\"test_leakyrelu_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1\nexpect(node, inputs=[x], outputs=[y], name=\"test_leakyrelu\")" + }, + { + "summary": "leakyrelu_default", + "code": "default_alpha = 0.01\nnode = onnx.helper.make_node(\n \"LeakyRelu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * default_alpha\nexpect(node, inputs=[x], outputs=[y], name=\"test_leakyrelu_default\")" + } + ], + "category": "Activation" + }, + { + "name": "LeakyRelu", + "module": "ai.onnx", + "version": 16, + "description": "LeakyRelu takes input data (Tensor) and an argument alpha, and produces one\noutput data (Tensor) where the function `f(x) = alpha * x for x < 0`,\n`f(x) = x for x >= 0`, is applied to the data tensor elementwise.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 0.009999999776482582, + "description": "Coefficient of leakage." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "leakyrelu", + "code": "node = onnx.helper.make_node(\n \"LeakyRelu\", inputs=[\"x\"], outputs=[\"y\"], alpha=0.1\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-0.1, 0., 1.]\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1\nexpect(node, inputs=[x], outputs=[y], name=\"test_leakyrelu_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1\nexpect(node, inputs=[x], outputs=[y], name=\"test_leakyrelu\")" + }, + { + "summary": "leakyrelu_default", + "code": "default_alpha = 0.01\nnode = onnx.helper.make_node(\n \"LeakyRelu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * default_alpha\nexpect(node, inputs=[x], outputs=[y], name=\"test_leakyrelu_default\")" + } + ], + "category": "Activation" + }, + { + "name": "Less", + "module": "ai.onnx", + "version": 1, + "description": "Returns the tensor resulted from performing the `less` logical operation\nelementwise on the input tensors `A` and `B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Enable broadcasting" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Left input tensor for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Right input tensor for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "less", + "code": "node = onnx.helper.make_node(\n \"Less\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less\")" + }, + { + "summary": "less", + "code": "node = onnx.helper.make_node(\n \"LessOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_equal\")" + }, + { + "summary": "less_broadcast", + "code": "node = onnx.helper.make_node(\n \"Less\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_bcast\")" + }, + { + "summary": "less_broadcast", + "code": "node = onnx.helper.make_node(\n \"LessOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_equal_bcast\")" + } + ] + }, + { + "name": "Less", + "module": "ai.onnx", + "version": 7, + "description": "Returns the tensor resulting from performing the `less` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "less", + "code": "node = onnx.helper.make_node(\n \"Less\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less\")" + }, + { + "summary": "less", + "code": "node = onnx.helper.make_node(\n \"LessOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_equal\")" + }, + { + "summary": "less_broadcast", + "code": "node = onnx.helper.make_node(\n \"Less\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_bcast\")" + }, + { + "summary": "less_broadcast", + "code": "node = onnx.helper.make_node(\n \"LessOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_equal_bcast\")" + } + ] + }, + { + "name": "Less", + "module": "ai.onnx", + "version": 9, + "description": "Returns the tensor resulting from performing the `less` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "less", + "code": "node = onnx.helper.make_node(\n \"Less\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less\")" + }, + { + "summary": "less", + "code": "node = onnx.helper.make_node(\n \"LessOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_equal\")" + }, + { + "summary": "less_broadcast", + "code": "node = onnx.helper.make_node(\n \"Less\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_bcast\")" + }, + { + "summary": "less_broadcast", + "code": "node = onnx.helper.make_node(\n \"LessOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_equal_bcast\")" + } + ] + }, + { + "name": "Less", + "module": "ai.onnx", + "version": 13, + "description": "Returns the tensor resulted from performing the `less` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "less", + "code": "node = onnx.helper.make_node(\n \"Less\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less\")" + }, + { + "summary": "less", + "code": "node = onnx.helper.make_node(\n \"LessOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_equal\")" + }, + { + "summary": "less_broadcast", + "code": "node = onnx.helper.make_node(\n \"Less\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_bcast\")" + }, + { + "summary": "less_broadcast", + "code": "node = onnx.helper.make_node(\n \"LessOrEqual\",\n inputs=[\"x\", \"y\"],\n outputs=[\"less_equal\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_less_equal_bcast\")" + } + ] + }, + { + "name": "LessOrEqual", + "module": "ai.onnx", + "version": 12, + "description": "Returns the tensor resulted from performing the `less_equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ] + }, + { + "name": "LessOrEqual", + "module": "ai.onnx", + "version": 16, + "description": "Returns the tensor resulted from performing the `less_equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ] + }, + { + "name": "LinearClassifier", + "module": "ai.onnx.ml", + "version": 1, + "description": "Linear classifier\n", + "attributes": [ + { + "name": "classlabels_ints", + "type": "int64[]", + "required": false, + "description": "Class labels when using integer labels. One and only one 'classlabels' attribute must be defined." + }, + { + "name": "classlabels_strings", + "type": "string[]", + "required": false, + "description": "Class labels when using string labels. One and only one 'classlabels' attribute must be defined." + }, + { + "name": "coefficients", + "type": "float32[]", + "required": true, + "description": "A collection of weights of the model(s)." + }, + { + "name": "intercepts", + "type": "float32[]", + "required": false, + "description": "A collection of intercepts." + }, + { + "name": "multi_class", + "type": "int64", + "required": false, + "description": "Indicates whether to do OvR or multinomial (0=OvR is the default)." + }, + { + "name": "post_transform", + "type": "string", + "required": false, + "default": "NONE", + "description": "Indicates the transform to apply to the scores vector.
One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT'" + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Data to be classified." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "Classification outputs (one class per example)." + }, + { + "name": "Z", + "type": "tensor(float)", + "description": "Classification scores ([N,E] - one score for each class and example)." + } + ], + "min_output": 2, + "max_output": 2, + "type_constraints": [ + { + "description": "The input must be a tensor of a numeric type, and of shape [N,C] or [C]. In the latter case, it will be treated as [1,C]", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + }, + { + "description": "The output will be a tensor of strings or integers.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ] + } + ] + }, + { + "name": "LinearRegressor", + "module": "ai.onnx.ml", + "version": 1, + "description": "Generalized linear regression evaluation.
\n If targets is set to 1 (default) then univariate regression is performed.
\n If targets is set to M then M sets of coefficients must be passed in as a sequence\n and M results will be output for each input n in N.
\n The coefficients array is of length n, and the coefficients for each target are contiguous.\n Intercepts are optional but if provided must match the number of targets.\n", + "attributes": [ + { + "name": "coefficients", + "type": "float32[]", + "required": false, + "description": "Weights of the model(s)." + }, + { + "name": "intercepts", + "type": "float32[]", + "required": false, + "description": "Weights of the intercepts, if used." + }, + { + "name": "post_transform", + "type": "string", + "required": false, + "default": "NONE", + "description": "Indicates the transform to apply to the regression output vector.
One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT'" + }, + { + "name": "targets", + "type": "int64", + "required": false, + "default": 1, + "description": "The total number of regression targets, 1 if not defined." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Data to be regressed." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "tensor(float)", + "description": "Regression outputs (one per target, per example)." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input must be a tensor of a numeric type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + } + ] + }, + { + "name": "Log", + "module": "ai.onnx", + "version": 1, + "description": "Calculates the natural log of the given input tensor, element-wise.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The natural log of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "log", + "code": "node = onnx.helper.make_node(\n \"Log\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([1, 10]).astype(np.float32)\ny = np.log(x) # expected output [0., 2.30258512]\nexpect(node, inputs=[x], outputs=[y], name=\"test_log_example\")\n\nx = np.exp(np.random.randn(3, 4, 5).astype(np.float32))\ny = np.log(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_log\")" + } + ] + }, + { + "name": "Log", + "module": "ai.onnx", + "version": 6, + "description": "Calculates the natural log of the given input tensor, element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The natural log of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "log", + "code": "node = onnx.helper.make_node(\n \"Log\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([1, 10]).astype(np.float32)\ny = np.log(x) # expected output [0., 2.30258512]\nexpect(node, inputs=[x], outputs=[y], name=\"test_log_example\")\n\nx = np.exp(np.random.randn(3, 4, 5).astype(np.float32))\ny = np.log(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_log\")" + } + ] + }, + { + "name": "Log", + "module": "ai.onnx", + "version": 13, + "description": "Calculates the natural log of the given input tensor, element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The natural log of the input 
tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "log", + "code": "node = onnx.helper.make_node(\n \"Log\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([1, 10]).astype(np.float32)\ny = np.log(x) # expected output [0., 2.30258512]\nexpect(node, inputs=[x], outputs=[y], name=\"test_log_example\")\n\nx = np.exp(np.random.randn(3, 4, 5).astype(np.float32))\ny = np.log(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_log\")" + } + ] + }, + { + "name": "LogSoftmax", + "module": "ai.onnx", + "version": 1, + "description": "The operator computes the logsoftmax (log of softmax) values for each layer in the batch\n of the given input. The input is a 2-D tensor (Tensor) of size\n(batch_size x input_feature_dimensions). The output tensor has the same shape\nand contains the logsoftmax values of the corresponding input.\n\nInput does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size" + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output values with the same shape as input tensor (the original size without coercion)." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "logsoftmax", + "code": "node = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.array([[-1, 0, 1]]).astype(np.float32)\n# expected output\n# [[-2.4076061 -1.407606 -0.407606 ]]\ny = logsoftmax(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_example_1\")" + }, + { + "summary": "logsoftmax_axis", + "code": "x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)\n# expected output\n# [[-3.4401896 -2.4401896 -1.4401896 -0.44018966]\n# [-3.4401896 -2.4401896 -1.4401896 -0.44018966]]\ny = logsoftmax(x)\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_large_number\")\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=0,\n)\ny = logsoftmax(x, axis=0)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_axis_0\")\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=1,\n)\ny = logsoftmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_axis_1\")\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=2,\n)\ny = logsoftmax(x, axis=2)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_axis_2\")\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=-1,\n)\ny = logsoftmax(x, axis=-1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_negative_axis\")\n\n# default axis is -1\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_default_axis\")" + } + ], + "category": "Activation" + }, + { + "name": "LogSoftmax", + "module": "ai.onnx", + "version": 11, + "description": "The operator computes the logsoftmax (log of softmax) values for each layer in the batch\n of the given input.\n\nThe input does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors. The output tensor has the same shape\nand contains the logsoftmax values of the corresponding input.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." 
+ } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output values with the same shape as input tensor (the original size without coercion)." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "logsoftmax", + "code": "node = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.array([[-1, 0, 1]]).astype(np.float32)\n# expected output\n# [[-2.4076061 -1.407606 -0.407606 ]]\ny = logsoftmax(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_example_1\")" + }, + { + "summary": "logsoftmax_axis", + "code": "x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)\n# expected output\n# [[-3.4401896 -2.4401896 -1.4401896 -0.44018966]\n# [-3.4401896 -2.4401896 -1.4401896 -0.44018966]]\ny = logsoftmax(x)\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_large_number\")\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=0,\n)\ny = logsoftmax(x, axis=0)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_axis_0\")\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=1,\n)\ny = logsoftmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_axis_1\")\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=2,\n)\ny = logsoftmax(x, axis=2)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_axis_2\")\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=-1,\n)\ny = logsoftmax(x, axis=-1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_negative_axis\")\n\n# default axis is -1\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_default_axis\")" + } + ], + "category": "Activation" + }, + { + "name": "LogSoftmax", + "module": "ai.onnx", + "version": 13, + "description": "The operator computes the log of softmax values for the given input:\n\n LogSoftmax(input, axis) = Log(Softmax(input, axis=axis))\n\nThe \"axis\" attribute indicates the dimension along which LogSoftmax\nwill be performed. The output tensor has the same shape\nand contains the LogSoftmax values of the corresponding input.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": -1, + "description": "\nDescribes the dimension LogSoftmax will be performed on.\nNegative value means counting dimensions\nfrom the back. Accepted range is [-r, r-1] where r = rank(input).\n" + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The input tensor of rank >= axis." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output values with the same shape as the input tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "logsoftmax", + "code": "node = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.array([[-1, 0, 1]]).astype(np.float32)\n# expected output\n# [[-2.4076061 -1.407606 -0.407606 ]]\ny = logsoftmax(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_example_1\")" + }, + { + "summary": "logsoftmax_axis", + "code": "x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)\n# expected output\n# [[-3.4401896 -2.4401896 -1.4401896 -0.44018966]\n# [-3.4401896 -2.4401896 -1.4401896 -0.44018966]]\ny = logsoftmax(x)\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_large_number\")\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=0,\n)\ny = logsoftmax(x, axis=0)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_axis_0\")\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=1,\n)\ny = logsoftmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_axis_1\")\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=2,\n)\ny = logsoftmax(x, axis=2)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_axis_2\")\n\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=-1,\n)\ny = logsoftmax(x, axis=-1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_negative_axis\")\n\n# default axis is -1\nnode = onnx.helper.make_node(\n \"LogSoftmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_logsoftmax_default_axis\")" + } + ], + "category": "Activation" + }, + { + "name": "Loop", + "module": "ai.onnx", + "version": 1, + "description": "Generic Looping construct. This loop has multiple termination conditions:\n\n1) Trip count. Iteration count specified at runtime. Set by\n specifying the input M. Optional. Set to empty string to omit.\n Note that a static trip count (specified at graph construction time) can be\n specified by passing in a constant node for input M.\n2) Loop termination condition. This is an input to the op that determines\n whether to run the first iteration and also a loop-carried dependency for\n the body graph. The body graph must yield a value for the condition variable,\n whether this input is provided or not.\n\nThis table summarizes the operating modes of this operator with equivalent\nC-style code:\n\n Operator inputs defined as (max_trip_count, condition_var).\n\n input (\"\", \"\"):\n for (int i=0; ; ++i) {\n cond = ... 
// Note this value is ignored, but is required in the body\n }\n\n input (\"\", cond) // Note this is analogous to a while loop\n bool cond = ...;\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (\"\", 1) // Note this is analogous to a do-while loop\n bool cond = true\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (trip_count, \"\") // Note this is analogous to a for loop\n int trip_count = ...\n for (int i=0; i < trip_count; ++i) {\n cond = ...; // ignored\n }\n\n input (trip_count, cond)\n int trip_count = ...;\n bool cond = ...;\n for (int i=0; i < trip_count && cond; ++i) {\n cond = ...;\n }\n\n\n*Sample usage - cond as well as trip count*\n\n graph predict-net {\n %a = Constant[value = ]()\n %b = Constant[value = ]()\n %keepgoing = Constant[value = ]()\n %max_trip_count = Constant[value = ]()\n %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b)\n return\n }\n\n graph body-net (\n %i[INT32, scalar]\n %keepgoing[BOOL, scalar]\n %b[INT32, scalar]\n ) {\n %my_local = Add(%a, %b)\n %b_out = Sub(%a, %b)\n %keepgoing_out = Greater(%my_local, %b_out)\n %user_defined_vals = Add(%b, %b)\n return %keepgoing_out, %b_out, %user_defined_vals\n }\n\n*Sample equivalent C code*\n\n {\n /* User-defined code (enclosing scope) */\n int a = 3, b = 6;\n bool keepgoing = true; // Analogous to input cond\n /* End user-defined code */\n\n /* Implicitly-defined code */\n const int max_trip_count = 10; // Analogous to input M\n int user_defined_vals[]; // Imagine this is resizable\n /* End implicitly-defined code */\n for (int i=0; i < max_trip_count && keepgoing; ++i) {\n /* User-defined code (loop body) */\n int my_local = a + b; // Reading values in the enclosing scope is fine\n b = a - b; // writes fine if we specify b as a loop-carried dependency\n keepgoing = my_local > b; // keepgoing is a loop-carried dependency\n user_defined_vals[i] = b + b;\n /* End user-defined code */\n }\n // my_local = 123; // Can't do this. my_local was defined in the body\n\n // These below values are live-out from the loop and therefore accessible\n b_out; user_defined_vals; keepgoing_out;\n }\n\nThere are several things of note in this code snippet:\n\n1) Values from the enclosing scope (i.e. variable a here) are in scope and can\n be referenced in the inputs of the loop.\n2) Any variables which you wish to make available in the enclosing scope (i.e.\n the variables b and keepgoing) must be declared as either loop-carried\n dependencies (both at the op inputs and output and at the body net input and\n output) or scan_outputs.\n3) Values created in the body cannot be accessed in the enclosing scope.\n\nNote that the semantics of this op support \"diagonal\" or \"wavefront\" execution.\n(See Step 3 here for an example:\nhttps://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/).\nFrontends should emit multi-layer RNNs as a series of While operators (with\ntime being the inner looping dimension), with each successive layer consuming\nthe scan_outputs from the previous layer, possibly going through several\npoint-wise operators (e.g. dropout, residual connections, linear layer).\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has 2+N inputs: (iteration_num, condition, loop carried dependencies...). It has 1+N+K outputs: (condition, loop carried dependencies..., scan_outputs...). 
Each scan_output is created by concatenating the value of the specified output value at the end of each iteration of the loop. It is an error if the dimensions or data type of these scan_outputs change across loop iterations." + } + ], + "inputs": [ + { + "name": "M", + "type": "I", + "option": "optional", + "description": "A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip." + }, + { + "name": "cond", + "type": "B", + "option": "optional", + "description": "A boolean termination condition. Optional. Pass empty string to skip." + }, + { + "name": "v_initial", + "type": "V", + "list": true, + "description": "The initial values of any loop-carried dependencies (values that change across loop iterations)" + } + ], + "min_input": 3, + "max_input": 2147483647, + "outputs": [ + { + "name": "v_final_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final N loop carried dependency values then K scan_outputs" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "3 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor types", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "tensor of int64, which should be a scalar.", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "tensor of bool, which should be a scalar.", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "loop_11", + "code": "# Given a tensor x of values [x1, ..., xN], and initial tensor y\n# sum up its elements using a scan\n# returning the final state (y+x1+x2+...+xN) as well the scan_output\n# [y+x1, y+x1+x2, ..., y+x1+x2+...+xN]\n\ny_in = onnx.helper.make_tensor_value_info(\"y_in\", onnx.TensorProto.FLOAT, [1])\ny_out = onnx.helper.make_tensor_value_info(\"y_out\", onnx.TensorProto.FLOAT, [1])\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [1]\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([-2]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\ni_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nstart_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"iter_count\"], outputs=[\"slice_start\"], axes=[0]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\"], outputs=[\"slice_end\"], axes=[0]\n)\n\nslice_node = 
onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ny_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"y_in\", \"slice_out\"], outputs=[\"y_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nscan_identity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"y_out\"], outputs=[\"scan_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n i_add_node,\n start_unsqueeze_node,\n end_unsqueeze_node,\n slice_node,\n y_add_node,\n scan_identity_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, y_in],\n [cond_out, y_out, scan_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"y\"],\n outputs=[\"res_y\", \"res_scan\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nres_y = np.array([13]).astype(np.float32)\ncond = np.array(1).astype(bool)\nres_scan = np.array([-1, 1, 4, 8, 13]).astype(np.float32).reshape((5, 1))\nexpect(\n node,\n inputs=[trip_count, cond, y],\n outputs=[res_y, res_scan],\n name=\"test_loop11\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "loop_13", + "code": "# Given a tensor x of values [x1, ..., xN],\n# Return a sequence of tensors of\n# [[x1], [x1, x2], ..., [x1, ..., xN]]\n\nseq_in = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, None\n)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, None\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n ),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"seq_in\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n 
axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, seq_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"seq_empty\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nseq_empty: List[Any] = []\nseq_res = [x[: int(i)] for i in x]\ncond = np.array(1).astype(bool)\nexpect(\n node,\n inputs=[trip_count, cond, seq_empty],\n outputs=[seq_res],\n name=\"test_loop13_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\n ),\n ],\n)" + }, + { + "summary": "loop_16_none", + "code": "# Given a tensor sequence of values [x1, ..., xN], and an initial optional sequence of tensors [x0],\n# Return a concatenated sequence of tensors of\n# [x0, [x1], [x1, x2], ..., [x1, ..., xN]]\n\nten_in_tp = onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\nopt_in = onnx.helper.make_value_info(\"opt_seq_in\", opt_in_tp)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, []\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx0 = np.array(0).astype(np.float32)\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\noptional_has_elem_node = onnx.helper.make_node(\n \"OptionalHasElement\", inputs=[\"opt_seq_in\"], outputs=[\"optional_has_elem\"]\n)\n\noptional_is_none = onnx.helper.make_node(\n \"Not\", inputs=[\"optional_has_elem\"], outputs=[\"optional_is_none\"]\n)\n\noptional_get_elem = onnx.helper.make_node(\n \"OptionalGetElement\", inputs=[\"opt_seq_in\"], outputs=[\"seq_in\"]\n)\n\nconstant_in = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"constant_in\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\", data_type=onnx.TensorProto.FLOAT, dims=(), vals=[0]\n ),\n)\n\nseq_const_in = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"constant_in\"], outputs=[\"init_seq_in\"]\n)\n\nthen_seq_out = onnx.helper.make_tensor_sequence_value_info(\n \"init_seq_in\", onnx.TensorProto.FLOAT, []\n)\nthen_body = onnx.helper.make_graph(\n [constant_in, seq_const_in], \"then_body\", [], [then_seq_out]\n)\n\nelse_seq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, []\n)\nelse_body = onnx.helper.make_graph(\n [optional_get_elem], \"else_body\", [], [else_seq_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"optional_is_none\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n 
name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n ),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"sequence\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n optional_has_elem_node,\n optional_is_none,\n if_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, opt_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"opt_seq\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\ncond = np.array(1).astype(bool)\nseq_res = compute_loop_outputs(x, [x0], trip_count)\nopt_seq_in: List[Any] = [x0]\nexpect(\n node,\n inputs=[trip_count, cond, opt_seq_in],\n outputs=[seq_res],\n name=\"test_loop16_seq_none\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n opt_in_tp,\n ],\n)" + } + ] + }, + { + "name": "Loop", + "module": "ai.onnx", + "version": 11, + "description": "Generic Looping construct. This loop has multiple termination conditions:\n\n1) Trip count. Iteration count specified at runtime. Set by\n specifying the input M. Optional. Set to empty string to omit.\n Note that a static trip count (specified at graph construction time) can be\n specified by passing in a constant node for input M.\n2) Loop termination condition. This is an input to the op that determines\n whether to run the first iteration and also a loop-carried dependency for\n the body graph. The body graph must yield a value for the condition variable,\n whether this input is provided or not.\n\nThis table summarizes the operating modes of this operator with equivalent\nC-style code:\n\n Operator inputs defined as (max_trip_count, condition_var).\n\n input (\"\", \"\"):\n for (int i=0; ; ++i) {\n cond = ... 
// Note this value is ignored, but is required in the body\n }\n\n input (\"\", cond) // Note this is analogous to a while loop\n bool cond = ...;\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (\"\", 1) // Note this is analogous to a do-while loop\n bool cond = true\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (trip_count, \"\") // Note this is analogous to a for loop\n int trip_count = ...\n for (int i=0; i < trip_count; ++i) {\n cond = ...; // ignored\n }\n\n input (trip_count, cond)\n int trip_count = ...;\n bool cond = ...;\n for (int i=0; i < trip_count && cond; ++i) {\n cond = ...;\n }\n\n\n*Sample usage - cond as well as trip count*\n\n graph predict-net {\n %a = Constant[value = ]()\n %b = Constant[value = ]()\n %keepgoing = Constant[value = ]()\n %max_trip_count = Constant[value = ]()\n %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b)\n return\n }\n\n graph body-net (\n %i[INT32, scalar] // iteration number\n %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used\n %b_in[INT32, scalar] // incoming value of loop-carried-dependency b\n ) {\n %my_local = Add(%a, %b_in)\n %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b\n %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition\n %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated\n return %keepgoing_out, %b_out, %user_defined_val\n }\n\n*Sample equivalent C code*\n\n {\n /* User-defined code (enclosing scope) */\n int a = 3, b = 6;\n bool keepgoing = true; // Analogous to input cond\n /* End user-defined code */\n\n /* Implicitly-defined code */\n const int max_trip_count = 10; // Analogous to input M\n int user_defined_vals[]; // Imagine this is resizable\n /* End implicitly-defined code */\n /* initialize loop-carried variables and scan-output variables */\n bool keepgoing_out = keepgoing\n int b_out = b\n\n for (int i=0; i < max_trip_count && keepgoing_out; ++i) {\n /* Implicitly-defined code: bind actual parameter values\n to formal parameter variables of loop-body */\n bool keepgoing_in = keepgoing_out;\n bool b_in = b_out;\n\n /* User-defined code (loop body) */\n int my_local = a + b_in; // Reading value \"a\" from the enclosing scope is fine\n b_out = a - b_in;\n keepgoing_out = my_local > b_out;\n user_defined_val = b_in + b_in; // b_in and b_out are different variables\n /* End user-defined code */\n\n /* Implicitly defined-code */\n user_defined_vals[i] = user_defined_val // accumulate scan-output values\n }\n // int t = my_local; // Can't do this. my_local is not accessible here.\n\n // The values below are bound to the output variables of the loop and therefore accessible\n // b_out; user_defined_vals; keepgoing_out;\n }\n\nThere are several things of note in this code snippet:\n\n1) Values from the enclosing scope (i.e. variable \"a\" here) are in scope and can\n be referenced in the inputs of the loop.\n2) Any values computed in the loop body that needs to be used in a subsequent\n iteration or after the loop are modelled using a pair of variables in the loop-body,\n consisting of an input variable (eg., b_in) and an output variable (eg., b_out).\n These are referred to as loop-carried dependences. 
The loop operation node\n supplies the input value of the input variable for the first iteration, and\n returns the output value of the output variable produced by the final\n iteration.\n3) Scan_output variables are used to implicitly concatenate values computed across\n all the iterations. In the above example, the value of user_defined_val computed\n over all iterations are concatenated and returned as the value of user_defined_vals\n after the loop.\n4) Values created in the body cannot be accessed in the enclosing scope,\n except using the mechanism described above.\n\nNote that the semantics of this op support \"diagonal\" or \"wavefront\" execution.\n(See Step 3 here for an example:\nhttps://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/).\nFrontends should emit multi-layer RNNs as a series of While operators (with\ntime being the inner looping dimension), with each successive layer consuming\nthe scan_outputs from the previous layer, possibly going through several\npoint-wise operators (e.g. dropout, residual connections, linear layer).\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has 2+N inputs: (iteration_num, condition, loop carried dependencies...). It has 1+N+K outputs: (condition, loop carried dependencies..., scan_outputs...). Each scan_output is created by concatenating the value of the specified output value at the end of each iteration of the loop. It is an error if the dimensions or data type of these scan_outputs change across loop iterations." + } + ], + "inputs": [ + { + "name": "M", + "type": "I", + "option": "optional", + "description": "A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip." + }, + { + "name": "cond", + "type": "B", + "option": "optional", + "description": "A boolean termination condition. Optional. Pass empty string to skip." 
+ }, + { + "name": "v_initial", + "type": "V", + "list": true, + "description": "The initial values of any loop-carried dependencies (values that change across loop iterations)" + } + ], + "min_input": 2, + "max_input": 2147483647, + "outputs": [ + { + "name": "v_final_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final N loop carried dependency values then K scan_outputs" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "2 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor types", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "tensor of int64, which should be a scalar.", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "tensor of bool, which should be a scalar.", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "loop_11", + "code": "# Given a tensor x of values [x1, ..., xN], and initial tensor y\n# sum up its elements using a scan\n# returning the final state (y+x1+x2+...+xN) as well the scan_output\n# [y+x1, y+x1+x2, ..., y+x1+x2+...+xN]\n\ny_in = onnx.helper.make_tensor_value_info(\"y_in\", onnx.TensorProto.FLOAT, [1])\ny_out = onnx.helper.make_tensor_value_info(\"y_out\", onnx.TensorProto.FLOAT, [1])\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [1]\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([-2]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\ni_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nstart_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"iter_count\"], outputs=[\"slice_start\"], axes=[0]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\"], outputs=[\"slice_end\"], axes=[0]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ny_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"y_in\", \"slice_out\"], outputs=[\"y_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nscan_identity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"y_out\"], outputs=[\"scan_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n i_add_node,\n start_unsqueeze_node,\n end_unsqueeze_node,\n 
slice_node,\n y_add_node,\n scan_identity_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, y_in],\n [cond_out, y_out, scan_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"y\"],\n outputs=[\"res_y\", \"res_scan\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nres_y = np.array([13]).astype(np.float32)\ncond = np.array(1).astype(bool)\nres_scan = np.array([-1, 1, 4, 8, 13]).astype(np.float32).reshape((5, 1))\nexpect(\n node,\n inputs=[trip_count, cond, y],\n outputs=[res_y, res_scan],\n name=\"test_loop11\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "loop_13", + "code": "# Given a tensor x of values [x1, ..., xN],\n# Return a sequence of tensors of\n# [[x1], [x1, x2], ..., [x1, ..., xN]]\n\nseq_in = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, None\n)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, None\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n ),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"seq_in\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, seq_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"seq_empty\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nseq_empty: List[Any] = []\nseq_res = [x[: int(i)] for i in x]\ncond = np.array(1).astype(bool)\nexpect(\n node,\n inputs=[trip_count, cond, seq_empty],\n outputs=[seq_res],\n name=\"test_loop13_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n 
input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\n ),\n ],\n)" + }, + { + "summary": "loop_16_none", + "code": "# Given a tensor sequence of values [x1, ..., xN], and an initial optional sequence of tensors [x0],\n# Return a concatenated sequence of tensors of\n# [x0, [x1], [x1, x2], ..., [x1, ..., xN]]\n\nten_in_tp = onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\nopt_in = onnx.helper.make_value_info(\"opt_seq_in\", opt_in_tp)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, []\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx0 = np.array(0).astype(np.float32)\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\noptional_has_elem_node = onnx.helper.make_node(\n \"OptionalHasElement\", inputs=[\"opt_seq_in\"], outputs=[\"optional_has_elem\"]\n)\n\noptional_is_none = onnx.helper.make_node(\n \"Not\", inputs=[\"optional_has_elem\"], outputs=[\"optional_is_none\"]\n)\n\noptional_get_elem = onnx.helper.make_node(\n \"OptionalGetElement\", inputs=[\"opt_seq_in\"], outputs=[\"seq_in\"]\n)\n\nconstant_in = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"constant_in\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\", data_type=onnx.TensorProto.FLOAT, dims=(), vals=[0]\n ),\n)\n\nseq_const_in = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"constant_in\"], outputs=[\"init_seq_in\"]\n)\n\nthen_seq_out = onnx.helper.make_tensor_sequence_value_info(\n \"init_seq_in\", onnx.TensorProto.FLOAT, []\n)\nthen_body = onnx.helper.make_graph(\n [constant_in, seq_const_in], \"then_body\", [], [then_seq_out]\n)\n\nelse_seq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, []\n)\nelse_body = onnx.helper.make_graph(\n [optional_get_elem], \"else_body\", [], [else_seq_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"optional_is_none\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n ),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", 
inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"sequence\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n optional_has_elem_node,\n optional_is_none,\n if_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, opt_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"opt_seq\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\ncond = np.array(1).astype(bool)\nseq_res = compute_loop_outputs(x, [x0], trip_count)\nopt_seq_in: List[Any] = [x0]\nexpect(\n node,\n inputs=[trip_count, cond, opt_seq_in],\n outputs=[seq_res],\n name=\"test_loop16_seq_none\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n opt_in_tp,\n ],\n)" + } + ] + }, + { + "name": "Loop", + "module": "ai.onnx", + "version": 13, + "description": "Generic Looping construct. This loop has multiple termination conditions:\n\n1) Trip count. Iteration count specified at runtime. Set by\n specifying the input M. Optional. Set to empty string to omit.\n Note that a static trip count (specified at graph construction time) can be\n specified by passing in a constant node for input M.\n2) Loop termination condition. This is an input to the op that determines\n whether to run the first iteration and also a loop-carried dependency for\n the body graph. The body graph must yield a value for the condition variable,\n whether this input is provided or not.\n\nThis table summarizes the operating modes of this operator with equivalent\nC-style code:\n\n Operator inputs defined as (max_trip_count, condition_var).\n\n input (\"\", \"\"):\n for (int i=0; ; ++i) {\n cond = ... 
// Note this value is ignored, but is required in the body\n }\n\n input (\"\", cond) // Note this is analogous to a while loop\n bool cond = ...;\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (\"\", 1) // Note this is analogous to a do-while loop\n bool cond = true\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (trip_count, \"\") // Note this is analogous to a for loop\n int trip_count = ...\n for (int i=0; i < trip_count; ++i) {\n cond = ...; // ignored\n }\n\n input (trip_count, cond)\n int trip_count = ...;\n bool cond = ...;\n for (int i=0; i < trip_count && cond; ++i) {\n cond = ...;\n }\n\n\n*Sample usage - cond as well as trip count*\n\n graph predict-net {\n %a = Constant[value = ]()\n %b = Constant[value = ]()\n %keepgoing = Constant[value = ]()\n %max_trip_count = Constant[value = ]()\n %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b)\n return\n }\n\n graph body-net (\n %i[INT32, scalar] // iteration number\n %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used\n %b_in[INT32, scalar] // incoming value of loop-carried-dependency b\n ) {\n %my_local = Add(%a, %b_in)\n %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b\n %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition\n %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated\n return %keepgoing_out, %b_out, %user_defined_val\n }\n\n*Sample equivalent C code*\n\n {\n /* User-defined code (enclosing scope) */\n int a = 3, b = 6;\n bool keepgoing = true; // Analogous to input cond\n /* End user-defined code */\n\n /* Implicitly-defined code */\n const int max_trip_count = 10; // Analogous to input M\n int user_defined_vals[]; // Imagine this is resizable\n /* End implicitly-defined code */\n /* initialize loop-carried variables and scan-output variables */\n bool keepgoing_out = keepgoing\n int b_out = b\n\n for (int i=0; i < max_trip_count && keepgoing_out; ++i) {\n /* Implicitly-defined code: bind actual parameter values\n to formal parameter variables of loop-body */\n bool keepgoing_in = keepgoing_out;\n bool b_in = b_out;\n\n /* User-defined code (loop body) */\n int my_local = a + b_in; // Reading value \"a\" from the enclosing scope is fine\n b_out = a - b_in;\n keepgoing_out = my_local > b_out;\n user_defined_val = b_in + b_in; // b_in and b_out are different variables\n /* End user-defined code */\n\n /* Implicitly defined-code */\n user_defined_vals[i] = user_defined_val // accumulate scan-output values\n }\n // int t = my_local; // Can't do this. my_local is not accessible here.\n\n // The values below are bound to the output variables of the loop and therefore accessible\n // b_out; user_defined_vals; keepgoing_out;\n }\n\nThere are several things of note in this code snippet:\n\n1) Values from the enclosing scope (i.e. variable \"a\" here) are in scope and can\n be referenced in the inputs of the loop.\n2) Any values computed in the loop body that need to be used in a subsequent\n iteration or after the loop are modelled using a pair of variables in the loop-body,\n consisting of an input variable (e.g., b_in) and an output variable (e.g., b_out).\n These are referred to as loop-carried dependencies. 
The loop operation node\n supplies the input value of the input variable for the first iteration, and\n returns the output value of the output variable produced by the final\n iteration.\n3) Scan_output variables are used to implicitly concatenate values computed across\n all the iterations. In the above example, the values of user_defined_val computed\n over all iterations are concatenated and returned as the value of user_defined_vals\n after the loop.\n4) Values created in the body cannot be accessed in the enclosing scope,\n except using the mechanism described above.\n\nNote that the semantics of this op support \"diagonal\" or \"wavefront\" execution.\n(See Step 3 here for an example:\nhttps://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/).\nFrontends should emit multi-layer RNNs as a series of While operators (with\ntime being the inner looping dimension), with each successive layer consuming\nthe scan_outputs from the previous layer, possibly going through several\npoint-wise operators (e.g. dropout, residual connections, linear layer).\n\nThe input/output matching of the subgraph (produced by the Loop node) is based on order instead of name. The implementation will figure out the names based on this order.\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has 2+N inputs: (iteration_num, condition, loop carried dependencies...). It has 1+N+K outputs: (condition, loop carried dependencies..., scan_outputs...). Each scan_output is created by concatenating the value of the specified output at the end of each iteration of the loop. It is an error if the dimensions or data type of these scan_outputs change across loop iterations." + } + ], + "inputs": [ + { + "name": "M", + "type": "I", + "option": "optional", + "description": "A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip." + }, + { + "name": "cond", + "type": "B", + "option": "optional", + "description": "A boolean termination condition. Optional. Pass empty string to skip." + }, + { + "name": "v_initial", + "type": "V", + "list": true, + "description": "The initial values of any loop-carried dependencies (values that change across loop iterations)" + } + ], + "min_input": 2, + "max_input": 2147483647, + "outputs": [ + { + "name": "v_final_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final N loop carried dependency values then K scan_outputs. Scan outputs must be Tensors." 
+ } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "2 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor and Sequence types", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + }, + { + "description": "tensor of int64, which should be a scalar.", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "tensor of bool, which should be a scalar.", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "loop_11", + "code": "# Given a tensor x of values [x1, ..., xN], and initial tensor y\n# sum up its elements using a scan\n# returning the final state (y+x1+x2+...+xN) as well the scan_output\n# [y+x1, y+x1+x2, ..., y+x1+x2+...+xN]\n\ny_in = onnx.helper.make_tensor_value_info(\"y_in\", onnx.TensorProto.FLOAT, [1])\ny_out = onnx.helper.make_tensor_value_info(\"y_out\", onnx.TensorProto.FLOAT, [1])\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [1]\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([-2]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\ni_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nstart_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"iter_count\"], outputs=[\"slice_start\"], axes=[0]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\"], outputs=[\"slice_end\"], axes=[0]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ny_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"y_in\", \"slice_out\"], outputs=[\"y_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nscan_identity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"y_out\"], outputs=[\"scan_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n i_add_node,\n start_unsqueeze_node,\n end_unsqueeze_node,\n slice_node,\n 
y_add_node,\n scan_identity_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, y_in],\n [cond_out, y_out, scan_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"y\"],\n outputs=[\"res_y\", \"res_scan\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nres_y = np.array([13]).astype(np.float32)\ncond = np.array(1).astype(bool)\nres_scan = np.array([-1, 1, 4, 8, 13]).astype(np.float32).reshape((5, 1))\nexpect(\n node,\n inputs=[trip_count, cond, y],\n outputs=[res_y, res_scan],\n name=\"test_loop11\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "loop_13", + "code": "# Given a tensor x of values [x1, ..., xN],\n# Return a sequence of tensors of\n# [[x1], [x1, x2], ..., [x1, ..., xN]]\n\nseq_in = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, None\n)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, None\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n ),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"seq_in\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, seq_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"seq_empty\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nseq_empty: List[Any] = []\nseq_res = [x[: int(i)] for i in x]\ncond = np.array(1).astype(bool)\nexpect(\n node,\n inputs=[trip_count, cond, seq_empty],\n outputs=[seq_res],\n name=\"test_loop13_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n 
input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\n ),\n ],\n)" + }, + { + "summary": "loop_16_none", + "code": "# Given a tensor sequence of values [x1, ..., xN], and an initial optional sequence of tensors [x0],\n# Return a concatenated sequence of tensors of\n# [x0, [x1], [x1, x2], ..., [x1, ..., xN]]\n\nten_in_tp = onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\nopt_in = onnx.helper.make_value_info(\"opt_seq_in\", opt_in_tp)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, []\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx0 = np.array(0).astype(np.float32)\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\noptional_has_elem_node = onnx.helper.make_node(\n \"OptionalHasElement\", inputs=[\"opt_seq_in\"], outputs=[\"optional_has_elem\"]\n)\n\noptional_is_none = onnx.helper.make_node(\n \"Not\", inputs=[\"optional_has_elem\"], outputs=[\"optional_is_none\"]\n)\n\noptional_get_elem = onnx.helper.make_node(\n \"OptionalGetElement\", inputs=[\"opt_seq_in\"], outputs=[\"seq_in\"]\n)\n\nconstant_in = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"constant_in\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\", data_type=onnx.TensorProto.FLOAT, dims=(), vals=[0]\n ),\n)\n\nseq_const_in = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"constant_in\"], outputs=[\"init_seq_in\"]\n)\n\nthen_seq_out = onnx.helper.make_tensor_sequence_value_info(\n \"init_seq_in\", onnx.TensorProto.FLOAT, []\n)\nthen_body = onnx.helper.make_graph(\n [constant_in, seq_const_in], \"then_body\", [], [then_seq_out]\n)\n\nelse_seq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, []\n)\nelse_body = onnx.helper.make_graph(\n [optional_get_elem], \"else_body\", [], [else_seq_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"optional_is_none\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n ),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", 
inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"sequence\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n optional_has_elem_node,\n optional_is_none,\n if_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, opt_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"opt_seq\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\ncond = np.array(1).astype(bool)\nseq_res = compute_loop_outputs(x, [x0], trip_count)\nopt_seq_in: List[Any] = [x0]\nexpect(\n node,\n inputs=[trip_count, cond, opt_seq_in],\n outputs=[seq_res],\n name=\"test_loop16_seq_none\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n opt_in_tp,\n ],\n)" + } + ] + }, + { + "name": "Loop", + "module": "ai.onnx", + "version": 16, + "description": "Generic Looping construct. This loop has multiple termination conditions:\n\n1) Trip count. Iteration count specified at runtime. Set by\n specifying the input M. Optional. Set to empty string to omit.\n Note that a static trip count (specified at graph construction time) can be\n specified by passing in a constant node for input M.\n2) Loop termination condition. This is an input to the op that determines\n whether to run the first iteration and also a loop-carried dependency for\n the body graph. The body graph must yield a value for the condition variable,\n whether this input is provided or not.\n\nThis table summarizes the operating modes of this operator with equivalent\nC-style code:\n\nOperator inputs defined as (max_trip_count, condition_var).\n\n* input (\"\", \"\"):\n for (int i=0; ; ++i) {\n cond = ... 
// Note this value is ignored, but is required in the body\n }\n\n* input (\"\", cond) // Note this is analogous to a while loop\n bool cond = ...;\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n* input (\"\", 1) // Note this is analogous to a do-while loop\n bool cond = true\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n* input (trip_count, \"\") // Note this is analogous to a for loop\n int trip_count = ...\n for (int i=0; i < trip_count; ++i) {\n cond = ...; // ignored\n }\n\n* input (trip_count, cond)\n int trip_count = ...;\n bool cond = ...;\n for (int i=0; i < trip_count && cond; ++i) {\n cond = ...;\n }\n\n\n*Sample usage - cond as well as trip count*\n\n graph predict-net {\n %a = Constant[value = ]()\n %b = Constant[value = ]()\n %keepgoing = Constant[value = ]()\n %max_trip_count = Constant[value = ]()\n %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b)\n return\n }\n\n graph body-net (\n %i[INT32, scalar] // iteration number\n %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used\n %b_in[INT32, scalar] // incoming value of loop-carried-dependency b\n ) {\n %my_local = Add(%a, %b_in)\n %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b\n %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition\n %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated\n return %keepgoing_out, %b_out, %user_defined_val\n }\n\n*Sample equivalent C code*\n\n {\n /* User-defined code (enclosing scope) */\n int a = 3, b = 6;\n bool keepgoing = true; // Analogous to input cond\n /* End user-defined code */\n\n /* Implicitly-defined code */\n const int max_trip_count = 10; // Analogous to input M\n int user_defined_vals[]; // Imagine this is resizable\n /* End implicitly-defined code */\n /* initialize loop-carried variables and scan-output variables */\n bool keepgoing_out = keepgoing\n int b_out = b\n\n for (int i=0; i < max_trip_count && keepgoing_out; ++i) {\n /* Implicitly-defined code: bind actual parameter values\n to formal parameter variables of loop-body */\n bool keepgoing_in = keepgoing_out;\n bool b_in = b_out;\n\n /* User-defined code (loop body) */\n int my_local = a + b_in; // Reading value \"a\" from the enclosing scope is fine\n b_out = a - b_in;\n keepgoing_out = my_local > b_out;\n user_defined_val = b_in + b_in; // b_in and b_out are different variables\n /* End user-defined code */\n\n /* Implicitly defined-code */\n user_defined_vals[i] = user_defined_val // accumulate scan-output values\n }\n // int t = my_local; // Can't do this. my_local is not accessible here.\n\n // The values below are bound to the output variables of the loop and therefore accessible\n // b_out; user_defined_vals; keepgoing_out;\n }\n\nThere are several things of note in this code snippet:\n\n1) Values from the enclosing scope (i.e. variable \"a\" here) are in scope and can\n be referenced in the inputs of the loop.\n2) Any values computed in the loop body that need to be used in a subsequent\n iteration or after the loop are modelled using a pair of variables in the loop-body,\n consisting of an input variable (e.g., b_in) and an output variable (e.g., b_out).\n These are referred to as loop-carried dependencies. 
The loop operation node\n supplies the input value of the input variable for the first iteration, and\n returns the output value of the output variable produced by the final\n iteration.\n3) Scan_output variables are used to implicitly concatenate values computed across\n all the iterations. In the above example, the values of user_defined_val computed\n over all iterations are concatenated and returned as the value of user_defined_vals\n after the loop.\n4) Values created in the body cannot be accessed in the enclosing scope,\n except using the mechanism described above.\n\nNote that the semantics of this op support \"diagonal\" or \"wavefront\" execution.\n(See Step 3 here for an example:\nhttps://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/).\nFrontends should emit multi-layer RNNs as a series of While operators (with\ntime being the inner looping dimension), with each successive layer consuming\nthe scan_outputs from the previous layer, possibly going through several\npoint-wise operators (e.g. dropout, residual connections, linear layer).\n\nThe input/output matching of the subgraph (produced by the Loop node) is based on order instead of name. The implementation will figure out the names based on this order.\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has 2+N inputs: (iteration_num, condition, loop carried dependencies...). It has 1+N+K outputs: (condition, loop carried dependencies..., scan_outputs...). Each scan_output is created by concatenating the value of the specified output at the end of each iteration of the loop. It is an error if the dimensions or data type of these scan_outputs change across loop iterations." + } + ], + "inputs": [ + { + "name": "M", + "type": "I", + "option": "optional", + "description": "A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip." + }, + { + "name": "cond", + "type": "B", + "option": "optional", + "description": "A boolean termination condition. Optional. Pass empty string to skip." + }, + { + "name": "v_initial", + "type": "V", + "list": true, + "description": "The initial values of any loop-carried dependencies (values that change across loop iterations)" + } + ], + "min_input": 2, + "max_input": 2147483647, + "outputs": [ + { + "name": "v_final_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final N loop carried dependency values then K scan_outputs. Scan outputs must be Tensors." 
+ } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "2 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types up to IRv4.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(bfloat16))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))", + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(bfloat16)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(bfloat16))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))" + ] + }, + { + "description": "tensor of int64, which should be a scalar.", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "tensor of bool, which should be a scalar.", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "loop_11", + "code": "# Given a tensor x of values [x1, ..., xN], and initial tensor y\n# sum up its elements using a scan\n# returning the final state (y+x1+x2+...+xN) as well the scan_output\n# [y+x1, y+x1+x2, ..., y+x1+x2+...+xN]\n\ny_in = onnx.helper.make_tensor_value_info(\"y_in\", onnx.TensorProto.FLOAT, [1])\ny_out = onnx.helper.make_tensor_value_info(\"y_out\", onnx.TensorProto.FLOAT, [1])\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [1]\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([-2]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = 
onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\ni_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nstart_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"iter_count\"], outputs=[\"slice_start\"], axes=[0]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\"], outputs=[\"slice_end\"], axes=[0]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ny_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"y_in\", \"slice_out\"], outputs=[\"y_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nscan_identity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"y_out\"], outputs=[\"scan_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n i_add_node,\n start_unsqueeze_node,\n end_unsqueeze_node,\n slice_node,\n y_add_node,\n scan_identity_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, y_in],\n [cond_out, y_out, scan_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"y\"],\n outputs=[\"res_y\", \"res_scan\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nres_y = np.array([13]).astype(np.float32)\ncond = np.array(1).astype(bool)\nres_scan = np.array([-1, 1, 4, 8, 13]).astype(np.float32).reshape((5, 1))\nexpect(\n node,\n inputs=[trip_count, cond, y],\n outputs=[res_y, res_scan],\n name=\"test_loop11\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "loop_13", + "code": "# Given a tensor x of values [x1, ..., xN],\n# Return a sequence of tensors of\n# [[x1], [x1, x2], ..., [x1, ..., xN]]\n\nseq_in = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, None\n)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, None\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n ),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = 
onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"seq_in\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, seq_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"seq_empty\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nseq_empty: List[Any] = []\nseq_res = [x[: int(i)] for i in x]\ncond = np.array(1).astype(bool)\nexpect(\n node,\n inputs=[trip_count, cond, seq_empty],\n outputs=[seq_res],\n name=\"test_loop13_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\n ),\n ],\n)" + }, + { + "summary": "loop_16_none", + "code": "# Given a tensor sequence of values [x1, ..., xN], and an initial optional sequence of tensors [x0],\n# Return a concatenated sequence of tensors of\n# [x0, [x1], [x1, x2], ..., [x1, ..., xN]]\n\nten_in_tp = onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\nopt_in = onnx.helper.make_value_info(\"opt_seq_in\", opt_in_tp)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, []\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx0 = np.array(0).astype(np.float32)\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\noptional_has_elem_node = onnx.helper.make_node(\n \"OptionalHasElement\", inputs=[\"opt_seq_in\"], outputs=[\"optional_has_elem\"]\n)\n\noptional_is_none = onnx.helper.make_node(\n \"Not\", inputs=[\"optional_has_elem\"], outputs=[\"optional_is_none\"]\n)\n\noptional_get_elem = onnx.helper.make_node(\n \"OptionalGetElement\", inputs=[\"opt_seq_in\"], outputs=[\"seq_in\"]\n)\n\nconstant_in = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"constant_in\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\", data_type=onnx.TensorProto.FLOAT, dims=(), vals=[0]\n ),\n)\n\nseq_const_in = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"constant_in\"], outputs=[\"init_seq_in\"]\n)\n\nthen_seq_out = onnx.helper.make_tensor_sequence_value_info(\n \"init_seq_in\", onnx.TensorProto.FLOAT, []\n)\nthen_body = onnx.helper.make_graph(\n [constant_in, seq_const_in], \"then_body\", [], [then_seq_out]\n)\n\nelse_seq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, []\n)\nelse_body = onnx.helper.make_graph(\n [optional_get_elem], 
\"else_body\", [], [else_seq_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"optional_is_none\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n ),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"sequence\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n optional_has_elem_node,\n optional_is_none,\n if_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, opt_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"opt_seq\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\ncond = np.array(1).astype(bool)\nseq_res = compute_loop_outputs(x, [x0], trip_count)\nopt_seq_in: List[Any] = [x0]\nexpect(\n node,\n inputs=[trip_count, cond, opt_seq_in],\n outputs=[seq_res],\n name=\"test_loop16_seq_none\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n opt_in_tp,\n ],\n)" + } + ] + }, + { + "name": "Loop", + "module": "ai.onnx", + "version": 19, + "description": "Generic Looping construct. This loop has multiple termination conditions:\n\n1) Trip count. Iteration count specified at runtime. Set by\n specifying the input M. Optional. Set to empty string to omit.\n Note that a static trip count (specified at graph construction time) can be\n specified by passing in a constant node for input M.\n2) Loop termination condition. This is an input to the op that determines\n whether to run the first iteration and also a loop-carried dependency for\n the body graph. 
The body graph must yield a value for the condition variable,\n whether this input is provided or not.\n\nThis table summarizes the operating modes of this operator with equivalent\nC-style code:\n\nOperator inputs defined as (max_trip_count, condition_var).\n\n* input (\"\", \"\"):\n for (int i=0; ; ++i) {\n cond = ... // Note this value is ignored, but is required in the body\n }\n\n* input (\"\", cond) // Note this is analogous to a while loop\n bool cond = ...;\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n* input (\"\", 1) // Note this is analogous to a do-while loop\n bool cond = true\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n* input (trip_count, \"\") // Note this is analogous to a for loop\n int trip_count = ...\n for (int i=0; i < trip_count; ++i) {\n cond = ...; // ignored\n }\n\n* input (trip_count, cond)\n int trip_count = ...;\n bool cond = ...;\n for (int i=0; i < trip_count && cond; ++i) {\n cond = ...;\n }\n\n\n*Sample usage - cond as well as trip count*\n\n graph predict-net {\n %a = Constant[value = ]()\n %b = Constant[value = ]()\n %keepgoing = Constant[value = ]()\n %max_trip_count = Constant[value = ]()\n %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b)\n return\n }\n\n graph body-net (\n %i[INT32, scalar] // iteration number\n %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used\n %b_in[INT32, scalar] // incoming value of loop-carried-dependency b\n ) {\n %my_local = Add(%a, %b_in)\n %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b\n %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition\n %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated\n return %keepgoing_out, %b_out, %user_defined_val\n }\n\n*Sample equivalent C code*\n\n {\n /* User-defined code (enclosing scope) */\n int a = 3, b = 6;\n bool keepgoing = true; // Analogous to input cond\n /* End user-defined code */\n\n /* Implicitly-defined code */\n const int max_trip_count = 10; // Analogous to input M\n int user_defined_vals[]; // Imagine this is resizable\n /* End implicitly-defined code */\n /* initialize loop-carried variables and scan-output variables */\n bool keepgoing_out = keepgoing\n int b_out = b\n\n for (int i=0; i < max_trip_count && keepgoing_out; ++i) {\n /* Implicitly-defined code: bind actual parameter values\n to formal parameter variables of loop-body */\n bool keepgoing_in = keepgoing_out;\n bool b_in = b_out;\n\n /* User-defined code (loop body) */\n int my_local = a + b_in; // Reading value \"a\" from the enclosing scope is fine\n b_out = a - b_in;\n keepgoing_out = my_local > b_out;\n user_defined_val = b_in + b_in; // b_in and b_out are different variables\n /* End user-defined code */\n\n /* Implicitly defined-code */\n user_defined_vals[i] = user_defined_val // accumulate scan-output values\n }\n // int t = my_local; // Can't do this. my_local is not accessible here.\n\n // The values below are bound to the output variables of the loop and therefore accessible\n // b_out; user_defined_vals; keepgoing_out;\n }\n\nThere are several things of note in this code snippet:\n\n1) Values from the enclosing scope (i.e. 
variable \"a\" here) are in scope and can\n be referenced in the inputs of the loop.\n2) Any values computed in the loop body that needs to be used in a subsequent\n iteration or after the loop are modelled using a pair of variables in the loop-body,\n consisting of an input variable (eg., b_in) and an output variable (eg., b_out).\n These are referred to as loop-carried dependences. The loop operation node\n supplies the input value of the input variable for the first iteration, and\n returns the output value of the output variable produced by the final\n iteration.\n3) Scan_output variables are used to implicitly concatenate values computed across\n all the iterations. In the above example, the value of user_defined_val computed\n over all iterations are concatenated and returned as the value of user_defined_vals\n after the loop.\n4) Values created in the body cannot be accessed in the enclosing scope,\n except using the mechanism described above.\n\nNote that the semantics of this op support \"diagonal\" or \"wavefront\" execution.\n(See Step 3 here for an example:\nhttps://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/).\nFrontends should emit multi-layer RNNs as a series of While operators (with\ntime being the inner looping dimension), with each successive layer consuming\nthe scan_outputs from the previous layer, possibly going through several\npoint-wise operators (e.g. dropout, residual connections, linear layer).\n\nThe input/output of subgraph (produced by loop node) matching is based on order instead of name. The implementation will figure out the names based on this order.\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has 2+N inputs: (iteration_num, condition, loop carried dependencies...). It has 1+N+K outputs: (condition, loop carried dependencies..., scan_outputs...). Each scan_output is created by concatenating the value of the specified output value at the end of each iteration of the loop. It is an error if the dimensions or data type of these scan_outputs change across loop iterations." + } + ], + "inputs": [ + { + "name": "M", + "type": "I", + "option": "optional", + "description": "A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip." + }, + { + "name": "cond", + "type": "B", + "option": "optional", + "description": "A boolean termination condition. Optional. Pass empty string to skip." + }, + { + "name": "v_initial", + "type": "V", + "list": true, + "description": "The initial values of any loop-carried dependencies (values that change across loop iterations)" + } + ], + "min_input": 2, + "max_input": 2147483647, + "outputs": [ + { + "name": "v_final_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final N loop carried dependency values then K scan_outputs. Scan outputs must be Tensors." 
+ } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "2 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types up to IRv9.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(bfloat16))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))", + "seq(tensor(float8e4m3fn))", + "seq(tensor(float8e4m3fnuz))", + "seq(tensor(float8e5m2))", + "seq(tensor(float8e5m2fnuz))", + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(bfloat16)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(bfloat16))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))", + "optional(tensor(float8e4m3fn))", + "optional(tensor(float8e4m3fnuz))", + "optional(tensor(float8e5m2))", + "optional(tensor(float8e5m2fnuz))" + ] + }, + { + "description": "tensor of int64, which should be a scalar.", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "tensor of bool, which should be a scalar.", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "loop_11", + "code": "# Given a tensor x of values [x1, ..., xN], and initial tensor y\n# sum up its elements using a scan\n# returning the final state (y+x1+x2+...+xN) as well the scan_output\n# [y+x1, y+x1+x2, ..., y+x1+x2+...+xN]\n\ny_in = onnx.helper.make_tensor_value_info(\"y_in\", onnx.TensorProto.FLOAT, [1])\ny_out = onnx.helper.make_tensor_value_info(\"y_out\", onnx.TensorProto.FLOAT, [1])\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [1]\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, 
[]\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([-2]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\ni_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nstart_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"iter_count\"], outputs=[\"slice_start\"], axes=[0]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\"], outputs=[\"slice_end\"], axes=[0]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ny_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"y_in\", \"slice_out\"], outputs=[\"y_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nscan_identity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"y_out\"], outputs=[\"scan_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n i_add_node,\n start_unsqueeze_node,\n end_unsqueeze_node,\n slice_node,\n y_add_node,\n scan_identity_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, y_in],\n [cond_out, y_out, scan_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"y\"],\n outputs=[\"res_y\", \"res_scan\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nres_y = np.array([13]).astype(np.float32)\ncond = np.array(1).astype(bool)\nres_scan = np.array([-1, 1, 4, 8, 13]).astype(np.float32).reshape((5, 1))\nexpect(\n node,\n inputs=[trip_count, cond, y],\n outputs=[res_y, res_scan],\n name=\"test_loop11\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "loop_13", + "code": "# Given a tensor x of values [x1, ..., xN],\n# Return a sequence of tensors of\n# [[x1], [x1, x2], ..., [x1, ..., xN]]\n\nseq_in = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, None\n)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, None\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n 
),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"seq_in\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, seq_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"seq_empty\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nseq_empty: List[Any] = []\nseq_res = [x[: int(i)] for i in x]\ncond = np.array(1).astype(bool)\nexpect(\n node,\n inputs=[trip_count, cond, seq_empty],\n outputs=[seq_res],\n name=\"test_loop13_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\n ),\n ],\n)" + }, + { + "summary": "loop_16_none", + "code": "# Given a tensor sequence of values [x1, ..., xN], and an initial optional sequence of tensors [x0],\n# Return a concatenated sequence of tensors of\n# [x0, [x1], [x1, x2], ..., [x1, ..., xN]]\n\nten_in_tp = onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\nopt_in = onnx.helper.make_value_info(\"opt_seq_in\", opt_in_tp)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, []\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx0 = np.array(0).astype(np.float32)\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\noptional_has_elem_node = onnx.helper.make_node(\n \"OptionalHasElement\", inputs=[\"opt_seq_in\"], outputs=[\"optional_has_elem\"]\n)\n\noptional_is_none = onnx.helper.make_node(\n \"Not\", inputs=[\"optional_has_elem\"], outputs=[\"optional_is_none\"]\n)\n\noptional_get_elem = onnx.helper.make_node(\n \"OptionalGetElement\", inputs=[\"opt_seq_in\"], outputs=[\"seq_in\"]\n)\n\nconstant_in = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"constant_in\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\", data_type=onnx.TensorProto.FLOAT, dims=(), vals=[0]\n ),\n)\n\nseq_const_in = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"constant_in\"], outputs=[\"init_seq_in\"]\n)\n\nthen_seq_out = 
onnx.helper.make_tensor_sequence_value_info(\n \"init_seq_in\", onnx.TensorProto.FLOAT, []\n)\nthen_body = onnx.helper.make_graph(\n [constant_in, seq_const_in], \"then_body\", [], [then_seq_out]\n)\n\nelse_seq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, []\n)\nelse_body = onnx.helper.make_graph(\n [optional_get_elem], \"else_body\", [], [else_seq_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"optional_is_none\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n ),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"sequence\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n optional_has_elem_node,\n optional_is_none,\n if_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, opt_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"opt_seq\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\ncond = np.array(1).astype(bool)\nseq_res = compute_loop_outputs(x, [x0], trip_count)\nopt_seq_in: List[Any] = [x0]\nexpect(\n node,\n inputs=[trip_count, cond, opt_seq_in],\n outputs=[seq_res],\n name=\"test_loop16_seq_none\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n opt_in_tp,\n ],\n)" + } + ] + }, + { + "name": "Loop", + "module": "ai.onnx", + "version": 21, + "description": "Generic Looping construct. This loop has multiple termination conditions:\n\n1) Trip count. Iteration count specified at runtime. Set by\n specifying the input M. Optional. Set to empty string to omit.\n Note that a static trip count (specified at graph construction time) can be\n specified by passing in a constant node for input M.\n2) Loop termination condition. 
This is an input to the op that determines\n whether to run the first iteration and also a loop-carried dependency for\n the body graph. The body graph must yield a value for the condition variable,\n whether this input is provided or not.\n\nThis table summarizes the operating modes of this operator with equivalent\nC-style code:\n\nOperator inputs defined as (max_trip_count, condition_var).\n\n* input (\"\", \"\"):\n for (int i=0; ; ++i) {\n cond = ... // Note this value is ignored, but is required in the body\n }\n\n* input (\"\", cond) // Note this is analogous to a while loop\n bool cond = ...;\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n* input (\"\", 1) // Note this is analogous to a do-while loop\n bool cond = true\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n* input (trip_count, \"\") // Note this is analogous to a for loop\n int trip_count = ...\n for (int i=0; i < trip_count; ++i) {\n cond = ...; // ignored\n }\n\n* input (trip_count, cond)\n int trip_count = ...;\n bool cond = ...;\n for (int i=0; i < trip_count && cond; ++i) {\n cond = ...;\n }\n\n\n*Sample usage - cond as well as trip count*\n\n graph predict-net {\n %a = Constant[value = ]()\n %b = Constant[value = ]()\n %keepgoing = Constant[value = ]()\n %max_trip_count = Constant[value = ]()\n %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b)\n return\n }\n\n graph body-net (\n %i[INT32, scalar] // iteration number\n %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used\n %b_in[INT32, scalar] // incoming value of loop-carried-dependency b\n ) {\n %my_local = Add(%a, %b_in)\n %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b\n %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition\n %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated\n return %keepgoing_out, %b_out, %user_defined_val\n }\n\n*Sample equivalent C code*\n\n {\n /* User-defined code (enclosing scope) */\n int a = 3, b = 6;\n bool keepgoing = true; // Analogous to input cond\n /* End user-defined code */\n\n /* Implicitly-defined code */\n const int max_trip_count = 10; // Analogous to input M\n int user_defined_vals[]; // Imagine this is resizable\n /* End implicitly-defined code */\n /* initialize loop-carried variables and scan-output variables */\n bool keepgoing_out = keepgoing\n int b_out = b\n\n for (int i=0; i < max_trip_count && keepgoing_out; ++i) {\n /* Implicitly-defined code: bind actual parameter values\n to formal parameter variables of loop-body */\n bool keepgoing_in = keepgoing_out;\n bool b_in = b_out;\n\n /* User-defined code (loop body) */\n int my_local = a + b_in; // Reading value \"a\" from the enclosing scope is fine\n b_out = a - b_in;\n keepgoing_out = my_local > b_out;\n user_defined_val = b_in + b_in; // b_in and b_out are different variables\n /* End user-defined code */\n\n /* Implicitly defined-code */\n user_defined_vals[i] = user_defined_val // accumulate scan-output values\n }\n // int t = my_local; // Can't do this. my_local is not accessible here.\n\n // The values below are bound to the output variables of the loop and therefore accessible\n // b_out; user_defined_vals; keepgoing_out;\n }\n\nThere are several things of note in this code snippet:\n\n1) Values from the enclosing scope (i.e. 
variable \"a\" here) are in scope and can\n be referenced in the inputs of the loop.\n2) Any values computed in the loop body that needs to be used in a subsequent\n iteration or after the loop are modelled using a pair of variables in the loop-body,\n consisting of an input variable (eg., b_in) and an output variable (eg., b_out).\n These are referred to as loop-carried dependences. The loop operation node\n supplies the input value of the input variable for the first iteration, and\n returns the output value of the output variable produced by the final\n iteration.\n3) Scan_output variables are used to implicitly concatenate values computed across\n all the iterations. In the above example, the value of user_defined_val computed\n over all iterations are concatenated and returned as the value of user_defined_vals\n after the loop.\n4) Values created in the body cannot be accessed in the enclosing scope,\n except using the mechanism described above.\n\nNote that the semantics of this op support \"diagonal\" or \"wavefront\" execution.\n(See Step 3 here for an example:\nhttps://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/).\nFrontends should emit multi-layer RNNs as a series of While operators (with\ntime being the inner looping dimension), with each successive layer consuming\nthe scan_outputs from the previous layer, possibly going through several\npoint-wise operators (e.g. dropout, residual connections, linear layer).\n\nThe input/output of subgraph (produced by loop node) matching is based on order instead of name. The implementation will figure out the names based on this order.\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has 2+N inputs: (iteration_num, condition, loop carried dependencies...). It has 1+N+K outputs: (condition, loop carried dependencies..., scan_outputs...). Each scan_output is created by concatenating the value of the specified output value at the end of each iteration of the loop. It is an error if the dimensions or data type of these scan_outputs change across loop iterations." + } + ], + "inputs": [ + { + "name": "M", + "type": "I", + "option": "optional", + "description": "A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip." + }, + { + "name": "cond", + "type": "B", + "option": "optional", + "description": "A boolean termination condition. Optional. Pass empty string to skip." + }, + { + "name": "v_initial", + "type": "V", + "list": true, + "description": "The initial values of any loop-carried dependencies (values that change across loop iterations)" + } + ], + "min_input": 2, + "max_input": 2147483647, + "outputs": [ + { + "name": "v_final_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final N loop carried dependency values then K scan_outputs. Scan outputs must be Tensors." 
+ } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "2 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types up to IRv10.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(bfloat16))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))", + "seq(tensor(float8e4m3fn))", + "seq(tensor(float8e4m3fnuz))", + "seq(tensor(float8e5m2))", + "seq(tensor(float8e5m2fnuz))", + "seq(tensor(uint4))", + "seq(tensor(int4))", + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(bfloat16)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(bfloat16))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))", + "optional(tensor(float8e4m3fn))", + "optional(tensor(float8e4m3fnuz))", + "optional(tensor(float8e5m2))", + "optional(tensor(float8e5m2fnuz))", + "optional(tensor(uint4))", + "optional(tensor(int4))" + ] + }, + { + "description": "tensor of int64, which should be a scalar.", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "tensor of bool, which should be a scalar.", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "loop_11", + "code": "# Given a tensor x of values [x1, ..., xN], and initial tensor y\n# sum up its elements using a scan\n# returning the final state (y+x1+x2+...+xN) as well the scan_output\n# [y+x1, y+x1+x2, ..., y+x1+x2+...+xN]\n\ny_in = onnx.helper.make_tensor_value_info(\"y_in\", onnx.TensorProto.FLOAT, [1])\ny_out = onnx.helper.make_tensor_value_info(\"y_out\", onnx.TensorProto.FLOAT, [1])\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [1]\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = 
onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\ny = np.array([-2]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\ni_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nstart_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"iter_count\"], outputs=[\"slice_start\"], axes=[0]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\"], outputs=[\"slice_end\"], axes=[0]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ny_add_node = onnx.helper.make_node(\n \"Add\", inputs=[\"y_in\", \"slice_out\"], outputs=[\"y_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nscan_identity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"y_out\"], outputs=[\"scan_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n i_add_node,\n start_unsqueeze_node,\n end_unsqueeze_node,\n slice_node,\n y_add_node,\n scan_identity_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, y_in],\n [cond_out, y_out, scan_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"y\"],\n outputs=[\"res_y\", \"res_scan\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nres_y = np.array([13]).astype(np.float32)\ncond = np.array(1).astype(bool)\nres_scan = np.array([-1, 1, 4, 8, 13]).astype(np.float32).reshape((5, 1))\nexpect(\n node,\n inputs=[trip_count, cond, y],\n outputs=[res_y, res_scan],\n name=\"test_loop11\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)],\n)" + }, + { + "summary": "loop_13", + "code": "# Given a tensor x of values [x1, ..., xN],\n# Return a sequence of tensors of\n# [[x1], [x1, x2], ..., [x1, ..., xN]]\n\nseq_in = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, None\n)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, None\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n 
\"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n ),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"seq_in\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, seq_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"seq_empty\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\nseq_empty: List[Any] = []\nseq_res = [x[: int(i)] for i in x]\ncond = np.array(1).astype(bool)\nexpect(\n node,\n inputs=[trip_count, cond, seq_empty],\n outputs=[seq_res],\n name=\"test_loop13_seq\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\n ),\n ],\n)" + }, + { + "summary": "loop_16_none", + "code": "# Given a tensor sequence of values [x1, ..., xN], and an initial optional sequence of tensors [x0],\n# Return a concatenated sequence of tensors of\n# [x0, [x1], [x1, x2], ..., [x1, ..., xN]]\n\nten_in_tp = onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])\nseq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)\nopt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)\nopt_in = onnx.helper.make_value_info(\"opt_seq_in\", opt_in_tp)\nseq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_out\", onnx.TensorProto.FLOAT, []\n)\ncond_in = onnx.helper.make_tensor_value_info(\n \"cond_in\", onnx.TensorProto.BOOL, []\n)\ncond_out = onnx.helper.make_tensor_value_info(\n \"cond_out\", onnx.TensorProto.BOOL, []\n)\niter_count = onnx.helper.make_tensor_value_info(\n \"iter_count\", onnx.TensorProto.INT64, []\n)\n\nx0 = np.array(0).astype(np.float32)\nx = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n\noptional_has_elem_node = onnx.helper.make_node(\n \"OptionalHasElement\", inputs=[\"opt_seq_in\"], outputs=[\"optional_has_elem\"]\n)\n\noptional_is_none = onnx.helper.make_node(\n \"Not\", inputs=[\"optional_has_elem\"], outputs=[\"optional_is_none\"]\n)\n\noptional_get_elem = onnx.helper.make_node(\n \"OptionalGetElement\", inputs=[\"opt_seq_in\"], outputs=[\"seq_in\"]\n)\n\nconstant_in = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"constant_in\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\", data_type=onnx.TensorProto.FLOAT, 
dims=(), vals=[0]\n ),\n)\n\nseq_const_in = onnx.helper.make_node(\n \"SequenceConstruct\", inputs=[\"constant_in\"], outputs=[\"init_seq_in\"]\n)\n\nthen_seq_out = onnx.helper.make_tensor_sequence_value_info(\n \"init_seq_in\", onnx.TensorProto.FLOAT, []\n)\nthen_body = onnx.helper.make_graph(\n [constant_in, seq_const_in], \"then_body\", [], [then_seq_out]\n)\n\nelse_seq_out = onnx.helper.make_tensor_sequence_value_info(\n \"seq_in\", onnx.TensorProto.FLOAT, []\n)\nelse_body = onnx.helper.make_graph(\n [optional_get_elem], \"else_body\", [], [else_seq_out]\n)\n\nif_node = onnx.helper.make_node(\n \"If\",\n inputs=[\"optional_is_none\"],\n outputs=[\"sequence\"],\n then_branch=then_body,\n else_branch=else_body,\n)\n\nx_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"x\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_x\",\n data_type=onnx.TensorProto.FLOAT,\n dims=x.shape,\n vals=x.flatten().astype(float),\n ),\n)\n\none_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"one\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_one\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[1],\n ),\n)\n\nzero_const_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"slice_start\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_zero\",\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0],\n ),\n)\n\naxes_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"axes\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor_axes\",\n data_type=onnx.TensorProto.INT64,\n dims=(),\n vals=[0],\n ),\n)\n\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"iter_count\", \"one\"], outputs=[\"end\"]\n)\n\nend_unsqueeze_node = onnx.helper.make_node(\n \"Unsqueeze\", inputs=[\"end\", \"axes\"], outputs=[\"slice_end\"]\n)\n\nslice_node = onnx.helper.make_node(\n \"Slice\", inputs=[\"x\", \"slice_start\", \"slice_end\"], outputs=[\"slice_out\"]\n)\n\ninsert_node = onnx.helper.make_node(\n \"SequenceInsert\", inputs=[\"sequence\", \"slice_out\"], outputs=[\"seq_out\"]\n)\n\nidentity_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"]\n)\n\nloop_body = onnx.helper.make_graph(\n [\n identity_node,\n optional_has_elem_node,\n optional_is_none,\n if_node,\n x_const_node,\n one_const_node,\n zero_const_node,\n add_node,\n axes_node,\n end_unsqueeze_node,\n slice_node,\n insert_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, opt_in],\n [cond_out, seq_out],\n)\n\nnode = onnx.helper.make_node(\n \"Loop\",\n inputs=[\"trip_count\", \"cond\", \"opt_seq\"],\n outputs=[\"seq_res\"],\n body=loop_body,\n)\n\ntrip_count = np.array(5).astype(np.int64)\ncond = np.array(1).astype(bool)\nseq_res = compute_loop_outputs(x, [x0], trip_count)\nopt_seq_in: List[Any] = [x0]\nexpect(\n node,\n inputs=[trip_count, cond, opt_seq_in],\n outputs=[seq_res],\n name=\"test_loop16_seq_none\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 16)],\n input_type_protos=[\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.INT64, trip_count.shape\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),\n opt_in_tp,\n ],\n)" + } + ] + }, + { + "name": "LpNormalization", + "module": "ai.onnx", + "version": 1, + "description": "Given a matrix, apply Lp-normalization along the provided axis.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": -1, + "description": "The axis on which to apply normalization, -1 
mean last axis." + }, + { + "name": "p", + "type": "int64", + "required": false, + "default": 2, + "description": "The order of the normalization, only 1 or 2 are supported." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input matrix" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Matrix after normalization" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "category": "Normalization" + }, + { + "name": "LpPool", + "module": "ai.onnx", + "version": 1, + "description": "LpPool consumes an input tensor X and applies Lp pooling across the\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n Lp pooling consisting of computing the Lp norm on all values of a subset\n of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing.", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. DEPRECATION NOTE: auto_pad is only intended to support legacy uses, and for framework authors, one is explicitly encouraged to use explicit padding specified in the pads attribute." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": false, + "description": "The size of the kernel along each axis." + }, + { + "name": "p", + "type": "float32", + "required": false, + "default": 2.0, + "description": "p value of the Lp norm used to pool over the input data, default is 2.0." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimension are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes." 
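Picking up the LpNormalization entry above: the whole operator reduces to a couple of NumPy lines. This sketch assumes `p` is restricted to 1 or 2, as the attribute description states; `lp_normalize` is a hypothetical name.

```python
# Illustrative only: a NumPy reference for LpNormalization as described above.
import numpy as np

def lp_normalize(x, axis=-1, p=2):
    assert p in (1, 2), "the attribute description only allows 1 or 2"
    norm = np.sum(np.abs(x) ** p, axis=axis, keepdims=True) ** (1.0 / p)
    return x / norm

x = np.array([[3.0, 4.0], [1.0, 1.0]], dtype=np.float32)
print(lp_normalize(x))       # unit L2 rows: [[0.6, 0.8], [0.707, 0.707]]
print(lp_normalize(x, p=1))  # unit L1 rows: [[0.429, 0.571], [0.5, 0.5]]
```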
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "lppool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\np = 3\nkernel_shape = [2]\nstrides = [1]\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n p=p,\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_1d_default\")" + }, + { + "summary": "lppool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\np = 4\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_default\")" + }, + { + "summary": "lppool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n p=p,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array(\n [\n [\n [\n [14.560219778561036, 16.24807680927192],\n [21.633307652783937, 23.49468024894146],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_dilations\")" + }, + { + "summary": "lppool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\np = 3\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_pads\")" + }, + { + "summary": "lppool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\np = 4\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 
2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_same_lower\")" + }, + { + "summary": "lppool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_same_upper\")" + }, + { + "summary": "lppool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n strides=[3, 3],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_strides\")" + }, + { + "summary": "lppool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\np = 3\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_3d_default\")" + } + ], + "category": "Pool" + }, + { + "name": "LpPool", + "module": "ai.onnx", + "version": 2, + "description": "LpPool consumes an input tensor X and applies Lp pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n Lp pooling consisting of computing the Lp norm on all values of a subset\n of the input tensor according to the kernel size and downsampling the\n data into the output tensor 
Y for further processing.", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "p", + "type": "int64", + "required": false, + "default": 2, + "description": "p value of the Lp norm used to pool over the input data." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes." 
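The LpPool description above ("computing the Lp norm on all values of a subset of the input tensor according to the kernel size") is easiest to see in one dimension. A minimal sketch with no padding follows; `lp_pool_1d` is a hypothetical helper, not the backend `pool` function the example snippets call.

```python
# Illustrative only: 1-D Lp pooling over sliding windows, default pads of 0.
import numpy as np

def lp_pool_1d(x, kernel, stride=1, p=2):
    """x: (N, C, W); every output value is the Lp norm of one window."""
    n, c, w = x.shape
    out_w = (w - kernel) // stride + 1
    y = np.empty((n, c, out_w), dtype=x.dtype)
    for i in range(out_w):
        window = x[:, :, i * stride : i * stride + kernel]
        y[:, :, i] = np.sum(np.abs(window) ** p, axis=-1) ** (1.0 / p)
    return y

x = np.arange(1, 6, dtype=np.float32).reshape(1, 1, 5)  # [1, 2, 3, 4, 5]
# kernel 2, stride 1 -> sqrt(1+4), sqrt(4+9), sqrt(9+16), sqrt(16+25)
print(lp_pool_1d(x, kernel=2))  # [[[2.236  3.606  5.  6.403]]]
```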
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "lppool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\np = 3\nkernel_shape = [2]\nstrides = [1]\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n p=p,\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_1d_default\")" + }, + { + "summary": "lppool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\np = 4\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_default\")" + }, + { + "summary": "lppool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n p=p,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array(\n [\n [\n [\n [14.560219778561036, 16.24807680927192],\n [21.633307652783937, 23.49468024894146],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_dilations\")" + }, + { + "summary": "lppool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\np = 3\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_pads\")" + }, + { + "summary": "lppool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\np = 4\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 
2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_same_lower\")" + }, + { + "summary": "lppool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_same_upper\")" + }, + { + "summary": "lppool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n strides=[3, 3],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_strides\")" + }, + { + "summary": "lppool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\np = 3\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_3d_default\")" + } + ], + "category": "Pool" + }, + { + "name": "LpPool", + "module": "ai.onnx", + "version": 11, + "description": "LpPool consumes an input tensor X and applies Lp pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n Lp pooling consisting of computing the Lp norm on all values of a subset\n of the input tensor according to the kernel size and downsampling the\n data into the output 
tensor Y for further processing.", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "p", + "type": "int64", + "required": false, + "default": 2, + "description": "p value of the Lp norm used to pool over the input data." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes." 
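The `pads` layout documented above ([x1_begin, x2_begin, ..., x1_end, x2_end, ...]) differs from np.pad's per-axis (begin, end) pairs; a small converter for NCHW inputs makes the mapping explicit. `onnx_pads_to_np` is a hypothetical helper used only for illustration.

```python
# Illustrative only: ONNX pads layout -> np.pad per-axis pairs.
import numpy as np

def onnx_pads_to_np(pads, n_spatial):
    begins, ends = pads[:n_spatial], pads[n_spatial:]
    # No padding on the batch (N) and channel (C) axes, then one
    # (begin, end) pair per spatial axis.
    return [(0, 0), (0, 0)] + list(zip(begins, ends))

x = np.ones((1, 3, 28, 28), dtype=np.float32)
padded = np.pad(x, onnx_pads_to_np([2, 2, 2, 2], 2), mode="constant")
print(padded.shape)  # (1, 3, 32, 32), as in the lppool_2d_pads example
```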
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "lppool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\np = 3\nkernel_shape = [2]\nstrides = [1]\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n p=p,\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_1d_default\")" + }, + { + "summary": "lppool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\np = 4\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_default\")" + }, + { + "summary": "lppool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n p=p,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array(\n [\n [\n [\n [14.560219778561036, 16.24807680927192],\n [21.633307652783937, 23.49468024894146],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_dilations\")" + }, + { + "summary": "lppool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\np = 3\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_pads\")" + }, + { + "summary": "lppool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\np = 4\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 
2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_same_lower\")" + }, + { + "summary": "lppool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_same_upper\")" + }, + { + "summary": "lppool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n strides=[3, 3],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_strides\")" + }, + { + "summary": "lppool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\np = 3\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_3d_default\")" + } + ], + "category": "Pool" + }, + { + "name": "LpPool", + "module": "ai.onnx", + "version": 18, + "description": "LpPool consumes an input tensor X and applies Lp pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n Lp pooling consisting of computing the Lp norm on all values of a subset\n of the input tensor according to the kernel size and downsampling the\n data into the output 
tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled `pad_shape[i]` is the sum of pads along axis `i`.\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - {kernelSpatialShape} + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + {kernelSpatialShape} - input_spatial_shape[i]\n ```", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER." + }, + { + "name": "ceil_mode", + "type": "int64", + "required": false, + "description": "Whether to use ceil or floor (default) to compute the output shape." + }, + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "dilation value along each spatial axis of the filter. If not present, the dilation defaults is 1 along each spatial axis." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "p", + "type": "int64", + "required": false, + "default": 2, + "description": "p value of the Lp norm used to pool over the input data." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size." 
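The version-18 description above states the output-shape formulas explicitly. Evaluating them for concrete sizes (dilations taken as 1; `spatial_out` is a hypothetical helper, not part of onnx):

```python
# Illustrative only: the output-shape formulas quoted in the v18 description.
import math

def spatial_out(in_dim, kernel, stride, pad_total=0, ceil_mode=False):
    rounding = math.ceil if ceil_mode else math.floor
    return rounding((in_dim + pad_total - kernel) / stride + 1)

# floor vs ceil only differ when the division is inexact:
print(spatial_out(33, 2, 3))                  # floor(31/3 + 1) = 11
print(spatial_out(33, 2, 3, ceil_mode=True))  # ceil(31/3 + 1)  = 12

# SAME_UPPER/SAME_LOWER: out = ceil(in / stride), then solve for the pad.
out = math.ceil(32 / 1)             # 32
pad_total = (out - 1) * 1 + 2 - 32  # 1 -> the pad_shape [1, 1] seen in the
print(out, pad_total)               # lppool_2d_same_upper example
```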
+ } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "lppool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\np = 3\nkernel_shape = [2]\nstrides = [1]\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n p=p,\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_1d_default\")" + }, + { + "summary": "lppool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\np = 4\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_default\")" + }, + { + "summary": "lppool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n p=p,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\n\ny = np.array(\n [\n [\n [\n [14.560219778561036, 16.24807680927192],\n [21.633307652783937, 23.49468024894146],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_dilations\")" + }, + { + "summary": "lppool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\np = 3\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_pads\")" + }, + { + "summary": "lppool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\np = 4\nnode 
= onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_same_lower\")" + }, + { + "summary": "lppool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=0,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", pads, p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_same_upper\")" + }, + { + "summary": "lppool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\np = 2\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n strides=[3, 3],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_2d_strides\")" + }, + { + "summary": "lppool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\np = 3\nnode = onnx.helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n p=p,\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"LPPOOL\", p=p)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_lppool_3d_default\")" + } + ], + "category": "Pool" + }, + { + "name": "MatMul", + "module": "ai.onnx", + "version": 1, + "description": "Matrix product that behaves like numpy.matmul: 
https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "N-dimensional matrix A" + }, + { + "name": "B", + "type": "T", + "description": "N-dimensional matrix B" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Matrix multiply results from A * B" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "matmul", + "code": "node = onnx.helper.make_node(\n \"MatMul\",\n inputs=[\"a\", \"b\"],\n outputs=[\"c\"],\n)\n\n# 2d\na = np.random.randn(3, 4).astype(np.float32)\nb = np.random.randn(4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c], name=\"test_matmul_2d\")\n\n# 3d\na = np.random.randn(2, 3, 4).astype(np.float32)\nb = np.random.randn(2, 4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c], name=\"test_matmul_3d\")\n\n# 4d\na = np.random.randn(1, 2, 3, 4).astype(np.float32)\nb = np.random.randn(1, 2, 4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c], name=\"test_matmul_4d\")" + } + ] + }, + { + "name": "MatMul", + "module": "ai.onnx", + "version": 9, + "description": "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "N-dimensional matrix A" + }, + { + "name": "B", + "type": "T", + "description": "N-dimensional matrix B" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Matrix multiply results from A * B" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float/int tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "matmul", + "code": "node = onnx.helper.make_node(\n \"MatMul\",\n inputs=[\"a\", \"b\"],\n outputs=[\"c\"],\n)\n\n# 2d\na = np.random.randn(3, 4).astype(np.float32)\nb = np.random.randn(4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c], name=\"test_matmul_2d\")\n\n# 3d\na = np.random.randn(2, 3, 4).astype(np.float32)\nb = np.random.randn(2, 4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c], name=\"test_matmul_3d\")\n\n# 4d\na = np.random.randn(1, 2, 3, 4).astype(np.float32)\nb = np.random.randn(1, 2, 4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c], name=\"test_matmul_4d\")" + } + ] + }, + { + "name": "MatMul", + "module": "ai.onnx", + "version": 13, + "description": "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "N-dimensional matrix A" + }, + { + "name": "B", + "type": "T", + "description": "N-dimensional matrix B" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Matrix multiply results from A 
* B" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float/int tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "matmul", + "code": "node = onnx.helper.make_node(\n \"MatMul\",\n inputs=[\"a\", \"b\"],\n outputs=[\"c\"],\n)\n\n# 2d\na = np.random.randn(3, 4).astype(np.float32)\nb = np.random.randn(4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c], name=\"test_matmul_2d\")\n\n# 3d\na = np.random.randn(2, 3, 4).astype(np.float32)\nb = np.random.randn(2, 4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c], name=\"test_matmul_3d\")\n\n# 4d\na = np.random.randn(1, 2, 3, 4).astype(np.float32)\nb = np.random.randn(1, 2, 4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c], name=\"test_matmul_4d\")" + } + ] + }, + { + "name": "MatMulInteger", + "module": "ai.onnx", + "version": 10, + "description": "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.\nThe production MUST never overflow. The accumulation may overflow if and only if in 32 bits.\n", + "inputs": [ + { + "name": "A", + "type": "T1", + "description": "N-dimensional matrix A" + }, + { + "name": "B", + "type": "T2", + "description": "N-dimensional matrix B" + }, + { + "name": "a_zero_point", + "type": "T1", + "option": "optional", + "description": "Zero point tensor for input 'A'. It's optional and default value is 0. It could be a scalar or N-D tensor. Scalar refers to per tensor quantization whereas N-D refers to per row quantization. If the input is 2D of shape [M, K] then zero point tensor may be an M element vector [zp_1, zp_2, ..., zp_M]. If the input is N-D tensor with shape [D1, D2, M, K] then zero point tensor may have shape [D1, D2, M, 1]. " + }, + { + "name": "b_zero_point", + "type": "T2", + "option": "optional", + "description": "Zero point tensor for input 'B'. It's optional and default value is 0. It could be a scalar or a N-D tensor, Scalar refers to per tensor quantization whereas N-D refers to per col quantization. If the input is 2D of shape [K, N] then zero point tensor may be an N element vector [zp_1, zp_2, ..., zp_N]. If the input is N-D tensor with shape [D1, D2, K, N] then zero point tensor may have shape [D1, D2, 1, N]. 
" + } + ], + "min_input": 2, + "max_input": 4, + "outputs": [ + { + "name": "Y", + "type": "T3", + "description": "Matrix multiply results from A * B" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 4", + "type_constraints": [ + { + "description": "Constrain input A data type to 8-bit integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + }, + { + "description": "Constrain input B data type to 8-bit integer tensor.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + }, + { + "description": "Constrain output Y data type as 32-bit integer tensor.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "matmulinteger", + "code": "node = onnx.helper.make_node(\n \"MatMulInteger\",\n inputs=[\"A\", \"B\", \"a_zero_point\", \"b_zero_point\"],\n outputs=[\"Y\"],\n)\n\nA = np.array(\n [\n [11, 7, 3],\n [10, 6, 2],\n [9, 5, 1],\n [8, 4, 0],\n ],\n dtype=np.uint8,\n)\n\na_zero_point = np.array([12], dtype=np.uint8)\n\nB = np.array(\n [\n [1, 4],\n [2, 5],\n [3, 6],\n ],\n dtype=np.uint8,\n)\n\nb_zero_point = np.array([0], dtype=np.uint8)\n\noutput = np.array(\n [\n [-38, -83],\n [-44, -98],\n [-50, -113],\n [-56, -128],\n ],\n dtype=np.int32,\n)\n\nexpect(\n node,\n inputs=[A, B, a_zero_point, b_zero_point],\n outputs=[output],\n name=\"test_matmulinteger\",\n)" + } + ] + }, + { + "name": "Max", + "module": "ai.onnx", + "version": 1, + "description": "Element-wise max of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for Max." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "max", + "type": "T", + "description": "Output tensor. Same dimension as inputs." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "max", + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 3]).astype(np.float32)\nresult = np.array([3, 5, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_max_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_max_one_input\")\n\nresult = np.maximum(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_max_two_inputs\"\n)" + }, + { + "summary": "max_all_numeric_types", + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([3, 4, 4]).astype(op_dtype)\n node = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n )\n expect(\n node,\n inputs=[data_0, data_1],\n outputs=[result],\n name=f\"test_max_{np.dtype(op_dtype).name}\",\n )" + } + ] + }, + { + "name": "Max", + "module": "ai.onnx", + "version": 6, + "description": "Element-wise max of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for Max." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "max", + "type": "T", + "description": "Output tensor. Same dimension as inputs." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "max", + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 3]).astype(np.float32)\nresult = np.array([3, 5, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_max_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_max_one_input\")\n\nresult = np.maximum(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_max_two_inputs\"\n)" + }, + { + "summary": "max_all_numeric_types", + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([3, 4, 4]).astype(op_dtype)\n node = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n )\n expect(\n node,\n inputs=[data_0, data_1],\n outputs=[result],\n name=f\"test_max_{np.dtype(op_dtype).name}\",\n )" + } + ] + }, + { + "name": "Max", + "module": "ai.onnx", + "version": 8, + "description": "Element-wise max of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for max." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "max", + "type": "T", + "description": "Output tensor." 
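The practical change from opsets 1 and 6 is broadcasting: differently shaped (but compatible) inputs are now legal. A small illustration, assuming numpy's broadcasting rules stand in for ONNX's multidirectional broadcasting:

import numpy as np
from functools import reduce

a = np.array([[3.0, 2.0, 1.0]])   # shape (1, 3)
b = np.array([[1.0], [4.0]])      # shape (2, 1)
c = reduce(np.maximum, [a, b])    # broadcast to shape (2, 3)
# c == [[3., 2., 1.], [4., 4., 4.]]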
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "max", + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 3]).astype(np.float32)\nresult = np.array([3, 5, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_max_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_max_one_input\")\n\nresult = np.maximum(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_max_two_inputs\"\n)" + }, + { + "summary": "max_all_numeric_types", + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([3, 4, 4]).astype(op_dtype)\n node = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n )\n expect(\n node,\n inputs=[data_0, data_1],\n outputs=[result],\n name=f\"test_max_{np.dtype(op_dtype).name}\",\n )" + } + ] + }, + { + "name": "Max", + "module": "ai.onnx", + "version": 12, + "description": "Element-wise max of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for max." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "max", + "type": "T", + "description": "Output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "max", + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 3]).astype(np.float32)\nresult = np.array([3, 5, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_max_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_max_one_input\")\n\nresult = np.maximum(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_max_two_inputs\"\n)" + }, + { + "summary": "max_all_numeric_types", + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([3, 4, 4]).astype(op_dtype)\n node = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n )\n expect(\n node,\n inputs=[data_0, data_1],\n outputs=[result],\n name=f\"test_max_{np.dtype(op_dtype).name}\",\n )" + } + ] + }, + { + "name": "Max", + "module": "ai.onnx", + "version": 13, + "description": "Element-wise max of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for max." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "max", + "type": "T", + "description": "Output tensor." 
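Opset 12 widened T from float tensors to all numeric tensors, and opset 13 adds bfloat16; the integer case behaves exactly like the max_all_numeric_types loop above, for example:

import numpy as np

data_0 = np.array([3, 2, 1], dtype=np.int8)
data_1 = np.array([1, 4, 4], dtype=np.int8)
result = np.maximum(data_0, data_1)  # array([3, 4, 4], dtype=int8)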
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "max", + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 3]).astype(np.float32)\nresult = np.array([3, 5, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_max_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_max_one_input\")\n\nresult = np.maximum(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_max_two_inputs\"\n)" + }, + { + "summary": "max_all_numeric_types", + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([3, 4, 4]).astype(op_dtype)\n node = onnx.helper.make_node(\n \"Max\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n )\n expect(\n node,\n inputs=[data_0, data_1],\n outputs=[result],\n name=f\"test_max_{np.dtype(op_dtype).name}\",\n )" + } + ] + }, + { + "name": "MaxPool", + "module": "ai.onnx", + "version": 1, + "description": "MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n max pooling consisting of computing the max on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is maximum number of elements exclude pad.\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. 
SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size matches the input. If the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no padding." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The values represent the number of pixels added to the beginning and end part of the corresponding axis. The `pads` format should be as follows: [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for the image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For the non-image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.
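The floor formula in the MaxPool description above can be checked numerically; maxpool_out_dim below is a hypothetical one-off helper, not an onnx API:

import math

def maxpool_out_dim(in_dim, pad_total, kernel, stride):
    # floor((input + pads - kernel) / stride + 1), per the description above.
    return math.floor((in_dim + pad_total - kernel) / stride + 1)

assert maxpool_out_dim(32, 0, 2, 1) == 31   # maxpool_2d_default below
assert maxpool_out_dim(32, 0, 5, 3) == 10   # maxpool_2d_strides below
assert maxpool_out_dim(28, 4, 3, 1) == 30   # maxpool_2d_pads below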
Floor value of the dimension is used" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "maxpool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2]\nstrides = [1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_1d_default\")" + }, + { + "summary": "maxpool_2d_ceil", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_ceil\")" + }, + { + "summary": "maxpool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_default\")" + }, + { + "summary": "maxpool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_dilations\")" + }, + { + "summary": "maxpool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\n\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_pads\")" + }, + { + "summary": 
"maxpool_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_pads\")" + }, + { + "summary": "maxpool_2d_precomputed_same_upper", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9, 10], [17, 19, 20], [22, 24, 25]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_same_upper\"\n)" + }, + { + "summary": "maxpool_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\", inputs=[\"x\"], outputs=[\"y\"], kernel_shape=[2, 2], strides=[2, 2]\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_strides\"\n)" + }, + { + "summary": "maxpool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_same_lower\")" + }, + { + "summary": "maxpool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n 
\"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_same_upper\")" + }, + { + "summary": "maxpool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\", inputs=[\"x\"], outputs=[\"y\"], kernel_shape=[5, 5], strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_strides\")" + }, + { + "summary": "maxpool_2d_uint8", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.uint8)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.uint8)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_uint8\")" + }, + { + "summary": "maxpool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_default\")" + }, + { + "summary": "maxpool_3d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=[2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[[11, 12], [15, 16]], [[11, 12], [15, 16]]]]]).astype(\n np.float32\n)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_dilations\")" + }, + { + "summary": "maxpool_3d_dilations_use_ref_impl", + "code": "\"\"\"input_shape: [1, 1, 4, 
4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\ndilations = [2, 2, 2]\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nceil_mode = False\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=dilations,\n)\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\n\nx_shape = x.shape[2:]\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode\n)\npadded = x\ny = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"MAX\",\n pads,\n dilations=dilations,\n)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_dilations_use_ref_impl\"\n)" + }, + { + "summary": "maxpool_3d_dilations_use_ref_impl_large", + "code": "x_shape = (32, 32, 32)\ndilations = (2, 2, 2)\nkernel_shape = (5, 5, 5)\nstrides = (3, 3, 3)\nceil_mode = True\n\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n dilations=dilations,\n ceil_mode=ceil_mode,\n)\n\nx = np.random.randn(1, 1, *x_shape).astype(np.float32)\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode\n)\npadded = np.pad(\n x,\n (\n (0, 0),\n (0, 0),\n (pads[0], pads[3]),\n (pads[1], pads[4]),\n (pads[2], pads[5]),\n ),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"MAX\",\n pads,\n dilations=dilations,\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_maxpool_3d_dilations_use_ref_impl_large\",\n)" + }, + { + "summary": "maxpool_with_argmax_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\", \"z\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.float32)\nz = np.array(\n [\n [\n [\n [12, 13, 14, 14, 14],\n [17, 18, 19, 19, 19],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n ]\n ]\n ]\n).astype(np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y, z],\n name=\"test_maxpool_with_argmax_2d_precomputed_pads\",\n)" + }, + { + "summary": "maxpool_with_argmax_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\", \"z\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n storage_order=1,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)\nz = np.array([[[[6, 16], [8, 
18]]]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y, z],\n name=\"test_maxpool_with_argmax_2d_precomputed_strides\",\n)" + } + ], + "category": "Pool" + }, + { + "name": "MaxPool", + "module": "ai.onnx", + "version": 8, + "description": "MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n max pooling consisting of computing the max on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is maximum number of elements exclude pad.\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "storage_order", + "type": "int64", + "required": false, + "description": "The storage order of the tensor. 0 is row major, and 1 is column major." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. 
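Opset 8 adds the storage_order attribute and the optional Indices output (described just below). As best I can read it from the precomputed example, the flattened index within one (n, c) plane of shape (H, W) works as follows (illustration only):

H = W = 5
r, c = 1, 3              # position of a window's max within the plane
row_major = r * W + c    # 8,  storage_order = 0
col_major = c * H + r    # 16, storage_order = 1, as in
                         # test_maxpool_with_argmax_2d_precomputed_strides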
Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used" + }, + { + "name": "Indices", + "type": "I", + "option": "optional", + "description": "Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x ... x Dn)." + } + ], + "min_output": 1, + "max_output": 2, + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain index tensor to int64", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "maxpool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2]\nstrides = [1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_1d_default\")" + }, + { + "summary": "maxpool_2d_ceil", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_ceil\")" + }, + { + "summary": "maxpool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_default\")" + }, + { + "summary": "maxpool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[11, 12], [15, 
16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_dilations\")" + }, + { + "summary": "maxpool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\n\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_pads\")" + }, + { + "summary": "maxpool_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_pads\")" + }, + { + "summary": "maxpool_2d_precomputed_same_upper", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9, 10], [17, 19, 20], [22, 24, 25]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_same_upper\"\n)" + }, + { + "summary": "maxpool_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\", inputs=[\"x\"], outputs=[\"y\"], kernel_shape=[2, 2], strides=[2, 2]\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_strides\"\n)" + }, + { + "summary": "maxpool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = 
get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_same_lower\")" + }, + { + "summary": "maxpool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_same_upper\")" + }, + { + "summary": "maxpool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\", inputs=[\"x\"], outputs=[\"y\"], kernel_shape=[5, 5], strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_strides\")" + }, + { + "summary": "maxpool_2d_uint8", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.uint8)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.uint8)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_uint8\")" + }, + { + "summary": "maxpool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, 
strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_default\")" + }, + { + "summary": "maxpool_3d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=[2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[[11, 12], [15, 16]], [[11, 12], [15, 16]]]]]).astype(\n np.float32\n)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_dilations\")" + }, + { + "summary": "maxpool_3d_dilations_use_ref_impl", + "code": "\"\"\"input_shape: [1, 1, 4, 4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\ndilations = [2, 2, 2]\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nceil_mode = False\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=dilations,\n)\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\n\nx_shape = x.shape[2:]\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode\n)\npadded = x\ny = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"MAX\",\n pads,\n dilations=dilations,\n)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_dilations_use_ref_impl\"\n)" + }, + { + "summary": "maxpool_3d_dilations_use_ref_impl_large", + "code": "x_shape = (32, 32, 32)\ndilations = (2, 2, 2)\nkernel_shape = (5, 5, 5)\nstrides = (3, 3, 3)\nceil_mode = True\n\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n dilations=dilations,\n ceil_mode=ceil_mode,\n)\n\nx = np.random.randn(1, 1, *x_shape).astype(np.float32)\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode\n)\npadded = np.pad(\n x,\n (\n (0, 0),\n (0, 0),\n (pads[0], pads[3]),\n (pads[1], pads[4]),\n (pads[2], pads[5]),\n ),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"MAX\",\n pads,\n dilations=dilations,\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_maxpool_3d_dilations_use_ref_impl_large\",\n)" + }, + { + "summary": "maxpool_with_argmax_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\", \"z\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = 
np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.float32)\nz = np.array(\n [\n [\n [\n [12, 13, 14, 14, 14],\n [17, 18, 19, 19, 19],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n ]\n ]\n ]\n).astype(np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y, z],\n name=\"test_maxpool_with_argmax_2d_precomputed_pads\",\n)" + }, + { + "summary": "maxpool_with_argmax_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\", \"z\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n storage_order=1,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)\nz = np.array([[[[6, 16], [8, 18]]]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y, z],\n name=\"test_maxpool_with_argmax_2d_precomputed_strides\",\n)" + } + ], + "category": "Pool" + }, + { + "name": "MaxPool", + "module": "ai.onnx", + "version": 10, + "description": "MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n Max pooling consists of computing the max over all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be as follows:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is the sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using it, the output spatial shape will be as follows:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And the pad shape will be as follows if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is the maximum of the elements, excluding padding.\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size matches the input. In case of an odd number, add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no padding." + }, + { + "name": "ceil_mode", + "type": "int64", + "required": false, + "description": "Whether to use ceil or floor (default) to compute the output shape."
+ }, + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "Dilation value along each spatial axis of filter." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "storage_order", + "type": "int64", + "required": false, + "description": "The storage order of the tensor. 0 is row major, and 1 is column major." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used" + }, + { + "name": "Indices", + "type": "I", + "option": "optional", + "description": "Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x ... x Dn)." 
+ } + ], + "min_output": 1, + "max_output": 2, + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain index tensor to int64", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "maxpool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2]\nstrides = [1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_1d_default\")" + }, + { + "summary": "maxpool_2d_ceil", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_ceil\")" + }, + { + "summary": "maxpool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_default\")" + }, + { + "summary": "maxpool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_dilations\")" + }, + { + "summary": "maxpool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\n\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, 
\"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_pads\")" + }, + { + "summary": "maxpool_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_pads\")" + }, + { + "summary": "maxpool_2d_precomputed_same_upper", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9, 10], [17, 19, 20], [22, 24, 25]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_same_upper\"\n)" + }, + { + "summary": "maxpool_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\", inputs=[\"x\"], outputs=[\"y\"], kernel_shape=[2, 2], strides=[2, 2]\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_strides\"\n)" + }, + { + "summary": "maxpool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_same_lower\")" + }, + { + "summary": "maxpool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.random.randn(1, 3, 32, 
32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_same_upper\")" + }, + { + "summary": "maxpool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\", inputs=[\"x\"], outputs=[\"y\"], kernel_shape=[5, 5], strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_strides\")" + }, + { + "summary": "maxpool_2d_uint8", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.uint8)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.uint8)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_uint8\")" + }, + { + "summary": "maxpool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_default\")" + }, + { + "summary": "maxpool_3d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=[2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[[11, 12], [15, 16]], [[11, 12], [15, 16]]]]]).astype(\n np.float32\n)\n\nexpect(node, inputs=[x], outputs=[y], 
name=\"test_maxpool_3d_dilations\")" + }, + { + "summary": "maxpool_3d_dilations_use_ref_impl", + "code": "\"\"\"input_shape: [1, 1, 4, 4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\ndilations = [2, 2, 2]\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nceil_mode = False\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=dilations,\n)\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\n\nx_shape = x.shape[2:]\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode\n)\npadded = x\ny = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"MAX\",\n pads,\n dilations=dilations,\n)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_dilations_use_ref_impl\"\n)" + }, + { + "summary": "maxpool_3d_dilations_use_ref_impl_large", + "code": "x_shape = (32, 32, 32)\ndilations = (2, 2, 2)\nkernel_shape = (5, 5, 5)\nstrides = (3, 3, 3)\nceil_mode = True\n\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n dilations=dilations,\n ceil_mode=ceil_mode,\n)\n\nx = np.random.randn(1, 1, *x_shape).astype(np.float32)\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode\n)\npadded = np.pad(\n x,\n (\n (0, 0),\n (0, 0),\n (pads[0], pads[3]),\n (pads[1], pads[4]),\n (pads[2], pads[5]),\n ),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"MAX\",\n pads,\n dilations=dilations,\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_maxpool_3d_dilations_use_ref_impl_large\",\n)" + }, + { + "summary": "maxpool_with_argmax_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\", \"z\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.float32)\nz = np.array(\n [\n [\n [\n [12, 13, 14, 14, 14],\n [17, 18, 19, 19, 19],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n ]\n ]\n ]\n).astype(np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y, z],\n name=\"test_maxpool_with_argmax_2d_precomputed_pads\",\n)" + }, + { + "summary": "maxpool_with_argmax_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\", \"z\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n storage_order=1,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n 
]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)\nz = np.array([[[[6, 16], [8, 18]]]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y, z],\n name=\"test_maxpool_with_argmax_2d_precomputed_strides\",\n)" + } + ], + "category": "Pool" + }, + { + "name": "MaxPool", + "module": "ai.onnx", + "version": 11, + "description": "MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n Max pooling consists of computing the max over all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be as follows:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is the sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using it, the output spatial shape will be as follows:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And the pad shape will be as follows if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is the maximum of the elements, excluding padding.\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size matches the input. In case of an odd number, add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no padding." + }, + { + "name": "ceil_mode", + "type": "int64", + "required": false, + "description": "Whether to use ceil or floor (default) to compute the output shape." + }, + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end part of the corresponding axis. The `pads` format should be as follows: [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along the start and end of each spatial axis."
+ }, + { + "name": "storage_order", + "type": "int64", + "required": false, + "description": "The storage order of the tensor. 0 is row major, and 1 is column major." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used" + }, + { + "name": "Indices", + "type": "I", + "option": "optional", + "description": "Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x ... x Dn)." + } + ], + "min_output": 1, + "max_output": 2, + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain index tensor to int64", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "maxpool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2]\nstrides = [1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_1d_default\")" + }, + { + "summary": "maxpool_2d_ceil", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_ceil\")" + }, + { + "summary": "maxpool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = 
None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_default\")" + }, + { + "summary": "maxpool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_dilations\")" + }, + { + "summary": "maxpool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\n\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_pads\")" + }, + { + "summary": "maxpool_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_pads\")" + }, + { + "summary": "maxpool_2d_precomputed_same_upper", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9, 10], [17, 19, 20], [22, 24, 25]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_same_upper\"\n)" + }, + { + "summary": "maxpool_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\", inputs=[\"x\"], outputs=[\"y\"], kernel_shape=[2, 2], strides=[2, 2]\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 
24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_strides\"\n)" + }, + { + "summary": "maxpool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_same_lower\")" + }, + { + "summary": "maxpool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_same_upper\")" + }, + { + "summary": "maxpool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\", inputs=[\"x\"], outputs=[\"y\"], kernel_shape=[5, 5], strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_strides\")" + }, + { + "summary": "maxpool_2d_uint8", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.uint8)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 
20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.uint8)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_uint8\")" + }, + { + "summary": "maxpool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_default\")" + }, + { + "summary": "maxpool_3d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=[2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[[11, 12], [15, 16]], [[11, 12], [15, 16]]]]]).astype(\n np.float32\n)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_dilations\")" + }, + { + "summary": "maxpool_3d_dilations_use_ref_impl", + "code": "\"\"\"input_shape: [1, 1, 4, 4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\ndilations = [2, 2, 2]\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nceil_mode = False\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=dilations,\n)\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\n\nx_shape = x.shape[2:]\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode\n)\npadded = x\ny = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"MAX\",\n pads,\n dilations=dilations,\n)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_dilations_use_ref_impl\"\n)" + }, + { + "summary": "maxpool_3d_dilations_use_ref_impl_large", + "code": "x_shape = (32, 32, 32)\ndilations = (2, 2, 2)\nkernel_shape = (5, 5, 5)\nstrides = (3, 3, 3)\nceil_mode = True\n\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n dilations=dilations,\n ceil_mode=ceil_mode,\n)\n\nx = np.random.randn(1, 1, *x_shape).astype(np.float32)\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode\n)\npadded = np.pad(\n x,\n (\n (0, 0),\n (0, 0),\n (pads[0], pads[3]),\n (pads[1], pads[4]),\n (pads[2], pads[5]),\n ),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(\n 
padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"MAX\",\n pads,\n dilations=dilations,\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_maxpool_3d_dilations_use_ref_impl_large\",\n)" + }, + { + "summary": "maxpool_with_argmax_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\", \"z\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.float32)\nz = np.array(\n [\n [\n [\n [12, 13, 14, 14, 14],\n [17, 18, 19, 19, 19],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n ]\n ]\n ]\n).astype(np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y, z],\n name=\"test_maxpool_with_argmax_2d_precomputed_pads\",\n)" + }, + { + "summary": "maxpool_with_argmax_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\", \"z\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n storage_order=1,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)\nz = np.array([[[[6, 16], [8, 18]]]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y, z],\n name=\"test_maxpool_with_argmax_2d_precomputed_strides\",\n)" + } + ], + "category": "Pool" + }, + { + "name": "MaxPool", + "module": "ai.onnx", + "version": 12, + "description": "MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n Max pooling consists of computing the max over all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape is calculated differently\n depending on whether explicit padding is used (via pads) or auto padding is used (via auto_pad).\n With explicit padding (https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d):\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`.\n\n `auto_pad` is a DEPRECATED attribute. 
If you are using it, the output spatial shape will be as follows when ceil_mode is enabled:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n or when ceil_mode is disabled (https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D):\n ```\n VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]) + 1\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor((input_spatial_shape[i] - 1) / strides_spatial_shape[i]) + 1\n ```\n And the pad shape will be as follows if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is the maximum of the elements, excluding padding.\n ", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER." + }, + { + "name": "ceil_mode", + "type": "int64", + "required": false, + "description": "Whether to use ceil or floor (default) to compute the output shape." + }, + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end part of the corresponding axis. The `pads` format should be as follows: [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along the start and end of each spatial axis." + }, + { + "name": "storage_order", + "type": "int64", + "required": false, + "description": "The storage order of the tensor. 0 is row major, and 1 is column major. This attribute is used only to convert an n-tuple index value into a single integer value for producing the second output." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis."
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used" + }, + { + "name": "Indices", + "type": "I", + "option": "optional", + "description": "Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x ... x Dn)." + } + ], + "min_output": 1, + "max_output": 2, + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float and 8 bit tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(uint8)" + ] + }, + { + "description": "Constrain index tensor to int64", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "maxpool_1d_default", + "code": "\"\"\"input_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2]\nstrides = [1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_1d_default\")" + }, + { + "summary": "maxpool_2d_ceil", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_ceil\")" + }, + { + "summary": "maxpool_2d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_default\")" + }, + { + 
"summary": "maxpool_2d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_dilations\")" + }, + { + "summary": "maxpool_2d_pads", + "code": "\"\"\"input_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npads = [pad_top, pad_left, pad_bottom, pad_right]\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\n\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_pads\")" + }, + { + "summary": "maxpool_2d_precomputed_pads", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_pads\")" + }, + { + "summary": "maxpool_2d_precomputed_same_upper", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9, 10], [17, 19, 20], [22, 24, 25]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_same_upper\"\n)" + }, + { + "summary": "maxpool_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\", inputs=[\"x\"], outputs=[\"y\"], kernel_shape=[2, 2], strides=[2, 2]\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_precomputed_strides\"\n)" + }, + { + "summary": "maxpool_2d_same_lower", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 
32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_LOWER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_LOWER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_same_lower\")" + }, + { + "summary": "maxpool_2d_same_upper", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n auto_pad=\"SAME_UPPER\",\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape_auto_pad(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides\n)\npad_shape = get_pad_shape(\n \"SAME_UPPER\", x_shape[2:], kernel_shape, strides, out_shape\n)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(\n x,\n ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),\n mode=\"constant\",\n constant_values=np.nan,\n)\npads = [pad_top, pad_left, pad_bottom, pad_right]\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\", pads)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_same_upper\")" + }, + { + "summary": "maxpool_2d_strides", + "code": "\"\"\"input_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\", inputs=[\"x\"], outputs=[\"y\"], kernel_shape=[5, 5], strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape, pads = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_strides\")" + }, + { + "summary": "maxpool_2d_uint8", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.uint8)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.uint8)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_2d_uint8\")" + }, + { + "summary": "maxpool_3d_default", + "code": "\"\"\"input_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 
31]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\npads = None\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape, _ = get_output_shape_explicit_padding(\n pads, x_shape[2:], kernel_shape, strides\n)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, \"MAX\")\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_default\")" + }, + { + "summary": "maxpool_3d_dilations", + "code": "\"\"\"input_shape: [1, 1, 4, 4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=[2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[[11, 12], [15, 16]], [[11, 12], [15, 16]]]]]).astype(\n np.float32\n)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_dilations\")" + }, + { + "summary": "maxpool_3d_dilations_use_ref_impl", + "code": "\"\"\"input_shape: [1, 1, 4, 4, 4]\noutput_shape: [1, 1, 2, 2, 2]\n\"\"\"\ndilations = [2, 2, 2]\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nceil_mode = False\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2, 2],\n strides=[1, 1, 1],\n dilations=dilations,\n)\nx = np.array(\n [\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ],\n ]\n ]\n ]\n).astype(np.float32)\n\nx_shape = x.shape[2:]\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode\n)\npadded = x\ny = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"MAX\",\n pads,\n dilations=dilations,\n)\n\nexpect(\n node, inputs=[x], outputs=[y], name=\"test_maxpool_3d_dilations_use_ref_impl\"\n)" + }, + { + "summary": "maxpool_3d_dilations_use_ref_impl_large", + "code": "x_shape = (32, 32, 32)\ndilations = (2, 2, 2)\nkernel_shape = (5, 5, 5)\nstrides = (3, 3, 3)\nceil_mode = True\n\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n strides=strides,\n dilations=dilations,\n ceil_mode=ceil_mode,\n)\n\nx = np.random.randn(1, 1, *x_shape).astype(np.float32)\nout_shape, pads = get_output_shape_explicit_padding(\n None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode\n)\npadded = np.pad(\n x,\n (\n (0, 0),\n (0, 0),\n (pads[0], pads[3]),\n (pads[1], pads[4]),\n (pads[2], pads[5]),\n ),\n mode=\"constant\",\n constant_values=0,\n)\ny = pool(\n padded,\n (1, 1, *x_shape),\n kernel_shape,\n strides,\n out_shape,\n \"MAX\",\n pads,\n dilations=dilations,\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_maxpool_3d_dilations_use_ref_impl_large\",\n)" + }, + { + "summary": "maxpool_with_argmax_2d_precomputed_pads", + "code": 
"\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\", \"z\"],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array(\n [\n [\n [\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n ]\n ]\n ]\n).astype(np.float32)\nz = np.array(\n [\n [\n [\n [12, 13, 14, 14, 14],\n [17, 18, 19, 19, 19],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n ]\n ]\n ]\n).astype(np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y, z],\n name=\"test_maxpool_with_argmax_2d_precomputed_pads\",\n)" + }, + { + "summary": "maxpool_with_argmax_2d_precomputed_strides", + "code": "\"\"\"input_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n \"MaxPool\",\n inputs=[\"x\"],\n outputs=[\"y\", \"z\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n storage_order=1,\n)\nx = np.array(\n [\n [\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n ]\n ]\n ]\n).astype(np.float32)\ny = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)\nz = np.array([[[[6, 16], [8, 18]]]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y, z],\n name=\"test_maxpool_with_argmax_2d_precomputed_strides\",\n)" + } + ], + "category": "Pool" + }, + { + "name": "MaxRoiPool", + "module": "ai.onnx", + "version": 1, + "description": "ROI max pool consumes an input tensor X and region of interests (RoIs) to\n apply max pooling across each RoI, to produce output 4-D tensor of shape\n (num_rois, channels, pooled_shape[0], pooled_shape[1]).", + "attributes": [ + { + "name": "pooled_shape", + "type": "int64[]", + "required": true, + "description": "ROI pool output shape (height, width)." + }, + { + "name": "spatial_scale", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Multiplicative spatial scale factor to translate ROI coordinates from their input scale to the scale used when pooling." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data." + }, + { + "name": "rois", + "type": "T", + "description": "RoIs (Regions of Interest) to pool over. Should be a 2-D tensor of shape (num_rois, 5) given as [[batch_id, x1, y1, x2, y2], ...]." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "RoI pooled output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1])." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "category": "Pool" + }, + { + "name": "MaxUnpool", + "module": "ai.onnx", + "version": 9, + "description": "MaxUnpool essentially computes the partial inverse of the MaxPool op.\n The input information to this op is typically the output information from a MaxPool op. 
The first\n input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output)\n from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corresponding\n to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op.\n The third (optional) input is a tensor that specifies the output size of the unpooling operation.\n\nMaxUnpool is intended to do a 'partial' inverse of the MaxPool op. 'Partial' because all the non-maximal\n values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling\n the result of an unpooling operation should give back the original input to the unpooling op.\n\nMaxUnpool can produce the same output size for several input sizes, which makes the unpooling op ambiguous.\n The third input argument, output_size, is meant to disambiguate the op and produce an output tensor of\n known/predictable size.\n\nIn addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads,\n which define the exact unpooling op. The attributes typically have the same values as the corresponding\n pooling op that the unpooling op is trying to invert.\n", + "attributes": [ + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follows: [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input data tensor that has to be unpooled. This tensor is typically the first output of the MaxPool op. Dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non-image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + }, + { + "name": "I", + "type": "T2", + "description": "Input data tensor containing the indices corresponding to elements in the first input tensor X. This tensor is typically the second output of the MaxPool op. Dimensions must be the same as input tensor X. The indices are linear, i.e. computed considering the tensor as a flattened 1-D tensor, assuming row-major storage. Also, the linear indices should not consider padding. So the values in indices are in the range [0, N x C x D1 x ... x Dn)." + }, + { + "name": "output_shape", + "type": "T2", + "option": "optional", + "description": "The shape of the output can be explicitly set, which will cause pads values to be auto generated.
If 'output_shape' is specified, 'pads' values are ignored." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T1", + "description": "Output data tensor that contains the result of the unpooling." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain index tensor to int64", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "with_output_shape", + "code": "node = onnx.helper.make_node(\n \"MaxUnpool\",\n inputs=[\"xT\", \"xI\", \"output_shape\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n)\nxT = np.array([[[[5, 6], [7, 8]]]], dtype=np.float32)\nxI = np.array([[[[5, 7], [13, 15]]]], dtype=np.int64)\noutput_shape = np.array((1, 1, 5, 5), dtype=np.int64)\ny = np.array(\n [\n [\n [\n [0, 0, 0, 0, 0],\n [0, 5, 0, 6, 0],\n [0, 0, 0, 0, 0],\n [0, 7, 0, 8, 0],\n [0, 0, 0, 0, 0],\n ]\n ]\n ],\n dtype=np.float32,\n)\nexpect(\n node,\n inputs=[xT, xI, output_shape],\n outputs=[y],\n name=\"test_maxunpool_export_with_output_shape\",\n)" + }, + { + "summary": "without_output_shape", + "code": "node = onnx.helper.make_node(\n \"MaxUnpool\",\n inputs=[\"xT\", \"xI\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n)\nxT = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)\nxI = np.array([[[[5, 7], [13, 15]]]], dtype=np.int64)\ny = np.array(\n [[[[0, 0, 0, 0], [0, 1, 0, 2], [0, 0, 0, 0], [0, 3, 0, 4]]]],\n dtype=np.float32,\n)\nexpect(\n node,\n inputs=[xT, xI],\n outputs=[y],\n name=\"test_maxunpool_export_without_output_shape\",\n)" + } + ] + }, + { + "name": "MaxUnpool", + "module": "ai.onnx", + "version": 11, + "description": "MaxUnpool essentially computes the partial inverse of the MaxPool op.\n The input information to this op is typically the output information from a MaxPool op. The first\n input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output)\n from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corresponding\n to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op.\n The third (optional) input is a tensor that specifies the output size of the unpooling operation.\n\nMaxUnpool is intended to do a 'partial' inverse of the MaxPool op. 'Partial' because all the non-maximal\n values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling\n the result of an unpooling operation should give back the original input to the unpooling op.\n\nMaxUnpool can produce the same output size for several input sizes, which makes the unpooling op ambiguous.\n The third input argument, output_size, is meant to disambiguate the op and produce an output tensor of\n known/predictable size.\n\nIn addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads,\n which define the exact unpooling op. The attributes typically have the same values as the corresponding\n pooling op that the unpooling op is trying to invert.\n", + "attributes": [ + { + "name": "kernel_shape", + "type": "int64[]", + "required": true, + "description": "The size of the kernel along each axis."
+ }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follows: [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input data tensor that has to be unpooled. This tensor is typically the first output of the MaxPool op. Dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non-image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]." + }, + { + "name": "I", + "type": "T2", + "description": "Input data tensor containing the indices corresponding to elements in the first input tensor X. This tensor is typically the second output of the MaxPool op. Dimensions must be the same as input tensor X. The indices are linear, i.e. computed considering the tensor as a flattened 1-D tensor, assuming row-major storage. Also, the linear indices should not consider padding. So the values in indices are in the range [0, N x C x D1 x ... x Dn)." + }, + { + "name": "output_shape", + "type": "T2", + "option": "optional", + "description": "The shape of the output can be explicitly set, which will cause pads values to be auto generated. If 'output_shape' is specified, 'pads' values are ignored." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T1", + "description": "Output data tensor that contains the result of the unpooling."
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain index tensor to int64", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "with_output_shape", + "code": "node = onnx.helper.make_node(\n \"MaxUnpool\",\n inputs=[\"xT\", \"xI\", \"output_shape\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n)\nxT = np.array([[[[5, 6], [7, 8]]]], dtype=np.float32)\nxI = np.array([[[[5, 7], [13, 15]]]], dtype=np.int64)\noutput_shape = np.array((1, 1, 5, 5), dtype=np.int64)\ny = np.array(\n [\n [\n [\n [0, 0, 0, 0, 0],\n [0, 5, 0, 6, 0],\n [0, 0, 0, 0, 0],\n [0, 7, 0, 8, 0],\n [0, 0, 0, 0, 0],\n ]\n ]\n ],\n dtype=np.float32,\n)\nexpect(\n node,\n inputs=[xT, xI, output_shape],\n outputs=[y],\n name=\"test_maxunpool_export_with_output_shape\",\n)" + }, + { + "summary": "without_output_shape", + "code": "node = onnx.helper.make_node(\n \"MaxUnpool\",\n inputs=[\"xT\", \"xI\"],\n outputs=[\"y\"],\n kernel_shape=[2, 2],\n strides=[2, 2],\n)\nxT = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)\nxI = np.array([[[[5, 7], [13, 15]]]], dtype=np.int64)\ny = np.array(\n [[[[0, 0, 0, 0], [0, 1, 0, 2], [0, 0, 0, 0], [0, 3, 0, 4]]]],\n dtype=np.float32,\n)\nexpect(\n node,\n inputs=[xT, xI],\n outputs=[y],\n name=\"test_maxunpool_export_without_output_shape\",\n)" + } + ] + }, + { + "name": "Mean", + "module": "ai.onnx", + "version": 1, + "description": "Element-wise mean of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for Mean." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "mean", + "type": "T", + "description": "Output tensor. Same dimension as inputs." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "mean", + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([2, 3, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_mean_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_mean_one_input\")\n\nresult = np.divide(np.add(data_0, data_1), 2.0)\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_mean_two_inputs\"\n)" + } + ] + }, + { + "name": "Mean", + "module": "ai.onnx", + "version": 6, + "description": "Element-wise mean of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for Mean." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "mean", + "type": "T", + "description": "Output tensor. Same dimension as inputs." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "mean", + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([2, 3, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_mean_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_mean_one_input\")\n\nresult = np.divide(np.add(data_0, data_1), 2.0)\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_mean_two_inputs\"\n)" + } + ] + }, + { + "name": "Mean", + "module": "ai.onnx", + "version": 8, + "description": "Element-wise mean of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for mean." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "mean", + "type": "T", + "description": "Output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "mean", + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([2, 3, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_mean_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_mean_one_input\")\n\nresult = np.divide(np.add(data_0, data_1), 2.0)\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_mean_two_inputs\"\n)" + } + ] + }, + { + "name": "Mean", + "module": "ai.onnx", + "version": 13, + "description": "Element-wise mean of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for mean." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "mean", + "type": "T", + "description": "Output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "mean", + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([2, 3, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_mean_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_mean_one_input\")\n\nresult = np.divide(np.add(data_0, data_1), 2.0)\nnode = onnx.helper.make_node(\n \"Mean\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_mean_two_inputs\"\n)" + } + ] + }, + { + "name": "MeanVarianceNormalization", + "module": "ai.onnx", + "version": 9, + "description": "A MeanVarianceNormalization Function: Perform mean variance normalization\n on the input tensor X using formula:
``` (X-EX)/sqrt(E(X-EX)^2) ```\n", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to calculate along axes [0,2,3] for calculating mean and variance along each channel. Two variables with the same C-coordinate are associated with the same mean and variance." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "meanvariancenormalization", + "code": "node = onnx.helper.make_node(\n \"MeanVarianceNormalization\", inputs=[\"X\"], outputs=[\"Y\"]\n)\n\ninput_data = np.array(\n [\n [\n [[0.8439683], [0.5665144], [0.05836735]],\n [[0.02916367], [0.12964272], [0.5060197]],\n [[0.79538304], [0.9411346], [0.9546573]],\n ],\n [\n [[0.17730942], [0.46192095], [0.26480448]],\n [[0.6746842], [0.01665257], [0.62473077]],\n [[0.9240844], [0.9722341], [0.11965699]],\n ],\n [\n [[0.41356155], [0.9129373], [0.59330076]],\n [[0.81929934], [0.7862604], [0.11799799]],\n [[0.69248444], [0.54119414], [0.07513223]],\n ],\n ],\n dtype=np.float32,\n)\n\n# Calculate expected output data\ndata_mean = np.mean(input_data, axis=(0, 2, 3), keepdims=1)\ndata_mean_squared = np.power(data_mean, 2)\ndata_squared = np.power(input_data, 2)\ndata_squared_mean = np.mean(data_squared, axis=(0, 2, 3), keepdims=1)\nstd = np.sqrt(data_squared_mean - data_mean_squared)\nexpected_output = (input_data - data_mean) / (std + 1e-9)\n\nexpect(node, inputs=[input_data], outputs=[expected_output], name=\"test_mvn\")" + } + ] + }, + { + "name": "MeanVarianceNormalization", + "module": "ai.onnx", + "version": 13, + "description": "A MeanVarianceNormalization Function: Perform mean variance normalization\n on the input tensor X using formula: `(X-EX)/sqrt(E(X-EX)^2)`\n", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to calculate along axes [0,2,3] for calculating mean and variance along each channel. Two variables with the same C-coordinate are associated with the same mean and variance." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "meanvariancenormalization", + "code": "node = onnx.helper.make_node(\n \"MeanVarianceNormalization\", inputs=[\"X\"], outputs=[\"Y\"]\n)\n\ninput_data = np.array(\n [\n [\n [[0.8439683], [0.5665144], [0.05836735]],\n [[0.02916367], [0.12964272], [0.5060197]],\n [[0.79538304], [0.9411346], [0.9546573]],\n ],\n [\n [[0.17730942], [0.46192095], [0.26480448]],\n [[0.6746842], [0.01665257], [0.62473077]],\n [[0.9240844], [0.9722341], [0.11965699]],\n ],\n [\n [[0.41356155], [0.9129373], [0.59330076]],\n [[0.81929934], [0.7862604], [0.11799799]],\n [[0.69248444], [0.54119414], [0.07513223]],\n ],\n ],\n dtype=np.float32,\n)\n\n# Calculate expected output data\ndata_mean = np.mean(input_data, axis=(0, 2, 3), keepdims=1)\ndata_mean_squared = np.power(data_mean, 2)\ndata_squared = np.power(input_data, 2)\ndata_squared_mean = np.mean(data_squared, axis=(0, 2, 3), keepdims=1)\nstd = np.sqrt(data_squared_mean - data_mean_squared)\nexpected_output = (input_data - data_mean) / (std + 1e-9)\n\nexpect(node, inputs=[input_data], outputs=[expected_output], name=\"test_mvn\")" + } + ] + }, + { + "name": "MelWeightMatrix", + "module": "ai.onnx", + "version": 17, + "description": "Generate a MelWeightMatrix that can be used to re-weight a Tensor containing a linearly sampled frequency spectra (from DFT or STFT) into num_mel_bins frequency information based on the [lower_edge_hertz, upper_edge_hertz] range on the mel scale.\nThis function defines the mel scale in terms of a frequency in hertz according to the following formula:\n\n mel(f) = 2595 * log10(1 + f/700)\n\nIn the returned matrix, all the triangles (filterbanks) have a peak value of 1.0.\n\nThe returned MelWeightMatrix can be used to right-multiply a spectrogram S of shape [frames, num_spectrogram_bins] of linear scale spectrum values (e.g. STFT magnitudes) to generate a \"mel spectrogram\" M of shape [frames, num_mel_bins].\n", + "attributes": [ + { + "name": "output_datatype", + "type": "int64", + "required": false, + "default": 1, + "description": "The data type of the output tensor. Strictly must be one of the values from DataType enum in TensorProto whose values correspond to T3. The default value is 1 = FLOAT. " + } + ], + "inputs": [ + { + "name": "num_mel_bins", + "type": "T1", + "description": "The number of bands in the mel spectrum." + }, + { + "name": "dft_length", + "type": "T1", + "description": "The size of the original DFT. The size of the original DFT is used to infer the size of the onesided DFT, which is understood to be floor(dft_length/2) + 1, i.e. the spectrogram only contains the nonredundant DFT bins." + }, + { + "name": "sample_rate", + "type": "T1", + "description": "Samples per second of the input signal used to create the spectrogram. Used to figure out the frequencies corresponding to each spectrogram bin, which dictates how they are mapped into the mel scale." 
+ }, + { + "name": "lower_edge_hertz", + "type": "T2", + "description": "Lower bound on the frequencies to be included in the mel spectrum. This corresponds to the lower edge of the lowest triangular band." + }, + { + "name": "upper_edge_hertz", + "type": "T2", + "description": "The desired top edge of the highest frequency band." + } + ], + "min_input": 5, + "max_input": 5, + "outputs": [ + { + "name": "output", + "type": "T3", + "description": "The Mel Weight Matrix. The output has the shape: [floor(dft_length/2) + 1][num_mel_bins]." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain to integer tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + }, + { + "description": "Constrain to float tensors", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float)", + "tensor(float16)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain to any numerical types.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "melweightmatrix", + "code": "node = onnx.helper.make_node(\n \"MelWeightMatrix\",\n inputs=[\n \"num_mel_bins\",\n \"dft_length\",\n \"sample_rate\",\n \"lower_edge_hertz\",\n \"upper_edge_hertz\",\n ],\n outputs=[\"output\"],\n)\n\nnum_mel_bins = np.int32(8)\ndft_length = np.int32(16)\nsample_rate = np.int32(8192)\nlower_edge_hertz = np.float32(0)\nupper_edge_hertz = np.float32(8192 / 2)\n\nnum_spectrogram_bins = dft_length // 2 + 1\nfrequency_bins = np.arange(0, num_mel_bins + 2)\n\nlow_frequency_mel = 2595 * np.log10(1 + lower_edge_hertz / 700)\nhigh_frequency_mel = 2595 * np.log10(1 + upper_edge_hertz / 700)\nmel_step = (high_frequency_mel - low_frequency_mel) / frequency_bins.shape[0]\n\nfrequency_bins = frequency_bins * mel_step + low_frequency_mel\nfrequency_bins = 700 * (np.power(10, (frequency_bins / 2595)) - 1)\nfrequency_bins = ((dft_length + 1) * frequency_bins) // sample_rate\nfrequency_bins = frequency_bins.astype(int)\n\noutput = np.zeros((num_spectrogram_bins, num_mel_bins))\noutput.flags.writeable = True\n\nfor i in range(num_mel_bins):\n lower_frequency_value = frequency_bins[i] # left\n center_frequency_point = frequency_bins[i + 1] # center\n higher_frequency_point = frequency_bins[i + 2] # right\n low_to_center = center_frequency_point - lower_frequency_value\n if low_to_center == 0:\n output[center_frequency_point, i] = 1\n else:\n for j in range(lower_frequency_value, center_frequency_point + 1):\n output[j, i] = float(j - lower_frequency_value) / float(\n low_to_center\n )\n center_to_high = higher_frequency_point - center_frequency_point\n if center_to_high > 0:\n for j in range(center_frequency_point, higher_frequency_point):\n output[j, i] = float(higher_frequency_point - j) / float(\n center_to_high\n )\n\n# Expected output\n# 1.000000, 1.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,\n# 0.000000, 0.000000, 1.000000, 1.000000, 0.000000, 0.000000, 0.000000, 0.000000,\n# 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000,\n# 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000,\n# 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000,\n# 
0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000,\n# 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,\n# 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,\n# 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,\noutput = output.astype(np.float32)\nexpect(\n node,\n inputs=[\n num_mel_bins,\n dft_length,\n sample_rate,\n lower_edge_hertz,\n upper_edge_hertz,\n ],\n outputs=[output],\n name=\"test_melweightmatrix\",\n)" + } + ] + }, + { + "name": "Min", + "module": "ai.onnx", + "version": 1, + "description": "Element-wise min of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for Min" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "min", + "type": "T", + "description": "Output tensor. Same dimension as inputs." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "min", + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 0]).astype(np.float32)\nresult = np.array([1, 2, 0]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_min_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_min_one_input\")\n\nresult = np.minimum(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_min_two_inputs\"\n)" + }, + { + "summary": "min_all_numeric_types", + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([1, 2, 1]).astype(op_dtype)\n node = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n )\n expect(\n node,\n inputs=[data_0, data_1],\n outputs=[result],\n name=f\"test_min_{np.dtype(op_dtype).name}\",\n )" + } + ] + }, + { + "name": "Min", + "module": "ai.onnx", + "version": 6, + "description": "Element-wise min of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for Min" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "min", + "type": "T", + "description": "Output tensor. Same dimension as inputs." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "min", + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 0]).astype(np.float32)\nresult = np.array([1, 2, 0]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_min_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_min_one_input\")\n\nresult = np.minimum(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_min_two_inputs\"\n)" + }, + { + "summary": "min_all_numeric_types", + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([1, 2, 1]).astype(op_dtype)\n node = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n )\n expect(\n node,\n inputs=[data_0, data_1],\n outputs=[result],\n name=f\"test_min_{np.dtype(op_dtype).name}\",\n )" + } + ] + }, + { + "name": "Min", + "module": "ai.onnx", + "version": 8, + "description": "Element-wise min of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for min." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "min", + "type": "T", + "description": "Output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "min", + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 0]).astype(np.float32)\nresult = np.array([1, 2, 0]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_min_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_min_one_input\")\n\nresult = np.minimum(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_min_two_inputs\"\n)" + }, + { + "summary": "min_all_numeric_types", + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([1, 2, 1]).astype(op_dtype)\n node = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n )\n expect(\n node,\n inputs=[data_0, data_1],\n outputs=[result],\n name=f\"test_min_{np.dtype(op_dtype).name}\",\n )" + } + ] + }, + { + "name": "Min", + "module": "ai.onnx", + "version": 12, + "description": "Element-wise min of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for min." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "min", + "type": "T", + "description": "Output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "min", + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 0]).astype(np.float32)\nresult = np.array([1, 2, 0]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_min_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_min_one_input\")\n\nresult = np.minimum(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_min_two_inputs\"\n)" + }, + { + "summary": "min_all_numeric_types", + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([1, 2, 1]).astype(op_dtype)\n node = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n )\n expect(\n node,\n inputs=[data_0, data_1],\n outputs=[result],\n name=f\"test_min_{np.dtype(op_dtype).name}\",\n )" + } + ] + }, + { + "name": "Min", + "module": "ai.onnx", + "version": 13, + "description": "Element-wise min of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for min." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "min", + "type": "T", + "description": "Output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "min", + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 0]).astype(np.float32)\nresult = np.array([1, 2, 0]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_min_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_min_one_input\")\n\nresult = np.minimum(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_min_two_inputs\"\n)" + }, + { + "summary": "min_all_numeric_types", + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([1, 2, 1]).astype(op_dtype)\n node = onnx.helper.make_node(\n \"Min\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n )\n expect(\n node,\n inputs=[data_0, data_1],\n outputs=[result],\n name=f\"test_min_{np.dtype(op_dtype).name}\",\n )" + } + ] + }, + { + "name": "Mish", + "module": "ai.onnx", + "version": 18, + "description": "Mish: A Self Regularized Non-Monotonic Neural Activation Function.\n\nPerform the linear unit element-wise on the input tensor X using formula:\n\n```\nmish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x}))\n```\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input X and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "mish", + "code": "node = onnx.helper.make_node(\"Mish\", inputs=[\"X\"], outputs=[\"Y\"])\n\ninput_data = np.linspace(-10, 10, 10000, dtype=np.float32)\n\n# Calculate expected output data\nexpected_output = input_data * np.tanh(np.log1p(np.exp(input_data)))\n\nexpect(node, inputs=[input_data], outputs=[expected_output], name=\"test_mish\")" + } + ] + }, + { + "name": "Mod", + "module": "ai.onnx", + "version": 10, + "description": "Performs element-wise binary modulus (with Numpy-style broadcasting support).\n The sign of the remainder is the same as that of the Divisor.\n\n The Mod operator can also behave like C fmod() or numpy.fmod. In this case, however, the sign of the remainder will be the same as that of the Dividend\n (in contrast to integer mod).
To force a behavior like numpy.fmod(), an 'fmod' attribute is provided.\n This attribute is set to 0 by default, causing the behavior to be like integer mod.\n Setting this attribute to 1 causes the remainder to be calculated similarly to that of numpy.fmod().\n\n If the input type is floating point, then the `fmod` attribute must be set to 1.\n\n In case of the divisor being zero, the results will be platform dependent.\n\n This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "attributes": [ + { + "name": "fmod", + "type": "int64", + "required": false, + "description": "Whether the operator should behave like fmod (default=0 meaning it will do integer mods); set this to 1 to force fmod treatment" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Dividend tensor" + }, + { + "name": "B", + "type": "T", + "description": "Divisor tensor" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Remainder tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "mod_broadcast", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.arange(0, 30).reshape([3, 2, 5]).astype(np.int32)\ny = np.array([7]).astype(np.int32)\nz = np.mod(x, y)\n# array([[[0, 1, 2, 3, 4],\n# [5, 6, 0, 1, 2]],\n\n# [[3, 4, 5, 6, 0],\n# [1, 2, 3, 4, 5]],\n\n# [[6, 0, 1, 2, 3],\n# [4, 5, 6, 0, 1]]], dtype=int32)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_broadcast\")" + }, + { + "summary": "mod_int64_fmod", + "code": "node = onnx.helper.make_node(\"Mod\", inputs=[\"x\", \"y\"], outputs=[\"z\"], fmod=1)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int64)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int64)\nz = np.fmod(x, y) # expected output [ 0, 1, 5, 0, -1, 3]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_int64_fmod\")" + }, + { + "summary": "mod_mixed_sign_float16", + "code": "node = onnx.helper.make_node(\"Mod\", inputs=[\"x\", \"y\"], outputs=[\"z\"], fmod=1)\n\nx = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float16)\ny = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float16)\nz = np.fmod(\n x, y\n) # expected output [-0.10156, 0.3984 , 5. , 0.10156, -0.3984 , 3.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_float16\")" + }, + { + "summary": "mod_mixed_sign_float32", + "code": "node = onnx.helper.make_node(\"Mod\", inputs=[\"x\", \"y\"], outputs=[\"z\"], fmod=1)\n\nx = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float32)\ny = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float32)\nz = np.fmod(\n x, y\n) # expected output [-0.10000038, 0.39999962, 5.
, 0.10000038, -0.39999962, 3.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_float32\")" + }, + { + "summary": "mod_mixed_sign_float64", + "code": "node = onnx.helper.make_node(\"Mod\", inputs=[\"x\", \"y\"], outputs=[\"z\"], fmod=1)\n\nx = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float64)\ny = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float64)\nz = np.fmod(x, y) # expected output [-0.1, 0.4, 5. , 0.1, -0.4, 3.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_float64\")" + }, + { + "summary": "mod_mixed_sign_int16", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int16)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int16)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_int16\")" + }, + { + "summary": "mod_mixed_sign_int32", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int32)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int32)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_int32\")" + }, + { + "summary": "mod_mixed_sign_int64", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int64)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int64)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_int64\")" + }, + { + "summary": "mod_mixed_sign_int8", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int8)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int8)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_int8\")" + }, + { + "summary": "mod_uint16", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint16)\ny = np.array([2, 3, 8]).astype(np.uint16)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_uint16\")" + }, + { + "summary": "mod_uint32", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint32)\ny = np.array([2, 3, 8]).astype(np.uint32)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_uint32\")" + }, + { + "summary": "mod_uint64", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint64)\ny = np.array([2, 3, 8]).astype(np.uint64)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_uint64\")" + }, + { + "summary": "mod_uint8", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint8)\ny = np.array([2, 3, 8]).astype(np.uint8)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_uint8\")" + } + ] + }, + { + "name": "Mod", + "module": "ai.onnx", + "version": 13, + "description": "Performs 
element-wise binary modulus (with Numpy-style broadcasting support).\n The sign of the remainder is the same as that of the Divisor.\n\n The Mod operator can also behave like C fmod() or numpy.fmod. In this case, however, the sign of the remainder will be the same as that of the Dividend\n (in contrast to integer mod). To force a behavior like numpy.fmod(), an 'fmod' attribute is provided.\n This attribute is set to 0 by default, causing the behavior to be like integer mod.\n Setting this attribute to 1 causes the remainder to be calculated similarly to that of numpy.fmod().\n\n If the input type is floating point, then the `fmod` attribute must be set to 1.\n\n In case of the divisor being zero, the results will be platform dependent.\n\n This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "attributes": [ + { + "name": "fmod", + "type": "int64", + "required": false, + "description": "Whether the operator should behave like fmod (default=0 meaning it will do integer mods); set this to 1 to force fmod treatment" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Dividend tensor" + }, + { + "name": "B", + "type": "T", + "description": "Divisor tensor" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Remainder tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "mod_broadcast", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.arange(0, 30).reshape([3, 2, 5]).astype(np.int32)\ny = np.array([7]).astype(np.int32)\nz = np.mod(x, y)\n# array([[[0, 1, 2, 3, 4],\n# [5, 6, 0, 1, 2]],\n\n# [[3, 4, 5, 6, 0],\n# [1, 2, 3, 4, 5]],\n\n# [[6, 0, 1, 2, 3],\n# [4, 5, 6, 0, 1]]], dtype=int32)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_broadcast\")" + }, + { + "summary": "mod_int64_fmod", + "code": "node = onnx.helper.make_node(\"Mod\", inputs=[\"x\", \"y\"], outputs=[\"z\"], fmod=1)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int64)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int64)\nz = np.fmod(x, y) # expected output [ 0, 1, 5, 0, -1, 3]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_int64_fmod\")" + }, + { + "summary": "mod_mixed_sign_float16", + "code": "node = onnx.helper.make_node(\"Mod\", inputs=[\"x\", \"y\"], outputs=[\"z\"], fmod=1)\n\nx = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float16)\ny = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float16)\nz = np.fmod(\n x, y\n) # expected output [-0.10156, 0.3984 , 5.
, 0.10156, -0.3984 , 3.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_float16\")" + }, + { + "summary": "mod_mixed_sign_float32", + "code": "node = onnx.helper.make_node(\"Mod\", inputs=[\"x\", \"y\"], outputs=[\"z\"], fmod=1)\n\nx = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float32)\ny = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float32)\nz = np.fmod(\n x, y\n) # expected output [-0.10000038, 0.39999962, 5. , 0.10000038, -0.39999962, 3.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_float32\")" + }, + { + "summary": "mod_mixed_sign_float64", + "code": "node = onnx.helper.make_node(\"Mod\", inputs=[\"x\", \"y\"], outputs=[\"z\"], fmod=1)\n\nx = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float64)\ny = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float64)\nz = np.fmod(x, y) # expected output [-0.1, 0.4, 5. , 0.1, -0.4, 3.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_float64\")" + }, + { + "summary": "mod_mixed_sign_int16", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int16)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int16)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_int16\")" + }, + { + "summary": "mod_mixed_sign_int32", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int32)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int32)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_int32\")" + }, + { + "summary": "mod_mixed_sign_int64", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int64)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int64)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_int64\")" + }, + { + "summary": "mod_mixed_sign_int8", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int8)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int8)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_mixed_sign_int8\")" + }, + { + "summary": "mod_uint16", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint16)\ny = np.array([2, 3, 8]).astype(np.uint16)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_uint16\")" + }, + { + "summary": "mod_uint32", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint32)\ny = np.array([2, 3, 8]).astype(np.uint32)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_uint32\")" + }, + { + "summary": "mod_uint64", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint64)\ny = np.array([2, 3, 8]).astype(np.uint64)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z], 
name=\"test_mod_uint64\")" + }, + { + "summary": "mod_uint8", + "code": "node = onnx.helper.make_node(\n \"Mod\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint8)\ny = np.array([2, 3, 8]).astype(np.uint8)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mod_uint8\")" + } + ] + }, + { + "name": "Momentum", + "module": "ai.onnx.preview.training", + "version": 1, + "description": "Compute one iteration of stochastic gradient update with momentum.\n This operator can conduct the optimization of multiple tensor variables.\n\n Let's define the behavior of this operator. As you can imagine, SG with momentum requires\n several parameters:\n\n - The learning-rate \"R\".\n - The update count \"T\". That is, the number of conducted training iterations. It should\n be zero in the first training iteration.\n - A L2-norm regularization coefficient \"norm_coefficient\".\n - A decay coefficient of previous accumulated gradient (i.e., momentum) \"alpha\".\n - The scaling coefficient of current gradient \"beta\".\n - An attribute to choose either standard momentum or Nesterov's momentum \"mode\" should\n be used.\n\n For the sake of simplicity, assume that there is only one tensor (called \"X\") to be optimized.\n Other necessary inputs are \"X\"'s gradient (called \"G\") and \"X\"'s momentum (called \"V\"). This\n Momentum operator maps all these inputs to the new value of \"X\" (called \"X_new\") and its new\n momentum (called \"V_new\").\n\n This operator supports two different momentum algorithms. Set the attribute \"mode\" to\n \"nesterov\" if Nesterov's momentum is desired. Otherwise, set the attribute \"model\" to\n \"standard\" to use standard momentum. Computation details are described subsequently.\n\n Let \"+\", \"-\", \"*\", and \"/\" are all element-wise operations with numpy-style broadcasting.\n\n Pseudo code for SG with standard momentum:\n\n // Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared\n // values of all elements in X.\n G_regularized = norm_coefficient * X + G\n\n // In the first training iteration, beta should always be 1.\n beta_adjusted = T > 0 ? beta : 1\n\n // Compute the current momentum based on previous momentum and the current gradient.\n V_new = alpha * V + beta_adjusted * G_regularized\n\n // Update X.\n X_new = X - R * V_new\n\n Pseudo code for SG with Nesterov's momentum:\n\n // Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared\n // values of all elements in X.\n G_regularized = norm_coefficient * X + G;\n\n // In the first training iteration, beta should always be 1.\n beta_adjusted = T > 0 ? beta : 1\n\n // Compute the current momentum based on previous momentum and the current gradient.\n V_new = alpha * V + beta_adjusted * G_regularized;\n\n // Compute final update direction and then update X.\n X_new = X - R * (G_regularized + alpha * V_new)\n\n If one assign this operators to optimize multiple inputs, for example, \"X_1\" and \"X_2\". The same\n pseudo code would be extended to handle all tensors jointly. More specifically, we can view \"X\" as a\n concatenation of \"X_1\" and \"X_2\" (of course, their gradient and accumulate gradient should\n be concatenated too) and then our pseudo code becomes applicable.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": true, + "description": "The decay factor of momentum. It should be a scalar." 
+ }, + { + "name": "beta", + "type": "float32", + "required": true, + "description": "The coefficient of gradient in computing new momentum. It should be a scalar." + }, + { + "name": "mode", + "type": "string", + "required": true, + "description": "Its value should be either \"nesterov\" or \"standard\". The value \"nesterov\" leads to the use of Nesterov's momentum while \"standard\" invokes stochastic gradient method using standard momentum" + }, + { + "name": "norm_coefficient", + "type": "float32", + "required": true, + "description": "Coefficient of 0.5 * norm_coefficient * ||X||^2." + } + ], + "inputs": [ + { + "name": "R", + "type": "T1", + "description": "The learning rate." + }, + { + "name": "T", + "type": "T2", + "description": "Update count of \"X\". It should be a scalar." + }, + { + "name": "inputs", + "type": "T3", + "list": true, + "description": "It sequentially contains the current values of optimized tensors, then their gradient tensors, and finally their momentum tensors. For example, if two tensors \"X_1\" and \"X_2\" are optimized, The expected input list would be [\"X_1\", \"X_2\", gradient of \"X_1\", gradient of \"X_2\", momentum of \"X_1\", momentum of \"X_2\"]." + } + ], + "min_input": 3, + "max_input": 2147483647, + "outputs": [ + { + "name": "outputs", + "type": "T3", + "list": true, + "description": "It sequentially contains the new values of optimized tensors and then the new values of their momentum tensors. For example, if two tensors \"X_1\" and \"X_2\" are optimized, the output list would be [new value of \"X_1,\" new value of \"X_2\" new momentum of \"X_1\", new momentum of \"X_2\"]." + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "3 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input types to float scalars.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain input types to 64-bit integer scalars.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "Constrain input types to float tensors.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "momentum", + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nalpha = 0.95\nbeta = 0.1\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"Momentum\",\n inputs=[\"R\", \"T\", \"X\", \"G\", \"V\"],\n outputs=[\"X_new\", \"V_new\"],\n norm_coefficient=norm_coefficient,\n alpha=alpha,\n beta=beta,\n mode=\"standard\",\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,\n)\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\nx = np.array([1.2, 2.8], dtype=np.float32)\ng = np.array([-0.94, -2.5], dtype=np.float32)\nv = np.array([1.7, 3.6], dtype=np.float32)\n\n# Compute expected outputs of Momentum.\nx_new, v_new = apply_momentum(r, t, x, g, v, norm_coefficient, alpha, beta)\n\n# Check results.\nexpect(\n node,\n inputs=[r, t, x, g, v],\n outputs=[x_new, v_new],\n name=\"test_momentum\",\n opset_imports=[\n onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)\n ],\n)" + }, + { + "summary": "momentum_multiple", + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nalpha = 0.95\nbeta = 0.85\n\nnode = onnx.helper.make_node(\n \"Momentum\",\n inputs=[\"R\", \"T\", \"X1\", \"X2\", \"G1\", \"G2\", \"H1\", \"H2\"],\n outputs=[\"X1_new\", \"X2_new\", 
\"V1_new\", \"V2_new\"],\n norm_coefficient=norm_coefficient,\n alpha=alpha,\n beta=beta,\n mode=\"standard\",\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,\n)\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\n\nx1 = np.array([1.0], dtype=np.float32)\ng1 = np.array([-1.0], dtype=np.float32)\nv1 = np.array([2.0], dtype=np.float32)\n\nx2 = np.array([1.0, 2.0], dtype=np.float32)\ng2 = np.array([-1.0, -3.0], dtype=np.float32)\nv2 = np.array([4.0, 1.0], dtype=np.float32)\n\n# Compute expected outputs of Momentum.\nx1_new, v1_new = apply_momentum(r, t, x1, g1, v1, norm_coefficient, alpha, beta)\nx2_new, v2_new = apply_momentum(r, t, x2, g2, v2, norm_coefficient, alpha, beta)\n\n# Check results.\nexpect(\n node,\n inputs=[r, t, x1, x2, g1, g2, v1, v2],\n outputs=[x1_new, x2_new, v1_new, v2_new],\n name=\"test_momentum_multiple\",\n opset_imports=[\n onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)\n ],\n)" + }, + { + "summary": "nesterov_momentum", + "code": "# Define operator attributes.\nnorm_coefficient = 0.01\nalpha = 0.95\nbeta = 1.0\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"Momentum\",\n inputs=[\"R\", \"T\", \"X\", \"G\", \"V\"],\n outputs=[\"X_new\", \"V_new\"],\n norm_coefficient=norm_coefficient,\n alpha=alpha,\n beta=beta,\n mode=\"nesterov\",\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,\n)\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\nx = np.array([1.2, 2.8], dtype=np.float32)\ng = np.array([-0.94, -2.5], dtype=np.float32)\nv = np.array([1.7, 3.6], dtype=np.float32)\n\n# Compute expected outputs of Momentum.\nx_new, v_new = apply_nesterov(r, t, x, g, v, norm_coefficient, alpha, beta)\n\n# Check results.\nexpect(\n node,\n inputs=[r, t, x, g, v],\n outputs=[x_new, v_new],\n name=\"test_nesterov_momentum\",\n opset_imports=[\n onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)\n ],\n)" + } + ] + }, + { + "name": "Mul", + "module": "ai.onnx", + "version": 1, + "description": "Performs element-wise binary multiplication (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions. See doc for details." 
+ }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Pass 1 to enable broadcasting" + }, + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand, should share the type with the second operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same dimensions and type as A" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "mul", + "code": "node = onnx.helper.make_node(\n \"Mul\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = x * y # expected output [4., 10., 18.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul\")\n\nx = np.random.randint(4, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_uint8\")" + }, + { + "summary": "mul_broadcast", + "code": "node = onnx.helper.make_node(\n \"Mul\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_bcast\")" + } + ] + }, + { + "name": "Mul", + "module": "ai.onnx", + "version": 6, + "description": "Performs element-wise binary multiplication (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions. See doc for details." 
+ }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Pass 1 to enable broadcasting" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand, should share the type with the second operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same dimensions and type as A" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "mul", + "code": "node = onnx.helper.make_node(\n \"Mul\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = x * y # expected output [4., 10., 18.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul\")\n\nx = np.random.randint(4, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_uint8\")" + }, + { + "summary": "mul_broadcast", + "code": "node = onnx.helper.make_node(\n \"Mul\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_bcast\")" + } + ] + }, + { + "name": "Mul", + "module": "ai.onnx", + "version": 7, + "description": "Performs element-wise binary multiplication (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "mul", + "code": "node = onnx.helper.make_node(\n \"Mul\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = x * y # expected output [4., 10., 18.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul\")\n\nx = np.random.randint(4, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_uint8\")" + }, + { + "summary": "mul_broadcast", + "code": "node = onnx.helper.make_node(\n \"Mul\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_bcast\")" + } + ] + }, + { + "name": "Mul", + "module": "ai.onnx", + "version": 13, + "description": "Performs element-wise binary multiplication (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "mul", + "code": "node = onnx.helper.make_node(\n \"Mul\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = x * y # expected output [4., 10., 18.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul\")\n\nx = np.random.randint(4, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_uint8\")" + }, + { + "summary": "mul_broadcast", + "code": "node = onnx.helper.make_node(\n \"Mul\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_bcast\")" + } + ] + }, + { + "name": "Mul", + "module": "ai.onnx", + "version": 14, + "description": "Performs element-wise binary multiplication (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n\n(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "mul", + "code": "node = onnx.helper.make_node(\n \"Mul\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = x * y # expected output [4., 10., 18.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul\")\n\nx = np.random.randint(4, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_uint8\")" + }, + { + "summary": "mul_broadcast", + "code": "node = onnx.helper.make_node(\n \"Mul\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_mul_bcast\")" + } + ] + }, + { + "name": "Multinomial", + "module": "ai.onnx", + "version": 7, + "description": "Generate a tensor of samples from a multinomial distribution according to the probabilities\nof each of the possible outcomes.\n", + "attributes": [ + { + "name": "dtype", + "type": "int64", + "required": false, + "default": 6, + "description": "(Optional) The data type for the elements of the output tensor, if not specified, we will use int32." + }, + { + "name": "sample_size", + "type": "int64", + "required": false, + "default": 1, + "description": "Number of times to sample." + }, + { + "name": "seed", + "type": "float32", + "required": false, + "description": "(Optional) Seed to the random generator, if not specified we will auto generate one." + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor with shape [batch_size, class_size], where class_size is the number of all possible outcomes. Each value along the axis zero represents the unnormalized log-probability of each corresponding outcome in a batch." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor with shape [batch_size, sample_size], where sample_size is the number of times to sample. Each value along the axis zero represents the outcome of the corresponding sample in a batch." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain output types to integral tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ] + }, + { + "name": "Neg", + "module": "ai.onnx", + "version": 1, + "description": "Neg takes one input data (Tensor) and produces one output data\n(Tensor) where each element flipped sign, y = -x, is applied to\nthe tensor elementwise.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "neg", + "code": "node = onnx.helper.make_node(\n \"Neg\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-4, 2]).astype(np.float32)\ny = np.negative(x) # expected output [4., -2.],\nexpect(node, inputs=[x], outputs=[y], name=\"test_neg_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.negative(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_neg\")" + } + ] + }, + { + "name": "Neg", + "module": "ai.onnx", + "version": 6, + "description": "Neg takes one input data (Tensor) and produces one output data\n(Tensor) where each element flipped sign, y = -x, is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to signed numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)", + "tensor(int32)", + "tensor(int8)", + "tensor(int16)", + "tensor(int64)", + "tensor(float16)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "neg", + "code": "node = onnx.helper.make_node(\n \"Neg\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-4, 2]).astype(np.float32)\ny = np.negative(x) # expected output [4., -2.],\nexpect(node, inputs=[x], outputs=[y], name=\"test_neg_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.negative(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_neg\")" + } + ] + }, + { + "name": "Neg", + "module": "ai.onnx", + "version": 13, + "description": "Neg takes one input data (Tensor) and produces one output data\n(Tensor) where each element flipped sign, y = -x, is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to signed numeric tensors.", + 
"type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)", + "tensor(int32)", + "tensor(int8)", + "tensor(int16)", + "tensor(int64)", + "tensor(float16)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "neg", + "code": "node = onnx.helper.make_node(\n \"Neg\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-4, 2]).astype(np.float32)\ny = np.negative(x) # expected output [4., -2.],\nexpect(node, inputs=[x], outputs=[y], name=\"test_neg_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.negative(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_neg\")" + } + ] + }, + { + "name": "NegativeLogLikelihoodLoss", + "module": "ai.onnx", + "version": 12, + "description": "A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss.\nIts \"input\" tensor has the shape of (N, C, d1, d2, ..., dk) where k >= 0.\nThe \"input\" tensor contains log-probabilities for input[n, :, d_1, d_2,..., d_k] being in a class of [0, C).\nThe operator's \"target\" input tensor has the shape of (N, d1, d2, ..., dk). It encodes class labels (one of C classes)\nor it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x dk samples.\nThe loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as:\n loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].\nWhen an optional \"weight\" is provided, the sample loss is calculated as:\n loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c].\nloss is zero for the case when target-value equals ignore_index.\n\n loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index\nIf \"reduction\" attribute is set to \"none\", the operator's output will be the above loss with shape (N, d1, d2, ..., dk).\nIf \"reduction\" attribute is set to \"mean\" (the default attribute value), the output loss is (weight) averaged:\n mean(loss), if \"weight\" is not provided,\nor if weight is provided,\n sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples.\nIf \"reduction\" attribute is set to \"sum\", the output is a scalar:\n sum(loss).\nSee also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.\nExample 1:\n // negative log likelihood loss, \"none\" reduction\n N, C, d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target = [[2, 1], [0, 2]]\n loss = np.zeros((N, d1))\n for n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1]\n // print(loss)\n // [[-3. -2.]\n // [-0. 
-2.]]\nExample 2:\n // weighted negative log likelihood loss, sum reduction\n N, C, d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target = [[2, 1], [0, 2]]\n weight = [0.2, 0.3, 0.1]\n loss = np.zeros((N, d1))\n for n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n loss = np.sum(loss)\n // print(loss)\n // -1.1\nExample 3:\n // weighted negative log likelihood loss, mean reduction\n N, C, d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target = [[2, 1], [0, 2]]\n weight = [0.2, 0.3, 0.1]\n loss = np.zeros((N, d1))\n weight_total = 0\n for n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n weight_total = weight_total + weight[c]\n loss = np.sum(loss) / weight_total\n // print(loss)\n // -1.57\n", + "attributes": [ + { + "name": "ignore_index", + "type": "int64", + "required": false, + "description": "Specifies a target value that is ignored and does not contribute to the input gradient. It's an optional value." + }, + { + "name": "reduction", + "type": "string", + "required": false, + "default": "mean", + "description": "Type of reduction to apply to loss: none, sum, mean (default). 'none': the output is the loss for each sample. 'sum': the output will be summed. 'mean': the sum of the output will be divided by the sum of applied weights." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor of shape (N, C) or (N, C, d1, d2, ..., dk)." + }, + { + "name": "target", + "type": "Tind", + "description": "Target tensor of shape (N) or (N, d1, d2, ..., dk). Target element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the target values should either be in the range [0, C) or have the value ignore_index." + }, + { + "name": "weight", + "type": "T", + "option": "optional", + "description": "Optional rescaling weight tensor. If given, it has to be a tensor of size C. Otherwise, it is treated as if having all ones." 
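Every NegativeLogLikelihoodLoss example below delegates to a `compute_negative_log_likelihood_loss` helper that this file does not define. A sketch consistent with the pseudo code in the description, with the signature inferred from the call sites (the clipping is only a guard so the gather stays in-bounds for ignore_index targets):

```python
import numpy as np

def compute_negative_log_likelihood_loss(
    input, target, weight=None, reduction="mean", ignore_index=None
):
    C = input.shape[1]
    # Clip targets into [0, C) so the gather stays in-bounds; out-of-range
    # values only occur for ignore_index entries, which are zeroed below.
    safe_target = np.clip(target, 0, C - 1)
    # loss[n, d1, ..., dk] = -input[n, target[n, d1, ..., dk], d1, ..., dk]
    gathered = np.take_along_axis(input, np.expand_dims(safe_target, axis=1), axis=1)
    loss = -np.squeeze(gathered, axis=1)
    # Per-sample weights: weight[c] if given, else 1 for every class.
    w = np.ones(C, dtype=input.dtype) if weight is None else weight
    sample_weight = w[safe_target]
    if ignore_index is not None:
        ignored = target == ignore_index
        loss = np.where(ignored, 0.0, loss)
        sample_weight = np.where(ignored, 0.0, sample_weight)
    loss = loss * sample_weight
    if reduction == "none":
        return loss.astype(input.dtype)
    if reduction == "sum":
        return np.array(loss.sum(), dtype=input.dtype)
    # "mean": divide by the sum of the applied weights.
    return np.array(loss.sum() / sample_weight.sum(), dtype=input.dtype)
```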
+ } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "loss", + "type": "T", + "description": "The negative log likelihood loss" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input, weight, and output types to floating-point tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain target to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "input_shape_is_NC", + "code": "reduction = \"none\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C = 3, 5\nnp.random.seed(0)\ninput = np.random.rand(N, C).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N,)).astype(np.int64)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NC\",\n)" + }, + { + "summary": "input_shape_is_NCd1", + "code": "reduction = \"mean\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1\",\n)" + }, + { + "summary": "input_shape_is_NCd1_ii", + "code": "reduction = \"mean\"\nignore_index = np.int64(1)\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)\ntarget[0][0] = np.int64(1)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1_mean_weight_negative_ii", + "code": "reduction = \"mean\"\nignore_index = np.int64(-1)\n\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1 = 3, 5, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1)).astype(np.int64)\ntarget[0][0] = -1\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1_mean_weight_negative_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1_weight", + "code": 
"reduction = \"mean\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1_weight\",\n)" + }, + { + "summary": "input_shape_is_NCd1_weight_ii", + "code": "reduction = \"mean\"\nignore_index = np.int64(1)\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)\ntarget[0][0] = np.int64(1)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1_weight_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2", + "code": "reduction = \"none\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_no_weight_reduction_mean_ii", + "code": "reduction = \"mean\"\nignore_index = np.int64(1)\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\ntarget[0][0][0] = np.int64(1)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_no_weight_reduction_mean_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_reduction_mean", + "code": "reduction = \"mean\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction\n)\n\nexpect(\n node,\n 
inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_reduction_mean\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_reduction_sum", + "code": "reduction = \"sum\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_reduction_sum\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_with_weight", + "code": "reduction = \"none\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_with_weight\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_with_weight_reduction_mean", + "code": "reduction = \"mean\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_with_weight_reduction_mean\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_with_weight_reduction_sum", + "code": "reduction = \"sum\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_with_weight_reduction_sum\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_with_weight_reduction_sum_ii", + "code": "reduction = \"sum\"\nignore_index = np.int64(0)\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, 
dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\ntarget[0][0][0] = np.int64(0)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_with_weight_reduction_sum_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_none_no_weight_negative_ii", + "code": "reduction = \"none\"\nignore_index = np.int64(-5)\n\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2, dim3).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3)).astype(\n np.int64\n)\ntarget[0][0][0][0] = -5\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2d3_none_no_weight_negative_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_sum_weight_high_ii", + "code": "reduction = \"sum\"\nignore_index = np.int64(10)\n\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C = 3, 5\nnp.random.seed(0)\ninput = np.random.rand(N, C).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N)).astype(np.int64)\ntarget[0] = 10\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2d3_sum_weight_high_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_mean_weight", + "code": "reduction = \"mean\"\n\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\ntarget = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2d3d4d5_mean_weight\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_none_no_weight", + "code": "reduction = \"none\"\n\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\ntarget = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\n\nnegative_log_likelihood_loss 
= compute_negative_log_likelihood_loss(\n input, target, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2d3d4d5_none_no_weight\",\n)" + } + ] + }, + { + "name": "NegativeLogLikelihoodLoss", + "module": "ai.onnx", + "version": 13, + "description": "A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss.\nIts \"input\" tensor has the shape of (N, C, d1, d2, ..., dk) where k >= 0.\nThe \"input\" tensor contains log-probabilities for input[n, :, d_1, d_2,..., d_k] being in a class of [0, C).\nThe operator's \"target\" input tensor has the shape of (N, d1, d2, ..., dk). It encodes class labels (one of C classes)\nor it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x dk samples.\nThe loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as:\n\n```\nloss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].\n```\n\nWhen an optional \"weight\" is provided, the sample loss is calculated as:\n\n```\nloss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c].\n```\n\nloss is zero for the case when target-value equals ignore_index.\n\n```\nloss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index\n```\n\nIf \"reduction\" attribute is set to \"none\", the operator's output will be the above loss with shape (N, d1, d2, ..., dk).\nIf \"reduction\" attribute is set to \"mean\" (the default attribute value), the output loss is (weight) averaged:\n\n```\nmean(loss), if \"weight\" is not provided,\n```\n\nor if weight is provided,\n\n```\nsum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples.\n```\n\nIf \"reduction\" attribute is set to \"sum\", the output is a scalar: `sum(loss)`.\n\nSee also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.\n\nExample 1:\n\n```\n// negative log likelihood loss, \"none\" reduction\nN, C, d1 = 2, 3, 2\ninput = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\ntarget = [[2, 1], [0, 2]]\n\nloss = np.zeros((N, d1))\nfor n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1]\n\n// print(loss)\n// [[-3. -2.]\n// [-0. -2.]]\n```\n\nExample 2:\n\n```\n// weighted negative log likelihood loss, sum reduction\nN, C, d1 = 2, 3, 2\ninput = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\ntarget = [[2, 1], [0, 2]]\nweight = [0.2, 0.3, 0.1]\nloss = np.zeros((N, d1))\nfor n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n\nloss = np.sum(loss)\n// print(loss)\n// -1.1\n```\n\nExample 3:\n\n```\n// weighted negative log likelihood loss, mean reduction\nN, C, d1 = 2, 3, 2\ninput = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\ntarget = [[2, 1], [0, 2]]\nweight = [0.2, 0.3, 0.1]\nloss = np.zeros((N, d1))\nweight_total = 0\nfor n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n weight_total = weight_total + weight[c]\n\nloss = np.sum(loss) / weight_total\n// print(loss)\n// -1.57\n```\n", + "attributes": [ + { + "name": "ignore_index", + "type": "int64", + "required": false, + "description": "Specifies a target value that is ignored and does not contribute to the input gradient. It's an optional value." 
+ }, + { + "name": "reduction", + "type": "string", + "required": false, + "default": "mean", + "description": "Type of reduction to apply to loss: none, sum, mean (default). 'none': the output is the loss for each sample. 'sum': the output will be summed. 'mean': the sum of the output will be divided by the sum of applied weights." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor of shape (N, C) or (N, C, d1, d2, ..., dk)." + }, + { + "name": "target", + "type": "Tind", + "description": "Target tensor of shape (N) or (N, d1, d2, ..., dk). Target element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the target values should either be in the range [0, C) or have the value ignore_index." + }, + { + "name": "weight", + "type": "T", + "option": "optional", + "description": "Optional rescaling weight tensor. If given, it has to be a tensor of size C. Otherwise, it is treated as if having all ones." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "loss", + "type": "T", + "description": "The negative log likelihood loss" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input, weight, and output types to floating-point tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain target to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "input_shape_is_NC", + "code": "reduction = \"none\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C = 3, 5\nnp.random.seed(0)\ninput = np.random.rand(N, C).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N,)).astype(np.int64)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NC\",\n)" + }, + { + "summary": "input_shape_is_NCd1", + "code": "reduction = \"mean\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1\",\n)" + }, + { + "summary": "input_shape_is_NCd1_ii", + "code": "reduction = \"mean\"\nignore_index = np.int64(1)\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)\ntarget[0][0] = np.int64(1)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, 
target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1_mean_weight_negative_ii", + "code": "reduction = \"mean\"\nignore_index = np.int64(-1)\n\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1 = 3, 5, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1)).astype(np.int64)\ntarget[0][0] = -1\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1_mean_weight_negative_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1_weight", + "code": "reduction = \"mean\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1_weight\",\n)" + }, + { + "summary": "input_shape_is_NCd1_weight_ii", + "code": "reduction = \"mean\"\nignore_index = np.int64(1)\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)\ntarget[0][0] = np.int64(1)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1_weight_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2", + "code": "reduction = \"none\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_no_weight_reduction_mean_ii", + "code": "reduction = \"mean\"\nignore_index = np.int64(1)\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, 
C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\ntarget[0][0][0] = np.int64(1)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_no_weight_reduction_mean_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_reduction_mean", + "code": "reduction = \"mean\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_reduction_mean\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_reduction_sum", + "code": "reduction = \"sum\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=None, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_reduction_sum\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_with_weight", + "code": "reduction = \"none\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_with_weight\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_with_weight_reduction_mean", + "code": "reduction = \"mean\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_with_weight_reduction_mean\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_with_weight_reduction_sum", + "code": "reduction = \"sum\"\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", 
\"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_with_weight_reduction_sum\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2_with_weight_reduction_sum_ii", + "code": "reduction = \"sum\"\nignore_index = np.int64(0)\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)\ntarget[0][0][0] = np.int64(0)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2_with_weight_reduction_sum_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_none_no_weight_negative_ii", + "code": "reduction = \"none\"\nignore_index = np.int64(-5)\n\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2, dim3).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3)).astype(\n np.int64\n)\ntarget[0][0][0][0] = -5\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2d3_none_no_weight_negative_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_sum_weight_high_ii", + "code": "reduction = \"sum\"\nignore_index = np.int64(10)\n\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C = 3, 5\nnp.random.seed(0)\ninput = np.random.rand(N, C).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N)).astype(np.int64)\ntarget[0] = 10\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2d3_sum_weight_high_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_mean_weight", + "code": "reduction = \"mean\"\n\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\", \"weight\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, 
dim2, dim3, dim4, dim5).astype(np.float32)\ntarget = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, weight=weight, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target, weight],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2d3d4d5_mean_weight\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_none_no_weight", + "code": "reduction = \"none\"\n\nnode = onnx.helper.make_node(\n \"NegativeLogLikelihoodLoss\",\n inputs=[\"input\", \"target\"],\n outputs=[\"loss\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\ntarget = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(\n input, target, reduction=reduction\n)\n\nexpect(\n node,\n inputs=[input, target],\n outputs=[negative_log_likelihood_loss],\n name=\"test_nllloss_NCd1d2d3d4d5_none_no_weight\",\n)" + } + ] + }, + { + "name": "NonMaxSuppression", + "module": "ai.onnx", + "version": 10, + "description": "Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes.\nBounding boxes with score less than score_threshold are removed. Bounding box format is indicated by attribute center_point_box.\nNote that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to\northogonal transformations and translations of the coordinate system; thus translations or reflections of the coordinate system\nresult in the same boxes being selected by the algorithm.\nThe selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes.\nThe bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation.\n", + "attributes": [ + { + "name": "center_point_box", + "type": "int64", + "required": false, + "description": "Integer indicating the format of the box data. The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2] where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box data is supplied as [x_center, y_center, width, height]. Mostly used for PyTorch models." + } + ], + "inputs": [ + { + "name": "boxes", + "type": "tensor(float)", + "description": "An input tensor with shape [num_batches, spatial_dimension, 4]. The single box data format is indicated by center_point_box." + }, + { + "name": "scores", + "type": "tensor(float)", + "description": "An input tensor with shape [num_batches, num_classes, spatial_dimension]" + }, + { + "name": "max_output_boxes_per_class", + "type": "tensor(int64)", + "option": "optional", + "description": "Integer representing the maximum number of boxes to be selected per batch per class. It is a scalar. Defaults to 0, which means no output." + }, + { + "name": "iou_threshold", + "type": "tensor(float)", + "option": "optional", + "description": "Float representing the threshold for deciding whether boxes overlap too much with respect to IOU. It is a scalar. Value range [0, 1]. Defaults to 0." 
+ }, + { + "name": "score_threshold", + "type": "tensor(float)", + "option": "optional", + "description": "Float representing the threshold for deciding when to remove boxes based on score. It is a scalar." + } + ], + "min_input": 2, + "max_input": 5, + "outputs": [ + { + "name": "selected_indices", + "type": "tensor(int64)", + "description": "selected indices from the boxes tensor. [num_selected_indices, 3], the selected index format is [batch_index, class_index, box_index]." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 5", + "examples": [ + { + "summary": "nonmaxsuppression_center_point_box_format", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n center_point_box=1,\n)\nboxes = np.array(\n [\n [\n [0.5, 0.5, 1.0, 1.0],\n [0.5, 0.6, 1.0, 1.0],\n [0.5, 0.4, 1.0, 1.0],\n [0.5, 10.5, 1.0, 1.0],\n [0.5, 10.6, 1.0, 1.0],\n [0.5, 100.5, 1.0, 1.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_center_point_box_format\",\n)" + }, + { + "summary": "nonmaxsuppression_flipped_coordinates", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [1.0, 1.0, 0.0, 0.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, 0.9, 1.0, -0.1],\n [0.0, 10.0, 1.0, 11.0],\n [1.0, 10.1, 0.0, 11.1],\n [1.0, 101.0, 0.0, 100.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_flipped_coordinates\",\n)" + }, + { + "summary": "nonmaxsuppression_identical_boxes", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array(\n [[[0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]]]\n).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 
0]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_identical_boxes\",\n)" + }, + { + "summary": "nonmaxsuppression_limit_output_size", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_limit_output_size\",\n)" + }, + { + "summary": "nonmaxsuppression_single_box", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array([[[0.0, 0.0, 1.0, 1.0]]]).astype(np.float32)\nscores = np.array([[[0.9]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 0]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_single_box\",\n)" + }, + { + "summary": "nonmaxsuppression_suppress_by_IOU", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_suppress_by_IOU\",\n)" + }, + { + "summary": "nonmaxsuppression_suppress_by_IOU_and_scores", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 
101.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.4]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_suppress_by_IOU_and_scores\",\n)" + }, + { + "summary": "nonmaxsuppression_two_batches", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ],\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ],\n ]\n).astype(np.float32)\nscores = np.array(\n [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]], [[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]\n).astype(np.float32)\nmax_output_boxes_per_class = np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array(\n [[0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]]\n).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_two_batches\",\n)" + }, + { + "summary": "nonmaxsuppression_two_classes", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array(\n [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3], [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]\n).astype(np.float32)\nmax_output_boxes_per_class = np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array(\n [[0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]]\n).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_two_classes\",\n)" + } + ] + }, + { + "name": "NonMaxSuppression", + "module": "ai.onnx", + "version": 11, + "description": "Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes.\nBounding boxes with score less than score_threshold are removed. 
Bounding box format is indicated by attribute center_point_box.\nNote that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to\northogonal transformations and translations of the coordinate system; thus translations or reflections of the coordinate system\nresult in the same boxes being selected by the algorithm.\nThe selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes.\nThe bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation.\n", + "attributes": [ + { + "name": "center_point_box", + "type": "int64", + "required": false, + "description": "Integer indicating the format of the box data. The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2] where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box data is supplied as [x_center, y_center, width, height]. Mostly used for PyTorch models." + } + ], + "inputs": [ + { + "name": "boxes", + "type": "tensor(float)", + "description": "An input tensor with shape [num_batches, spatial_dimension, 4]. The single box data format is indicated by center_point_box." + }, + { + "name": "scores", + "type": "tensor(float)", + "description": "An input tensor with shape [num_batches, num_classes, spatial_dimension]" + }, + { + "name": "max_output_boxes_per_class", + "type": "tensor(int64)", + "option": "optional", + "description": "Integer representing the maximum number of boxes to be selected per batch per class. It is a scalar. Defaults to 0, which means no output." + }, + { + "name": "iou_threshold", + "type": "tensor(float)", + "option": "optional", + "description": "Float representing the threshold for deciding whether boxes overlap too much with respect to IOU. It is a scalar. Value range [0, 1]. Defaults to 0." + }, + { + "name": "score_threshold", + "type": "tensor(float)", + "option": "optional", + "description": "Float representing the threshold for deciding when to remove boxes based on score. It is a scalar." + } + ], + "min_input": 2, + "max_input": 5, + "outputs": [ + { + "name": "selected_indices", + "type": "tensor(int64)", + "description": "selected indices from the boxes tensor. [num_selected_indices, 3], the selected index format is [batch_index, class_index, box_index]." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 5", + "examples": [ + { + "summary": "nonmaxsuppression_center_point_box_format", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n center_point_box=1,\n)\nboxes = np.array(\n [\n [\n [0.5, 0.5, 1.0, 1.0],\n [0.5, 0.6, 1.0, 1.0],\n [0.5, 0.4, 1.0, 1.0],\n [0.5, 10.5, 1.0, 1.0],\n [0.5, 10.6, 1.0, 1.0],\n [0.5, 100.5, 1.0, 1.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_center_point_box_format\",\n)" + }, + { + "summary": "nonmaxsuppression_flipped_coordinates", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [1.0, 1.0, 0.0, 0.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, 0.9, 1.0, -0.1],\n [0.0, 10.0, 1.0, 11.0],\n [1.0, 10.1, 0.0, 11.1],\n [1.0, 101.0, 0.0, 100.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_flipped_coordinates\",\n)" + }, + { + "summary": "nonmaxsuppression_identical_boxes", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array(\n [[[0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]]]\n).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 0]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_identical_boxes\",\n)" + }, + { + "summary": "nonmaxsuppression_limit_output_size", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n 
outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_limit_output_size\",\n)" + }, + { + "summary": "nonmaxsuppression_single_box", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array([[[0.0, 0.0, 1.0, 1.0]]]).astype(np.float32)\nscores = np.array([[[0.9]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 0]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_single_box\",\n)" + }, + { + "summary": "nonmaxsuppression_suppress_by_IOU", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_suppress_by_IOU\",\n)" + }, + { + "summary": "nonmaxsuppression_suppress_by_IOU_and_scores", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.4]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n 
outputs=[selected_indices],\n name=\"test_nonmaxsuppression_suppress_by_IOU_and_scores\",\n)" + }, + { + "summary": "nonmaxsuppression_two_batches", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ],\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ],\n ]\n).astype(np.float32)\nscores = np.array(\n [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]], [[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]\n).astype(np.float32)\nmax_output_boxes_per_class = np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array(\n [[0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]]\n).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_two_batches\",\n)" + }, + { + "summary": "nonmaxsuppression_two_classes", + "code": "node = onnx.helper.make_node(\n \"NonMaxSuppression\",\n inputs=[\n \"boxes\",\n \"scores\",\n \"max_output_boxes_per_class\",\n \"iou_threshold\",\n \"score_threshold\",\n ],\n outputs=[\"selected_indices\"],\n)\nboxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ]\n ]\n).astype(np.float32)\nscores = np.array(\n [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3], [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]\n).astype(np.float32)\nmax_output_boxes_per_class = np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array(\n [[0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]]\n).astype(np.int64)\n\nexpect(\n node,\n inputs=[\n boxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n ],\n outputs=[selected_indices],\n name=\"test_nonmaxsuppression_two_classes\",\n)" + } + ] + }, + { + "name": "NonZero", + "module": "ai.onnx", + "version": 9, + "description": "Returns the indices of the elements that are non-zero\n (in row-major order - by dimension).\n NonZero behaves similar to numpy.nonzero:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html,\n but for scalar input, NonZero produces output shape (0, N) instead of (1, N), which is different from Numpy's behavior.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "input" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "tensor(int64)", + "description": "output" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": 
"nonzero", + "code": "node = onnx.helper.make_node(\n \"NonZero\",\n inputs=[\"condition\"],\n outputs=[\"result\"],\n)\n\ncondition = np.array([[1, 0], [1, 1]], dtype=bool)\nresult = np.array(\n np.nonzero(condition), dtype=np.int64\n) # expected output [[0, 1, 1], [0, 0, 1]]\nexpect(node, inputs=[condition], outputs=[result], name=\"test_nonzero_example\")" + } + ] + }, + { + "name": "NonZero", + "module": "ai.onnx", + "version": 13, + "description": "Returns the indices of the elements that are non-zero\n (in row-major order - by dimension).\n NonZero behaves similar to numpy.nonzero:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html,\n but for scalar input, NonZero produces output shape (0, N) instead of (1, N), which is different from Numpy's behavior.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "input" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "tensor(int64)", + "description": "output" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "nonzero", + "code": "node = onnx.helper.make_node(\n \"NonZero\",\n inputs=[\"condition\"],\n outputs=[\"result\"],\n)\n\ncondition = np.array([[1, 0], [1, 1]], dtype=bool)\nresult = np.array(\n np.nonzero(condition), dtype=np.int64\n) # expected output [[0, 1, 1], [0, 0, 1]]\nexpect(node, inputs=[condition], outputs=[result], name=\"test_nonzero_example\")" + } + ] + }, + { + "name": "Normalizer", + "module": "ai.onnx.ml", + "version": 1, + "description": "Normalize the input. There are three normalization modes, which have the corresponding formulas,\n defined using element-wise infix operators '/' and '^' and tensor-wide functions 'max' and 'sum':
\n
\n Max: Y = X / max(X)
\n L1: Y = X / sum(X)
\n L2: Y = X / sqrt(sum(X^2))
\n In all modes, if the divisor is zero, Y == X.\n
\n For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row\n of the batch is normalized independently.\n", + "attributes": [ + { + "name": "norm", + "type": "string", + "required": false, + "default": "MAX", + "description": "One of 'MAX', 'L1', 'L2'" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Data to be encoded, a tensor of shape [N,C] or [C]" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "tensor(float)", + "description": "Encoded output data" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input must be a tensor of a numeric type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + } + ] + }, + { + "name": "Not", + "module": "ai.onnx", + "version": 1, + "description": "Returns the negation of the input tensor element-wise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input/output to boolean tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "not", + "code": "node = onnx.helper.make_node(\n \"Not\",\n inputs=[\"x\"],\n outputs=[\"not\"],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(bool)\nexpect(node, inputs=[x], outputs=[np.logical_not(x)], name=\"test_not_2d\")\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\nexpect(node, inputs=[x], outputs=[np.logical_not(x)], name=\"test_not_3d\")\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\nexpect(node, inputs=[x], outputs=[np.logical_not(x)], name=\"test_not_4d\")" + } + ] + }, + { + "name": "OneHot", + "module": "ai.onnx", + "version": 9, + "description": "Produces a one-hot tensor based on inputs.\n The locations represented by the index values in the 'indices' input tensor will have 'on_value'\n and the other locations will have 'off_value' in the output tensor, where 'on_value' and 'off_value'\n are specified as part of required input argument 'values', which is a two-element tensor of format\n [off_value, on_value]. The rank of the output tensor will be one greater than the rank of the\n input tensor. The additional dimension is for one-hot representation. The additional dimension will\n be inserted at the position specified by 'axis'. If 'axis' is not specified then the additional\n dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional\n dimension is specified by required scalar input 'depth'. The type of the output tensor is the same\n as the type of the 'values' input. Any entries in the 'indices' input tensor with values outside\n the range [0, depth) will result in one-hot representation with all 'off_value' values in the\n output tensor.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": -1, + "description": "(Optional) Axis along which one-hot representation is added. Default: axis=-1. axis=-1 means that the additional dimension will be inserted as the innermost/last dimension in the output tensor." + } + ], + "inputs": [ + { + "name": "indices", + "type": "T1", + "description": "Input tensor containing indices. 
The values must be non-negative integers. Any entries in the 'indices' input tensor with values outside the range [0, depth) will result in one-hot representation with all 'off_value' values in the output tensor.In case 'indices' is of non-integer type, the values will be casted to int64 before use." + }, + { + "name": "depth", + "type": "T2", + "description": "Scalar or rank 1 tensor containing exactly one element, specifying the number of classes in one-hot tensor. This is also the size of the one-hot dimension (specified by 'axis' attribute) added on in the output tensor. The values in the 'indices' input tensor are expected to be in the range [0, depth). In case 'depth' is of non-integer type, it will be casted to int64 before use." + }, + { + "name": "values", + "type": "T3", + "description": "Rank 1 tensor containing exactly two elements, in the format [off_value, on_value], where 'on_value' is the value used for filling locations specified in 'indices' input tensor, and 'off_value' is the value used for filling locations other than those specified in 'indices' input tensor. " + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T3", + "description": "Tensor of rank one greater than input tensor 'indices', i.e. rank(output) = rank(indices) + 1. The data type for the elements of the output tensor is the same as the type of input 'values' is used." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to only numeric types.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain input to only numeric types.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain to any tensor type.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "with_axis", + "code": "axisValue = 1\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n \"OneHot\",\n inputs=[\"indices\", \"depth\", \"values\"],\n outputs=[\"y\"],\n axis=axisValue,\n)\nindices = np.array([[1, 9], [2, 4]], dtype=np.float32)\ndepth = np.float32(10)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(\n node,\n inputs=[indices, depth, values],\n outputs=[y],\n name=\"test_onehot_with_axis\",\n)" + }, + { + "summary": "with_negative_axis", + "code": "axisValue = -2\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n \"OneHot\",\n inputs=[\"indices\", \"depth\", \"values\"],\n outputs=[\"y\"],\n axis=axisValue,\n)\nindices = np.array([[1, 9], [2, 4]], dtype=np.float32)\ndepth = np.float32(10)\nvalues = 
np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(\n node,\n inputs=[indices, depth, values],\n outputs=[y],\n name=\"test_onehot_with_negative_axis\",\n)" + }, + { + "summary": "with_negative_indices", + "code": "axisValue = 1\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n \"OneHot\",\n inputs=[\"indices\", \"depth\", \"values\"],\n outputs=[\"y\"],\n axis=axisValue,\n)\nindices = np.array([0, -7, -8], dtype=np.int64)\n\n# print(y)\n# [[3. 1. 1. 1. 1. 1. 1. 1. 1. 1.]\n# [1. 1. 1. 3. 1. 1. 1. 1. 1. 1.]\n# [1. 1. 3. 1. 1. 1. 1. 1. 1. 1.]]\n\ndepth = np.float32(10)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(\n node,\n inputs=[indices, depth, values],\n outputs=[y],\n name=\"test_onehot_negative_indices\",\n)" + }, + { + "summary": "without_axis", + "code": "on_value = 5\noff_value = 2\noutput_type = np.int32\nnode = onnx.helper.make_node(\n \"OneHot\", inputs=[\"indices\", \"depth\", \"values\"], outputs=[\"y\"]\n)\nindices = np.array([0, 7, 8], dtype=np.int64)\ndepth = np.float32(12)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(\n node,\n inputs=[indices, depth, values],\n outputs=[y],\n name=\"test_onehot_without_axis\",\n)" + } + ] + }, + { + "name": "OneHot", + "module": "ai.onnx", + "version": 11, + "description": "Produces a one-hot tensor based on inputs.\n The locations represented by the index values in the 'indices' input tensor will have 'on_value'\n and the other locations will have 'off_value' in the output tensor, where 'on_value' and 'off_value'\n are specified as part of required input argument 'values', which is a two-element tensor of format\n [off_value, on_value]. The rank of the output tensor will be one greater than the rank of the\n input tensor. The additional dimension is for one-hot representation. The additional dimension will\n be inserted at the position specified by 'axis'. If 'axis' is not specified then the additional\n dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional\n dimension is specified by required scalar input 'depth'. The type of the output tensor is the same\n as the type of the 'values' input. Any entries in the 'indices' input tensor with values outside\n the range [-depth, depth-1] will result in one-hot representation with all 'off_value' values in the\n output tensor.\n\n when axis = 0:\n output[input[i, j, k], i, j, k] = 1 for all i, j, k and 0 otherwise.\n\n when axis = -1:\n output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise.\n\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": -1, + "description": "(Optional) Axis along which one-hot representation is added. Default: axis=-1. axis=-1 means that the additional dimension will be inserted as the innermost/last dimension in the output tensor. Negative value means counting dimensions from the back. Accepted range is [-r-1, r] where r = rank(indices)." + } + ], + "inputs": [ + { + "name": "indices", + "type": "T1", + "description": "Input tensor containing indices. 
Any entries in the 'indices' input tensor with values outside the range [-depth, depth-1] will result in one-hot representation with all 'off_value' values in the output tensor.In case 'indices' is of non-integer type, the values will be casted to int64 before use." + }, + { + "name": "depth", + "type": "T2", + "description": "Scalar or Rank 1 tensor containing exactly one element, specifying the number of classes in one-hot tensor. This is also the size of the one-hot dimension (specified by 'axis' attribute) added on in the output tensor. The values in the 'indices' input tensor are expected to be in the range [-depth, depth-1]. In case 'depth' is of non-integer type, it will be casted to int64 before use." + }, + { + "name": "values", + "type": "T3", + "description": "Rank 1 tensor containing exactly two elements, in the format [off_value, on_value], where 'on_value' is the value used for filling locations specified in 'indices' input tensor, and 'off_value' is the value used for filling locations other than those specified in 'indices' input tensor. " + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T3", + "description": "Tensor of rank one greater than input tensor 'indices', i.e. rank(output) = rank(indices) + 1. The data type for the elements of the output tensor is the same as the type of input 'values' is used." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to only numeric types.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain input to only numeric types.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain to any tensor type.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "with_axis", + "code": "axisValue = 1\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n \"OneHot\",\n inputs=[\"indices\", \"depth\", \"values\"],\n outputs=[\"y\"],\n axis=axisValue,\n)\nindices = np.array([[1, 9], [2, 4]], dtype=np.float32)\ndepth = np.float32(10)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(\n node,\n inputs=[indices, depth, values],\n outputs=[y],\n name=\"test_onehot_with_axis\",\n)" + }, + { + "summary": "with_negative_axis", + "code": "axisValue = -2\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n \"OneHot\",\n inputs=[\"indices\", \"depth\", \"values\"],\n outputs=[\"y\"],\n axis=axisValue,\n)\nindices = np.array([[1, 9], [2, 4]], dtype=np.float32)\ndepth = np.float32(10)\nvalues = np.array([off_value, on_value], 
dtype=output_type)\ny = one_hot(indices, depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(\n node,\n inputs=[indices, depth, values],\n outputs=[y],\n name=\"test_onehot_with_negative_axis\",\n)" + }, + { + "summary": "with_negative_indices", + "code": "axisValue = 1\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n \"OneHot\",\n inputs=[\"indices\", \"depth\", \"values\"],\n outputs=[\"y\"],\n axis=axisValue,\n)\nindices = np.array([0, -7, -8], dtype=np.int64)\n\n# print(y)\n# [[3. 1. 1. 1. 1. 1. 1. 1. 1. 1.]\n# [1. 1. 1. 3. 1. 1. 1. 1. 1. 1.]\n# [1. 1. 3. 1. 1. 1. 1. 1. 1. 1.]]\n\ndepth = np.float32(10)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(\n node,\n inputs=[indices, depth, values],\n outputs=[y],\n name=\"test_onehot_negative_indices\",\n)" + }, + { + "summary": "without_axis", + "code": "on_value = 5\noff_value = 2\noutput_type = np.int32\nnode = onnx.helper.make_node(\n \"OneHot\", inputs=[\"indices\", \"depth\", \"values\"], outputs=[\"y\"]\n)\nindices = np.array([0, 7, 8], dtype=np.int64)\ndepth = np.float32(12)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(\n node,\n inputs=[indices, depth, values],\n outputs=[y],\n name=\"test_onehot_without_axis\",\n)" + } + ] + }, + { + "name": "OneHotEncoder", + "module": "ai.onnx.ml", + "version": 1, + "description": "Replace each input element with an array of ones and zeros, where a single\n one is placed at the index of the category that was passed in. The total category count\n will determine the size of the extra dimension of the output array Y.
\n For example, if we pass a tensor with a single value of 4, and a category count of 8,\n the output will be a tensor with ``[0,0,0,0,1,0,0,0]``.
\n This operator assumes every input feature is from the same set of categories.
\n If the input is a tensor of float, int32, or double, the data will be cast\n to integers and the cats_int64s category list will be used for the lookups.\n", + "attributes": [ + { + "name": "cats_int64s", + "type": "int64[]", + "required": false, + "description": "List of categories, ints.
One and only one of the 'cats_*' attributes must be defined." + }, + { + "name": "cats_strings", + "type": "string[]", + "required": false, + "description": "List of categories, strings.
One and only one of the 'cats_*' attributes must be defined." + }, + { + "name": "zeros", + "type": "int64", + "required": false, + "default": 1, + "description": "If true and a category is not present, the operator will return all zeros; if false and a category is not found, the operator will fail." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Data to be encoded." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "tensor(float)", + "description": "Encoded output data, having one more dimension than X." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input must be a tensor of a numeric type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)", + "tensor(int32)", + "tensor(float)", + "tensor(double)" + ] + } + ] + }, + { + "name": "Optional", + "module": "ai.onnx", + "version": 15, + "description": "Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute,\nor a non-empty value containing the input element.\n", + "attributes": [ + { + "name": "type", + "required": false, + "description": "Type of the element in the optional output" + } + ], + "inputs": [ + { + "name": "input", + "type": "V", + "option": "optional", + "description": "The input element." + } + ], + "min_input": 0, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "O", + "description": "The optional output enclosing the input element." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "0 - 1", + "type_constraints": [ + { + "description": "Constrain input type to all tensor and sequence types.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + }, + { + "description": "Constrain output type to all optional tensor or optional sequence types.", + "type_param_str": "O", + "allowed_type_strs": [ + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + 
"optional(tensor(complex128))" + ] + } + ] + }, + { + "name": "OptionalGetElement", + "module": "ai.onnx", + "version": 15, + "description": "Outputs the element in the optional-type input. It is an error if the input value does not have an element\nand the behavior is undefined in this case.\n", + "inputs": [ + { + "name": "input", + "type": "O", + "description": "The optional input." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "V", + "description": "Output element in the optional input." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input type to optional tensor and optional sequence types.", + "type_param_str": "O", + "allowed_type_strs": [ + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))" + ] + }, + { + "description": "Constrain output type to all tensor or sequence types.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + } + ] + }, + { + "name": "OptionalGetElement", + "module": "ai.onnx", + "version": 18, + "description": "If the input is a tensor or sequence type, it returns the input.\nIf the input is an optional type, it outputs the element in the input.\nIt is an error if the input is an empty optional-type (i.e. does not have an element) and the behavior is undefined in this case.\n", + "inputs": [ + { + "name": "input", + "type": "O", + "description": "The optional input." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "V", + "description": "Output element in the optional input." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input type to optional tensor and optional sequence types.", + "type_param_str": "O", + "allowed_type_strs": [ + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + }, + { + "description": "Constrain output type to all tensor or sequence types.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + } + ] + }, + { + "name": "OptionalHasElement", + "module": "ai.onnx", + "version": 15, + "description": "Returns true if the optional-type input contains an element. If it is an empty optional-type, this op returns false.\n", + "inputs": [ + { + "name": "input", + "type": "O", + "description": "The optional input." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "B", + "description": "A scalar boolean tensor. If true, it indicates that optional-type input contains an element. Otherwise, it is empty." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input type to optional tensor and optional sequence types.", + "type_param_str": "O", + "allowed_type_strs": [ + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))" + ] + }, + { + "description": "Constrain output to a boolean tensor.", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "empty", + "code": "optional = None\n\ntensor_type_proto = onnx.helper.make_tensor_type_proto(\n elem_type=onnx.TensorProto.INT32, shape=[]\n)\noptional_type_proto = onnx.helper.make_optional_type_proto(tensor_type_proto)\n\n# OptionalHasElement takes a tensor or optional as input\nfor input_type_proto in [tensor_type_proto, optional_type_proto]:\n input_name_options = {\n \"empty\": \"optional_input\",\n \"empty_no_input_name\": \"\",\n \"empty_no_input\": None,\n }\n for test_name_surfix, input_name in input_name_options.items():\n if input_type_proto == tensor_type_proto and input_name:\n # the input tensor cannot be empty if input name is provided.\n continue\n node = onnx.helper.make_node(\n \"OptionalHasElement\",\n inputs=[] if input_name is None else [input_name],\n outputs=[\"output\"],\n )\n output = optional_has_element_reference_implementation(optional)\n test_name = (\n \"test_optional_has_element_\"\n + test_name_surfix\n + (\n \"_optional_input\"\n if input_type_proto == optional_type_proto\n else \"_tensor_input\"\n )\n )\n expect(\n node,\n inputs=[optional] if input_name else [],\n outputs=[output],\n input_type_protos=[input_type_proto] if input_name else [],\n name=test_name,\n )" + }, + { + "summary": "get_element_sequence", + "code": "optional = [np.array([1, 2, 3, 4]).astype(np.int32)]\ntensor_type_proto = onnx.helper.make_tensor_type_proto(\n elem_type=onnx.TensorProto.INT32,\n shape=[\n 4,\n ],\n)\nseq_type_proto = onnx.helper.make_sequence_type_proto(tensor_type_proto)\noptional_type_proto = onnx.helper.make_optional_type_proto(seq_type_proto)\n\nnode = onnx.helper.make_node(\n \"OptionalGetElement\", inputs=[\"optional_input\"], outputs=[\"output\"]\n)\noutput = optional_get_element_reference_implementation(optional)\nexpect(\n node,\n inputs=[optional],\n outputs=[output],\n input_type_protos=[optional_type_proto],\n name=\"test_optional_get_element_optional_sequence\",\n)\nexpect(\n node,\n inputs=[optional],\n outputs=[output],\n input_type_protos=[seq_type_proto],\n name=\"test_optional_get_element_sequence\",\n)" + }, + { + "summary": "get_element_tensor", + "code": "optional = np.array([1, 2, 3, 
4]).astype(np.float32)\ntensor_type_proto = onnx.helper.make_tensor_type_proto(\n elem_type=onnx.TensorProto.FLOAT,\n shape=[\n 4,\n ],\n)\noptional_type_proto = onnx.helper.make_optional_type_proto(tensor_type_proto)\n\nnode = onnx.helper.make_node(\n \"OptionalGetElement\", inputs=[\"optional_input\"], outputs=[\"output\"]\n)\noutput = optional_get_element_reference_implementation(optional)\nexpect(\n node,\n inputs=[optional],\n outputs=[output],\n input_type_protos=[optional_type_proto],\n name=\"test_optional_get_element_optional_tensor\",\n)\nexpect(\n node,\n inputs=[optional],\n outputs=[output],\n input_type_protos=[tensor_type_proto],\n name=\"test_optional_get_element_tensor\",\n)" + }, + { + "summary": "optionalhaselement", + "code": "optional = np.array([1, 2, 3, 4]).astype(np.float32)\ntensor_type_proto = onnx.helper.make_tensor_type_proto(\n elem_type=onnx.TensorProto.FLOAT,\n shape=[\n 4,\n ],\n)\noptional_type_proto = onnx.helper.make_optional_type_proto(tensor_type_proto)\n\n# OptionalHasElement takes a tensor or optional as input\nfor input_type_proto in [tensor_type_proto, optional_type_proto]:\n node = onnx.helper.make_node(\n \"OptionalHasElement\", inputs=[\"optional_input\"], outputs=[\"output\"]\n )\n output = optional_has_element_reference_implementation(optional)\n test_name = \"test_optional_has_element_\" + (\n \"optional_input\"\n if input_type_proto == optional_type_proto\n else \"tensor_input\"\n )\n expect(\n node,\n inputs=[optional],\n outputs=[output],\n input_type_protos=[input_type_proto],\n name=test_name,\n )" + } + ] + }, + { + "name": "OptionalHasElement", + "module": "ai.onnx", + "version": 18, + "description": "Returns true if (1) the input is an optional-type and contains an element,\nor, (2) the input is a tensor or sequence type.\nIf the input is not provided or is an empty optional-type, this op returns false.\n", + "inputs": [ + { + "name": "input", + "type": "O", + "option": "optional", + "description": "The optional input." + } + ], + "min_input": 0, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "B", + "description": "A scalar boolean tensor. If true, it indicates that optional-type input contains an element. Otherwise, it is empty."
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "0 - 1", + "type_constraints": [ + { + "description": "Constrain input type to optional tensor and optional sequence types.", + "type_param_str": "O", + "allowed_type_strs": [ + "optional(seq(tensor(uint8)))", + "optional(seq(tensor(uint16)))", + "optional(seq(tensor(uint32)))", + "optional(seq(tensor(uint64)))", + "optional(seq(tensor(int8)))", + "optional(seq(tensor(int16)))", + "optional(seq(tensor(int32)))", + "optional(seq(tensor(int64)))", + "optional(seq(tensor(float16)))", + "optional(seq(tensor(float)))", + "optional(seq(tensor(double)))", + "optional(seq(tensor(string)))", + "optional(seq(tensor(bool)))", + "optional(seq(tensor(complex64)))", + "optional(seq(tensor(complex128)))", + "optional(tensor(uint8))", + "optional(tensor(uint16))", + "optional(tensor(uint32))", + "optional(tensor(uint64))", + "optional(tensor(int8))", + "optional(tensor(int16))", + "optional(tensor(int32))", + "optional(tensor(int64))", + "optional(tensor(float16))", + "optional(tensor(float))", + "optional(tensor(double))", + "optional(tensor(string))", + "optional(tensor(bool))", + "optional(tensor(complex64))", + "optional(tensor(complex128))", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + }, + { + "description": "Constrain output to a boolean tensor.", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "empty", + "code": "optional = None\n\ntensor_type_proto = onnx.helper.make_tensor_type_proto(\n elem_type=onnx.TensorProto.INT32, shape=[]\n)\noptional_type_proto = onnx.helper.make_optional_type_proto(tensor_type_proto)\n\n# OptionalHasElement takes a tensor or optional as input\nfor input_type_proto in [tensor_type_proto, optional_type_proto]:\n input_name_options = {\n \"empty\": \"optional_input\",\n \"empty_no_input_name\": \"\",\n \"empty_no_input\": None,\n }\n for test_name_surfix, input_name in input_name_options.items():\n if input_type_proto == tensor_type_proto and input_name:\n # the input tensor cannot be empty if input name is provided.\n continue\n node = onnx.helper.make_node(\n \"OptionalHasElement\",\n inputs=[] if input_name is None else [input_name],\n outputs=[\"output\"],\n )\n output = optional_has_element_reference_implementation(optional)\n test_name = (\n \"test_optional_has_element_\"\n + test_name_surfix\n + (\n \"_optional_input\"\n if input_type_proto == optional_type_proto\n else \"_tensor_input\"\n )\n )\n expect(\n node,\n inputs=[optional] if input_name else [],\n outputs=[output],\n input_type_protos=[input_type_proto] if input_name else [],\n name=test_name,\n )" + }, + { + "summary": "get_element_sequence", + "code": "optional = [np.array([1, 2, 3, 4]).astype(np.int32)]\ntensor_type_proto = onnx.helper.make_tensor_type_proto(\n elem_type=onnx.TensorProto.INT32,\n shape=[\n 4,\n ],\n)\nseq_type_proto = 
onnx.helper.make_sequence_type_proto(tensor_type_proto)\noptional_type_proto = onnx.helper.make_optional_type_proto(seq_type_proto)\n\nnode = onnx.helper.make_node(\n \"OptionalGetElement\", inputs=[\"optional_input\"], outputs=[\"output\"]\n)\noutput = optional_get_element_reference_implementation(optional)\nexpect(\n node,\n inputs=[optional],\n outputs=[output],\n input_type_protos=[optional_type_proto],\n name=\"test_optional_get_element_optional_sequence\",\n)\nexpect(\n node,\n inputs=[optional],\n outputs=[output],\n input_type_protos=[seq_type_proto],\n name=\"test_optional_get_element_sequence\",\n)" + }, + { + "summary": "get_element_tensor", + "code": "optional = np.array([1, 2, 3, 4]).astype(np.float32)\ntensor_type_proto = onnx.helper.make_tensor_type_proto(\n elem_type=onnx.TensorProto.FLOAT,\n shape=[\n 4,\n ],\n)\noptional_type_proto = onnx.helper.make_optional_type_proto(tensor_type_proto)\n\nnode = onnx.helper.make_node(\n \"OptionalGetElement\", inputs=[\"optional_input\"], outputs=[\"output\"]\n)\noutput = optional_get_element_reference_implementation(optional)\nexpect(\n node,\n inputs=[optional],\n outputs=[output],\n input_type_protos=[optional_type_proto],\n name=\"test_optional_get_element_optional_tensor\",\n)\nexpect(\n node,\n inputs=[optional],\n outputs=[output],\n input_type_protos=[tensor_type_proto],\n name=\"test_optional_get_element_tensor\",\n)" + }, + { + "summary": "optionalhaselement", + "code": "optional = np.array([1, 2, 3, 4]).astype(np.float32)\ntensor_type_proto = onnx.helper.make_tensor_type_proto(\n elem_type=onnx.TensorProto.FLOAT,\n shape=[\n 4,\n ],\n)\noptional_type_proto = onnx.helper.make_optional_type_proto(tensor_type_proto)\n\n# OptionalHasElement takes a tensor or optional as input\nfor input_type_proto in [tensor_type_proto, optional_type_proto]:\n node = onnx.helper.make_node(\n \"OptionalHasElement\", inputs=[\"optional_input\"], outputs=[\"output\"]\n )\n output = optional_has_element_reference_implementation(optional)\n test_name = \"test_optional_has_element_\" + (\n \"optional_input\"\n if input_type_proto == optional_type_proto\n else \"tensor_input\"\n )\n expect(\n node,\n inputs=[optional],\n outputs=[output],\n input_type_protos=[input_type_proto],\n name=test_name,\n )" + } + ] + }, + { + "name": "Or", + "module": "ai.onnx", + "version": 1, + "description": "Returns the tensor resulting from performing the `or` logical operation\nelementwise on the input tensors `A` and `B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Enable broadcasting" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Left input tensor for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Right input tensor for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to boolean tensor.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "or", + "code": "node = onnx.helper.make_node(\n \"Or\",\n inputs=[\"x\", \"y\"],\n outputs=[\"or\"],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(bool)\ny = (np.random.randn(3, 4) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or2d\")\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or3d\")\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or4d\")" + }, + { + "summary": "or_broadcast", + "code": "node = onnx.helper.make_node(\n \"Or\",\n inputs=[\"x\", \"y\"],\n outputs=[\"or\"],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(5) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or_bcast3v1d\")\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(4, 5) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or_bcast3v2d\")\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(5, 6) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or_bcast4v2d\")\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or_bcast4v3d\")\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(bool)\ny = (np.random.randn(3, 1, 5, 6) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or_bcast4v4d\")" + } + ] + }, + { + "name": "Or", + "module": "ai.onnx", + "version": 7, + "description": "Returns the tensor resulted from performing the `or` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to boolean tensor.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "or", + "code": "node = onnx.helper.make_node(\n \"Or\",\n inputs=[\"x\", \"y\"],\n outputs=[\"or\"],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(bool)\ny = (np.random.randn(3, 4) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or2d\")\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or3d\")\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or4d\")" + }, + { + "summary": "or_broadcast", + "code": "node = onnx.helper.make_node(\n \"Or\",\n inputs=[\"x\", \"y\"],\n outputs=[\"or\"],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(5) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or_bcast3v1d\")\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(4, 5) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or_bcast3v2d\")\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(5, 6) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or_bcast4v2d\")\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or_bcast4v3d\")\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(bool)\ny = (np.random.randn(3, 1, 5, 6) > 0).astype(bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_or_bcast4v4d\")" + } + ] + }, + { + "name": "PRelu", + "module": "ai.onnx", + "version": 1, + "description": "PRelu takes input data (Tensor) and slope tensor as input, and produces one\noutput data (Tensor) where the function `f(x) = slope * x for x < 0`,\n`f(x) = x for x >= 0`., is applied to the data tensor elementwise.\n\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + }, + { + "name": "slope", + "type": "T", + "description": "Slope tensor. 
If `Slope` is of size 1, the value is shared across different channels" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "prelu", + "code": "node = onnx.helper.make_node(\n \"PRelu\",\n inputs=[\"x\", \"slope\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y], name=\"test_prelu_example\")" + }, + { + "summary": "prelu_broadcast", + "code": "node = onnx.helper.make_node(\n \"PRelu\",\n inputs=[\"x\", \"slope\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y], name=\"test_prelu_broadcast\")" + } + ], + "category": "Activation" + }, + { + "name": "PRelu", + "module": "ai.onnx", + "version": 6, + "description": "PRelu takes input data (Tensor) and slope tensor as input, and produces one\noutput data (Tensor) where the function `f(x) = slope * x for x < 0`,\n`f(x) = x for x >= 0`, is applied to the data tensor elementwise.\n\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + }, + { + "name": "slope", + "type": "T", + "description": "Slope tensor. If `Slope` is of size 1, the value is shared across different channels" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "prelu", + "code": "node = onnx.helper.make_node(\n \"PRelu\",\n inputs=[\"x\", \"slope\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y], name=\"test_prelu_example\")" + }, + { + "summary": "prelu_broadcast", + "code": "node = onnx.helper.make_node(\n \"PRelu\",\n inputs=[\"x\", \"slope\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y], name=\"test_prelu_broadcast\")" + } + ], + "category": "Activation" + }, + { + "name": "PRelu", + "module": "ai.onnx", + "version": 7, + "description": "PRelu takes input data (Tensor) and slope tensor as input, and produces one\noutput data (Tensor) where the function `f(x) = slope * x for x < 0`,\n`f(x) = x for x >= 0`, is applied to the data tensor elementwise.\nThis operator supports **unidirectional broadcasting** (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", +
"inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + }, + { + "name": "slope", + "type": "T", + "description": "Slope tensor. The shape of slope can be smaller than first input X; if so, its shape must be unidirectional broadcastable to X" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor (same size as X)" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "prelu", + "code": "node = onnx.helper.make_node(\n \"PRelu\",\n inputs=[\"x\", \"slope\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y], name=\"test_prelu_example\")" + }, + { + "summary": "prelu_broadcast", + "code": "node = onnx.helper.make_node(\n \"PRelu\",\n inputs=[\"x\", \"slope\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y], name=\"test_prelu_broadcast\")" + } + ], + "category": "Activation" + }, + { + "name": "PRelu", + "module": "ai.onnx", + "version": 9, + "description": "PRelu takes input data (Tensor) and slope tensor as input, and produces one\noutput data (Tensor) where the function `f(x) = slope * x for x < 0`,\n`f(x) = x for x >= 0`., is applied to the data tensor elementwise.\nThis operator supports **unidirectional broadcasting** (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + }, + { + "name": "slope", + "type": "T", + "description": "Slope tensor. 
The shape of slope can be smaller than that of the first input X; if so, its shape must be unidirectional broadcastable to X" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor (same size as X)" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float/int tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "prelu", + "code": "node = onnx.helper.make_node(\n \"PRelu\",\n inputs=[\"x\", \"slope\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y], name=\"test_prelu_example\")" + }, + { + "summary": "prelu_broadcast", + "code": "node = onnx.helper.make_node(\n \"PRelu\",\n inputs=[\"x\", \"slope\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y], name=\"test_prelu_broadcast\")" + } + ], + "category": "Activation" + }, + { + "name": "PRelu", + "module": "ai.onnx", + "version": 16, + "description": "PRelu takes input data (Tensor) and slope tensor as input, and produces one\noutput data (Tensor) where the function `f(x) = slope * x for x < 0`,\n`f(x) = x for x >= 0`, is applied to the data tensor elementwise.\nThis operator supports **unidirectional broadcasting** (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + }, + { + "name": "slope", + "type": "T", + "description": "Slope tensor. 
The shape of slope can be smaller than that of the first input X; if so, its shape must be unidirectional broadcastable to X" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor (same size as X)" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float/int tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "prelu", + "code": "node = onnx.helper.make_node(\n \"PRelu\",\n inputs=[\"x\", \"slope\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y], name=\"test_prelu_example\")" + }, + { + "summary": "prelu_broadcast", + "code": "node = onnx.helper.make_node(\n \"PRelu\",\n inputs=[\"x\", \"slope\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y], name=\"test_prelu_broadcast\")" + } + ], + "category": "Activation" + }, + { + "name": "Pad", + "module": "ai.onnx", + "version": 1, + "description": "Given `data` tensor, paddings, mode, and value.\nExample:\n Insert 0 paddings to the beginning of the second dimension.\n data = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n paddings = [0, 0, 2, 0]\n output = [\n [\n [0.0, 0.0, 1.0, 1.2],\n [0.0, 0.0, 2.3, 3.4],\n [0.0, 0.0, 4.5, 5.7],\n ],\n ]\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "constant", + "description": "Three modes: constant(default), reflect, edge" + }, + { + "name": "paddings", + "type": "int64[]", + "required": true, + "description": "List of integers indicating the padding element count at the beginning and end of each axis, for 2D it is the number of pixels. `paddings` rank should be double of the input's rank. `paddings` format should be as follows: [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`." + }, + { + "name": "value", + "type": "float32", + "required": false, + "description": "One float, indicates the value to be filled, default is 0" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor after padding."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "constant_pad", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\ny = pad_impl(x, pads, \"constant\", 1.2)\n\nexpect(node, inputs=[x, pads, value], outputs=[y], name=\"test_constant_pad\")" + }, + { + "summary": "constant_pad_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([1, 3], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [1, 3],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_axes\",\n)" + }, + { + "summary": "constant_pad_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([-3, -1], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [-3, -1],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_negative_axes\",\n)" + }, + { + "summary": "reflection_edge_and_wrap_pad", + "code": "for mode in (\"edge\", \"reflect\", \"wrap\"):\n node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\"], outputs=[\"y\"], mode=mode\n )\n x = np.random.randn(1, 3, 4, 5).astype(np.int32)\n pads = np.array([0, 0, 1, 1, 0, 0, 1, 1]).astype(\n np.int64\n ) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\n y = pad_impl(x, pads, mode)\n\n expect(node, inputs=[x, pads], outputs=[y], name=f\"test_{mode}_pad\")" + } + ], + "category": "Tensor" + }, + { + "name": "Pad", + "module": "ai.onnx", + "version": 2, + "description": "Given `data` tensor, pads, mode, and value.\nExample:\n Insert 0 pads to the beginning of the second dimension.\n data = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n pads = [0, 2, 0, 0]\n output = [\n [\n [0.0, 0.0, 1.0, 1.2],\n [0.0, 0.0, 2.3, 3.4],\n [0.0, 0.0, 4.5, 5.7],\n ],\n ]\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "constant", + "description": "Three modes: constant(default), reflect, edge" + }, + { + "name": "pads", + "type": "int64[]", + "required": true, + "description": "List of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D it is the number of pixels. `pads` rank should be double of the input's rank. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`." 
+ }, + { + "name": "value", + "type": "float32", + "required": false, + "description": "One float, indicates the value to be filled." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor after padding." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "constant_pad", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\ny = pad_impl(x, pads, \"constant\", 1.2)\n\nexpect(node, inputs=[x, pads, value], outputs=[y], name=\"test_constant_pad\")" + }, + { + "summary": "constant_pad_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([1, 3], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [1, 3],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_axes\",\n)" + }, + { + "summary": "constant_pad_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([-3, -1], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [-3, -1],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_negative_axes\",\n)" + }, + { + "summary": "reflection_edge_and_wrap_pad", + "code": "for mode in (\"edge\", \"reflect\", \"wrap\"):\n node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\"], outputs=[\"y\"], mode=mode\n )\n x = np.random.randn(1, 3, 4, 5).astype(np.int32)\n pads = np.array([0, 0, 1, 1, 0, 0, 1, 1]).astype(\n np.int64\n ) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\n y = pad_impl(x, pads, mode)\n\n expect(node, inputs=[x, pads], outputs=[y], name=f\"test_{mode}_pad\")" + } + ], + "category": "Tensor" + }, + { + "name": "Pad", + "module": "ai.onnx", + "version": 11, + "description": "Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,\na padded tensor (`output`) is generated.\n\nThe three supported `modes` are (similar to corresponding modes supported by `numpy.pad`):\n\n1) `constant`(default) - pads with a given constant value as specified by `constant_value` (which defaults to 0)\n\n2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis\n\n3) `edge` - pads with the edge values of 
array\n\n\nExample 1 (`constant` mode):\n Insert 0 pads to the beginning of the second dimension.\n\n data =\n [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n\n pads = [0, 2, 0, 0]\n\n mode = 'constant'\n\n constant_value = 0.0\n\n output =\n [\n [0.0, 0.0, 1.0, 1.2],\n [0.0, 0.0, 2.3, 3.4],\n [0.0, 0.0, 4.5, 5.7],\n ]\n\n\nExample 2 (`reflect` mode):\n data =\n [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n\n pads = [0, 2, 0, 0]\n\n mode = 'reflect'\n\n output =\n [\n [1.0, 1.2, 1.0, 1.2],\n [2.3, 3.4, 2.3, 3.4],\n [4.5, 5.7, 4.5, 5.7],\n ]\n\n\nExample 3 (`edge` mode):\n data =\n [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n\n pads = [0, 2, 0, 0]\n\n mode = 'edge'\n\n output =\n [\n [1.0, 1.0, 1.0, 1.2],\n [2.3, 2.3, 2.3, 3.4],\n [4.5, 4.5, 4.5, 5.7],\n ]\n\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "constant", + "description": "Supported modes: `constant`(default), `reflect`, `edge`" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Input tensor." + }, + { + "name": "pads", + "type": "tensor(int64)", + "description": "Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. `pads` should be a 1D tensor of shape [2 * input_rank]. `pads` format should be: [x1_begin, x2_begin,...,x1_end, x2_end,...], where xi_begin is the number of pad values added at the beginning of axis `i` and xi_end, the number of pad values added at the end of axis `i`." + }, + { + "name": "constant_value", + "type": "T", + "option": "optional", + "description": "(Optional) A scalar value to be used if the mode chosen is `constant` (by default it is 0)." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor after padding." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input and output to only numeric types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "constant_pad", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\ny = pad_impl(x, pads, \"constant\", 1.2)\n\nexpect(node, inputs=[x, pads, value], outputs=[y], name=\"test_constant_pad\")" + }, + { + "summary": "constant_pad_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([1, 3], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [1, 3],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_axes\",\n)" + }, + { + "summary": "constant_pad_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([-3, -1], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [-3, -1],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_negative_axes\",\n)" + }, + { + "summary": "reflection_edge_and_wrap_pad", + "code": "for mode in (\"edge\", \"reflect\", \"wrap\"):\n node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\"], outputs=[\"y\"], mode=mode\n )\n x = np.random.randn(1, 3, 4, 5).astype(np.int32)\n pads = np.array([0, 0, 1, 1, 0, 0, 1, 1]).astype(\n np.int64\n ) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\n y = pad_impl(x, pads, mode)\n\n expect(node, inputs=[x, pads], outputs=[y], name=f\"test_{mode}_pad\")" + } + ], + "category": "Tensor" + }, + { + "name": "Pad", + "module": "ai.onnx", + "version": 13, + "description": "Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,\na padded tensor (`output`) is generated.\n\nThe three supported `modes` are (similar to corresponding modes supported by `numpy.pad`):\n\n1) `constant`(default) - pads with a given constant value as specified by `constant_value` (which defaults to 0, empty string, or False)\n\n2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis\n\n3) `edge` - pads with the edge values of array\n\n\nExample 1 (`constant` mode):\n Insert 0 pads to the beginning of the second dimension.\n\n data =\n [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n\n pads = 
[0, 2, 0, 0]\n\n mode = 'constant'\n\n constant_value = 0.0\n\n output =\n [\n [0.0, 0.0, 1.0, 1.2],\n [0.0, 0.0, 2.3, 3.4],\n [0.0, 0.0, 4.5, 5.7],\n ]\n\n\nExample 2 (`reflect` mode):\n data =\n [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n\n pads = [0, 2, 0, 0]\n\n mode = 'reflect'\n\n output =\n [\n [1.0, 1.2, 1.0, 1.2],\n [2.3, 3.4, 2.3, 3.4],\n [4.5, 5.7, 4.5, 5.7],\n ]\n\n\nExample 3 (`edge` mode):\n data =\n [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n\n pads = [0, 2, 0, 0]\n\n mode = 'edge'\n\n output =\n [\n [1.0, 1.0, 1.0, 1.2],\n [2.3, 2.3, 2.3, 3.4],\n [4.5, 4.5, 4.5, 5.7],\n ]\n\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "constant", + "description": "Supported modes: `constant`(default), `reflect`, `edge`" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Input tensor." + }, + { + "name": "pads", + "type": "tensor(int64)", + "description": "Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. `pads` should be a 1D tensor of shape [2 * input_rank]. `pads` format should be: [x1_begin, x2_begin,...,x1_end, x2_end,...], where xi_begin is the number of pad values added at the beginning of axis `i` and xi_end, the number of pad values added at the end of axis `i`." + }, + { + "name": "constant_value", + "type": "T", + "option": "optional", + "description": "(Optional) A scalar value to be used if the mode chosen is `constant` (by default it is 0, empty string or False)." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor after padding." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "constant_pad", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\ny = pad_impl(x, pads, \"constant\", 1.2)\n\nexpect(node, inputs=[x, pads, value], outputs=[y], name=\"test_constant_pad\")" + }, + { + "summary": "constant_pad_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([1, 3], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [1, 3],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_axes\",\n)" + }, + { + "summary": "constant_pad_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], 
outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([-3, -1], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [-3, -1],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_negative_axes\",\n)" + }, + { + "summary": "reflection_edge_and_wrap_pad", + "code": "for mode in (\"edge\", \"reflect\", \"wrap\"):\n node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\"], outputs=[\"y\"], mode=mode\n )\n x = np.random.randn(1, 3, 4, 5).astype(np.int32)\n pads = np.array([0, 0, 1, 1, 0, 0, 1, 1]).astype(\n np.int64\n ) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\n y = pad_impl(x, pads, mode)\n\n expect(node, inputs=[x, pads], outputs=[y], name=f\"test_{mode}_pad\")" + } + ], + "category": "Tensor" + }, + { + "name": "Pad", + "module": "ai.onnx", + "version": 18, + "description": "Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,\na padded tensor (`output`) is generated.\n\nThe three supported `modes` are (similar to corresponding modes supported by `numpy.pad`):\n\n1) `constant`(default) - pads with a given constant value as specified by `constant_value` (which defaults to 0, empty string, or False)\n\n2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis\n\n3) `edge` - pads with the edge values of array\n\n\nExample 1 (`constant` mode):\n\nInsert 0 pads to the beginning of the second dimension.\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\n\npads = [0, 2, 0, 0]\n\nmode = 'constant'\n\nconstant_value = 0.0\n\noutput = [\n [0.0, 0.0, 1.0, 1.2],\n [0.0, 0.0, 2.3, 3.4],\n [0.0, 0.0, 4.5, 5.7],\n]\n```\n\nExample 2 (`reflect` mode):\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\n\npads = [0, 2, 0, 0]\n\nmode = 'reflect'\n\noutput = [\n [1.0, 1.2, 1.0, 1.2],\n [2.3, 3.4, 2.3, 3.4],\n [4.5, 5.7, 4.5, 5.7],\n]\n```\n\nExample 3 (`edge` mode):\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\n\npads = [0, 2, 0, 0]\n\nmode = 'edge'\n\noutput = [\n [1.0, 1.0, 1.0, 1.2],\n [2.3, 2.3, 2.3, 3.4],\n [4.5, 4.5, 4.5, 5.7],\n]\n```\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "constant", + "description": "Supported modes: `constant`(default), `reflect`, `edge`" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Input tensor." + }, + { + "name": "pads", + "type": "tensor(int64)", + "description": "Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. `pads` should be a 1D tensor of shape [2 * num_axes] where `num_axes` refers to the number of elements in the `axes` input or the input rank if `axes` are not provided explicitly. `pads` format should be: [x1_begin, x2_begin, ..., x1_end, x2_end,...], where xi_begin is the number of pad values added at the beginning of axis `axes[i]` and xi_end, the number of pad values added at the end of axis `axes[i]`." 
+ }, + { + "name": "constant_value", + "type": "T", + "option": "optional", + "description": "(Optional) A scalar value to be used if the mode chosen is `constant` (by default it is 0, empty string or False)." + }, + { + "name": "axes", + "type": "Tind", + "option": "optional", + "description": "1-D tensor of axes that `pads` apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined if an axis is repeated. If not provided, all axes are assumed (`[0, 1, ..., input_rank-1]`)." + } + ], + "min_input": 2, + "max_input": 4, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor after padding." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 4", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "constant_pad", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\ny = pad_impl(x, pads, \"constant\", 1.2)\n\nexpect(node, inputs=[x, pads, value], outputs=[y], name=\"test_constant_pad\")" + }, + { + "summary": "constant_pad_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([1, 3], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [1, 3],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_axes\",\n)" + }, + { + "summary": "constant_pad_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([-3, -1], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [-3, -1],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_negative_axes\",\n)" + }, + { + "summary": "reflection_edge_and_wrap_pad", + "code": "for mode in (\"edge\", \"reflect\", \"wrap\"):\n node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\"], outputs=[\"y\"], mode=mode\n )\n x = np.random.randn(1, 3, 4, 5).astype(np.int32)\n pads = np.array([0, 0, 1, 1, 0, 0, 1, 1]).astype(\n np.int64\n ) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\n y = pad_impl(x, pads, mode)\n\n expect(node, inputs=[x, pads], 
outputs=[y], name=f\"test_{mode}_pad\")" + } + ], + "category": "Tensor" + }, + { + "name": "Pad", + "module": "ai.onnx", + "version": 19, + "description": "Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,\na padded tensor (`output`) is generated.\n\nThe three supported `modes` are (similar to corresponding modes supported by `numpy.pad`):\n\n1) `constant`(default) - pads with a given constant value as specified by `constant_value` (which defaults to 0, empty string, or False)\n\n2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis\n\n3) `edge` - pads with the edge values of array\n\n4) `wrap` - wrap-around padding as if the data tensor forms a torus\n\n\nExample 1 (`constant` mode):\n\nInsert 0 pads to the beginning of the second dimension.\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\n\npads = [0, 2, 0, 0]\n\nmode = 'constant'\n\nconstant_value = 0.0\n\noutput = [\n [0.0, 0.0, 1.0, 1.2],\n [0.0, 0.0, 2.3, 3.4],\n [0.0, 0.0, 4.5, 5.7],\n]\n```\n\nExample 2 (`reflect` mode):\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\n\npads = [0, 2, 0, 0]\n\nmode = 'reflect'\n\noutput = [\n [1.0, 1.2, 1.0, 1.2],\n [2.3, 3.4, 2.3, 3.4],\n [4.5, 5.7, 4.5, 5.7],\n]\n```\n\nExample 3 (`edge` mode):\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\n\npads = [0, 2, 0, 0]\n\nmode = 'edge'\n\noutput = [\n [1.0, 1.0, 1.0, 1.2],\n [2.3, 2.3, 2.3, 3.4],\n [4.5, 4.5, 4.5, 5.7],\n]\n```\n\nExample 4 (`wrap` mode):\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\n\npads = [2, 1, 1, 1]\n\nmode = 'wrap'\n\noutput = [\n [3.4, 2.3, 3.4, 2.3],\n [5.7, 4.5, 5.7, 4.5],\n [1.2, 1.0, 1.2, 1.0],\n [3.4, 2.3, 3.4, 2.3],\n [5.7, 4.5, 5.7, 4.5],\n [1.2, 1.0, 1.2, 1.0],\n]\n```\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "constant", + "description": "Supported modes: `constant`(default), `reflect`, `edge`, `wrap`" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Input tensor." + }, + { + "name": "pads", + "type": "tensor(int64)", + "description": "Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. `pads` should be a 1D tensor of shape [2 * num_axes] where `num_axes` refers to the number of elements in the `axes` input or the input rank if `axes` are not provided explicitly. `pads` format should be: [x1_begin, x2_begin, ..., x1_end, x2_end,...], where xi_begin is the number of pad values added at the beginning of axis `axes[i]` and xi_end, the number of pad values added at the end of axis `axes[i]`." + }, + { + "name": "constant_value", + "type": "T", + "option": "optional", + "description": "(Optional) A scalar value to be used if the mode chosen is `constant` (by default it is 0, empty string or False)." + }, + { + "name": "axes", + "type": "Tind", + "option": "optional", + "description": "1-D tensor of axes that `pads` apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined if an axis is repeated. If not provided, all axes are assumed (`[0, 1, ..., input_rank-1]`)." 
+ } + ], + "min_input": 2, + "max_input": 4, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor after padding." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 4", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "constant_pad", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\ny = pad_impl(x, pads, \"constant\", 1.2)\n\nexpect(node, inputs=[x, pads, value], outputs=[y], name=\"test_constant_pad\")" + }, + { + "summary": "constant_pad_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([1, 3], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [1, 3],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_axes\",\n)" + }, + { + "summary": "constant_pad_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([-3, -1], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [-3, -1],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_negative_axes\",\n)" + }, + { + "summary": "reflection_edge_and_wrap_pad", + "code": "for mode in (\"edge\", \"reflect\", \"wrap\"):\n node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\"], outputs=[\"y\"], mode=mode\n )\n x = np.random.randn(1, 3, 4, 5).astype(np.int32)\n pads = np.array([0, 0, 1, 1, 0, 0, 1, 1]).astype(\n np.int64\n ) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\n y = pad_impl(x, pads, mode)\n\n expect(node, inputs=[x, pads], outputs=[y], name=f\"test_{mode}_pad\")" + } + ], + "category": "Tensor" + }, + { + "name": "Pad", + "module": "ai.onnx", + "version": 21, + "description": "Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,\na padded tensor (`output`) is generated.\n\nThe three supported `modes` are (similar to corresponding modes supported by `numpy.pad`):\n\n1) `constant`(default) - pads with a given constant value as 
specified by `constant_value` (which defaults to 0, empty string, or False)\n\n2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis\n\n3) `edge` - pads with the edge values of array\n\n4) `wrap` - wrap-around padding as if the data tensor forms a torus\n\n\nExample 1 (`constant` mode):\n\nInsert 0 pads to the beginning of the second dimension.\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\n\npads = [0, 2, 0, 0]\n\nmode = 'constant'\n\nconstant_value = 0.0\n\noutput = [\n [0.0, 0.0, 1.0, 1.2],\n [0.0, 0.0, 2.3, 3.4],\n [0.0, 0.0, 4.5, 5.7],\n]\n```\n\nExample 2 (`reflect` mode):\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\n\npads = [0, 2, 0, 0]\n\nmode = 'reflect'\n\noutput = [\n [1.0, 1.2, 1.0, 1.2],\n [2.3, 3.4, 2.3, 3.4],\n [4.5, 5.7, 4.5, 5.7],\n]\n```\n\nExample 3 (`edge` mode):\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\n\npads = [0, 2, 0, 0]\n\nmode = 'edge'\n\noutput = [\n [1.0, 1.0, 1.0, 1.2],\n [2.3, 2.3, 2.3, 3.4],\n [4.5, 4.5, 4.5, 5.7],\n]\n```\n\nExample 4 (`wrap` mode):\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\n\npads = [2, 1, 1, 1]\n\nmode = 'wrap'\n\noutput = [\n [3.4, 2.3, 3.4, 2.3],\n [5.7, 4.5, 5.7, 4.5],\n [1.2, 1.0, 1.2, 1.0],\n [3.4, 2.3, 3.4, 2.3],\n [5.7, 4.5, 5.7, 4.5],\n [1.2, 1.0, 1.2, 1.0],\n]\n```\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "constant", + "description": "Supported modes: `constant`(default), `reflect`, `edge`, `wrap`" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Input tensor." + }, + { + "name": "pads", + "type": "tensor(int64)", + "description": "Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. `pads` should be a 1D tensor of shape [2 * num_axes] where `num_axes` refers to the number of elements in the `axes` input or the input rank if `axes` are not provided explicitly. `pads` format should be: [x1_begin, x2_begin, ..., x1_end, x2_end,...], where xi_begin is the number of pad values added at the beginning of axis `axes[i]` and xi_end, the number of pad values added at the end of axis `axes[i]`." + }, + { + "name": "constant_value", + "type": "T", + "option": "optional", + "description": "(Optional) A scalar value to be used if the mode chosen is `constant` (by default it is 0, empty string or False)." + }, + { + "name": "axes", + "type": "Tind", + "option": "optional", + "description": "1-D tensor of axes that `pads` apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined if an axis is repeated. If not provided, all axes are assumed (`[0, 1, ..., input_rank-1]`)." + } + ], + "min_input": 2, + "max_input": 4, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor after padding." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 4", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types up to IRv10.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "constant_pad", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\ny = pad_impl(x, pads, \"constant\", 1.2)\n\nexpect(node, inputs=[x, pads, value], outputs=[y], name=\"test_constant_pad\")" + }, + { + "summary": "constant_pad_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([1, 3], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [1, 3],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_axes\",\n)" + }, + { + "summary": "constant_pad_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\", \"value\", \"axes\"], outputs=[\"y\"], mode=\"constant\"\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 3, 0, 4]).astype(\n np.int64\n) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\naxes = np.array([-3, -1], dtype=np.int64)\ny = pad_impl(\n x,\n pads,\n \"constant\",\n 1.2,\n [-3, -1],\n)\n\nexpect(\n node,\n inputs=[x, pads, value, axes],\n outputs=[y],\n name=\"test_constant_pad_negative_axes\",\n)" + }, + { + "summary": "reflection_edge_and_wrap_pad", + "code": "for mode in (\"edge\", \"reflect\", \"wrap\"):\n node = onnx.helper.make_node(\n \"Pad\", inputs=[\"x\", \"pads\"], outputs=[\"y\"], mode=mode\n )\n x = np.random.randn(1, 3, 4, 5).astype(np.int32)\n pads = np.array([0, 0, 1, 1, 0, 0, 1, 1]).astype(\n np.int64\n ) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\n y = pad_impl(x, pads, mode)\n\n expect(node, inputs=[x, pads], outputs=[y], name=f\"test_{mode}_pad\")" + } + ], + "category": "Tensor" + }, + { + "name": "Pow", + "module": "ai.onnx", + "version": 1, + "description": "Pow takes input data (Tensor) and exponent Tensor, and\nproduces one output data (Tensor) where the function `f(x) = x^exponent`,\nis applied to the data tensor elementwise.\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. 
When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions. See doc for details." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Pass 1 to enable broadcasting" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor of any shape, base of the exponent." + }, + { + "name": "Y", + "type": "T", + "description": "Input tensor of any shape broadcastable to X shape, the exponent component." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Z", + "type": "T", + "description": "Output tensor (same size as X)" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "pow", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_example\")\n\nx = np.arange(60).reshape(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow\")" + }, + { + "summary": "pow_broadcast", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array(2).astype(np.float32)\nz = pow(x, y) # expected output [1., 4., 9.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_bcast_scalar\")\n\nnode = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)\ny = np.array([1, 2, 3]).astype(np.float32)\n# expected output [[1, 4, 27], [4, 25, 216]]\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_bcast_array\")" + }, + { + "summary": "types", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_int64\")\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 
729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int64_float32\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_int32\")\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int32_float32\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_uint64\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_uint32\")\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int64_int64\")\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int32_int32\")" + } + ] + }, + { + "name": "Pow", + "module": "ai.onnx", + "version": 7, + "description": "Pow takes input data (Tensor) and exponent Tensor, and\nproduces one output data (Tensor) where the function `f(x) = x^exponent`,\nis applied to the data tensor elementwise.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "First operand, base of the exponent." + }, + { + "name": "Y", + "type": "T", + "description": "Second operand, power of the exponent." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Z", + "type": "T", + "description": "Output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "pow", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_example\")\n\nx = np.arange(60).reshape(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow\")" + }, + { + "summary": "pow_broadcast", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array(2).astype(np.float32)\nz = pow(x, y) # expected output [1., 4., 9.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_bcast_scalar\")\n\nnode = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)\ny = np.array([1, 2, 3]).astype(np.float32)\n# expected output [[1, 4, 27], [4, 25, 216]]\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_bcast_array\")" + }, + { + "summary": "types", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_int64\")\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int64_float32\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_int32\")\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int32_float32\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_uint64\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_uint32\")\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int64_int64\")\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int32_int32\")" + } + ] + }, + { + "name": "Pow", + "module": "ai.onnx", + "version": 12, + "description": "Pow takes input data (Tensor) and exponent Tensor, and\nproduces one output data (Tensor) where the function `f(x) = x^exponent`,\nis 
applied to the data tensor elementwise.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "First operand, base of the exponent." + }, + { + "name": "Y", + "type": "T1", + "description": "Second operand, power of the exponent." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Z", + "type": "T", + "description": "Output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input X and output types to float/int tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain input Y types to float/int tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "pow", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_example\")\n\nx = np.arange(60).reshape(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow\")" + }, + { + "summary": "pow_broadcast", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array(2).astype(np.float32)\nz = pow(x, y) # expected output [1., 4., 9.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_bcast_scalar\")\n\nnode = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)\ny = np.array([1, 2, 3]).astype(np.float32)\n# expected output [[1, 4, 27], [4, 25, 216]]\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_bcast_array\")" + }, + { + "summary": "types", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_int64\")\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int64_float32\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_int32\")\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int32_float32\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, 
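\n # uint64 exponents are valid here: since Pow-12, the exponent constraint T1 also allows unsigned integer tensors\n 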
inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_uint64\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_uint32\")\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int64_int64\")\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int32_int32\")" + } + ] + }, + { + "name": "Pow", + "module": "ai.onnx", + "version": 13, + "description": "Pow takes input data (Tensor) and exponent Tensor, and\nproduces one output data (Tensor) where the function `f(x) = x^exponent`,\nis applied to the data tensor elementwise.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "First operand, base of the exponent." + }, + { + "name": "Y", + "type": "T1", + "description": "Second operand, power of the exponent." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Z", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input X and output types to float/int tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain input Y types to float/int tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "pow", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_example\")\n\nx = np.arange(60).reshape(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow\")" + }, + { + "summary": "pow_broadcast", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array(2).astype(np.float32)\nz = pow(x, y) # expected output [1., 4., 9.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_bcast_scalar\")\n\nnode = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)\ny = np.array([1, 2, 3]).astype(np.float32)\n# expected output [[1, 4, 27], [4, 25, 216]]\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_bcast_array\")" + }, + { + "summary": "types", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = 
np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_int64\")\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int64_float32\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_int32\")\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int32_float32\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_uint64\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_uint32\")\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int64_int64\")\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int32_int32\")" + } + ] + }, + { + "name": "Pow", + "module": "ai.onnx", + "version": 15, + "description": "Pow takes input data (Tensor) and exponent Tensor, and\nproduces one output data (Tensor) where the function `f(x) = x^exponent`,\nis applied to the data tensor elementwise.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "First operand, base of the exponent." + }, + { + "name": "Y", + "type": "T1", + "description": "Second operand, power of the exponent." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Z", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input X and output types to float/int tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain input Y types to float/int tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "pow", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_example\")\n\nx = np.arange(60).reshape(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow\")" + }, + { + "summary": "pow_broadcast", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array(2).astype(np.float32)\nz = pow(x, y) # expected output [1., 4., 9.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_bcast_scalar\")\n\nnode = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\nx = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)\ny = np.array([1, 2, 3]).astype(np.float32)\n# expected output [[1, 4, 27], [4, 25, 216]]\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_bcast_array\")" + }, + { + "summary": "types", + "code": "node = onnx.helper.make_node(\n \"Pow\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_int64\")\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int64_float32\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_int32\")\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int32_float32\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_uint64\")\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_float32_uint32\")\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = 
pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int64_int64\")\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_pow_types_int32_int32\")" + } + ] + }, + { + "name": "QLinearConv", + "module": "ai.onnx", + "version": 10, + "description": "The convolution operator consumes a quantized input tensor, its scale and zero point,\na quantized filter, its scale and zero point, and output's scale and zero point,\nand computes the quantized output. Each scale and zero-point pair must have same shape.\nIt means they must be either scalars (per tensor) or 1-D tensors (per output channel).\nEach input or output and its related zero point must have same type.\nWhen bias is present it must be quantized using scale = input scale * weight scale and\nzero point as 0.\n", + "attributes": [ + { + "name": "auto_pad", + "type": "string", + "required": false, + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER." + }, + { + "name": "dilations", + "type": "int64[]", + "required": false, + "description": "dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis." + }, + { + "name": "group", + "type": "int64", + "required": false, + "default": 1, + "description": "number of groups input channels and output channels are divided into. default is 1." + }, + { + "name": "kernel_shape", + "type": "int64[]", + "required": false, + "description": "The shape of the convolution kernel. If not present, should be inferred from input 'w'." + }, + { + "name": "pads", + "type": "int64[]", + "required": false, + "description": "Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The values represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follows: [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along the start and end of each spatial axis." + }, + { + "name": "strides", + "type": "int64[]", + "required": false, + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis." + } + ], + "inputs": [ + { + "name": "x", + "type": "T1", + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]."
+ }, + { + "name": "x_scale", + "type": "tensor(float)", + "description": "Scale tensor for input 'x'. It's a scalar, which means a per-tensor/layer quantization." + }, + { + "name": "x_zero_point", + "type": "T1", + "description": "Zero point tensor for input 'x'. It's a scalar, which means a per-tensor/layer quantization." + }, + { + "name": "w", + "type": "T2", + "description": "The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL ...]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL. " + }, + { + "name": "w_scale", + "type": "tensor(float)", + "description": "Scale tensor for input 'w'. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it's a 1-D tensor, its number of elements should be equal to the number of output channels (M)." + }, + { + "name": "w_zero_point", + "type": "T2", + "description": "Zero point tensor for input 'w'. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it's a 1-D tensor, its number of elements should be equal to the number of output channels (M)." + }, + { + "name": "y_scale", + "type": "tensor(float)", + "description": "Scale tensor for output 'y'. It's a scalar, which means a per-tensor/layer quantization." + }, + { + "name": "y_zero_point", + "type": "T3", + "description": "Zero point tensor for output 'y'. It's a scalar, which means a per-tensor/layer quantization." + }, + { + "name": "B", + "type": "T4", + "option": "optional", + "description": "Optional 1D bias to be added to the convolution, has size of M. Bias must be quantized using scale = x_scale * w_scale and zero_point = 0" + } + ], + "min_input": 8, + "max_input": 9, + "outputs": [ + { + "name": "y", + "type": "T3", + "description": "Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "8 - 9", + "type_constraints": [ + { + "description": "Constrain input type to 8-bit integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + }, + { + "description": "Constrain filter type to 8-bit integer tensor.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + }, + { + "description": "Constrain output type to 8-bit integer tensor.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + }, + { + "description": "Constrain bias type to 32-bit integer tensor.", + "type_param_str": "T4", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "qlinearconv", + "code": "node = onnx.helper.make_node(\n \"QLinearConv\",\n inputs=[\n \"x\",\n \"x_scale\",\n \"x_zero_point\",\n \"w\",\n \"w_scale\",\n \"w_zero_point\",\n \"y_scale\",\n \"y_zero_point\",\n ],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n [255, 174, 162, 25, 203, 168, 58],\n [15, 59, 237, 95, 129, 0, 64],\n [56, 242, 153, 221, 168, 12, 166],\n [232, 178, 186, 195, 237, 162, 237],\n [188, 39, 124, 77, 80, 102, 43],\n [127, 230, 21, 83, 41, 40, 134],\n [255, 154, 92, 141, 42, 148, 247],\n ],\n dtype=np.uint8,\n).reshape((1, 1, 7, 7))\n\nx_scale = np.float32(0.00369204697)\nx_zero_point = np.uint8(132)\n\nw = np.array([0], dtype=np.uint8).reshape((1, 1, 1, 1))\n\nw_scale = np.array([0.00172794575], dtype=np.float32)\nw_zero_point = np.array([255], dtype=np.uint8)\n\ny_scale = np.float32(0.00162681262)\ny_zero_point = np.uint8(123)\n\noutput = np.array(\n [\n [0, 81, 93, 230, 52, 87, 197],\n [240, 196, 18, 160, 126, 255, 191],\n [199, 13, 102, 34, 87, 243, 89],\n [23, 77, 69, 60, 18, 93, 18],\n [67, 216, 131, 178, 175, 153, 212],\n [128, 25, 234, 172, 214, 215, 121],\n [0, 101, 163, 114, 213, 107, 8],\n ],\n dtype=np.uint8,\n).reshape((1, 1, 7, 7))\n\nexpect(\n node,\n inputs=[\n x,\n x_scale,\n x_zero_point,\n w,\n w_scale,\n w_zero_point,\n y_scale,\n y_zero_point,\n ],\n outputs=[output],\n name=\"test_qlinearconv\",\n)" + } + ] + }, + { + "name": "QLinearMatMul", + "module": "ai.onnx", + "version": 10, + "description": "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.\nIt consumes two quantized input tensors, their scales and zero points, scale and zero point of output,\nand computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point).\nFor (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details.\nScale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor\n(per row for 'a' and per column for 'b'). Scalar refers to per tensor quantization whereas N-D refers to per row\nor per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be\nan M element vector [v_1, v_2, ..., v_M] for per row quantization and K element vector of shape [v_1, v_2, ..., v_K]\nfor per column quantization. 
If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may\nhave shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization.\nProduction must never overflow, and accumulation may overflow if and only if in 32 bits.\n", + "inputs": [ + { + "name": "a", + "type": "T1", + "description": "N-dimensional quantized matrix a" + }, + { + "name": "a_scale", + "type": "tensor(float)", + "description": "scale of quantized input a" + }, + { + "name": "a_zero_point", + "type": "T1", + "description": "zero point of quantized input a" + }, + { + "name": "b", + "type": "T2", + "description": "N-dimensional quantized matrix b" + }, + { + "name": "b_scale", + "type": "tensor(float)", + "description": "scale of quantized input b" + }, + { + "name": "b_zero_point", + "type": "T2", + "description": "zero point of quantized input b" + }, + { + "name": "y_scale", + "type": "tensor(float)", + "description": "scale of quantized output y" + }, + { + "name": "y_zero_point", + "type": "T3", + "description": "zero point of quantized output y" + } + ], + "min_input": 8, + "max_input": 8, + "outputs": [ + { + "name": "y", + "type": "T3", + "description": "Quantized matrix multiply results from a * b" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input a and its zero point data type to 8-bit integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + }, + { + "description": "Constrain input b and its zero point data type to 8-bit integer tensor.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + }, + { + "description": "Constrain output y and its zero point data type to 8-bit integer tensor.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + } + ], + "examples": [ + { + "summary": "int", + "code": "for quant_type_name in [\"uint8\", \"int8\"]:\n quant_type = getattr(np, quant_type_name)\n for dtype_name in [\"float32\", \"float16\"]:\n dtype = getattr(np, dtype_name)\n node = onnx.helper.make_node(\n \"QLinearMatMul\",\n inputs=[\n \"a\",\n \"a_scale\",\n \"a_zero_point\",\n \"b\",\n \"b_scale\",\n \"b_zero_point\",\n \"y_scale\",\n \"y_zero_point\",\n ],\n outputs=[\"y\"],\n )\n\n # 2D\n a = np.array([[208, 236, 0, 238], [3, 214, 255, 29]])\n if quant_type == np.int8:\n a -= 127\n a = a.astype(quant_type)\n\n a_scale = np.array([0.0066], dtype=dtype)\n a_zero_point = np.array(\n [113 - 127] if quant_type == np.int8 else [113], dtype=quant_type\n )\n\n b = np.array(\n [[152, 51, 244], [60, 26, 255], [0, 127, 246], [127, 254, 247]]\n )\n if quant_type == np.int8:\n b -= 127\n b = b.astype(quant_type)\n\n b_scale = np.array([0.00705], dtype=dtype)\n b_zero_point = np.array(\n [114 - 127] if quant_type == np.int8 else [114], dtype=quant_type\n )\n\n y_scale = np.array([0.0107], dtype=dtype)\n y_zero_point = np.array(\n [118 - 127] if quant_type == np.int8 else [118], dtype=quant_type\n )\n\n if quant_type == np.int8:\n output = np.array([[41, -12, -9], [1, -75, 20]])\n else:\n output = np.array([[168, 115, 255], [1, 66, 151]])\n output = output.astype(quant_type)\n\n expect(\n node,\n inputs=[\n a,\n a_scale,\n a_zero_point,\n b,\n b_scale,\n b_zero_point,\n y_scale,\n y_zero_point,\n ],\n outputs=[output],\n name=f\"test_qlinearmatmul_2D_{quant_type_name}_{dtype_name}\",\n )\n\n # 3D\n a = np.array(\n [\n [[208, 236, 0, 238], [3, 214, 255, 29]],\n [[208, 236, 0, 
238], [3, 214, 255, 29]],\n ],\n )\n if quant_type == np.int8:\n a -= 127\n a = a.astype(quant_type)\n\n a_scale = np.array([0.0066], dtype=dtype)\n a_zero_point = np.array(\n [113 - 127] if quant_type == np.int8 else [113], dtype=quant_type\n )\n\n b = np.array(\n [\n [[152, 51, 244], [60, 26, 255], [0, 127, 246], [127, 254, 247]],\n [[152, 51, 244], [60, 26, 255], [0, 127, 246], [127, 254, 247]],\n ],\n )\n if quant_type == np.int8:\n b -= 127\n b = b.astype(quant_type)\n\n b_scale = np.array([0.00705], dtype=dtype)\n b_zero_point = np.array([114], dtype=quant_type)\n\n y_scale = np.array([0.0107], dtype=dtype)\n y_zero_point = np.array(\n [118 - 127] if quant_type == np.int8 else [118], dtype=quant_type\n )\n\n if quant_type == np.int8:\n if dtype == np.float32:\n output = np.array(\n [\n [[-86, 117, 120], [115, 39, -121]],\n [[-86, 117, 120], [115, 39, -121]],\n ]\n )\n else:\n output = np.array(\n [\n [[-86, 116, 119], [115, 39, -121]],\n [[-86, 116, 119], [115, 39, -121]],\n ]\n )\n else:\n output = np.array(\n [\n [[168, 115, 255], [1, 66, 151]],\n [[168, 115, 255], [1, 66, 151]],\n ]\n )\n output = output.astype(quant_type)\n\n expect(\n node,\n inputs=[\n a,\n a_scale,\n a_zero_point,\n b,\n b_scale,\n b_zero_point,\n y_scale,\n y_zero_point,\n ],\n outputs=[output],\n name=f\"test_qlinearmatmul_3D_{quant_type_name}_{dtype_name}\",\n )" + } + ] + }, + { + "name": "QLinearMatMul", + "module": "ai.onnx", + "version": 21, + "description": "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.\nIt consumes two quantized input tensors, their scales and zero points, scale and zero point of output,\nand computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point).\nFor (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details.\nScale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor\n(per row for 'a' and per column for 'b'). Scalar refers to per tensor quantization whereas N-D refers to per row\nor per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be\nan M element vector [v_1, v_2, ..., v_M] for per row quantization and K element vector of shape [v_1, v_2, ..., v_K]\nfor per column quantization. 
If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may\nhave shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization.\nProduction must never overflow, and accumulation may overflow if and only if in 32 bits.\n", + "inputs": [ + { + "name": "a", + "type": "T1", + "description": "N-dimensional quantized matrix a" + }, + { + "name": "a_scale", + "type": "TS", + "description": "scale of quantized input a" + }, + { + "name": "a_zero_point", + "type": "T1", + "description": "zero point of quantized input a" + }, + { + "name": "b", + "type": "T2", + "description": "N-dimensional quantized matrix b" + }, + { + "name": "b_scale", + "type": "TS", + "description": "scale of quantized input b" + }, + { + "name": "b_zero_point", + "type": "T2", + "description": "zero point of quantized input b" + }, + { + "name": "y_scale", + "type": "TS", + "description": "scale of quantized output y" + }, + { + "name": "y_zero_point", + "type": "T3", + "description": "zero point of quantized output y" + } + ], + "min_input": 8, + "max_input": 8, + "outputs": [ + { + "name": "y", + "type": "T3", + "description": "Quantized matrix multiply results from a * b" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain scales.", + "type_param_str": "TS", + "allowed_type_strs": [ + "tensor(float)", + "tensor(float16)", + "tensor(bfloat16)" + ] + }, + { + "description": "The type of input a and its zeropoint.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + }, + { + "description": "The type of input b and its zeropoint.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + }, + { + "description": "The type of the output and its zeropoint.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + } + ], + "examples": [ + { + "summary": "int", + "code": "for quant_type_name in [\"uint8\", \"int8\"]:\n quant_type = getattr(np, quant_type_name)\n for dtype_name in [\"float32\", \"float16\"]:\n dtype = getattr(np, dtype_name)\n node = onnx.helper.make_node(\n \"QLinearMatMul\",\n inputs=[\n \"a\",\n \"a_scale\",\n \"a_zero_point\",\n \"b\",\n \"b_scale\",\n \"b_zero_point\",\n \"y_scale\",\n \"y_zero_point\",\n ],\n outputs=[\"y\"],\n )\n\n # 2D\n a = np.array([[208, 236, 0, 238], [3, 214, 255, 29]])\n if quant_type == np.int8:\n a -= 127\n a = a.astype(quant_type)\n\n a_scale = np.array([0.0066], dtype=dtype)\n a_zero_point = np.array(\n [113 - 127] if quant_type == np.int8 else [113], dtype=quant_type\n )\n\n b = np.array(\n [[152, 51, 244], [60, 26, 255], [0, 127, 246], [127, 254, 247]]\n )\n if quant_type == np.int8:\n b -= 127\n b = b.astype(quant_type)\n\n b_scale = np.array([0.00705], dtype=dtype)\n b_zero_point = np.array(\n [114 - 127] if quant_type == np.int8 else [114], dtype=quant_type\n )\n\n y_scale = np.array([0.0107], dtype=dtype)\n y_zero_point = np.array(\n [118 - 127] if quant_type == np.int8 else [118], dtype=quant_type\n )\n\n if quant_type == np.int8:\n output = np.array([[41, -12, -9], [1, -75, 20]])\n else:\n output = np.array([[168, 115, 255], [1, 
66, 151]])\n output = output.astype(quant_type)\n\n expect(\n node,\n inputs=[\n a,\n a_scale,\n a_zero_point,\n b,\n b_scale,\n b_zero_point,\n y_scale,\n y_zero_point,\n ],\n outputs=[output],\n name=f\"test_qlinearmatmul_2D_{quant_type_name}_{dtype_name}\",\n )\n\n # 3D\n a = np.array(\n [\n [[208, 236, 0, 238], [3, 214, 255, 29]],\n [[208, 236, 0, 238], [3, 214, 255, 29]],\n ],\n )\n if quant_type == np.int8:\n a -= 127\n a = a.astype(quant_type)\n\n a_scale = np.array([0.0066], dtype=dtype)\n a_zero_point = np.array(\n [113 - 127] if quant_type == np.int8 else [113], dtype=quant_type\n )\n\n b = np.array(\n [\n [[152, 51, 244], [60, 26, 255], [0, 127, 246], [127, 254, 247]],\n [[152, 51, 244], [60, 26, 255], [0, 127, 246], [127, 254, 247]],\n ],\n )\n if quant_type == np.int8:\n b -= 127\n b = b.astype(quant_type)\n\n b_scale = np.array([0.00705], dtype=dtype)\n b_zero_point = np.array([114], dtype=quant_type)\n\n y_scale = np.array([0.0107], dtype=dtype)\n y_zero_point = np.array(\n [118 - 127] if quant_type == np.int8 else [118], dtype=quant_type\n )\n\n if quant_type == np.int8:\n if dtype == np.float32:\n output = np.array(\n [\n [[-86, 117, 120], [115, 39, -121]],\n [[-86, 117, 120], [115, 39, -121]],\n ]\n )\n else:\n output = np.array(\n [\n [[-86, 116, 119], [115, 39, -121]],\n [[-86, 116, 119], [115, 39, -121]],\n ]\n )\n else:\n output = np.array(\n [\n [[168, 115, 255], [1, 66, 151]],\n [[168, 115, 255], [1, 66, 151]],\n ]\n )\n output = output.astype(quant_type)\n\n expect(\n node,\n inputs=[\n a,\n a_scale,\n a_zero_point,\n b,\n b_scale,\n b_zero_point,\n y_scale,\n y_zero_point,\n ],\n outputs=[output],\n name=f\"test_qlinearmatmul_3D_{quant_type_name}_{dtype_name}\",\n )" + } + ] + }, + { + "name": "QuantizeLinear", + "module": "ai.onnx", + "version": 10, + "description": "The linear per-tensor/layer quantization operator. It consumes a high precision tensor, a scale, a zero point to compute the low precision / quantized tensor.\nThe quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8.\nFor (x / y_scale), it's rounding to the nearest even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type.\n", + "inputs": [ + { + "name": "x", + "type": "T1", + "description": "N-D full precision Input tensor to be quantized." + }, + { + "name": "y_scale", + "type": "tensor(float)", + "description": "Scale for doing quantization to get 'y'. It's a scalar, which means a per-tensor/layer quantization." + }, + { + "name": "y_zero_point", + "type": "T2", + "option": "optional", + "description": "Zero point for doing quantization to get 'y'. It's a scalar, which means a per-tensor/layer quantization. Default value is uint8 typed 0 if it's not specified." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "y", + "type": "T2", + "description": "N-D quantized output tensor. It has same shape as input 'x'." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain 'x' to float or int32 tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(int32)" + ] + }, + { + "description": "Constrain 'y_zero_point' and 'y' to 8-bit integer tensor.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + } + ], + "examples": [ + { + "summary": "axis", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n [\n [[-162, 10], [-100, 232], [-20, -50]],\n [[-76, 0], [0, 252], [32, -44]],\n [[245, -485], [-960, -270], [-375, -470]],\n ],\n ],\n dtype=np.float32,\n)\ny_scale = np.array([2, 4, 5], dtype=np.float32)\ny_zero_point = np.array([84, 24, 196], dtype=np.uint8)\ny = (x / y_scale.reshape(1, 3, 1, 1) + y_zero_point.reshape(1, 3, 1, 1)).astype(\n np.uint8\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_axis\",\n)" + }, + { + "summary": "e4m3fn", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E4M3FN, [1], [0])\ny = make_tensor(\"y\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96])\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_e4m3fn\",\n)" + }, + { + "summary": "e5m2", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E5M2, [1], [0.0])\ny = make_tensor(\"y\", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96])\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_e5m2\",\n)" + }, + { + "summary": "int16", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n 0.0,\n -514.0,\n 3.0,\n -3.0,\n 2.9,\n -2.9,\n 3.1,\n -3.1,\n 65022.0,\n -66046.0,\n 65023.0,\n -66047.0,\n 65024.0,\n -66048.0,\n 70000.0,\n -70000.0,\n ]\n).astype(np.float32)\ny_scale = np.float32(2.0)\ny_zero_point = np.int16(256)\ny = np.array(\n [\n 256,\n -1,\n 258,\n 254,\n 257,\n 255,\n 258,\n 254,\n 32767,\n -32767,\n 32767,\n -32768,\n 32767,\n -32768,\n 32767,\n -32768,\n ]\n).astype(np.int16)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_int16\",\n)" + }, + { + "summary": "int4", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\nx = np.array(\n [\n [0.0, 2.5, 4.8, 8.6],\n [-30, -20, 6, 9],\n [12, 15, 16, 40],\n ]\n).astype(np.float32)\n\ny_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)\ny_zero_point = make_tensor(\n \"zero_point\", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale)\n)\ny = make_tensor(\n \"y\", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7]\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_int4\",\n)" + }, + { + "summary": 
"quantizelinear", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0, 2, 3, 1000, -254, -1000]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = np.uint8(128)\ny = np.array([128, 129, 130, 255, 1, 0]).astype(np.uint8)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear\",\n)" + }, + { + "summary": "uint16", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n 0.0,\n -128.0,\n 3.0,\n -3.0,\n 2.9,\n -2.9,\n 3.1,\n -3.1,\n 65536.0,\n -65534.0,\n 70000.0,\n -70000.0,\n ]\n).astype(np.float32)\ny_scale = np.float32(2.0)\ny_zero_point = np.uint16(32767)\ny = np.array(\n [\n 32767,\n 32703,\n 32769,\n 32765,\n 32768,\n 32766,\n 32769,\n 32765,\n 65535,\n 0,\n 65535,\n 0,\n ]\n).astype(np.uint16)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_uint16\",\n)" + }, + { + "summary": "uint4", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\nx = np.array(\n [\n [0.0, 2.5, 4.8, 8.6],\n [-30, -20, 6, 9],\n [12, 15, 16, 40],\n ]\n).astype(np.float32)\n\ny_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)\ny_zero_point = make_tensor(\n \"zero_point\", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale)\n)\ny = make_tensor(\n \"y\", TensorProto.UINT4, x.shape, [1, 2, 3, 5, -1, -1, 3, 4, 4, 5, 5, 11]\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_uint4\",\n)" + } + ] + }, + { + "name": "QuantizeLinear", + "module": "ai.onnx", + "version": 13, + "description": "The linear quantization operator. It consumes a high precision tensor, a scale, and a zero point to compute the low precision / quantized tensor.\nThe scale factor and zero point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization.\nThe quantization formula is y = saturate ((x / y_scale) + y_zero_point).\nFor saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8.\nFor (x / y_scale), it's rounding to the nearest even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) The axis of the quantization dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + } + ], + "inputs": [ + { + "name": "x", + "type": "T1", + "description": "N-D full precision Input tensor to be quantized." + }, + { + "name": "y_scale", + "type": "tensor(float)", + "description": "Scale for doing quantization to get 'y'. It can be a scalar, which means per-tensor/layer quantization, or a 1-D Tensor for per-axis quantization." + }, + { + "name": "y_zero_point", + "type": "T2", + "option": "optional", + "description": "Zero point for doing quantization to get 'y'. Shape must match y_scale. Default is uint8 with zero point of 0 if it's not specified." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "y", + "type": "T2", + "description": "N-D quantized output tensor. 
It has the same shape as input 'x'." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain 'x' to float or int32 tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(int32)" + ] + }, + { + "description": "Constrain 'y_zero_point' and 'y' to 8-bit integer tensor.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ] + } + ], + "examples": [ + { + "summary": "axis", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n [\n [[-162, 10], [-100, 232], [-20, -50]],\n [[-76, 0], [0, 252], [32, -44]],\n [[245, -485], [-960, -270], [-375, -470]],\n ],\n ],\n dtype=np.float32,\n)\ny_scale = np.array([2, 4, 5], dtype=np.float32)\ny_zero_point = np.array([84, 24, 196], dtype=np.uint8)\ny = (x / y_scale.reshape(1, 3, 1, 1) + y_zero_point.reshape(1, 3, 1, 1)).astype(\n np.uint8\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_axis\",\n)" + }, + { + "summary": "e4m3fn", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E4M3FN, [1], [0])\ny = make_tensor(\"y\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96])\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_e4m3fn\",\n)" + }, + { + "summary": "e5m2", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E5M2, [1], [0.0])\ny = make_tensor(\"y\", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96])\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_e5m2\",\n)" + }, + { + "summary": "int16", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n 0.0,\n -514.0,\n 3.0,\n -3.0,\n 2.9,\n -2.9,\n 3.1,\n -3.1,\n 65022.0,\n -66046.0,\n 65023.0,\n -66047.0,\n 65024.0,\n -66048.0,\n 70000.0,\n -70000.0,\n ]\n).astype(np.float32)\ny_scale = np.float32(2.0)\ny_zero_point = np.int16(256)\ny = np.array(\n [\n 256,\n -1,\n 258,\n 254,\n 257,\n 255,\n 258,\n 254,\n 32767,\n -32767,\n 32767,\n -32768,\n 32767,\n -32768,\n 32767,\n -32768,\n ]\n).astype(np.int16)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_int16\",\n)" + }, + { + "summary": "int4", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\nx = np.array(\n [\n [0.0, 2.5, 4.8, 8.6],\n [-30, -20, 6, 9],\n [12, 15, 16, 40],\n ]\n).astype(np.float32)\n\ny_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)\ny_zero_point = make_tensor(\n \"zero_point\", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale)\n)\ny = make_tensor(\n \"y\", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7]\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n 
name=\"test_quantizelinear_int4\",\n)" + }, + { + "summary": "quantizelinear", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0, 2, 3, 1000, -254, -1000]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = np.uint8(128)\ny = np.array([128, 129, 130, 255, 1, 0]).astype(np.uint8)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear\",\n)" + }, + { + "summary": "uint16", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n 0.0,\n -128.0,\n 3.0,\n -3.0,\n 2.9,\n -2.9,\n 3.1,\n -3.1,\n 65536.0,\n -65534.0,\n 70000.0,\n -70000.0,\n ]\n).astype(np.float32)\ny_scale = np.float32(2.0)\ny_zero_point = np.uint16(32767)\ny = np.array(\n [\n 32767,\n 32703,\n 32769,\n 32765,\n 32768,\n 32766,\n 32769,\n 32765,\n 65535,\n 0,\n 65535,\n 0,\n ]\n).astype(np.uint16)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_uint16\",\n)" + }, + { + "summary": "uint4", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\nx = np.array(\n [\n [0.0, 2.5, 4.8, 8.6],\n [-30, -20, 6, 9],\n [12, 15, 16, 40],\n ]\n).astype(np.float32)\n\ny_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)\ny_zero_point = make_tensor(\n \"zero_point\", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale)\n)\ny = make_tensor(\n \"y\", TensorProto.UINT4, x.shape, [1, 2, 3, 5, -1, -1, 3, 4, 4, 5, 5, 11]\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_uint4\",\n)" + } + ] + }, + { + "name": "QuantizeLinear", + "module": "ai.onnx", + "version": 19, + "description": "The linear quantization operator. It consumes a high precision tensor, a scale, and a zero point to compute the low precision / quantized tensor.\nThe scale factor and zero point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization.\nThe quantization formula is `y = saturate ((x / y_scale) + y_zero_point)`.\nFor saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8.\nFor (x / y_scale), it's rounding to the nearest even. Refer to https://en.wikipedia.org/wiki/Rounding for details.\n'y_zero_point' and 'y' must have same type.\n'y_zero_point' is usually not used for quantization to float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz,\nbut the quantization formula remains the same for consistency and\nthe type of the attribute 'y_zero_point' still determines the quantization type.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) The axis of the quantization dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + }, + { + "name": "saturate", + "type": "int64", + "required": false, + "default": 1, + "description": "The parameter defines how the conversion behaves if an input value is out of range of the destination type. It only applies for float 8 quantization (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. All cases are fully described in two tables inserted in the operator description." 
+ } + ], + "inputs": [ + { + "name": "x", + "type": "T1", + "description": "N-D full precision Input tensor to be quantized." + }, + { + "name": "y_scale", + "type": "T1", + "description": "Scale for doing quantization to get 'y'. It can be a scalar, which means per-tensor/layer quantization, or a 1-D Tensor for per-axis quantization." + }, + { + "name": "y_zero_point", + "type": "T2", + "option": "optional", + "description": "Zero point for doing quantization to get 'y'. Shape must match y_scale. Default is uint8 with zero point of 0 if it's not specified." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "y", + "type": "T2", + "description": "N-D quantized output tensor. It has same shape as input 'x'." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain 'x' to float, float16, bfloat16 or int32 tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(float16)", + "tensor(bfloat16)", + "tensor(int32)" + ] + }, + { + "description": "Constrain 'y_zero_point' and 'y' to 8-bit integer/float tensor.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + } + ], + "examples": [ + { + "summary": "axis", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n [\n [[-162, 10], [-100, 232], [-20, -50]],\n [[-76, 0], [0, 252], [32, -44]],\n [[245, -485], [-960, -270], [-375, -470]],\n ],\n ],\n dtype=np.float32,\n)\ny_scale = np.array([2, 4, 5], dtype=np.float32)\ny_zero_point = np.array([84, 24, 196], dtype=np.uint8)\ny = (x / y_scale.reshape(1, 3, 1, 1) + y_zero_point.reshape(1, 3, 1, 1)).astype(\n np.uint8\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_axis\",\n)" + }, + { + "summary": "e4m3fn", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E4M3FN, [1], [0])\ny = make_tensor(\"y\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96])\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_e4m3fn\",\n)" + }, + { + "summary": "e5m2", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E5M2, [1], [0.0])\ny = make_tensor(\"y\", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96])\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_e5m2\",\n)" + }, + { + "summary": "int16", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n 0.0,\n -514.0,\n 3.0,\n -3.0,\n 2.9,\n -2.9,\n 3.1,\n -3.1,\n 65022.0,\n -66046.0,\n 65023.0,\n -66047.0,\n 65024.0,\n -66048.0,\n 70000.0,\n -70000.0,\n ]\n).astype(np.float32)\ny_scale = np.float32(2.0)\ny_zero_point = np.int16(256)\ny = np.array(\n [\n 256,\n -1,\n 258,\n 254,\n 
257,\n 255,\n 258,\n 254,\n 32767,\n -32767,\n 32767,\n -32768,\n 32767,\n -32768,\n 32767,\n -32768,\n ]\n).astype(np.int16)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_int16\",\n)" + }, + { + "summary": "int4", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\nx = np.array(\n [\n [0.0, 2.5, 4.8, 8.6],\n [-30, -20, 6, 9],\n [12, 15, 16, 40],\n ]\n).astype(np.float32)\n\ny_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)\ny_zero_point = make_tensor(\n \"zero_point\", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale)\n)\ny = make_tensor(\n \"y\", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7]\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_int4\",\n)" + }, + { + "summary": "quantizelinear", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0, 2, 3, 1000, -254, -1000]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = np.uint8(128)\ny = np.array([128, 129, 130, 255, 1, 0]).astype(np.uint8)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear\",\n)" + }, + { + "summary": "uint16", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n 0.0,\n -128.0,\n 3.0,\n -3.0,\n 2.9,\n -2.9,\n 3.1,\n -3.1,\n 65536.0,\n -65534.0,\n 70000.0,\n -70000.0,\n ]\n).astype(np.float32)\ny_scale = np.float32(2.0)\ny_zero_point = np.uint16(32767)\ny = np.array(\n [\n 32767,\n 32703,\n 32769,\n 32765,\n 32768,\n 32766,\n 32769,\n 32765,\n 65535,\n 0,\n 65535,\n 0,\n ]\n).astype(np.uint16)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_uint16\",\n)" + }, + { + "summary": "uint4", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\nx = np.array(\n [\n [0.0, 2.5, 4.8, 8.6],\n [-30, -20, 6, 9],\n [12, 15, 16, 40],\n ]\n).astype(np.float32)\n\ny_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)\ny_zero_point = make_tensor(\n \"zero_point\", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale)\n)\ny = make_tensor(\n \"y\", TensorProto.UINT4, x.shape, [1, 2, 3, 5, -1, -1, 3, 4, 4, 5, 5, 11]\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_uint4\",\n)" + } + ] + }, + { + "name": "QuantizeLinear", + "module": "ai.onnx", + "version": 21, + "description": "The linear quantization operator. It consumes a high-precision tensor, a scale, and a zero point to compute the low-precision / quantized tensor.\nThe scale factor and zero point must have the same shape, and can be either a scalar for per-tensor / per-layer quantization, or a 1-D tensor for per-axis quantization.\nThe quantization formula is `y = saturate ((x / y_scale) + y_zero_point)`.\nFor saturation, it saturates according to:\nuint8: [0, 255], int8: [-128, 127], uint16: [0, 65535], int16: [-32768, 32767], uint4: [0, 15], int4: [-8, 7]\nFor (x / y_scale), it rounds to the nearest even. 
Refer to https://en.wikipedia.org/wiki/Rounding for details.\n'y_zero_point' and 'y' must have the same type.\n'y_zero_point' is usually not used for quantization to float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz,\nbut the quantization formula remains the same for consistency and\nthe type of the input 'y_zero_point' still determines the quantization type.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) The axis of the quantization dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + }, + { + "name": "saturate", + "type": "int64", + "required": false, + "default": 1, + "description": "The parameter defines how the conversion behaves if an input value is out of range of the destination type. It only applies for float 8 quantization (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. All cases are fully described in two tables inserted in the operator description." + } + ], + "inputs": [ + { + "name": "x", + "type": "T1", + "description": "N-D full-precision input tensor to be quantized." + }, + { + "name": "y_scale", + "type": "T1", + "description": "Scale for doing quantization to get 'y'. It can be a scalar, which means per-tensor/layer quantization, or a 1-D Tensor for per-axis quantization." + }, + { + "name": "y_zero_point", + "type": "T2", + "option": "optional", + "description": "Zero point for doing quantization to get 'y'. Shape must match y_scale. Default is uint8 with zero point of 0 if it's not specified." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "y", + "type": "T2", + "description": "N-D quantized output tensor. It has the same shape as input 'x'." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "The type of the input 'x'.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(float16)", + "tensor(bfloat16)", + "tensor(int32)" + ] + }, + { + "description": "The type of the input 'y_zero_point' and the output 'y'.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)", + "tensor(int16)", + "tensor(uint16)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + } + ], + "examples": [ + { + "summary": "axis", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n [\n [[-162, 10], [-100, 232], [-20, -50]],\n [[-76, 0], [0, 252], [32, -44]],\n [[245, -485], [-960, -270], [-375, -470]],\n ],\n ],\n dtype=np.float32,\n)\ny_scale = np.array([2, 4, 5], dtype=np.float32)\ny_zero_point = np.array([84, 24, 196], dtype=np.uint8)\ny = (x / y_scale.reshape(1, 3, 1, 1) + y_zero_point.reshape(1, 3, 1, 1)).astype(\n np.uint8\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_axis\",\n)" + }, + { + "summary": "e4m3fn", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E4M3FN, [1], [0])\ny = make_tensor(\"y\", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96])\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_e4m3fn\",\n)" + }, + { + "summary": "e5m2", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = make_tensor(\"zero_point\", TensorProto.FLOAT8E5M2, [1], [0.0])\ny = make_tensor(\"y\", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96])\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_e5m2\",\n)" + }, + { + "summary": "int16", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n 0.0,\n -514.0,\n 3.0,\n -3.0,\n 2.9,\n -2.9,\n 3.1,\n -3.1,\n 65022.0,\n -66046.0,\n 65023.0,\n -66047.0,\n 65024.0,\n -66048.0,\n 70000.0,\n -70000.0,\n ]\n).astype(np.float32)\ny_scale = np.float32(2.0)\ny_zero_point = np.int16(256)\ny = np.array(\n [\n 256,\n -1,\n 258,\n 254,\n 257,\n 255,\n 258,\n 254,\n 32767,\n -32767,\n 32767,\n -32768,\n 32767,\n -32768,\n 32767,\n -32768,\n ]\n).astype(np.int16)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_int16\",\n)" + }, + { + "summary": "int4", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\nx = np.array(\n [\n [0.0, 2.5, 4.8, 8.6],\n [-30, -20, 6, 9],\n [12, 15, 16, 40],\n ]\n).astype(np.float32)\n\ny_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)\ny_zero_point = make_tensor(\n \"zero_point\", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale)\n)\ny = 
make_tensor(\n \"y\", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7]\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_int4\",\n)" + }, + { + "summary": "quantizelinear", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array([0, 2, 3, 1000, -254, -1000]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = np.uint8(128)\ny = np.array([128, 129, 130, 255, 1, 0]).astype(np.uint8)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear\",\n)" + }, + { + "summary": "uint16", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n 0.0,\n -128.0,\n 3.0,\n -3.0,\n 2.9,\n -2.9,\n 3.1,\n -3.1,\n 65536.0,\n -65534.0,\n 70000.0,\n -70000.0,\n ]\n).astype(np.float32)\ny_scale = np.float32(2.0)\ny_zero_point = np.uint16(32767)\ny = np.array(\n [\n 32767,\n 32703,\n 32769,\n 32765,\n 32768,\n 32766,\n 32769,\n 32765,\n 65535,\n 0,\n 65535,\n 0,\n ]\n).astype(np.uint16)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_uint16\",\n)" + }, + { + "summary": "uint4", + "code": "node = onnx.helper.make_node(\n \"QuantizeLinear\",\n inputs=[\"x\", \"y_scale\", \"y_zero_point\"],\n outputs=[\"y\"],\n axis=0,\n)\n\nx = np.array(\n [\n [0.0, 2.5, 4.8, 8.6],\n [-30, -20, 6, 9],\n [12, 15, 16, 40],\n ]\n).astype(np.float32)\n\ny_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)\ny_zero_point = make_tensor(\n \"zero_point\", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale)\n)\ny = make_tensor(\n \"y\", TensorProto.UINT4, x.shape, [1, 2, 3, 5, -1, -1, 3, 4, 4, 5, 5, 11]\n)\n\nexpect(\n node,\n inputs=[x, y_scale, y_zero_point],\n outputs=[y],\n name=\"test_quantizelinear_uint4\",\n)" + } + ] + }, + { + "name": "RNN", + "module": "ai.onnx", + "version": 1, + "description": "Computes a one-layer simple RNN. This operator is usually supported\nvia some custom implementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`i` - input gate\n\n`t` - time step (t-1 means previous time step)\n\n`Wi` - W parameter weight matrix for input gate\n\n`Ri` - R recurrence weight matrix for input gate\n\n`Wbi` - W parameter bias vector for input gate\n\n`Rbi` - R parameter bias vector for input gate\n\n`WBi` - W parameter weight matrix for backward input gate\n\n`RBi` - R recurrence weight matrix for backward input gate\n\n`WBbi` - WR bias vectors for backward input gate\n\n`RBbi` - RR bias vectors for backward input gate\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Tanh):\n\n - Ht = f(Xt*(Wi^T) + Ht-1*Ri + Wbi + Rbi)\n", + "attributes": [ + { + "name": "activation_alpha", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. 
The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as those of the corresponding ONNX operators. For example, with LeakyRelu the default alpha is 0.01." + }, + { + "name": "activation_beta", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as those of the corresponding ONNX operators." + }, + { + "name": "activations", + "type": "string[]", + "required": false, + "description": "One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default `Tanh` if not specified." + }, + { + "name": "clip", + "type": "float32", + "required": false, + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified." + }, + { + "name": "direction", + "type": "string", + "required": false, + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional." + }, + { + "name": "hidden_size", + "type": "int64", + "required": false, + "description": "Number of neurons in the hidden layer" + }, + { + "name": "output_sequence", + "type": "int64", + "required": false, + "description": "The sequence output for the hidden is optional if 0. Default 0." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor for input gate. Concatenation of `Wi` and `WBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, input_size]`." + }, + { + "name": "R", + "type": "T", + "description": "The recurrence weight tensor. Concatenation of `Ri` and `RBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, hidden_size]`." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "The bias tensor for input gate. Concatenation of `[Wbi, Rbi]` and `[WBbi, RBbi]` (if bidirectional). The tensor has shape `[num_directions, 2*hidden_size]`. Optional: If not specified - assumed to be 0." + }, + { + "name": "sequence_lens", + "type": "T1", + "option": "optional", + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`." + }, + { + "name": "initial_h", + "type": "T", + "option": "optional", + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + } + ], + "min_input": 3, + "max_input": 6, + "outputs": [ + { + "name": "Y", + "type": "T", + "option": "optional", + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. It is optional if `output_sequence` is 0." + }, + { + "name": "Y_h", + "type": "T", + "option": "optional", + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`." 
+ } + ], + "min_output": 0, + "max_output": 2, + "inputs_range": "3 - 6", + "outputs_range": "0 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "batchwise", + "code": "input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 4\nweight_scale = 0.5\nlayout = 1\n\nnode = onnx.helper.make_node(\n \"RNN\",\n inputs=[\"X\", \"W\", \"R\"],\n outputs=[\"Y\", \"Y_h\"],\n hidden_size=hidden_size,\n layout=layout,\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\nrnn = RNNHelper(X=input, W=W, R=R, layout=layout)\nY, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],\n name=\"test_simple_rnn_batchwise\",\n)" + }, + { + "summary": "defaults", + "code": "input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 4\nweight_scale = 0.1\n\nnode = onnx.helper.make_node(\n \"RNN\", inputs=[\"X\", \"W\", \"R\"], outputs=[\"\", \"Y_h\"], hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\nrnn = RNNHelper(X=input, W=W, R=R)\n_, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_simple_rnn_defaults\",\n)" + }, + { + "summary": "initial_bias", + "code": "input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(\n np.float32\n)\n\ninput_size = 3\nhidden_size = 5\ncustom_bias = 0.1\nweight_scale = 0.1\n\nnode = onnx.helper.make_node(\n \"RNN\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, hidden_size)).astype(np.float32)\nR_B = np.zeros((1, hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\nrnn = RNNHelper(X=input, W=W, R=R, B=B)\n_, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_simple_rnn_with_initial_bias\",\n)" + }, + { + "summary": "seq_length", + "code": "input = np.array(\n [\n [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],\n [[10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],\n ]\n).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\n\nnode = onnx.helper.make_node(\n \"RNN\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = np.random.randn(1, hidden_size, input_size).astype(np.float32)\nR = np.random.randn(1, hidden_size, hidden_size).astype(np.float32)\n\n# Adding custom bias\nW_B = np.random.randn(1, hidden_size).astype(np.float32)\nR_B = np.random.randn(1, hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\nrnn = RNNHelper(X=input, W=W, R=R, B=B)\n_, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n 
outputs=[Y_h.astype(np.float32)],\n name=\"test_rnn_seq_length\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "RNN", + "module": "ai.onnx", + "version": 7, + "description": "Computes a one-layer simple RNN. This operator is usually supported\nvia some custom implementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`i` - input gate\n\n`t` - time step (t-1 means previous time step)\n\n`Wi` - W parameter weight matrix for input gate\n\n`Ri` - R recurrence weight matrix for input gate\n\n`Wbi` - W parameter bias vector for input gate\n\n`Rbi` - R parameter bias vector for input gate\n\n`WBi` - W parameter weight matrix for backward input gate\n\n`RBi` - R recurrence weight matrix for backward input gate\n\n`WBbi` - WR bias vectors for backward input gate\n\n`RBbi` - RR bias vectors for backward input gate\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Tanh):\n\n - Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "activation_alpha", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as those of the corresponding ONNX operators. For example, with LeakyRelu the default alpha is 0.01." + }, + { + "name": "activation_beta", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as those of the corresponding ONNX operators." + }, + { + "name": "activations", + "type": "string[]", + "required": false, + "description": "One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default `Tanh` if not specified." + }, + { + "name": "clip", + "type": "float32", + "required": false, + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified." + }, + { + "name": "direction", + "type": "string", + "required": false, + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional." 
+ }, + { + "name": "hidden_size", + "type": "int64", + "required": false, + "description": "Number of neurons in the hidden layer" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor for input gate. Concatenation of `Wi` and `WBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, input_size]`." + }, + { + "name": "R", + "type": "T", + "description": "The recurrence weight tensor. Concatenation of `Ri` and `RBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, hidden_size]`." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "The bias tensor for input gate. Concatenation of `[Wbi, Rbi]` and `[WBbi, RBbi]` (if bidirectional). The tensor has shape `[num_directions, 2*hidden_size]`. Optional: If not specified - assumed to be 0." + }, + { + "name": "sequence_lens", + "type": "T1", + "option": "optional", + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`." + }, + { + "name": "initial_h", + "type": "T", + "option": "optional", + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + } + ], + "min_input": 3, + "max_input": 6, + "outputs": [ + { + "name": "Y", + "type": "T", + "option": "optional", + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. " + }, + { + "name": "Y_h", + "type": "T", + "option": "optional", + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`." 
+ } + ], + "min_output": 0, + "max_output": 2, + "inputs_range": "3 - 6", + "outputs_range": "0 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "batchwise", + "code": "input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 4\nweight_scale = 0.5\nlayout = 1\n\nnode = onnx.helper.make_node(\n \"RNN\",\n inputs=[\"X\", \"W\", \"R\"],\n outputs=[\"Y\", \"Y_h\"],\n hidden_size=hidden_size,\n layout=layout,\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\nrnn = RNNHelper(X=input, W=W, R=R, layout=layout)\nY, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],\n name=\"test_simple_rnn_batchwise\",\n)" + }, + { + "summary": "defaults", + "code": "input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 4\nweight_scale = 0.1\n\nnode = onnx.helper.make_node(\n \"RNN\", inputs=[\"X\", \"W\", \"R\"], outputs=[\"\", \"Y_h\"], hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\nrnn = RNNHelper(X=input, W=W, R=R)\n_, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_simple_rnn_defaults\",\n)" + }, + { + "summary": "initial_bias", + "code": "input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(\n np.float32\n)\n\ninput_size = 3\nhidden_size = 5\ncustom_bias = 0.1\nweight_scale = 0.1\n\nnode = onnx.helper.make_node(\n \"RNN\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, hidden_size)).astype(np.float32)\nR_B = np.zeros((1, hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\nrnn = RNNHelper(X=input, W=W, R=R, B=B)\n_, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_simple_rnn_with_initial_bias\",\n)" + }, + { + "summary": "seq_length", + "code": "input = np.array(\n [\n [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],\n [[10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],\n ]\n).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\n\nnode = onnx.helper.make_node(\n \"RNN\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = np.random.randn(1, hidden_size, input_size).astype(np.float32)\nR = np.random.randn(1, hidden_size, hidden_size).astype(np.float32)\n\n# Adding custom bias\nW_B = np.random.randn(1, hidden_size).astype(np.float32)\nR_B = np.random.randn(1, hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\nrnn = RNNHelper(X=input, W=W, R=R, B=B)\n_, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n 
outputs=[Y_h.astype(np.float32)],\n name=\"test_rnn_seq_length\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "RNN", + "module": "ai.onnx", + "version": 14, + "description": "Computes a one-layer simple RNN. This operator is usually supported\nvia some custom implementation such as CuDNN.\n\nNotations:\n\n* `X` - input tensor\n* `i` - input gate\n* `t` - time step (t-1 means previous time step)\n* `Wi` - W parameter weight matrix for input gate\n* `Ri` - R recurrence weight matrix for input gate\n* `Wbi` - W parameter bias vector for input gate\n* `Rbi` - R parameter bias vector for input gate\n* `WBi` - W parameter weight matrix for backward input gate\n* `RBi` - R recurrence weight matrix for backward input gate\n* `WBbi` - WR bias vectors for backward input gate\n* `RBbi` - RR bias vectors for backward input gate\n* `H` - Hidden state\n* `num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n* Relu(x) - max(0, x)\n* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n* Sigmoid(x) - 1/(1 + e^{-x})\n\nNOTE: Below are optional\n\n* Affine(x) - alpha*x + beta\n* LeakyRelu(x) - x if x >= 0 else alpha * x\n* ThresholdedRelu(x) - x if x >= alpha else 0\n* ScaledTanh(x) - alpha*Tanh(beta*x)\n* HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n* Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n* Softsign(x) - x/(1 + |x|)\n* Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Tanh):\n\n* Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "attributes": [ + { + "name": "activation_alpha", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as those of the corresponding ONNX operators. For example, with LeakyRelu the default alpha is 0.01." + }, + { + "name": "activation_beta", + "type": "float32[]", + "required": false, + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as those of the corresponding ONNX operators." + }, + { + "name": "activations", + "type": "string[]", + "required": false, + "description": "One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default `Tanh` if not specified." + }, + { + "name": "clip", + "type": "float32", + "required": false, + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified." + }, + { + "name": "direction", + "type": "string", + "required": false, + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional." 
+ }, + { + "name": "hidden_size", + "type": "int64", + "required": false, + "description": "Number of neurons in the hidden layer" + }, + { + "name": "layout", + "type": "int64", + "required": false, + "description": "The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`." + }, + { + "name": "W", + "type": "T", + "description": "The weight tensor for input gate. Concatenation of `Wi` and `WBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, input_size]`." + }, + { + "name": "R", + "type": "T", + "description": "The recurrence weight tensor. Concatenation of `Ri` and `RBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, hidden_size]`." + }, + { + "name": "B", + "type": "T", + "option": "optional", + "description": "The bias tensor for input gate. Concatenation of `[Wbi, Rbi]` and `[WBbi, RBbi]` (if bidirectional). The tensor has shape `[num_directions, 2*hidden_size]`. Optional: If not specified - assumed to be 0." + }, + { + "name": "sequence_lens", + "type": "T1", + "option": "optional", + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`." + }, + { + "name": "initial_h", + "type": "T", + "option": "optional", + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`." + } + ], + "min_input": 3, + "max_input": 6, + "outputs": [ + { + "name": "Y", + "type": "T", + "option": "optional", + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. " + }, + { + "name": "Y_h", + "type": "T", + "option": "optional", + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`." 
+ } + ], + "min_output": 0, + "max_output": 2, + "inputs_range": "3 - 6", + "outputs_range": "0 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int32)" + ] + } + ], + "examples": [ + { + "summary": "batchwise", + "code": "input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 4\nweight_scale = 0.5\nlayout = 1\n\nnode = onnx.helper.make_node(\n \"RNN\",\n inputs=[\"X\", \"W\", \"R\"],\n outputs=[\"Y\", \"Y_h\"],\n hidden_size=hidden_size,\n layout=layout,\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\nrnn = RNNHelper(X=input, W=W, R=R, layout=layout)\nY, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],\n name=\"test_simple_rnn_batchwise\",\n)" + }, + { + "summary": "defaults", + "code": "input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 4\nweight_scale = 0.1\n\nnode = onnx.helper.make_node(\n \"RNN\", inputs=[\"X\", \"W\", \"R\"], outputs=[\"\", \"Y_h\"], hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\nrnn = RNNHelper(X=input, W=W, R=R)\n_, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_simple_rnn_defaults\",\n)" + }, + { + "summary": "initial_bias", + "code": "input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(\n np.float32\n)\n\ninput_size = 3\nhidden_size = 5\ncustom_bias = 0.1\nweight_scale = 0.1\n\nnode = onnx.helper.make_node(\n \"RNN\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, hidden_size)).astype(np.float32)\nR_B = np.zeros((1, hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\nrnn = RNNHelper(X=input, W=W, R=R, B=B)\n_, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n outputs=[Y_h.astype(np.float32)],\n name=\"test_simple_rnn_with_initial_bias\",\n)" + }, + { + "summary": "seq_length", + "code": "input = np.array(\n [\n [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],\n [[10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],\n ]\n).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\n\nnode = onnx.helper.make_node(\n \"RNN\",\n inputs=[\"X\", \"W\", \"R\", \"B\"],\n outputs=[\"\", \"Y_h\"],\n hidden_size=hidden_size,\n)\n\nW = np.random.randn(1, hidden_size, input_size).astype(np.float32)\nR = np.random.randn(1, hidden_size, hidden_size).astype(np.float32)\n\n# Adding custom bias\nW_B = np.random.randn(1, hidden_size).astype(np.float32)\nR_B = np.random.randn(1, hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\nrnn = RNNHelper(X=input, W=W, R=R, B=B)\n_, Y_h = rnn.step()\nexpect(\n node,\n inputs=[input, W, R, B],\n 
outputs=[Y_h.astype(np.float32)],\n name=\"test_rnn_seq_length\",\n)" + } + ], + "category": "Layer" + }, + { + "name": "RandomNormal", + "module": "ai.onnx", + "version": 1, + "description": "Generate a tensor with random values drawn from a normal distribution. The shape\nof the tensor is specified by the `shape` argument and the parameters of the normal distribution\nare specified by `mean` and `scale`.\n\nThe data type is specified by the 'dtype' argument. The 'dtype' argument must\nbe one of the data types specified in the 'DataType' enum field in the\nTensorProto message.\n", + "attributes": [ + { + "name": "dtype", + "type": "DataType", + "required": false, + "default": 1, + "description": "The data type for the elements of the output tensor. Default is TensorProto::FLOAT." + }, + { + "name": "mean", + "type": "float32", + "required": false, + "description": "The mean of the normal distribution." + }, + { + "name": "scale", + "type": "float32", + "required": false, + "default": 1.0, + "description": "The standard deviation of the normal distribution." + }, + { + "name": "seed", + "type": "float32", + "required": false, + "description": "(Optional) Seed for the random generator; if not specified, one is generated automatically." + }, + { + "name": "shape", + "type": "int64[]", + "required": true, + "description": "The shape of the output tensor." + } + ], + "min_input": 0, + "max_input": 0, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor of random values drawn from normal distribution" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ] + }, + { + "name": "RandomNormalLike", + "module": "ai.onnx", + "version": 1, + "description": "Generate a tensor with random values drawn from a normal distribution.\nThe shape of the output tensor is copied from the shape of the input tensor,\nand the parameters of the normal distribution are specified by `mean` and `scale`.\n\nThe data type is specified by the 'dtype' argument, or copied from the input tensor if not provided.\nThe 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the\nTensorProto message, and be valid as an output type.\n", + "attributes": [ + { + "name": "dtype", + "type": "int64", + "required": false, + "description": "(Optional) The data type for the elements of the output tensor, if not specified, we will use the data type of the input tensor." + }, + { + "name": "mean", + "type": "float32", + "required": false, + "description": "The mean of the normal distribution." + }, + { + "name": "scale", + "type": "float32", + "required": false, + "default": 1.0, + "description": "The standard deviation of the normal distribution." + }, + { + "name": "seed", + "type": "float32", + "required": false, + "description": "(Optional) Seed for the random generator; if not specified, one is generated automatically." + } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor to copy shape and optionally type information from." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor of random values drawn from normal distribution" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain to any tensor type. 
If the dtype attribute is not provided, this must be a valid output type.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain output types to float tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ] + }, + { + "name": "RandomUniform", + "module": "ai.onnx", + "version": 1, + "description": "Generate a tensor with random values drawn from a uniform distribution. The shape\nof the tensor is specified by the `shape` argument and the range by `low` and `high`.\n\nThe data type is specified by the 'dtype' argument. The 'dtype' argument must\nbe one of the data types specified in the 'DataType' enum field in the\nTensorProto message.\n", + "attributes": [ + { + "name": "dtype", + "type": "int64", + "required": false, + "default": 1, + "description": "The data type for the elements of the output tensor. If not specified, default is TensorProto::FLOAT." + }, + { + "name": "high", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Upper boundary of the output values." + }, + { + "name": "low", + "type": "float32", + "required": false, + "description": "Lower boundary of the output values." + }, + { + "name": "seed", + "type": "float32", + "required": false, + "description": "(Optional) Seed for the random generator; if not specified, one is generated automatically." + }, + { + "name": "shape", + "type": "int64[]", + "required": true, + "description": "The shape of the output tensor." + } + ], + "min_input": 0, + "max_input": 0, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor of random values drawn from uniform distribution" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ] + }, + { + "name": "RandomUniformLike", + "module": "ai.onnx", + "version": 1, + "description": "Generate a tensor with random values drawn from a uniform distribution.\nThe shape of the output tensor is copied from the shape of the input tensor,\nand the parameters of the uniform distribution are specified by `low` and `high`.\n\nThe data type is specified by the 'dtype' argument, or copied from the input tensor if not provided.\nThe 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the\nTensorProto message and be valid as an output type.\n", + "attributes": [ + { + "name": "dtype", + "type": "int64", + "required": false, + "description": "(Optional) The data type for the elements of the output tensor, if not specified, we will use the data type of the input tensor." + }, + { + "name": "high", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Upper boundary of the output values." + }, + { + "name": "low", + "type": "float32", + "required": false, + "description": "Lower boundary of the output values." + }, + { + "name": "seed", + "type": "float32", + "required": false, + "description": "(Optional) Seed for the random generator; if not specified, one is generated automatically." 
+ } + ], + "inputs": [ + { + "name": "input", + "type": "T1", + "description": "Input tensor to copy shape and optionally type information from." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T2", + "description": "Output tensor of random values drawn from uniform distribution" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain output types to float tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ] + }, + { + "name": "Range", + "module": "ai.onnx", + "version": 11, + "description": "Generate a tensor containing a sequence of numbers that begin at `start` and extends by increments of `delta`\nup to `limit` (exclusive).\n\nThe number of elements in the output of range is computed as below:\n\n```\nnumber_of_elements = max( ceil( (limit - start) / delta ) , 0 )\n```\n\nThe pseudocode determining the contents of the output is shown below:\n\n```\nfor(int i=0; i) and produces one output data\n(Tensor) where the reciprocal is, y = 1/x, is applied to\nthe tensor elementwise.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "reciprocal", + "code": "node = onnx.helper.make_node(\n \"Reciprocal\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-4, 2]).astype(np.float32)\ny = np.reciprocal(x) # expected output [-0.25, 0.5],\nexpect(node, inputs=[x], outputs=[y], name=\"test_reciprocal_example\")\n\nx = np.random.rand(3, 4, 5).astype(np.float32) + 0.5\ny = np.reciprocal(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_reciprocal\")" + } + ] + }, + { + "name": "Reciprocal", + "module": "ai.onnx", + "version": 6, + "description": "Reciprocal takes one input data (Tensor) and produces one output data\n(Tensor) where the reciprocal is, y = 1/x, is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "reciprocal", + "code": "node = onnx.helper.make_node(\n \"Reciprocal\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-4, 2]).astype(np.float32)\ny = np.reciprocal(x) # expected output [-0.25, 0.5],\nexpect(node, inputs=[x], outputs=[y], name=\"test_reciprocal_example\")\n\nx = np.random.rand(3, 4, 5).astype(np.float32) + 0.5\ny = np.reciprocal(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_reciprocal\")" + } + ] + }, + { + "name": "Reciprocal", + "module": "ai.onnx", + "version": 13, + "description": "Reciprocal takes one input data (Tensor) and produces one output data\n(Tensor) where the reciprocal is, y = 1/x, is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "reciprocal", + "code": "node = onnx.helper.make_node(\n \"Reciprocal\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-4, 2]).astype(np.float32)\ny = np.reciprocal(x) # expected output [-0.25, 0.5],\nexpect(node, inputs=[x], outputs=[y], name=\"test_reciprocal_example\")\n\nx = np.random.rand(3, 4, 5).astype(np.float32) + 0.5\ny = np.reciprocal(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_reciprocal\")" + } + ] + }, + { + "name": "ReduceL1", + "module": "ai.onnx", + "version": 1, + "description": "Computes the L1 norm of the input tensor's element along the provided axes. 
The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 0.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[78.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[3., 7.], [11., 15.], [19., 23.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], 
dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_keep_dims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_negative_axes_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_negative_axes_keep_dims_random\",\n)" + } + ] + }, + { + "name": "ReduceL1", + "module": "ai.onnx", + "version": 11, + "description": "Computes the L1 norm of the input tensor's element along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[78.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[3., 7.], [11., 15.], [19., 23.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 
1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_keep_dims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_negative_axes_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_negative_axes_keep_dims_random\",\n)" + } + ] + }, + { + "name": "ReduceL1", + "module": "ai.onnx", + "version": 13, + "description": "Computes the L1 norm of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 0.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[78.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[3., 7.], [11., 15.], [19., 23.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), 
keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_keep_dims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_negative_axes_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_negative_axes_keep_dims_random\",\n)" + } + ] + }, + { + "name": "ReduceL1", + "module": "ai.onnx", + "version": 18, + "description": "Computes the L1 norm of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 0.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[78.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[3., 7.], [11., 15.], [19., 23.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), 
axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_keep_dims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL1\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_negative_axes_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l1_negative_axes_keep_dims_random\",\n)" + } + ] + }, + { + "name": "ReduceL2", + "module": "ai.onnx", + "version": 1, + "description": "Computes the L2 norm of the input tensor's element along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 0.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(a=np.square(data), axis=None, keepdims=keepdims == 1))\n# print(reduced)\n# [[[25.49509757]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(a=np.square(data), axis=None, keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[2.23606798, 5.],\n# [7.81024968, 10.63014581],\n# [13.45362405, 16.2788206]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_l2_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_keep_dims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_negative_axes_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_negative_axes_keep_dims_random\",\n)" + } + ] + }, + { + "name": "ReduceL2", + "module": "ai.onnx", + "version": 11, + "description": "Computes the L2 norm of the input tensor's element along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(a=np.square(data), axis=None, keepdims=keepdims == 1))\n# print(reduced)\n# [[[25.49509757]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(a=np.square(data), axis=None, keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[2.23606798, 5.],\n# [7.81024968, 10.63014581],\n# [13.45362405, 16.2788206]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_l2_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_keep_dims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_negative_axes_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_negative_axes_keep_dims_random\",\n)" + } + ] + }, + { + "name": "ReduceL2", + "module": "ai.onnx", + "version": 13, + "description": "Computes the L2 norm of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 0.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(a=np.square(data), axis=None, keepdims=keepdims == 1))\n# print(reduced)\n# [[[25.49509757]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(a=np.square(data), axis=None, keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[2.23606798, 5.],\n# [7.81024968, 10.63014581],\n# [13.45362405, 16.2788206]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_l2_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_keep_dims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_negative_axes_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_negative_axes_keep_dims_random\",\n)" + } + ] + }, + { + "name": "ReduceL2", + "module": "ai.onnx", + "version": 18, + "description": "Computes the L2 norm of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 0.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(a=np.square(data), axis=None, keepdims=keepdims == 1))\n# print(reduced)\n# [[[25.49509757]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(a=np.square(data), axis=None, keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[2.23606798, 5.],\n# [7.81024968, 10.63014581],\n# [13.45362405, 16.2788206]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(\n node,\n inputs=[data, axes],\n 
outputs=[reduced],\n name=\"test_reduce_l2_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_keep_dims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceL2\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n# print(reduced)\n# [[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_negative_axes_keep_dims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(\n np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_l2_negative_axes_keep_dims_random\",\n)" + } + ] + }, + { + "name": "ReduceLogSum", + "module": "ai.onnx", + "version": 1, + "description": "Computes the log sum of the input tensor's element along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = np.log(zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "node = onnx.helper.make_node(\n \"ReduceLogSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, keepdims=True))\naxes = np.array([], dtype=np.int64)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_default\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "axes = np.array([-2], dtype=np.int64)\nnode = onnx.helper.make_node(\n \"ReduceLogSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=True))\n# print(reduced)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_negative_axes\",\n)" + }, + { + "summary": "nokeepdims", + "code": "shape = [3, 4, 5]\naxes = np.array([2, 1], dtype=np.int64)\n\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=0,\n)\ndata = np.random.ranf(shape).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=False))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_desc_axes\",\n)\n\naxes = np.array([0, 1], dtype=np.int64)\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=0,\n)\ndata = np.random.ranf(shape).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=False))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_asc_axes\",\n)" + } + ] + }, + { + "name": "ReduceLogSum", + "module": "ai.onnx", + "version": 11, + "description": "Computes the log sum of the input tensor's element along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." 
+ } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = np.log(zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "node = onnx.helper.make_node(\n \"ReduceLogSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, keepdims=True))\naxes = np.array([], dtype=np.int64)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_default\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "axes = np.array([-2], dtype=np.int64)\nnode = onnx.helper.make_node(\n \"ReduceLogSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=True))\n# print(reduced)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_negative_axes\",\n)" + }, + { + "summary": "nokeepdims", + "code": "shape = [3, 4, 5]\naxes = np.array([2, 1], dtype=np.int64)\n\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=0,\n)\ndata = np.random.ranf(shape).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=False))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_desc_axes\",\n)\n\naxes = np.array([0, 1], dtype=np.int64)\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=0,\n)\ndata = np.random.ranf(shape).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=False))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_asc_axes\",\n)" + } + ] + }, + { + "name": "ReduceLogSum", + "module": "ai.onnx", + "version": 13, + "description": "Computes the log sum of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. 
Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = np.log(zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "node = onnx.helper.make_node(\n \"ReduceLogSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, keepdims=True))\naxes = np.array([], dtype=np.int64)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_default\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "axes = np.array([-2], dtype=np.int64)\nnode = onnx.helper.make_node(\n \"ReduceLogSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=True))\n# print(reduced)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_negative_axes\",\n)" + }, + { + "summary": "nokeepdims", + "code": "shape = [3, 4, 5]\naxes = np.array([2, 1], dtype=np.int64)\n\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=0,\n)\ndata = np.random.ranf(shape).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=False))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_desc_axes\",\n)\n\naxes = np.array([0, 1], dtype=np.int64)\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=0,\n)\ndata = np.random.ranf(shape).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=False))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_asc_axes\",\n)" + } + ] + }, + { + "name": 
"ReduceLogSum", + "module": "ai.onnx", + "version": 18, + "description": "Computes the log sum of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = np.log(zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "node = onnx.helper.make_node(\n \"ReduceLogSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, keepdims=True))\naxes = np.array([], dtype=np.int64)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_default\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "axes = np.array([-2], dtype=np.int64)\nnode = onnx.helper.make_node(\n \"ReduceLogSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=True))\n# print(reduced)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_negative_axes\",\n)" + }, + { + "summary": "nokeepdims", + 
"code": "shape = [3, 4, 5]\naxes = np.array([2, 1], dtype=np.int64)\n\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=0,\n)\ndata = np.random.ranf(shape).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=False))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_desc_axes\",\n)\n\naxes = np.array([0, 1], dtype=np.int64)\nnode = onnx.helper.make_node(\n \"ReduceLogSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=0,\n)\ndata = np.random.ranf(shape).astype(np.float32)\nreduced = np.log(np.sum(data, axis=tuple(axes), keepdims=False))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_asc_axes\",\n)" + } + ] + }, + { + "name": "ReduceLogSumExp", + "module": "ai.onnx", + "version": 1, + "description": "Computes the log sum exponent of the input tensor's element along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=None, keepdims=keepdims == 1))\n# print(reduced)\n# [[[60.00671387]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(np.sum(np.exp(data), axis=None, keepdims=keepdims == 1))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[20., 2.31326175]\n# [40.00004578, 2.31326175]\n# [60.00671387, 2.31326175]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = np.log(zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(np.sum(np.exp(data), 
axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(\n np.sum(np.exp(data), axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceLogSumExp", + "module": "ai.onnx", + "version": 11, + "description": "Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=None, keepdims=keepdims == 1))\n# print(reduced)\n# [[[60.00671387]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(np.sum(np.exp(data), axis=None, keepdims=keepdims == 1))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[20., 2.31326175]\n# [40.00004578, 2.31326175]\n# [60.00671387, 2.31326175]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = np.log(zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(np.sum(np.exp(data), 
axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(\n np.sum(np.exp(data), axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceLogSumExp", + "module": "ai.onnx", + "version": 13, + "description": "Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=None, keepdims=keepdims == 1))\n# print(reduced)\n# [[[60.00671387]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(np.sum(np.exp(data), axis=None, keepdims=keepdims == 1))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[20., 2.31326175]\n# [40.00004578, 2.31326175]\n# [60.00671387, 2.31326175]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = np.log(zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(np.sum(np.exp(data), 
axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(\n np.sum(np.exp(data), axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceLogSumExp", + "module": "ai.onnx", + "version": 18, + "description": "Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced, and the output tensor would be equivalent to the input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=None, keepdims=keepdims == 1))\n# print(reduced)\n# [[[60.00671387]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(np.sum(np.exp(data), axis=None, keepdims=keepdims == 1))\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[20., 2.31326175]\n# [40.00004578, 2.31326175]\n# [60.00671387, 2.31326175]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = np.log(zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = 
np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceLogSumExp\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double\n)\nreduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.double)\nreduced = np.log(\n np.sum(np.exp(data), axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_log_sum_exp_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceMax", + "module": "ai.onnx", + "version": 1, + "description": "Computes the max of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = -(one / zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 
\"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMax", + "module": "ai.onnx", + "version": 11, + "description": "Computes the max of the input tensor's element along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = -(one / zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 
\"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMax", + "module": "ai.onnx", + "version": 12, + "description": "Computes the max of the input tensor's element along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision and 8 bit numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint8)", + "tensor(int8)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = -(one / zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], 
dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMax", + "module": "ai.onnx", + "version": 13, + "description": "Computes the max of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)", + "tensor(uint8)", + "tensor(int8)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = -(one / zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], 
dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMax", + "module": "ai.onnx", + "version": 18, + "description": "Computes the max of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. 
Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)", + "tensor(uint8)", + "tensor(int8)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = -(one / 
zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMax", + "module": "ai.onnx", + "version": 20, + "description": "Computes the max of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise.\n\n\nIf the input data type is Boolean, the comparison should consider `False < True`.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." 
+ }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric and Boolean tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)", + "tensor(uint8)", + "tensor(int8)", + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = 
onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = -(one / zero) # -inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMean", + "module": "ai.onnx", + "version": 1, + "description": "Computes the mean of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields an undefined value.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." 
+ } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[18.25]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[12.5, 1.5]\n# [35., 1.5]\n# [57.5, 1.5]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_do_not_keepdims_random\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# 
print(reduced)\n# [[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceMean", + "module": "ai.onnx", + "version": 11, + "description": "Computes the mean of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[18.25]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[12.5, 1.5]\n# [35., 1.5]\n# [57.5, 1.5]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, 
axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_do_not_keepdims_random\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceMean", + "module": "ai.onnx", + "version": 13, + "description": "Computes the mean of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields an undefined value.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[18.25]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[12.5, 1.5]\n# [35., 1.5]\n# [57.5, 1.5]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_do_not_keepdims_random\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_mean_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceMean", + "module": "ai.onnx", + "version": 18, + "description": "Computes the mean of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields undefined.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[18.25]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[12.5, 1.5]\n# [35., 1.5]\n# [57.5, 1.5]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_do_not_keepdims_random\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_mean_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMean\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(\n node,\n inputs=[data, axes],\n 
outputs=[reduced],\n name=\"test_reduce_mean_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceMin", + "module": "ai.onnx", + "version": 1, + "description": "Computes the min of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields plus infinity (if supported by the datatype) or the maximum value of the data type otherwise.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = 
np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = one / zero # inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMin", + "module": 
"ai.onnx", + "version": 11, + "description": "Computes the min of the input tensor's element along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields plus infinity (if supported by the datatype) or the maximum value of the data type otherwise.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# 
print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = one / zero # inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMin", + "module": "ai.onnx", + "version": 12, + "description": "Computes the min of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. 
If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision and 8-bit numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint8)", + "tensor(int8)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = 
np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = one / zero # inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMin", + "module": "ai.onnx", + "version": 13, + "description": "Computes the min of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. 
Reduction over an empty set of values yields plus infinity (if supported by the datatype) or the maximum value of the data type otherwise.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)", + "tensor(uint8)", + "tensor(int8)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = 
np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = one / zero # inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMin", + "module": "ai.onnx", + "version": 18, + "description": "Computes the min of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. 
Reduction over an empty set of values yields plus infinity (if supported by the datatype) or the maximum value of the data type otherwise.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)", + "tensor(uint8)", + "tensor(int8)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n 
outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = one / zero # inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMin", + "module": "ai.onnx", + "version": 20, + "description": "Computes the min of the input tensor's elements along the provided axes. 
The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields plus infinity (if supported by the datatype) or the maximum value of the data type otherwise.\n\n\nIf the input data type is Boolean, the comparison should consider `False < True`.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric and Boolean tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)", + "tensor(uint8)", + "tensor(int8)", + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n 
name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\none = np.array(np.ones(reduced_shape, dtype=np.float32))\nzero = np.array(np.zeros(reduced_shape, dtype=np.float32))\nreduced = one / zero # inf\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, 
axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceProd", + "module": "ai.onnx", + "version": 1, + "description": "Computes the product of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 1.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceProd\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[4.790016e+08]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_prod_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_prod_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[3., 8.]\n# [35., 48.]\n# [99., 120.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n 
outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.ones(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceProd", + "module": "ai.onnx", + "version": 11, + "description": "Computes the product of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceProd\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[4.790016e+08]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_prod_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_prod_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[3., 8.]\n# [35., 48.]\n# [99., 120.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.ones(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 
\"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceProd", + "module": "ai.onnx", + "version": 13, + "description": "Computes the product of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 1.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceProd\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[4.790016e+08]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_prod_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_prod_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[3., 8.]\n# [35., 48.]\n# [99., 120.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.ones(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = 
onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceProd", + "module": "ai.onnx", + "version": 18, + "description": "Computes the product of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 1.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceProd\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[4.790016e+08]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_prod_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_prod_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[3., 8.]\n# [35., 48.]\n# [99., 120.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.ones(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 
1\n\nnode = onnx.helper.make_node(\n \"ReduceProd\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_prod_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceSum", + "module": "ai.onnx", + "version": 1, + "description": "Computes the sum of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 0.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[78.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n# print(reduced)\n# [[4., 6.]\n# [12., 14.]\n# [20., 22.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_axes_input_noop", + "code": "shape = [3, 2, 2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n noop_with_empty_axes=True,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\naxes = np.array([], dtype=np.int64)\nreduced = np.array(data)\n# print(reduced)\n# [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_empty_axes_input_noop_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.array(data)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_negative_axes_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "\"\"\"Test case with the reduced-axis of size zero.\"\"\"\nshape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = 
np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n# print(reduced)\n# [[[4., 6.]]\n# [[12., 14.]]\n# [[20., 22.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n# print(reduced)\n# [[[4., 6.]]\n# [[12., 14.]]\n# [[20., 22.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_negative_axes_keepdims_random\",\n)" + }, + { + "summary": "non_reduced_axis_zero", + "code": "\"\"\"Test case with the non-reduced-axis of size zero.\"\"\"\nshape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 0, 1]\n\nnode = onnx.helper.make_node(\n \"ReduceSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([2], dtype=np.int64)\nreduced = np.array([], dtype=np.float32).reshape(reduced_shape)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_empty_set_non_reduced_axis_zero\",\n)" + } + ] + }, + { + "name": "ReduceSum", + "module": "ai.onnx", + "version": 11, + "description": "Computes the sum of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[78.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n# print(reduced)\n# [[4., 6.]\n# [12., 14.]\n# [20., 22.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_axes_input_noop", + "code": "shape = [3, 2, 2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n noop_with_empty_axes=True,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\naxes = np.array([], dtype=np.int64)\nreduced = np.array(data)\n# print(reduced)\n# [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_empty_axes_input_noop_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.array(data)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_negative_axes_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "\"\"\"Test case with the reduced-axis of size zero.\"\"\"\nshape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = 
np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n# print(reduced)\n# [[[4., 6.]]\n# [[12., 14.]]\n# [[20., 22.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n# print(reduced)\n# [[[4., 6.]]\n# [[12., 14.]]\n# [[20., 22.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_negative_axes_keepdims_random\",\n)" + }, + { + "summary": "non_reduced_axis_zero", + "code": "\"\"\"Test case with the non-reduced-axis of size zero.\"\"\"\nshape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 0, 1]\n\nnode = onnx.helper.make_node(\n \"ReduceSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([2], dtype=np.int64)\nreduced = np.array([], dtype=np.float32).reshape(reduced_shape)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_empty_set_non_reduced_axis_zero\",\n)" + } + ] + }, + { + "name": "ReduceSum", + "module": "ai.onnx", + "version": 13, + "description": "Computes the sum of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 0.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." 
+ }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[78.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n# print(reduced)\n# [[4., 6.]\n# [12., 14.]\n# [20., 22.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_axes_input_noop", + "code": "shape = [3, 2, 2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n noop_with_empty_axes=True,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\naxes = np.array([], dtype=np.int64)\nreduced = np.array(data)\n# print(reduced)\n# [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_empty_axes_input_noop_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.array(data)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_negative_axes_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "\"\"\"Test case with the 
reduced-axis of size zero.\"\"\"\nshape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n# print(reduced)\n# [[[4., 6.]]\n# [[12., 14.]]\n# [[20., 22.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSum\", inputs=[\"data\", \"axes\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n# print(reduced)\n# [[[4., 6.]]\n# [[12., 14.]]\n# [[20., 22.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_negative_axes_keepdims_random\",\n)" + }, + { + "summary": "non_reduced_axis_zero", + "code": "\"\"\"Test case with the non-reduced-axis of size zero.\"\"\"\nshape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 0, 1]\n\nnode = onnx.helper.make_node(\n \"ReduceSum\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([2], dtype=np.int64)\nreduced = np.array([], dtype=np.float32).reshape(reduced_shape)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_empty_set_non_reduced_axis_zero\",\n)" + } + ] + }, + { + "name": "ReduceSumSquare", + "module": "ai.onnx", + "version": 1, + "description": "Computes the sum square of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 0.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. 
The default is to reduce over all the dimensions of the input tensor." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[650.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[10., 20.]\n# [74., 100.]\n# [202., 244.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), 
axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[10., 20.]]\n# [[74., 100.]]\n# [[202., 244.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[10., 20.]]\n# [[74., 100.]]\n# [[202., 244.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceSumSquare", + "module": "ai.onnx", + "version": 11, + "description": "Computes the sum square of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[650.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[10., 20.]\n# [74., 100.]\n# [202., 244.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[10., 20.]]\n# [[74., 100.]]\n# [[202., 244.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_sum_square_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[10., 20.s]]\n# [[74., 100.]]\n# [[202., 244.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceSumSquare", + "module": "ai.onnx", + "version": 13, + "description": "Computes the sum square of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 0.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[650.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[10., 20.]\n# [74., 100.]\n# [202., 244.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[10., 20.]]\n# [[74., 100.]]\n# [[202., 244.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_sum_square_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[10., 20.s]]\n# [[74., 100.]]\n# [[202., 244.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "ReduceSumSquare", + "module": "ai.onnx", + "version": 18, + "description": "Computes the sum square of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid. Reduction over an empty set of values yields 0.\n\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=None, keepdims=keepdims == 1)\n# print(reduced)\n# [[[650.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=None, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_default_axes_keepdims_random\",\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[10., 20.]\n# [74., 100.]\n# [202., 244.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_do_not_keepdims_random\",\n)" + }, + { + "summary": "empty_set", + "code": "shape = [2, 0, 4]\nkeepdims = 1\nreduced_shape = [2, 1, 4]\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array([], dtype=np.float32).reshape(shape)\naxes = np.array([1], dtype=np.int64)\nreduced = np.array(np.zeros(reduced_shape, dtype=np.float32))\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_empty_set\",\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[10., 20.]]\n# [[74., 100.]]\n# [[202., 244.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_sum_square_keepdims_random\",\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceSumSquare\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32\n)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[10., 20.s]]\n# [[74., 100.]]\n# [[202., 244.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_sum_square_negative_axes_keepdims_random\",\n)" + } + ] + }, + { + "name": "RegexFullMatch", + "module": "ai.onnx", + "version": 20, + "description": "RegexFullMatch performs a full regex match on each element of the input tensor. If an element fully matches the regex pattern specified as an attribute, the corresponding element in the output is True and it is False otherwise. [RE2](https://github.com/google/re2/wiki/Syntax) regex syntax is used.", + "attributes": [ + { + "name": "pattern", + "type": "string", + "required": false, + "description": "Regex pattern to match on. This must be valid RE2 syntax." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Tensor with strings to match on." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "Tensor of bools indicating if each input string fully matches the regex pattern specified." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Inputs must be UTF-8 strings", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(string)" + ] + }, + { + "description": "Outputs are bools and are True where there is a full regex match and False otherwise.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "basic", + "code": "node = onnx.helper.make_node(\n \"RegexFullMatch\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n pattern=r\"www\\.[\\w.-]+\\.\\bcom\\b\",\n)\n\nx = np.array([\"www.google.com\", \"www.facebook.com\", \"www.bbc.co.uk\"]).astype(\n object\n)\nresult = np.array([True, True, False])\nexpect(node, inputs=[x], outputs=[result], name=\"test_regex_full_match_basic\")" + }, + { + "summary": "match_email_domain", + "code": "node = onnx.helper.make_node(\n \"RegexFullMatch\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n pattern=r\"(\\W|^)[\\w.\\-]{0,25}@(yahoo|gmail)\\.com(\\W|$)\",\n)\n\nx = np.array(\n [\n [\"account@gmail.com\", \"account@hotmail.com\"],\n [\"not email\", \"account2@yahoo.com\"],\n ]\n).astype(object)\nresult = np.array([[True, False], [False, True]])\nexpect(\n node,\n inputs=[x],\n outputs=[result],\n name=\"test_regex_full_match_email_domain\",\n)" + }, + { + "summary": "match_empty", + "code": "node = onnx.helper.make_node(\n \"RegexFullMatch\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n pattern=r\"(\\W|^)[\\w.\\-]{0,25}@(yahoo|gmail)\\.com(\\W|$)\",\n)\n\nx = np.array([[], []]).astype(object)\nresult = np.array([[], []]).astype(bool)\nexpect(\n node,\n inputs=[x],\n outputs=[result],\n name=\"test_regex_full_match_empty\",\n)" + } + ] + }, + { + "name": "Relu", + "module": "ai.onnx", + "version": 1, + "description": "Relu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = max(0, x), is applied to\nthe tensor elementwise.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "relu", + "code": "node = onnx.helper.make_node(\n \"Relu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_relu\")" + } + ], + "category": "Activation" + }, + { + "name": "Relu", + "module": "ai.onnx", + "version": 6, + "description": "Relu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = max(0, x), is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "relu", + "code": "node = onnx.helper.make_node(\n \"Relu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_relu\")" + } + ], + "category": "Activation" + }, + { + "name": "Relu", + "module": "ai.onnx", + "version": 13, + "description": "Relu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = max(0, x), is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "relu", + "code": "node = onnx.helper.make_node(\n \"Relu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_relu\")" + } + ], + "category": "Activation" + }, + { + "name": "Relu", + "module": "ai.onnx", + "version": 14, + "description": "Relu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = max(0, x), is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to signed numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + 
"tensor(float)", + "tensor(int32)", + "tensor(int8)", + "tensor(int16)", + "tensor(int64)", + "tensor(float16)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "relu", + "code": "node = onnx.helper.make_node(\n \"Relu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_relu\")" + } + ], + "category": "Activation" + }, + { + "name": "Reshape", + "module": "ai.onnx", + "version": 1, + "description": "Reshape the input tensor similar to numpy.reshape.\nIt takes a tensor as input and an argument `shape`. It outputs the reshaped tensor.\nAt most one dimension of the new shape can be -1. In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is unchanged (i.e. taken\nfrom the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar.\nThe input tensor's shape and the output tensor's shape are required to have the same number of elements.", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + }, + { + "name": "shape", + "type": "int64[]", + "required": false, + "description": "New shape" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "reshaped", + "type": "T", + "description": "Reshaped data." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "allowzero", + "code": "original_shape = [0, 3, 4]\ntest_cases = {\n \"allowzero_reordered\": np.array([3, 4, 0], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n allowzero=1, # if allowzero=1, final shape = (3, 4, 0)\n # if allowzero=0, final shape = (3, 4, 4)\n )\n\n reshaped = reshape_reference_implementation(data, shape, allowzero=1)\n\n expect(\n node,\n inputs=[data, shape],\n outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + }, + { + "summary": "reshape", + "code": "original_shape = [2, 3, 4]\ntest_cases = {\n \"reordered_all_dims\": np.array([4, 2, 3], dtype=np.int64),\n \"reordered_last_dims\": np.array([2, 4, 3], dtype=np.int64),\n \"reduced_dims\": np.array([2, 12], dtype=np.int64),\n \"extended_dims\": np.array([2, 3, 2, 2], dtype=np.int64),\n \"one_dim\": np.array([24], dtype=np.int64),\n \"negative_dim\": np.array([2, -1, 2], dtype=np.int64),\n \"negative_extended_dims\": np.array([-1, 2, 3, 4], dtype=np.int64),\n \"zero_dim\": np.array([2, 0, 4, 1], dtype=np.int64),\n \"zero_and_negative_dim\": np.array([2, 0, 1, -1], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n )\n\n reshaped = reshape_reference_implementation(data, shape)\n\n expect(\n node,\n inputs=[data, shape],\n 
outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + } + ], + "category": "Shape" + }, + { + "name": "Reshape", + "module": "ai.onnx", + "version": 5, + "description": "Reshape the input tensor similar to numpy.reshape.\nFirst input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.\nAt most one dimension of the new shape can be -1. In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is unchanged (i.e. taken\nfrom the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar.\nThe input tensor's shape and the output tensor's shape are required to have the same number of elements.", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "shape", + "type": "tensor(int64)", + "description": "Specified shape for output." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "reshaped", + "type": "T", + "description": "Reshaped data." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "allowzero", + "code": "original_shape = [0, 3, 4]\ntest_cases = {\n \"allowzero_reordered\": np.array([3, 4, 0], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n allowzero=1, # if allowzero=1, final shape = (3, 4, 0)\n # if allowzero=0, final shape = (3, 4, 4)\n )\n\n reshaped = reshape_reference_implementation(data, shape, allowzero=1)\n\n expect(\n node,\n inputs=[data, shape],\n outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + }, + { + "summary": "reshape", + "code": "original_shape = [2, 3, 4]\ntest_cases = {\n \"reordered_all_dims\": np.array([4, 2, 3], dtype=np.int64),\n \"reordered_last_dims\": np.array([2, 4, 3], dtype=np.int64),\n \"reduced_dims\": np.array([2, 12], dtype=np.int64),\n \"extended_dims\": np.array([2, 3, 2, 2], dtype=np.int64),\n \"one_dim\": np.array([24], dtype=np.int64),\n \"negative_dim\": np.array([2, -1, 2], dtype=np.int64),\n \"negative_extended_dims\": np.array([-1, 2, 3, 4], dtype=np.int64),\n \"zero_dim\": np.array([2, 0, 4, 1], dtype=np.int64),\n \"zero_and_negative_dim\": np.array([2, 0, 1, -1], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n )\n\n reshaped = reshape_reference_implementation(data, shape)\n\n expect(\n node,\n inputs=[data, shape],\n outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + } + ], + "category": "Shape" + }, + { + "name": "Reshape", + "module": "ai.onnx", + "version": 13, + "description": "Reshape the input tensor similar to numpy.reshape.\nFirst input is the data 
tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.\nAt most one dimension of the new shape can be -1. In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is unchanged (i.e. taken\nfrom the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar.\nThe input tensor's shape and the output tensor's shape are required to have the same number of elements.", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "shape", + "type": "tensor(int64)", + "description": "Specified shape for output." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "reshaped", + "type": "T", + "description": "Reshaped data." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "allowzero", + "code": "original_shape = [0, 3, 4]\ntest_cases = {\n \"allowzero_reordered\": np.array([3, 4, 0], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n allowzero=1, # if allowzero=1, final shape = (3, 4, 0)\n # if allowzero=0, final shape = (3, 4, 4)\n )\n\n reshaped = reshape_reference_implementation(data, shape, allowzero=1)\n\n expect(\n node,\n inputs=[data, shape],\n outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + }, + { + "summary": "reshape", + "code": "original_shape = [2, 3, 4]\ntest_cases = {\n \"reordered_all_dims\": np.array([4, 2, 3], dtype=np.int64),\n \"reordered_last_dims\": np.array([2, 4, 3], dtype=np.int64),\n \"reduced_dims\": np.array([2, 12], dtype=np.int64),\n \"extended_dims\": np.array([2, 3, 2, 2], dtype=np.int64),\n \"one_dim\": np.array([24], dtype=np.int64),\n \"negative_dim\": np.array([2, -1, 2], dtype=np.int64),\n \"negative_extended_dims\": np.array([-1, 2, 3, 4], dtype=np.int64),\n \"zero_dim\": np.array([2, 0, 4, 1], dtype=np.int64),\n \"zero_and_negative_dim\": np.array([2, 0, 1, -1], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n )\n\n reshaped = reshape_reference_implementation(data, shape)\n\n expect(\n node,\n inputs=[data, shape],\n outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + } + ], + "category": "Shape" + }, + { + "name": "Reshape", + "module": "ai.onnx", + "version": 14, + "description": "Reshape the input tensor similar to numpy.reshape.\nFirst input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.\nAt most one dimension of the new shape can be -1. 
In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is unchanged (i.e. taken\nfrom the input tensor). If 'allowzero' is set, and the new shape includes 0, the\ndimension will be set explicitly to zero (i.e. not taken from input tensor).\nShape (second input) could be an empty shape, which means converting to a scalar.\nThe input tensor's shape and the output tensor's shape are required to have the same number of elements.\n\nIf the attribute 'allowzero' is set, it is invalid for the specified shape to\ncontain both a zero value and -1, as the value of the dimension corresponding\nto -1 cannot be determined uniquely.\n", + "attributes": [ + { + "name": "allowzero", + "type": "int64", + "required": false, + "description": "(Optional) By default, when any value in the 'shape' input is equal to zero the corresponding dimension value is copied from the input tensor dynamically. allowzero=1 indicates that if any value in the 'shape' input is set to zero, the zero value is honored, similar to NumPy." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "shape", + "type": "tensor(int64)", + "description": "Specified shape for output." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "reshaped", + "type": "T", + "description": "Reshaped data." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "allowzero", + "code": "original_shape = [0, 3, 4]\ntest_cases = {\n \"allowzero_reordered\": np.array([3, 4, 0], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n allowzero=1, # if allowzero=1, final shape = (3, 4, 0)\n # if allowzero=0, final shape = (3, 4, 4)\n )\n\n reshaped = reshape_reference_implementation(data, shape, allowzero=1)\n\n expect(\n node,\n inputs=[data, shape],\n outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + }, + { + "summary": "reshape", + "code": "original_shape = [2, 3, 4]\ntest_cases = {\n \"reordered_all_dims\": np.array([4, 2, 3], dtype=np.int64),\n \"reordered_last_dims\": np.array([2, 4, 3], dtype=np.int64),\n \"reduced_dims\": np.array([2, 12], dtype=np.int64),\n \"extended_dims\": np.array([2, 3, 2, 2], dtype=np.int64),\n \"one_dim\": np.array([24], dtype=np.int64),\n \"negative_dim\": np.array([2, -1, 2], dtype=np.int64),\n \"negative_extended_dims\": np.array([-1, 2, 3, 4], dtype=np.int64),\n \"zero_dim\": np.array([2, 0, 4, 1], dtype=np.int64),\n \"zero_and_negative_dim\": np.array([2, 0, 1, -1], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n )\n\n reshaped = 
reshape_reference_implementation(data, shape)\n\n expect(\n node,\n inputs=[data, shape],\n outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + } + ], + "category": "Shape" + }, + { + "name": "Reshape", + "module": "ai.onnx", + "version": 19, + "description": "Reshape the input tensor similar to numpy.reshape.\nFirst input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.\nAt most one dimension of the new shape can be -1. In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is unchanged (i.e. taken\nfrom the input tensor). If 'allowzero' is set, and the new shape includes 0, the\ndimension will be set explicitly to zero (i.e. not taken from input tensor).\nShape (second input) could be an empty shape, which means converting to a scalar.\nThe input tensor's shape and the output tensor's shape are required to have the same number of elements.\n\nIf the attribute 'allowzero' is set, it is invalid for the specified shape to\ncontain both a zero value and -1, as the value of the dimension corresponding\nto -1 cannot be determined uniquely.\n", + "attributes": [ + { + "name": "allowzero", + "type": "int64", + "required": false, + "description": "(Optional) By default, when any value in the 'shape' input is equal to zero the corresponding dimension value is copied from the input tensor dynamically. allowzero=1 indicates that if any value in the 'shape' input is set to zero, the zero value is honored, similar to NumPy." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "shape", + "type": "tensor(int64)", + "description": "Specified shape for output." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "reshaped", + "type": "T", + "description": "Reshaped data." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + } + ], + "examples": [ + { + "summary": "allowzero", + "code": "original_shape = [0, 3, 4]\ntest_cases = {\n \"allowzero_reordered\": np.array([3, 4, 0], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n allowzero=1, # if allowzero=1, final shape = (3, 4, 0)\n # if allowzero=0, final shape = (3, 4, 4)\n )\n\n reshaped = reshape_reference_implementation(data, shape, allowzero=1)\n\n expect(\n node,\n inputs=[data, shape],\n outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + }, + { + "summary": "reshape", + "code": "original_shape = [2, 3, 4]\ntest_cases = {\n \"reordered_all_dims\": np.array([4, 2, 3], dtype=np.int64),\n \"reordered_last_dims\": np.array([2, 4, 3], dtype=np.int64),\n \"reduced_dims\": np.array([2, 12], dtype=np.int64),\n \"extended_dims\": np.array([2, 3, 2, 2], dtype=np.int64),\n \"one_dim\": np.array([24], dtype=np.int64),\n \"negative_dim\": np.array([2, -1, 2], dtype=np.int64),\n \"negative_extended_dims\": np.array([-1, 2, 3, 4], dtype=np.int64),\n \"zero_dim\": np.array([2, 0, 4, 1], dtype=np.int64),\n \"zero_and_negative_dim\": np.array([2, 0, 1, -1], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n )\n\n reshaped = reshape_reference_implementation(data, shape)\n\n expect(\n node,\n inputs=[data, shape],\n outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + } + ], + "category": "Shape" + }, + { + "name": "Reshape", + "module": "ai.onnx", + "version": 21, + "description": "Reshape the input tensor similar to numpy.reshape.\nFirst input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.\nAt most one dimension of the new shape can be -1. In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is unchanged (i.e. taken\nfrom the input tensor). If 'allowzero' is set, and the new shape includes 0, the\ndimension will be set explicitly to zero (i.e. 
not taken from input tensor).\nShape (second input) could be an empty shape, which means converting to a scalar.\nThe input tensor's shape and the output tensor's shape are required to have the same number of elements.\n\nIf the attribute 'allowzero' is set, it is invalid for the specified shape to\ncontain both a zero value and -1, as the value of the dimension corresponding\nto -1 cannot be determined uniquely.\n", + "attributes": [ + { + "name": "allowzero", + "type": "int64", + "required": false, + "description": "(Optional) By default, when any value in the 'shape' input is equal to zero the corresponding dimension value is copied from the input tensor dynamically. allowzero=1 indicates that if any value in the 'shape' input is set to zero, the zero value is honored, similar to NumPy." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "shape", + "type": "tensor(int64)", + "description": "Specified shape for output." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "reshaped", + "type": "T", + "description": "Reshaped data." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + } + ], + "examples": [ + { + "summary": "allowzero", + "code": "original_shape = [0, 3, 4]\ntest_cases = {\n \"allowzero_reordered\": np.array([3, 4, 0], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n allowzero=1, # if allowzero=1, final shape = (3, 4, 0)\n # if allowzero=0, final shape = (3, 4, 4)\n )\n\n reshaped = reshape_reference_implementation(data, shape, allowzero=1)\n\n expect(\n node,\n inputs=[data, shape],\n outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + }, + { + "summary": "reshape", + "code": "original_shape = [2, 3, 4]\ntest_cases = {\n \"reordered_all_dims\": np.array([4, 2, 3], dtype=np.int64),\n \"reordered_last_dims\": np.array([2, 4, 3], dtype=np.int64),\n \"reduced_dims\": np.array([2, 12], dtype=np.int64),\n \"extended_dims\": np.array([2, 3, 2, 2], dtype=np.int64),\n \"one_dim\": np.array([24], dtype=np.int64),\n \"negative_dim\": np.array([2, -1, 2], dtype=np.int64),\n \"negative_extended_dims\": np.array([-1, 2, 3, 4], dtype=np.int64),\n \"zero_dim\": np.array([2, 0, 4, 1], dtype=np.int64),\n \"zero_and_negative_dim\": np.array([2, 0, 1, -1], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[\"data\", \"shape\"],\n outputs=[\"reshaped\"],\n )\n\n reshaped = reshape_reference_implementation(data, shape)\n\n expect(\n node,\n inputs=[data, shape],\n outputs=[reshaped],\n name=\"test_reshape_\" + test_name,\n )" + } + ], + "category": "Shape" + }, + { + 
"name": "Resize", + "module": "ai.onnx", + "version": 10, + "description": "Resize the input tensor.\nEach dimension value of the output tensor is:\n output_dimension = floor(input_dimension * scale).\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "nearest", + "description": "Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc)" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "N-D tensor" + }, + { + "name": "scales", + "type": "tensor(float)", + "description": "The scale array along each dimension. It takes value greater than 0. If it's less than 1, it's sampling down, otherwise, it's upsampling. The number of elements of 'scales' should be the same as the rank of input 'X'." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "N-D tensor after resizing" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input 'X' and output 'Y' to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "resize_downsample_scales_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.47119141 2.78125 4.08251953]\n# [ 6.71142578 8.02148438 9.32275391]\n# [11.91650391 13.2265625 14.52783203]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_A_n0p5_exclude_outside", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n cubic_coeff_a=-0.5,\n exclude_outside=True,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.36812675 2.6695014 4.0133367 ]\n# [ 6.57362535 7.875 9.2188353 ]\n# [11.94896657 13.25034122 14.59417652]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.5),\n scale_factors=scales,\n exclude_outside=True,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_A_n0p5_exclude_outside\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1. 
2.39519159 3.79038317]\n# [ 6.58076634 7.97595793 9.37114951]\n# [12.16153268 13.55672427 14.95191585]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_align_corners\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[ 2.5180721 4.2858863]\n# [ 9.589329 11.357142 ]]]]\noutput = interpolate_nd(\n data, cubic_coeffs_antialias, scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_antialias\",\n)" + }, + { + "summary": "resize_downsample_scales_linear", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[2.6666665 4.3333331]]]]\noutput = interpolate_nd(\n data, lambda x, _: linear_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.142857]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_align_corners\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[ 2.875 4.5 ]\n# [ 9.375 11. 
]]]]\noutput = interpolate_nd(\n data, linear_coeffs_antialias, scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_antialias\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_half_pixel_symmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n)\n\ndata = np.array([[[[1, 2, 3, 4]]]], dtype=np.float32)\nscales = np.array([1.0, 1.0, 1.0, 0.6], dtype=np.float32)\n\n# [[[[1.6666667, 3.3333333]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_half_pixel_symmetric\",\n)" + }, + { + "summary": "resize_downsample_scales_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_nearest\",\n)" + }, + { + "summary": "resize_downsample_sizes_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.63078704 3.00462963 4.37847222]\n# [ 7.12615741 8.5 9.87384259]\n# [12.62152778 13.99537037 15.36921296]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_cubic\",\n)" + }, + { + "summary": "resize_downsample_sizes_cubic_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.7750092 3.1200073 4.4650054]\n# [ 7.1550016 8.5 9.844998 ]\n# [12.534994 13.8799925 15.224991 ]]]]\noutput = interpolate_nd(data, cubic_coeffs_antialias, output_size=sizes).astype(\n np.float32\n)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_cubic_antialias\",\n)" + }, + { + "summary": "resize_downsample_sizes_linear_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 2.3636363 3.590909 4.818182 ]\n# [ 7.2727275 8.5 9.727273 ]\n# [12.181818 13.409091 14.636364 ]]]]\noutput = 
interpolate_nd(\n data, linear_coeffs_antialias, output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_linear_antialias\",\n)" + }, + { + "summary": "resize_downsample_sizes_linear_pytorch_half_pixel", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"pytorch_half_pixel\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 1], dtype=np.int64)\n\n# [[[[ 1.6666666]\n# [ 7. ]\n# [12.333333 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n coordinate_transformation_mode=\"pytorch_half_pixel\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_linear_pytorch_half_pixel\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 1, 3], dtype=np.int64)\n\n# [[[[1. 2. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest_not_larger", + "code": "keep_aspect_ratio_policy = \"not_larger\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 3], dtype=np.int64) # Results in 1x2\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest_not_larger\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest_not_smaller", + "code": "keep_aspect_ratio_policy = \"not_smaller\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 3], dtype=np.int64) # Results in 2x3\n\n# [[[[1. 2. 4.]\n# [5. 6. 
8.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest_not_smaller\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 0.6, 0.8], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0.4, 0.6, 0.6, 0.8], dtype=np.float32)\nsizes = np.array([3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n axes=axes,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_axes_2_3\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0.6, 0.4, 0.8, 0.6], dtype=np.float32)\nsizes = np.array([3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 
10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n axes=axes,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_axes_3_2\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_extrapolation_value", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n extrapolation_value=10.0,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 1.2, 1.7], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 10. 10. ]\n# [12.400001 10. 10. ]\n# [10. 10. 10. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n extrapolation_value=10.0,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.47265625 0.76953125 1.24609375 1.875 2.28125\n# 2.91015625 3.38671875 3.68359375]\n# [ 1.66015625 1.95703125 2.43359375 3.0625 3.46875\n# 4.09765625 4.57421875 4.87109375]\n# [ 3.56640625 3.86328125 4.33984375 4.96875 5.375\n# 6.00390625 6.48046875 6.77734375]\n# [ 6.08203125 6.37890625 6.85546875 7.484375 7.890625\n# 8.51953125 8.99609375 9.29296875]\n# [ 7.70703125 8.00390625 8.48046875 9.109375 9.515625\n# 10.14453125 10.62109375 10.91796875]\n# [10.22265625 10.51953125 10.99609375 11.625 12.03125\n# 12.66015625 13.13671875 13.43359375]\n# [12.12890625 12.42578125 12.90234375 13.53125 13.9375\n# 14.56640625 15.04296875 15.33984375]\n# [13.31640625 13.61328125 14.08984375 14.71875 15.125\n# 15.75390625 16.23046875 16.52734375]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_A_n0p5_exclude_outside", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n cubic_coeff_a=-0.5,\n exclude_outside=True,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.55882353 0.81494204 1.35698249 1.89705882 2.39705882\n# 2.93713516 3.47917561 3.73529412]\n# [ 1.58329755 1.83941606 2.38145651 2.92153285 3.42153285\n# 3.96160918 4.50364964 4.75976814]\n# [ 3.75145936 4.00757787 4.54961832 5.08969466 5.58969466\n# 6.12977099 6.67181144 6.92792995]\n# [ 5.91176471 6.16788321 6.70992366 7.25 7.75\n# 8.29007634 
8.83211679 9.08823529]\n# [ 7.91176471 8.16788321 8.70992366 9.25 9.75\n# 10.29007634 10.83211679 11.08823529]\n# [10.07207005 10.32818856 10.87022901 11.41030534 11.91030534\n# 12.45038168 12.99242213 13.24854064]\n# [12.24023186 12.49635036 13.03839082 13.57846715 14.07846715\n# 14.61854349 15.16058394 15.41670245]\n# [13.26470588 13.52082439 14.06286484 14.60294118 15.10294118\n# 15.64301751 16.18505796 16.44117647]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.5),\n scale_factors=scales,\n exclude_outside=True,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_A_n0p5_exclude_outside\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.34110787 1.80029155 2.32944606 2.67055394\n# 3.19970845 3.65889213 4. ]\n# [ 2.36443149 2.70553936 3.16472303 3.69387755 4.03498542\n# 4.56413994 5.02332362 5.36443149]\n# [ 4.20116618 4.54227405 5.00145773 5.53061224 5.87172012\n# 6.40087464 6.86005831 7.20116618]\n# [ 6.31778426 6.65889213 7.1180758 7.64723032 7.98833819\n# 8.51749271 8.97667638 9.31778426]\n# [ 7.68221574 8.02332362 8.48250729 9.01166181 9.35276968\n# 9.8819242 10.34110787 10.68221574]\n# [ 9.79883382 10.13994169 10.59912536 11.12827988 11.46938776\n# 11.99854227 12.45772595 12.79883382]\n# [11.63556851 11.97667638 12.43586006 12.96501458 13.30612245\n# 13.83527697 14.29446064 14.63556851]\n# [13. 13.34110787 13.80029155 14.32944606 14.67055394\n# 15.19970845 15.65889213 16. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_align_corners\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_asymmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"asymmetric\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.40625 2. 2.5 3. 3.59375 4.\n# 4.09375]\n# [ 2.625 3.03125 3.625 4.125 4.625 5.21875 5.625\n# 5.71875]\n# [ 5. 5.40625 6. 6.5 7. 7.59375 8.\n# 8.09375]\n# [ 7. 7.40625 8. 8.5 9. 9.59375 10.\n# 10.09375]\n# [ 9. 9.40625 10. 10.5 11. 11.59375 12.\n# 12.09375]\n# [11.375 11.78125 12.375 12.875 13.375 13.96875 14.375\n# 14.46875]\n# [13. 13.40625 14. 14.5 15. 
15.59375 16.\n# 16.09375]\n# [13.375 13.78125 14.375 14.875 15.375 15.96875 16.375\n# 16.46875]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.75),\n scale_factors=scales,\n coordinate_transformation_mode=\"asymmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_asymmetric\",\n)" + }, + { + "summary": "resize_upsample_scales_linear", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.25 1.75 2. ]\n# [1.5 1.75 2.25 2.5 ]\n# [2.5 2.75 3.25 3.5 ]\n# [3. 3.25 3.75 4. ]]]]\noutput = interpolate_nd(\n data, lambda x, _: linear_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear\",\n)" + }, + { + "summary": "resize_upsample_scales_linear_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.33333333 1.66666667 2. ]\n# [1.66666667 2. 2.33333333 2.66666667]\n# [2.33333333 2.66666667 3. 3.33333333]\n# [3. 3.33333333 3.66666667 4. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear_align_corners\",\n)" + }, + { + "summary": "resize_upsample_scales_linear_half_pixel_symmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n)\n\ndata = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.3, 2.94], dtype=np.float32)\n\n# [[[[1. , 1.15986395, 1.5 , 1.84013605, 2. ],\n# [1.56521738, 1.72508133, 2.06521738, 2.40535343, 2.56521738],\n# [2.43478262, 2.59464657, 2.93478262, 3.27491867, 3.43478262],\n# [3. , 3.15986395, 3.5 , 3.84013605, 4. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear_half_pixel_symmetric\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 
4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest_axes_2_3\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([3.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest_axes_3_2\",\n)" + }, + { + "summary": "resize_upsample_sizes_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 9, 10], dtype=np.int64)\n\n# [[[[ 0.45507922 0.64057922 0.97157922 1.42257922 1.90732922\n# 2.22332922 2.70807922 3.15907922 3.49007922 3.67557922]\n# [ 1.39437963 1.57987963 1.91087963 2.36187963 2.84662963\n# 3.16262963 3.64737963 4.09837963 4.42937963 4.61487963]\n# [ 2.95130693 3.13680693 3.46780693 3.91880693 4.40355693\n# 4.71955693 5.20430693 5.65530693 5.98630693 6.17180693]\n# [ 5.20525069 5.39075069 5.72175069 6.17275069 6.65750069\n# 6.97350069 7.45825069 7.90925069 8.24025069 8.42575069]\n# [ 6.88975 7.07525 7.40625 7.85725 8.342\n# 8.658 9.14275 9.59375 9.92475 10.11025 ]\n# [ 8.57424931 8.75974931 9.09074931 9.54174931 10.02649931\n# 10.34249931 10.82724931 11.27824931 11.60924931 11.79474931]\n# [10.82819307 11.01369307 11.34469307 11.79569307 12.28044307\n# 12.59644307 13.08119307 13.53219307 13.86319307 14.04869307]\n# [12.38512037 12.57062037 12.90162037 13.35262037 13.83737037\n# 14.15337037 14.63812037 15.08912037 15.42012037 15.60562037]\n# [13.32442078 13.50992078 13.84092078 14.29192078 14.77667078\n# 15.09267078 15.57742078 16.02842078 16.35942078 16.54492078]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_cubic\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n 
[1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_axes_2_3\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([8, 7], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_axes_3_2\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_ceil_half_pixel", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"half_pixel\",\n nearest_mode=\"ceil\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 
16.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x, mode=\"ceil\"), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_ceil_half_pixel\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_floor_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"align_corners\",\n nearest_mode=\"floor\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [13. 13. 13. 14. 14. 15. 15. 16.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x, mode=\"floor\"),\n output_size=sizes,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_floor_align_corners\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_not_larger", + "code": "keep_aspect_ratio_policy = \"not_larger\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64) # Results in 7x7\n\n# [[[[1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_not_larger\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_not_smaller", + "code": "keep_aspect_ratio_policy = \"not_smaller\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64) # Results in 8x8\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 
4.]]]]\noutput = interpolate_nd(\n    data,\n    lambda x, _: nearest_coeffs(x),\n    output_size=sizes,\n    axes=axes,\n    keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n    node,\n    inputs=[data, sizes],\n    outputs=[output],\n    name=\"test_resize_upsample_sizes_nearest_not_smaller\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric", + "code": "node = onnx.helper.make_node(\n    \"Resize\",\n    inputs=[\"X\", \"\", \"\", \"sizes\"],\n    outputs=[\"Y\"],\n    mode=\"nearest\",\n    coordinate_transformation_mode=\"asymmetric\",\n    nearest_mode=\"round_prefer_ceil\",\n)\n\ndata = np.array(\n    [\n        [\n            [\n                [1, 2, 3, 4],\n                [5, 6, 7, 8],\n                [9, 10, 11, 12],\n                [13, 14, 15, 16],\n            ]\n        ]\n    ],\n    dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]]]]\noutput = interpolate_nd(\n    data,\n    lambda x, _: nearest_coeffs(x, mode=\"round_prefer_ceil\"),\n    output_size=sizes,\n    coordinate_transformation_mode=\"asymmetric\",\n).astype(np.float32)\n\nexpect(\n    node,\n    inputs=[data, sizes],\n    outputs=[output],\n    name=\"test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric\",\n)" + } + ] + }, + { + "name": "Resize", + "module": "ai.onnx", + "version": 11, + "description": "Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of a neighborhood (a.k.a. sampling locations) in the input tensor.\nEach dimension value of the output tensor is:\n  output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input \\\"sizes\\\" is not specified.\n", + "attributes": [ + { + "name": "coordinate_transformation_mode", + "type": "string", + "required": false, + "default": "half_pixel", + "description": "\nThis attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.
\n\nThe coordinate of each dimension is transformed individually. Let's describe a case using axis x as an example.\nDenote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, length_original as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input \"roi\", scale = length_resized / length_original,
\n\nif coordinate_transformation_mode is \"half_pixel\",
\nx_original = (x_resized + 0.5) / scale - 0.5,
\n\nif coordinate_transformation_mode is \"pytorch_half_pixel\",
\nx_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0,
\n\nif coordinate_transformation_mode is \"align_corners\",
\nx_original = x_resized * (length_original - 1) / (length_resized - 1),
\n\nif coordinate_transformation_mode is \"asymmetric\",
\nx_original = x_resized / scale,
\n\nif coordinate_transformation_mode is \"tf_half_pixel_for_nn\",
\nx_original = (x_resized + 0.5) / scale,
\n\nif coordinate_transformation_mode is \"tf_crop_and_resize\",
\nx_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)." + }, + { + "name": "cubic_coeff_a", + "type": "float32", + "required": false, + "default": -0.75, + "description": "The coefficient 'a' used in cubic interpolation. Two common choices are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if \"mode\" is \"cubic\"." + }, + { + "name": "exclude_outside", + "type": "int64", + "required": false, + "description": "If set to 1, the weights of sampling locations outside the tensor will be set to 0 and the weights will be renormalized so that their sum is 1.0. The default value is 0." + }, + { + "name": "extrapolation_value", + "type": "float32", + "required": false, + "description": "When coordinate_transformation_mode is \"tf_crop_and_resize\" and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f." + }, + { + "name": "mode", + "type": "string", + "required": false, + "default": "nearest", + "description": "Three interpolation modes: nearest (default), linear and cubic. The \"linear\" mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The \"cubic\" mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor)." + }, + { + "name": "nearest_mode", + "type": "string", + "required": false, + "default": "round_prefer_floor", + "description": "Four modes: round_prefer_floor (default, also known as round half down), round_prefer_ceil (also known as round half up), floor, ceil. Only used by nearest interpolation. It indicates how to get the \"nearest\" pixel in the input tensor from x_original, so this attribute is valid only if \"mode\" is \"nearest\"." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "N-D tensor" + }, + { + "name": "roi", + "type": "T2", + "description": "1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X. The RoIs' coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is \"tf_crop_and_resize\"." + }, + { + "name": "scales", + "type": "tensor(float)", + "description": "The scale array along each dimension. It takes values greater than 0. If a value is less than 1, it's downsampling; otherwise, it's upsampling. The number of elements of 'scales' should be the same as the rank of input 'X'. If 'sizes' is needed, the user must set 'scales' to an empty tensor." + }, + { + "name": "sizes", + "type": "tensor(int64)", + "option": "optional", + "description": "The size of the output tensor. The number of elements of 'sizes' should be the same as the rank of input 'X'. May only be set if 'scales' is set to an empty tensor."
+ } + ], + "min_input": 3, + "max_input": 4, + "outputs": [ + { + "name": "Y", + "type": "T1", + "description": "N-D tensor after resizing" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "3 - 4", + "type_constraints": [ + { + "description": "Constrain input 'X' and output 'Y' to all tensor types.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain roi type to float or double.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "resize_downsample_scales_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.47119141 2.78125 4.08251953]\n# [ 6.71142578 8.02148438 9.32275391]\n# [11.91650391 13.2265625 14.52783203]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_A_n0p5_exclude_outside", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n cubic_coeff_a=-0.5,\n exclude_outside=True,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.36812675 2.6695014 4.0133367 ]\n# [ 6.57362535 7.875 9.2188353 ]\n# [11.94896657 13.25034122 14.59417652]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.5),\n scale_factors=scales,\n exclude_outside=True,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_A_n0p5_exclude_outside\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1. 
2.39519159 3.79038317]\n# [ 6.58076634 7.97595793 9.37114951]\n# [12.16153268 13.55672427 14.95191585]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_align_corners\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[ 2.5180721 4.2858863]\n# [ 9.589329 11.357142 ]]]]\noutput = interpolate_nd(\n data, cubic_coeffs_antialias, scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_antialias\",\n)" + }, + { + "summary": "resize_downsample_scales_linear", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[2.6666665 4.3333331]]]]\noutput = interpolate_nd(\n data, lambda x, _: linear_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.142857]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_align_corners\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[ 2.875 4.5 ]\n# [ 9.375 11. 
]]]]\noutput = interpolate_nd(\n data, linear_coeffs_antialias, scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_antialias\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_half_pixel_symmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n)\n\ndata = np.array([[[[1, 2, 3, 4]]]], dtype=np.float32)\nscales = np.array([1.0, 1.0, 1.0, 0.6], dtype=np.float32)\n\n# [[[[1.6666667, 3.3333333]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_half_pixel_symmetric\",\n)" + }, + { + "summary": "resize_downsample_scales_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_nearest\",\n)" + }, + { + "summary": "resize_downsample_sizes_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.63078704 3.00462963 4.37847222]\n# [ 7.12615741 8.5 9.87384259]\n# [12.62152778 13.99537037 15.36921296]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_cubic\",\n)" + }, + { + "summary": "resize_downsample_sizes_cubic_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.7750092 3.1200073 4.4650054]\n# [ 7.1550016 8.5 9.844998 ]\n# [12.534994 13.8799925 15.224991 ]]]]\noutput = interpolate_nd(data, cubic_coeffs_antialias, output_size=sizes).astype(\n np.float32\n)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_cubic_antialias\",\n)" + }, + { + "summary": "resize_downsample_sizes_linear_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 2.3636363 3.590909 4.818182 ]\n# [ 7.2727275 8.5 9.727273 ]\n# [12.181818 13.409091 14.636364 ]]]]\noutput = 
interpolate_nd(\n data, linear_coeffs_antialias, output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_linear_antialias\",\n)" + }, + { + "summary": "resize_downsample_sizes_linear_pytorch_half_pixel", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"pytorch_half_pixel\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 1], dtype=np.int64)\n\n# [[[[ 1.6666666]\n# [ 7. ]\n# [12.333333 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n coordinate_transformation_mode=\"pytorch_half_pixel\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_linear_pytorch_half_pixel\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 1, 3], dtype=np.int64)\n\n# [[[[1. 2. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest_not_larger", + "code": "keep_aspect_ratio_policy = \"not_larger\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 3], dtype=np.int64) # Results in 1x2\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest_not_larger\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest_not_smaller", + "code": "keep_aspect_ratio_policy = \"not_smaller\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 3], dtype=np.int64) # Results in 2x3\n\n# [[[[1. 2. 4.]\n# [5. 6. 
8.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest_not_smaller\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 0.6, 0.8], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0.4, 0.6, 0.6, 0.8], dtype=np.float32)\nsizes = np.array([3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n axes=axes,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_axes_2_3\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0.6, 0.4, 0.8, 0.6], dtype=np.float32)\nsizes = np.array([3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 
10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n axes=axes,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_axes_3_2\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_extrapolation_value", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n extrapolation_value=10.0,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 1.2, 1.7], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 10. 10. ]\n# [12.400001 10. 10. ]\n# [10. 10. 10. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n extrapolation_value=10.0,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.47265625 0.76953125 1.24609375 1.875 2.28125\n# 2.91015625 3.38671875 3.68359375]\n# [ 1.66015625 1.95703125 2.43359375 3.0625 3.46875\n# 4.09765625 4.57421875 4.87109375]\n# [ 3.56640625 3.86328125 4.33984375 4.96875 5.375\n# 6.00390625 6.48046875 6.77734375]\n# [ 6.08203125 6.37890625 6.85546875 7.484375 7.890625\n# 8.51953125 8.99609375 9.29296875]\n# [ 7.70703125 8.00390625 8.48046875 9.109375 9.515625\n# 10.14453125 10.62109375 10.91796875]\n# [10.22265625 10.51953125 10.99609375 11.625 12.03125\n# 12.66015625 13.13671875 13.43359375]\n# [12.12890625 12.42578125 12.90234375 13.53125 13.9375\n# 14.56640625 15.04296875 15.33984375]\n# [13.31640625 13.61328125 14.08984375 14.71875 15.125\n# 15.75390625 16.23046875 16.52734375]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_A_n0p5_exclude_outside", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n cubic_coeff_a=-0.5,\n exclude_outside=True,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.55882353 0.81494204 1.35698249 1.89705882 2.39705882\n# 2.93713516 3.47917561 3.73529412]\n# [ 1.58329755 1.83941606 2.38145651 2.92153285 3.42153285\n# 3.96160918 4.50364964 4.75976814]\n# [ 3.75145936 4.00757787 4.54961832 5.08969466 5.58969466\n# 6.12977099 6.67181144 6.92792995]\n# [ 5.91176471 6.16788321 6.70992366 7.25 7.75\n# 8.29007634 
8.83211679 9.08823529]\n# [ 7.91176471 8.16788321 8.70992366 9.25 9.75\n# 10.29007634 10.83211679 11.08823529]\n# [10.07207005 10.32818856 10.87022901 11.41030534 11.91030534\n# 12.45038168 12.99242213 13.24854064]\n# [12.24023186 12.49635036 13.03839082 13.57846715 14.07846715\n# 14.61854349 15.16058394 15.41670245]\n# [13.26470588 13.52082439 14.06286484 14.60294118 15.10294118\n# 15.64301751 16.18505796 16.44117647]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.5),\n scale_factors=scales,\n exclude_outside=True,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_A_n0p5_exclude_outside\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.34110787 1.80029155 2.32944606 2.67055394\n# 3.19970845 3.65889213 4. ]\n# [ 2.36443149 2.70553936 3.16472303 3.69387755 4.03498542\n# 4.56413994 5.02332362 5.36443149]\n# [ 4.20116618 4.54227405 5.00145773 5.53061224 5.87172012\n# 6.40087464 6.86005831 7.20116618]\n# [ 6.31778426 6.65889213 7.1180758 7.64723032 7.98833819\n# 8.51749271 8.97667638 9.31778426]\n# [ 7.68221574 8.02332362 8.48250729 9.01166181 9.35276968\n# 9.8819242 10.34110787 10.68221574]\n# [ 9.79883382 10.13994169 10.59912536 11.12827988 11.46938776\n# 11.99854227 12.45772595 12.79883382]\n# [11.63556851 11.97667638 12.43586006 12.96501458 13.30612245\n# 13.83527697 14.29446064 14.63556851]\n# [13. 13.34110787 13.80029155 14.32944606 14.67055394\n# 15.19970845 15.65889213 16. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_align_corners\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_asymmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"asymmetric\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.40625 2. 2.5 3. 3.59375 4.\n# 4.09375]\n# [ 2.625 3.03125 3.625 4.125 4.625 5.21875 5.625\n# 5.71875]\n# [ 5. 5.40625 6. 6.5 7. 7.59375 8.\n# 8.09375]\n# [ 7. 7.40625 8. 8.5 9. 9.59375 10.\n# 10.09375]\n# [ 9. 9.40625 10. 10.5 11. 11.59375 12.\n# 12.09375]\n# [11.375 11.78125 12.375 12.875 13.375 13.96875 14.375\n# 14.46875]\n# [13. 13.40625 14. 14.5 15. 
15.59375 16.\n# 16.09375]\n# [13.375 13.78125 14.375 14.875 15.375 15.96875 16.375\n# 16.46875]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.75),\n scale_factors=scales,\n coordinate_transformation_mode=\"asymmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_asymmetric\",\n)" + }, + { + "summary": "resize_upsample_scales_linear", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.25 1.75 2. ]\n# [1.5 1.75 2.25 2.5 ]\n# [2.5 2.75 3.25 3.5 ]\n# [3. 3.25 3.75 4. ]]]]\noutput = interpolate_nd(\n data, lambda x, _: linear_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear\",\n)" + }, + { + "summary": "resize_upsample_scales_linear_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.33333333 1.66666667 2. ]\n# [1.66666667 2. 2.33333333 2.66666667]\n# [2.33333333 2.66666667 3. 3.33333333]\n# [3. 3.33333333 3.66666667 4. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear_align_corners\",\n)" + }, + { + "summary": "resize_upsample_scales_linear_half_pixel_symmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n)\n\ndata = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.3, 2.94], dtype=np.float32)\n\n# [[[[1. , 1.15986395, 1.5 , 1.84013605, 2. ],\n# [1.56521738, 1.72508133, 2.06521738, 2.40535343, 2.56521738],\n# [2.43478262, 2.59464657, 2.93478262, 3.27491867, 3.43478262],\n# [3. , 3.15986395, 3.5 , 3.84013605, 4. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear_half_pixel_symmetric\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 
4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest_axes_2_3\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([3.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest_axes_3_2\",\n)" + }, + { + "summary": "resize_upsample_sizes_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 9, 10], dtype=np.int64)\n\n# [[[[ 0.45507922 0.64057922 0.97157922 1.42257922 1.90732922\n# 2.22332922 2.70807922 3.15907922 3.49007922 3.67557922]\n# [ 1.39437963 1.57987963 1.91087963 2.36187963 2.84662963\n# 3.16262963 3.64737963 4.09837963 4.42937963 4.61487963]\n# [ 2.95130693 3.13680693 3.46780693 3.91880693 4.40355693\n# 4.71955693 5.20430693 5.65530693 5.98630693 6.17180693]\n# [ 5.20525069 5.39075069 5.72175069 6.17275069 6.65750069\n# 6.97350069 7.45825069 7.90925069 8.24025069 8.42575069]\n# [ 6.88975 7.07525 7.40625 7.85725 8.342\n# 8.658 9.14275 9.59375 9.92475 10.11025 ]\n# [ 8.57424931 8.75974931 9.09074931 9.54174931 10.02649931\n# 10.34249931 10.82724931 11.27824931 11.60924931 11.79474931]\n# [10.82819307 11.01369307 11.34469307 11.79569307 12.28044307\n# 12.59644307 13.08119307 13.53219307 13.86319307 14.04869307]\n# [12.38512037 12.57062037 12.90162037 13.35262037 13.83737037\n# 14.15337037 14.63812037 15.08912037 15.42012037 15.60562037]\n# [13.32442078 13.50992078 13.84092078 14.29192078 14.77667078\n# 15.09267078 15.57742078 16.02842078 16.35942078 16.54492078]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_cubic\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n 
[1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_axes_2_3\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([8, 7], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_axes_3_2\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_ceil_half_pixel", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"half_pixel\",\n nearest_mode=\"ceil\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 
16.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x, mode=\"ceil\"), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_ceil_half_pixel\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_floor_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"align_corners\",\n nearest_mode=\"floor\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [13. 13. 13. 14. 14. 15. 15. 16.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x, mode=\"floor\"),\n output_size=sizes,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_floor_align_corners\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_not_larger", + "code": "keep_aspect_ratio_policy = \"not_larger\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64) # Results in 7x7\n\n# [[[[1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_not_larger\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_not_smaller", + "code": "keep_aspect_ratio_policy = \"not_smaller\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64) # Results in 8x8\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 
4.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_not_smaller\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"asymmetric\",\n nearest_mode=\"round_prefer_ceil\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x, mode=\"round_prefer_ceil\"),\n output_size=sizes,\n coordinate_transformation_mode=\"asymmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric\",\n)" + } + ] + }, + { + "name": "Resize", + "module": "ai.onnx", + "version": 13, + "description": "Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of a neighborhood (a.k.a. sampling locations) in the input tensor.\nEach dimension value of the output tensor is:\n output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input \\\"sizes\\\" is not specified.\n", + "attributes": [ + { + "name": "coordinate_transformation_mode", + "type": "string", + "required": false, + "default": "half_pixel", + "description": "\nThis attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.
\n\nThe coordinate of each dimension is transformed individually. Let's describe a case using axis x as an example.\nDenote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, length_original as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input \"roi\", scale = length_resized / length_original,
\n\nif coordinate_transformation_mode is \"half_pixel\",
\nx_original = (x_resized + 0.5) / scale - 0.5,
\n\nif coordinate_transformation_mode is \"pytorch_half_pixel\",
\nx_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0,
\n\nif coordinate_transformation_mode is \"align_corners\",
\nx_original = x_resized * (length_original - 1) / (length_resized - 1),
\n\nif coordinate_transformation_mode is \"asymmetric\",
\nx_original = x_resized / scale,
\n\nif coordinate_transformation_mode is \"tf_crop_and_resize\",
\nx_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)." + }, + { + "name": "cubic_coeff_a", + "type": "float32", + "required": false, + "default": -0.75, + "description": "The coefficient 'a' used in cubic interpolation. Two common choices are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if \"mode\" is \"cubic\"." + }, + { + "name": "exclude_outside", + "type": "int64", + "required": false, + "description": "If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0." + }, + { + "name": "extrapolation_value", + "type": "float32", + "required": false, + "description": "When coordinate_transformation_mode is \"tf_crop_and_resize\" and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f." + }, + { + "name": "mode", + "type": "string", + "required": false, + "default": "nearest", + "description": "Three interpolation modes: nearest (default), linear and cubic. The \"linear\" mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The \"cubic\" mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor)." + }, + { + "name": "nearest_mode", + "type": "string", + "required": false, + "default": "round_prefer_floor", + "description": "Four modes: round_prefer_floor (default, also known as round half down), round_prefer_ceil (also known as round half up), floor, ceil. Only used by nearest interpolation. It indicates how to get the \"nearest\" pixel in the input tensor from x_original, so this attribute is valid only if \"mode\" is \"nearest\"." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "N-D tensor" + }, + { + "name": "roi", + "type": "T2", + "option": "optional", + "description": "1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X. The RoIs' coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is \"tf_crop_and_resize\"." + }, + { + "name": "scales", + "type": "tensor(float)", + "option": "optional", + "description": "The scale array along each dimension. Each value must be greater than 0: a value less than 1 downsamples along that dimension, and a value greater than 1 upsamples. The number of elements of 'scales' should be the same as the rank of input 'X'. One of 'scales' and 'sizes' MUST be specified and it is an error if both are specified. If 'sizes' is needed, the user can use an empty string as the name of 'scales' in this operator's input list." + }, + { + "name": "sizes", + "type": "tensor(int64)", + "option": "optional", + "description": "The size of the output tensor. The number of elements of 'sizes' should be the same as the rank of input 'X'. Only one of 'scales' and 'sizes' can be specified."
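The coordinate_transformation_mode formulas above translate almost mechanically into code. Below is a minimal, illustrative sketch of them in Python; the helper name original_coordinate and its keyword defaults are assumptions of this sketch, not part of the ONNX API or of this metadata.

def original_coordinate(x_resized, scale, length_original, length_resized,
                        mode="half_pixel", start_x=0.0, end_x=1.0):
    # Maps an output (resized) coordinate back to an input (original)
    # coordinate for one axis, following the documented formulas above.
    if mode == "half_pixel":
        return (x_resized + 0.5) / scale - 0.5
    if mode == "pytorch_half_pixel":
        return (x_resized + 0.5) / scale - 0.5 if length_resized > 1 else 0.0
    if mode == "align_corners":
        return x_resized * (length_original - 1) / (length_resized - 1)
    if mode == "asymmetric":
        return x_resized / scale
    if mode == "tf_crop_and_resize":
        if length_resized > 1:
            return start_x * (length_original - 1) + x_resized * (
                end_x - start_x) * (length_original - 1) / (length_resized - 1)
        return 0.5 * (start_x + end_x) * (length_original - 1)
    raise ValueError(f"unknown coordinate_transformation_mode: {mode}")

# Upsampling a length-2 axis to length 4 (scale = 2.0): "half_pixel" maps
# output index 0 to -0.25, while "align_corners" pins it to 0.0.
assert original_coordinate(0, 2.0, 2, 4) == -0.25
assert original_coordinate(0, 2.0, 2, 4, mode="align_corners") == 0.0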
+ } + ], + "min_input": 1, + "max_input": 4, + "outputs": [ + { + "name": "Y", + "type": "T1", + "description": "N-D tensor after resizing" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 4", + "type_constraints": [ + { + "description": "Constrain input 'X' and output 'Y' to all tensor types.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain roi type to float or double.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "resize_downsample_scales_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.47119141 2.78125 4.08251953]\n# [ 6.71142578 8.02148438 9.32275391]\n# [11.91650391 13.2265625 14.52783203]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_A_n0p5_exclude_outside", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n cubic_coeff_a=-0.5,\n exclude_outside=True,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.36812675 2.6695014 4.0133367 ]\n# [ 6.57362535 7.875 9.2188353 ]\n# [11.94896657 13.25034122 14.59417652]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.5),\n scale_factors=scales,\n exclude_outside=True,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_A_n0p5_exclude_outside\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1. 
2.39519159 3.79038317]\n# [ 6.58076634 7.97595793 9.37114951]\n# [12.16153268 13.55672427 14.95191585]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_align_corners\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[ 2.5180721 4.2858863]\n# [ 9.589329 11.357142 ]]]]\noutput = interpolate_nd(\n data, cubic_coeffs_antialias, scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_antialias\",\n)" + }, + { + "summary": "resize_downsample_scales_linear", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[2.6666665 4.3333331]]]]\noutput = interpolate_nd(\n data, lambda x, _: linear_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.142857]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_align_corners\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[ 2.875 4.5 ]\n# [ 9.375 11. 
]]]]\noutput = interpolate_nd(\n data, linear_coeffs_antialias, scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_antialias\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_half_pixel_symmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n)\n\ndata = np.array([[[[1, 2, 3, 4]]]], dtype=np.float32)\nscales = np.array([1.0, 1.0, 1.0, 0.6], dtype=np.float32)\n\n# [[[[1.6666667, 3.3333333]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_half_pixel_symmetric\",\n)" + }, + { + "summary": "resize_downsample_scales_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_nearest\",\n)" + }, + { + "summary": "resize_downsample_sizes_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.63078704 3.00462963 4.37847222]\n# [ 7.12615741 8.5 9.87384259]\n# [12.62152778 13.99537037 15.36921296]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_cubic\",\n)" + }, + { + "summary": "resize_downsample_sizes_cubic_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.7750092 3.1200073 4.4650054]\n# [ 7.1550016 8.5 9.844998 ]\n# [12.534994 13.8799925 15.224991 ]]]]\noutput = interpolate_nd(data, cubic_coeffs_antialias, output_size=sizes).astype(\n np.float32\n)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_cubic_antialias\",\n)" + }, + { + "summary": "resize_downsample_sizes_linear_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 2.3636363 3.590909 4.818182 ]\n# [ 7.2727275 8.5 9.727273 ]\n# [12.181818 13.409091 14.636364 ]]]]\noutput = 
interpolate_nd(\n data, linear_coeffs_antialias, output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_linear_antialias\",\n)" + }, + { + "summary": "resize_downsample_sizes_linear_pytorch_half_pixel", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"pytorch_half_pixel\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 1], dtype=np.int64)\n\n# [[[[ 1.6666666]\n# [ 7. ]\n# [12.333333 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n coordinate_transformation_mode=\"pytorch_half_pixel\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_linear_pytorch_half_pixel\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 1, 3], dtype=np.int64)\n\n# [[[[1. 2. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest_not_larger", + "code": "keep_aspect_ratio_policy = \"not_larger\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 3], dtype=np.int64) # Results in 1x2\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest_not_larger\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest_not_smaller", + "code": "keep_aspect_ratio_policy = \"not_smaller\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 3], dtype=np.int64) # Results in 2x3\n\n# [[[[1. 2. 4.]\n# [5. 6. 
8.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest_not_smaller\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 0.6, 0.8], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0.4, 0.6, 0.6, 0.8], dtype=np.float32)\nsizes = np.array([3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n axes=axes,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_axes_2_3\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0.6, 0.4, 0.8, 0.6], dtype=np.float32)\nsizes = np.array([3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 
10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n axes=axes,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_axes_3_2\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_extrapolation_value", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n extrapolation_value=10.0,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 1.2, 1.7], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 10. 10. ]\n# [12.400001 10. 10. ]\n# [10. 10. 10. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n extrapolation_value=10.0,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_extrapolation_value\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.47265625 0.76953125 1.24609375 1.875 2.28125\n# 2.91015625 3.38671875 3.68359375]\n# [ 1.66015625 1.95703125 2.43359375 3.0625 3.46875\n# 4.09765625 4.57421875 4.87109375]\n# [ 3.56640625 3.86328125 4.33984375 4.96875 5.375\n# 6.00390625 6.48046875 6.77734375]\n# [ 6.08203125 6.37890625 6.85546875 7.484375 7.890625\n# 8.51953125 8.99609375 9.29296875]\n# [ 7.70703125 8.00390625 8.48046875 9.109375 9.515625\n# 10.14453125 10.62109375 10.91796875]\n# [10.22265625 10.51953125 10.99609375 11.625 12.03125\n# 12.66015625 13.13671875 13.43359375]\n# [12.12890625 12.42578125 12.90234375 13.53125 13.9375\n# 14.56640625 15.04296875 15.33984375]\n# [13.31640625 13.61328125 14.08984375 14.71875 15.125\n# 15.75390625 16.23046875 16.52734375]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_A_n0p5_exclude_outside", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n cubic_coeff_a=-0.5,\n exclude_outside=True,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.55882353 0.81494204 1.35698249 1.89705882 2.39705882\n# 2.93713516 3.47917561 3.73529412]\n# [ 1.58329755 1.83941606 2.38145651 2.92153285 3.42153285\n# 3.96160918 4.50364964 4.75976814]\n# [ 3.75145936 4.00757787 4.54961832 5.08969466 5.58969466\n# 6.12977099 6.67181144 6.92792995]\n# [ 5.91176471 6.16788321 6.70992366 7.25 7.75\n# 8.29007634 
8.83211679 9.08823529]\n# [ 7.91176471 8.16788321 8.70992366 9.25 9.75\n# 10.29007634 10.83211679 11.08823529]\n# [10.07207005 10.32818856 10.87022901 11.41030534 11.91030534\n# 12.45038168 12.99242213 13.24854064]\n# [12.24023186 12.49635036 13.03839082 13.57846715 14.07846715\n# 14.61854349 15.16058394 15.41670245]\n# [13.26470588 13.52082439 14.06286484 14.60294118 15.10294118\n# 15.64301751 16.18505796 16.44117647]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.5),\n scale_factors=scales,\n exclude_outside=True,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_A_n0p5_exclude_outside\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.34110787 1.80029155 2.32944606 2.67055394\n# 3.19970845 3.65889213 4. ]\n# [ 2.36443149 2.70553936 3.16472303 3.69387755 4.03498542\n# 4.56413994 5.02332362 5.36443149]\n# [ 4.20116618 4.54227405 5.00145773 5.53061224 5.87172012\n# 6.40087464 6.86005831 7.20116618]\n# [ 6.31778426 6.65889213 7.1180758 7.64723032 7.98833819\n# 8.51749271 8.97667638 9.31778426]\n# [ 7.68221574 8.02332362 8.48250729 9.01166181 9.35276968\n# 9.8819242 10.34110787 10.68221574]\n# [ 9.79883382 10.13994169 10.59912536 11.12827988 11.46938776\n# 11.99854227 12.45772595 12.79883382]\n# [11.63556851 11.97667638 12.43586006 12.96501458 13.30612245\n# 13.83527697 14.29446064 14.63556851]\n# [13. 13.34110787 13.80029155 14.32944606 14.67055394\n# 15.19970845 15.65889213 16. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_align_corners\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_asymmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"asymmetric\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.40625 2. 2.5 3. 3.59375 4.\n# 4.09375]\n# [ 2.625 3.03125 3.625 4.125 4.625 5.21875 5.625\n# 5.71875]\n# [ 5. 5.40625 6. 6.5 7. 7.59375 8.\n# 8.09375]\n# [ 7. 7.40625 8. 8.5 9. 9.59375 10.\n# 10.09375]\n# [ 9. 9.40625 10. 10.5 11. 11.59375 12.\n# 12.09375]\n# [11.375 11.78125 12.375 12.875 13.375 13.96875 14.375\n# 14.46875]\n# [13. 13.40625 14. 14.5 15. 
15.59375 16.\n# 16.09375]\n# [13.375 13.78125 14.375 14.875 15.375 15.96875 16.375\n# 16.46875]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.75),\n scale_factors=scales,\n coordinate_transformation_mode=\"asymmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_asymmetric\",\n)" + }, + { + "summary": "resize_upsample_scales_linear", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.25 1.75 2. ]\n# [1.5 1.75 2.25 2.5 ]\n# [2.5 2.75 3.25 3.5 ]\n# [3. 3.25 3.75 4. ]]]]\noutput = interpolate_nd(\n data, lambda x, _: linear_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear\",\n)" + }, + { + "summary": "resize_upsample_scales_linear_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.33333333 1.66666667 2. ]\n# [1.66666667 2. 2.33333333 2.66666667]\n# [2.33333333 2.66666667 3. 3.33333333]\n# [3. 3.33333333 3.66666667 4. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear_align_corners\",\n)" + }, + { + "summary": "resize_upsample_scales_linear_half_pixel_symmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n)\n\ndata = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.3, 2.94], dtype=np.float32)\n\n# [[[[1. , 1.15986395, 1.5 , 1.84013605, 2. ],\n# [1.56521738, 1.72508133, 2.06521738, 2.40535343, 2.56521738],\n# [2.43478262, 2.59464657, 2.93478262, 3.27491867, 3.43478262],\n# [3. , 3.15986395, 3.5 , 3.84013605, 4. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear_half_pixel_symmetric\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 
4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest_axes_2_3\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([3.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest_axes_3_2\",\n)" + }, + { + "summary": "resize_upsample_sizes_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 9, 10], dtype=np.int64)\n\n# [[[[ 0.45507922 0.64057922 0.97157922 1.42257922 1.90732922\n# 2.22332922 2.70807922 3.15907922 3.49007922 3.67557922]\n# [ 1.39437963 1.57987963 1.91087963 2.36187963 2.84662963\n# 3.16262963 3.64737963 4.09837963 4.42937963 4.61487963]\n# [ 2.95130693 3.13680693 3.46780693 3.91880693 4.40355693\n# 4.71955693 5.20430693 5.65530693 5.98630693 6.17180693]\n# [ 5.20525069 5.39075069 5.72175069 6.17275069 6.65750069\n# 6.97350069 7.45825069 7.90925069 8.24025069 8.42575069]\n# [ 6.88975 7.07525 7.40625 7.85725 8.342\n# 8.658 9.14275 9.59375 9.92475 10.11025 ]\n# [ 8.57424931 8.75974931 9.09074931 9.54174931 10.02649931\n# 10.34249931 10.82724931 11.27824931 11.60924931 11.79474931]\n# [10.82819307 11.01369307 11.34469307 11.79569307 12.28044307\n# 12.59644307 13.08119307 13.53219307 13.86319307 14.04869307]\n# [12.38512037 12.57062037 12.90162037 13.35262037 13.83737037\n# 14.15337037 14.63812037 15.08912037 15.42012037 15.60562037]\n# [13.32442078 13.50992078 13.84092078 14.29192078 14.77667078\n# 15.09267078 15.57742078 16.02842078 16.35942078 16.54492078]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_cubic\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n 
[1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_axes_2_3\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([8, 7], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_axes_3_2\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_ceil_half_pixel", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"half_pixel\",\n nearest_mode=\"ceil\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 
16.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x, mode=\"ceil\"), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_ceil_half_pixel\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_floor_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"align_corners\",\n nearest_mode=\"floor\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [13. 13. 13. 14. 14. 15. 15. 16.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x, mode=\"floor\"),\n output_size=sizes,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_floor_align_corners\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_not_larger", + "code": "keep_aspect_ratio_policy = \"not_larger\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64) # Results in 7x7\n\n# [[[[1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_not_larger\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_not_smaller", + "code": "keep_aspect_ratio_policy = \"not_smaller\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64) # Results in 8x8\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 
4.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_not_smaller\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"asymmetric\",\n nearest_mode=\"round_prefer_ceil\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x, mode=\"round_prefer_ceil\"),\n output_size=sizes,\n coordinate_transformation_mode=\"asymmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric\",\n)" + } + ] + }, + { + "name": "Resize", + "module": "ai.onnx", + "version": 18, + "description": "Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of a neighborhood (a.k.a. sampling locations) in the input tensor.\nEach dimension value of the output tensor is:
\n `output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)`
\nif input \\\"sizes\\\" is not specified.\n", + "attributes": [ + { + "name": "antialias", + "type": "int64", + "required": false, + "description": "If set to 1, \"linear\" and \"cubic\" interpolation modes will use an antialiasing filter when downscaling. Antialiasing is achieved by stretching the resampling filter by a factor max(1, 1 / scale), which means that when downsampling, more input pixels contribute to an output pixel." + }, + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "If provided, it specifies a subset of axes that 'roi', 'scales' and 'sizes' refer to. If not provided, all axes are assumed [0, 1, ..., r-1], where r = rank(data). Non-specified dimensions are interpreted as non-resizable. Negative value means counting dimensions from the back. Accepted range is [-r, r-1], where r = rank(data). Behavior is undefined if an axis is repeated." + }, + { + "name": "coordinate_transformation_mode", + "type": "string", + "required": false, + "default": "half_pixel", + "description": "\nThis attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.
\n\nThe coordinate of each dimension is transformed individually. Let's describe a case using axis x as an example.\nDenote `x_resized` as the coordinate of axis x in the resized tensor, `x_original` as the coordinate of axis x in the original tensor, `length_original` as the length of the original tensor in axis x, `length_resized` as the length of the resized tensor in axis x, `roi_x = (start_x, end_x)` of the axis x in input \"roi\", `scale = length_resized / length_original`,
\n\nif coordinate_transformation_mode is `\"half_pixel\"`,
\n`x_original = (x_resized + 0.5) / scale - 0.5`
\n\nif coordinate_transformation_mode is `\"pytorch_half_pixel\"`,
\n`x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0`
\n\nif coordinate_transformation_mode is `\"align_corners\"`,
\n`x_original = x_resized * (length_original - 1) / (length_resized - 1)`
\n\nif coordinate_transformation_mode is `\"asymmetric\"`,
\n`x_original = x_resized / scale`
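The four mappings listed so far are small enough to collect into one helper; the `"tf_crop_and_resize"` case, which additionally needs `start_x`/`end_x` from `roi`, follows below. A minimal sketch with illustrative names, not ONNX reference code:

```python
def map_coordinate(x_resized, scale, length_original, length_resized, mode):
    # Transform a coordinate in the resized tensor back into the original
    # tensor, following the formulas above.
    if mode == "half_pixel":
        return (x_resized + 0.5) / scale - 0.5
    if mode == "pytorch_half_pixel":
        return (x_resized + 0.5) / scale - 0.5 if length_resized > 1 else 0.0
    if mode == "align_corners":
        return x_resized * (length_original - 1) / (length_resized - 1)
    if mode == "asymmetric":
        return x_resized / scale
    raise ValueError(f"unhandled mode: {mode}")

# Upsampling a length-2 axis to length 4 (scale = 2):
assert map_coordinate(1, 2.0, 2, 4, "half_pixel") == 0.25
assert map_coordinate(3, 2.0, 2, 4, "align_corners") == 1.0
```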
\n\nif coordinate_transformation_mode is `\"tf_crop_and_resize\"`,
\n`x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)`\n." }, + { + "name": "cubic_coeff_a", + "type": "float32", + "required": false, + "default": -0.75, + "description": "The coefficient 'a' used in cubic interpolation. Two common choices are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if mode is \"cubic\"." + }, + { + "name": "exclude_outside", + "type": "int64", + "required": false, + "description": "If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0." + }, + { + "name": "extrapolation_value", + "type": "float32", + "required": false, + "description": "When coordinate_transformation_mode is \"tf_crop_and_resize\" and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f." + }, + { + "name": "keep_aspect_ratio_policy", + "type": "string", + "required": false, + "default": "stretch", + "description": "\nThis attribute describes how to interpret the `sizes` input with regard to keeping the original aspect ratio of the input, and it is not applicable when\nthe `scales` input is used.
\n\nGiven a set of `sizes`, associated with a subset of `axes` (explicitly provided or default), let `d = axes[i]`, with `i` being the index of the provided `sizes`.
\n\nIf `keep_aspect_ratio_policy` is `\"stretch\"`, the original aspect ratio is disregarded, and the input is resized to the specified size:
\n`out_size[d] = sizes[i]`
\n\nIf `keep_aspect_ratio_policy` is `\"not_larger\"`, the sizes are adjusted so that no extent of the output is larger than the specified size, while keeping the original aspect ratio:
\n`scale = Min(sizes[i] / in_size[d])`
\n`out_size[d] = round_int(scale * in_size[d])`
\n\nIf `keep_aspect_ratio_policy` is `\"not_smaller\"`, the sizes are adjusted so that no extent of the output is smaller than the specified size, while keeping the original aspect ratio:
\n`scale = Max(sizes[i] / in_size[d])`
\n`out_size[d] = round_int(scale * in_size[d])`
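A sketch of the three policies above, with illustrative names (`d = axes[i]` as in the text, and `round_int` rounding halfway cases up, per the note that follows):

```python
import math

def adjust_sizes(in_size, sizes, axes, policy):
    out_size = list(in_size)  # non-resizable axes keep the input size
    if policy == "stretch":
        for i, d in enumerate(axes):
            out_size[d] = sizes[i]
        return out_size
    ratios = [sizes[i] / in_size[d] for i, d in enumerate(axes)]
    scale = min(ratios) if policy == "not_larger" else max(ratios)
    for d in axes:
        out_size[d] = int(math.floor(scale * in_size[d] + 0.5))  # round half up
    return out_size

# Matches the "Results in 7x7" / "Results in 8x8" comments in the upsample examples:
assert adjust_sizes([1, 1, 2, 2], [7, 8], [2, 3], "not_larger") == [1, 1, 7, 7]
assert adjust_sizes([1, 1, 2, 2], [7, 8], [2, 3], "not_smaller") == [1, 1, 8, 8]
```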
\n\nFor non-resizable axes (those not specified in `axes`), the output size will be equal to the input size.\n\nNote: `round_int` stands for computing the nearest integer value, rounding halfway cases up." + }, + { + "name": "mode", + "type": "string", + "required": false, + "default": "nearest", + "description": "Three interpolation modes: \"nearest\" (default), \"linear\" and \"cubic\". The \"linear\" mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The \"cubic\" mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor)." + }, + { + "name": "nearest_mode", + "type": "string", + "required": false, + "default": "round_prefer_floor", + "description": "Four modes: \"round_prefer_floor\" (default, also known as round half down), \"round_prefer_ceil\" (also known as round half up), \"floor\", \"ceil\". Only used by nearest interpolation. It indicates how to get \"nearest\" pixel in input tensor from x_original, so this attribute is valid only if \"mode\" is \"nearest\"." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "N-D tensor" + }, + { + "name": "roi", + "type": "T2", + "option": "optional", + "description": "1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X or the length of axes, if provided. The RoIs' coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is \"tf_crop_and_resize\"" + }, + { + "name": "scales", + "type": "tensor(float)", + "option": "optional", + "description": "The scale array along each dimension. It takes values greater than 0. If it's less than 1, it's downsampling; otherwise, it's upsampling. The number of elements of 'scales' should be the same as the rank of input 'X' or the length of 'axes', if provided. One of 'scales' and 'sizes' MUST be specified and it is an error if both are specified. If 'sizes' is needed, the user can use an empty string as the name of 'scales' in this operator's input list." + }, + { + "name": "sizes", + "type": "tensor(int64)", + "option": "optional", + "description": "Target size of the output tensor. Its interpretation depends on the 'keep_aspect_ratio_policy' value. The number of elements of 'sizes' should be the same as the rank of input 'X', or the length of 'axes', if provided. Only one of 'scales' and 'sizes' can be specified. 
" + } + ], + "min_input": 1, + "max_input": 4, + "outputs": [ + { + "name": "Y", + "type": "T1", + "description": "N-D tensor after resizing" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 4", + "type_constraints": [ + { + "description": "Constrain input 'X' and output 'Y' to all tensor types.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain roi type to float or double.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "resize_downsample_scales_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.47119141 2.78125 4.08251953]\n# [ 6.71142578 8.02148438 9.32275391]\n# [11.91650391 13.2265625 14.52783203]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_A_n0p5_exclude_outside", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n cubic_coeff_a=-0.5,\n exclude_outside=True,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.36812675 2.6695014 4.0133367 ]\n# [ 6.57362535 7.875 9.2188353 ]\n# [11.94896657 13.25034122 14.59417652]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.5),\n scale_factors=scales,\n exclude_outside=True,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_A_n0p5_exclude_outside\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1. 
2.39519159 3.79038317]\n# [ 6.58076634 7.97595793 9.37114951]\n# [12.16153268 13.55672427 14.95191585]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_align_corners\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[ 2.5180721 4.2858863]\n# [ 9.589329 11.357142 ]]]]\noutput = interpolate_nd(\n data, cubic_coeffs_antialias, scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_antialias\",\n)" + }, + { + "summary": "resize_downsample_scales_linear", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[2.6666665 4.3333331]]]]\noutput = interpolate_nd(\n data, lambda x, _: linear_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.142857]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_align_corners\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[ 2.875 4.5 ]\n# [ 9.375 11. 
]]]]\noutput = interpolate_nd(\n data, linear_coeffs_antialias, scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_antialias\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_half_pixel_symmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n)\n\ndata = np.array([[[[1, 2, 3, 4]]]], dtype=np.float32)\nscales = np.array([1.0, 1.0, 1.0, 0.6], dtype=np.float32)\n\n# [[[[1.6666667, 3.3333333]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_half_pixel_symmetric\",\n)" + }, + { + "summary": "resize_downsample_scales_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_nearest\",\n)" + }, + { + "summary": "resize_downsample_sizes_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.63078704 3.00462963 4.37847222]\n# [ 7.12615741 8.5 9.87384259]\n# [12.62152778 13.99537037 15.36921296]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_cubic\",\n)" + }, + { + "summary": "resize_downsample_sizes_cubic_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.7750092 3.1200073 4.4650054]\n# [ 7.1550016 8.5 9.844998 ]\n# [12.534994 13.8799925 15.224991 ]]]]\noutput = interpolate_nd(data, cubic_coeffs_antialias, output_size=sizes).astype(\n np.float32\n)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_cubic_antialias\",\n)" + }, + { + "summary": "resize_downsample_sizes_linear_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 2.3636363 3.590909 4.818182 ]\n# [ 7.2727275 8.5 9.727273 ]\n# [12.181818 13.409091 14.636364 ]]]]\noutput = 
interpolate_nd(\n data, linear_coeffs_antialias, output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_linear_antialias\",\n)" + }, + { + "summary": "resize_downsample_sizes_linear_pytorch_half_pixel", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"pytorch_half_pixel\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 1], dtype=np.int64)\n\n# [[[[ 1.6666666]\n# [ 7. ]\n# [12.333333 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n coordinate_transformation_mode=\"pytorch_half_pixel\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_linear_pytorch_half_pixel\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 1, 3], dtype=np.int64)\n\n# [[[[1. 2. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest_not_larger", + "code": "keep_aspect_ratio_policy = \"not_larger\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 3], dtype=np.int64) # Results in 1x2\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest_not_larger\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest_not_smaller", + "code": "keep_aspect_ratio_policy = \"not_smaller\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 3], dtype=np.int64) # Results in 2x3\n\n# [[[[1. 2. 4.]\n# [5. 6. 
8.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest_not_smaller\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 0.6, 0.8], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0.4, 0.6, 0.6, 0.8], dtype=np.float32)\nsizes = np.array([3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n axes=axes,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_axes_2_3\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0.6, 0.4, 0.8, 0.6], dtype=np.float32)\nsizes = np.array([3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 
10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n axes=axes,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_axes_3_2\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_extrapolation_value", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n extrapolation_value=10.0,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 1.2, 1.7], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 10. 10. ]\n# [12.400001 10. 10. ]\n# [10. 10. 10. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n extrapolation_value=10.0,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_extrapolation_value\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.47265625 0.76953125 1.24609375 1.875 2.28125\n# 2.91015625 3.38671875 3.68359375]\n# [ 1.66015625 1.95703125 2.43359375 3.0625 3.46875\n# 4.09765625 4.57421875 4.87109375]\n# [ 3.56640625 3.86328125 4.33984375 4.96875 5.375\n# 6.00390625 6.48046875 6.77734375]\n# [ 6.08203125 6.37890625 6.85546875 7.484375 7.890625\n# 8.51953125 8.99609375 9.29296875]\n# [ 7.70703125 8.00390625 8.48046875 9.109375 9.515625\n# 10.14453125 10.62109375 10.91796875]\n# [10.22265625 10.51953125 10.99609375 11.625 12.03125\n# 12.66015625 13.13671875 13.43359375]\n# [12.12890625 12.42578125 12.90234375 13.53125 13.9375\n# 14.56640625 15.04296875 15.33984375]\n# [13.31640625 13.61328125 14.08984375 14.71875 15.125\n# 15.75390625 16.23046875 16.52734375]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_A_n0p5_exclude_outside", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n cubic_coeff_a=-0.5,\n exclude_outside=True,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.55882353 0.81494204 1.35698249 1.89705882 2.39705882\n# 2.93713516 3.47917561 3.73529412]\n# [ 1.58329755 1.83941606 2.38145651 2.92153285 3.42153285\n# 3.96160918 4.50364964 4.75976814]\n# [ 3.75145936 4.00757787 4.54961832 5.08969466 5.58969466\n# 6.12977099 6.67181144 6.92792995]\n# [ 5.91176471 6.16788321 6.70992366 7.25 7.75\n# 8.29007634 
8.83211679 9.08823529]\n# [ 7.91176471 8.16788321 8.70992366 9.25 9.75\n# 10.29007634 10.83211679 11.08823529]\n# [10.07207005 10.32818856 10.87022901 11.41030534 11.91030534\n# 12.45038168 12.99242213 13.24854064]\n# [12.24023186 12.49635036 13.03839082 13.57846715 14.07846715\n# 14.61854349 15.16058394 15.41670245]\n# [13.26470588 13.52082439 14.06286484 14.60294118 15.10294118\n# 15.64301751 16.18505796 16.44117647]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.5),\n scale_factors=scales,\n exclude_outside=True,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_A_n0p5_exclude_outside\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.34110787 1.80029155 2.32944606 2.67055394\n# 3.19970845 3.65889213 4. ]\n# [ 2.36443149 2.70553936 3.16472303 3.69387755 4.03498542\n# 4.56413994 5.02332362 5.36443149]\n# [ 4.20116618 4.54227405 5.00145773 5.53061224 5.87172012\n# 6.40087464 6.86005831 7.20116618]\n# [ 6.31778426 6.65889213 7.1180758 7.64723032 7.98833819\n# 8.51749271 8.97667638 9.31778426]\n# [ 7.68221574 8.02332362 8.48250729 9.01166181 9.35276968\n# 9.8819242 10.34110787 10.68221574]\n# [ 9.79883382 10.13994169 10.59912536 11.12827988 11.46938776\n# 11.99854227 12.45772595 12.79883382]\n# [11.63556851 11.97667638 12.43586006 12.96501458 13.30612245\n# 13.83527697 14.29446064 14.63556851]\n# [13. 13.34110787 13.80029155 14.32944606 14.67055394\n# 15.19970845 15.65889213 16. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_align_corners\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_asymmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"asymmetric\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.40625 2. 2.5 3. 3.59375 4.\n# 4.09375]\n# [ 2.625 3.03125 3.625 4.125 4.625 5.21875 5.625\n# 5.71875]\n# [ 5. 5.40625 6. 6.5 7. 7.59375 8.\n# 8.09375]\n# [ 7. 7.40625 8. 8.5 9. 9.59375 10.\n# 10.09375]\n# [ 9. 9.40625 10. 10.5 11. 11.59375 12.\n# 12.09375]\n# [11.375 11.78125 12.375 12.875 13.375 13.96875 14.375\n# 14.46875]\n# [13. 13.40625 14. 14.5 15. 
15.59375 16.\n# 16.09375]\n# [13.375 13.78125 14.375 14.875 15.375 15.96875 16.375\n# 16.46875]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.75),\n scale_factors=scales,\n coordinate_transformation_mode=\"asymmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_asymmetric\",\n)" + }, + { + "summary": "resize_upsample_scales_linear", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.25 1.75 2. ]\n# [1.5 1.75 2.25 2.5 ]\n# [2.5 2.75 3.25 3.5 ]\n# [3. 3.25 3.75 4. ]]]]\noutput = interpolate_nd(\n data, lambda x, _: linear_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear\",\n)" + }, + { + "summary": "resize_upsample_scales_linear_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.33333333 1.66666667 2. ]\n# [1.66666667 2. 2.33333333 2.66666667]\n# [2.33333333 2.66666667 3. 3.33333333]\n# [3. 3.33333333 3.66666667 4. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear_align_corners\",\n)" + }, + { + "summary": "resize_upsample_scales_linear_half_pixel_symmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n)\n\ndata = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.3, 2.94], dtype=np.float32)\n\n# [[[[1. , 1.15986395, 1.5 , 1.84013605, 2. ],\n# [1.56521738, 1.72508133, 2.06521738, 2.40535343, 2.56521738],\n# [2.43478262, 2.59464657, 2.93478262, 3.27491867, 3.43478262],\n# [3. , 3.15986395, 3.5 , 3.84013605, 4. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear_half_pixel_symmetric\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 
4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest_axes_2_3\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([3.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest_axes_3_2\",\n)" + }, + { + "summary": "resize_upsample_sizes_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 9, 10], dtype=np.int64)\n\n# [[[[ 0.45507922 0.64057922 0.97157922 1.42257922 1.90732922\n# 2.22332922 2.70807922 3.15907922 3.49007922 3.67557922]\n# [ 1.39437963 1.57987963 1.91087963 2.36187963 2.84662963\n# 3.16262963 3.64737963 4.09837963 4.42937963 4.61487963]\n# [ 2.95130693 3.13680693 3.46780693 3.91880693 4.40355693\n# 4.71955693 5.20430693 5.65530693 5.98630693 6.17180693]\n# [ 5.20525069 5.39075069 5.72175069 6.17275069 6.65750069\n# 6.97350069 7.45825069 7.90925069 8.24025069 8.42575069]\n# [ 6.88975 7.07525 7.40625 7.85725 8.342\n# 8.658 9.14275 9.59375 9.92475 10.11025 ]\n# [ 8.57424931 8.75974931 9.09074931 9.54174931 10.02649931\n# 10.34249931 10.82724931 11.27824931 11.60924931 11.79474931]\n# [10.82819307 11.01369307 11.34469307 11.79569307 12.28044307\n# 12.59644307 13.08119307 13.53219307 13.86319307 14.04869307]\n# [12.38512037 12.57062037 12.90162037 13.35262037 13.83737037\n# 14.15337037 14.63812037 15.08912037 15.42012037 15.60562037]\n# [13.32442078 13.50992078 13.84092078 14.29192078 14.77667078\n# 15.09267078 15.57742078 16.02842078 16.35942078 16.54492078]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_cubic\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n 
[1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_axes_2_3\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([8, 7], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_axes_3_2\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_ceil_half_pixel", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"half_pixel\",\n nearest_mode=\"ceil\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 
16.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x, mode=\"ceil\"), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_ceil_half_pixel\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_floor_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"align_corners\",\n nearest_mode=\"floor\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [13. 13. 13. 14. 14. 15. 15. 16.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x, mode=\"floor\"),\n output_size=sizes,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_floor_align_corners\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_not_larger", + "code": "keep_aspect_ratio_policy = \"not_larger\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64) # Results in 7x7\n\n# [[[[1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_not_larger\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_not_smaller", + "code": "keep_aspect_ratio_policy = \"not_smaller\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64) # Results in 8x8\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 
4.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_not_smaller\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"asymmetric\",\n nearest_mode=\"round_prefer_ceil\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x, mode=\"round_prefer_ceil\"),\n output_size=sizes,\n coordinate_transformation_mode=\"asymmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric\",\n)" + } + ] + }, + { + "name": "Resize", + "module": "ai.onnx", + "version": 19, + "description": "Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor.\nEach dimension value of the output tensor is:\n```\noutput_dimension = floor(input_dimension * (roi_end - roi_start) * scale)\n```\nif input \\"sizes\\" is not specified.\n", + "attributes": [ + { + "name": "antialias", + "type": "int64", + "required": false, + "description": "If set to 1, \"linear\" and \"cubic\" interpolation modes will use an antialiasing filter when downscaling. Antialiasing is achieved by stretching the resampling filter by a factor max(1, 1 / scale), which means that when downsampling, more input pixels contribute to an output pixel." + }, + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "If provided, it specifies a subset of axes that 'roi', 'scales' and 'sizes' refer to. If not provided, all axes are assumed [0, 1, ..., r-1], where r = rank(data). Non-specified dimensions are interpreted as non-resizable. Negative value means counting dimensions from the back. Accepted range is [-r, r-1], where r = rank(data). Behavior is undefined if an axis is repeated." + }, + { + "name": "coordinate_transformation_mode", + "type": "string", + "required": false, + "default": "half_pixel", + "description": "\nThis attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.\n\nThe coordinate of each dimension is transformed individually. 
Let's describe a case using axis x as an example.\nDenote `x_resized` as the coordinate of axis x in the resized tensor,\n `x_original` as the coordinate of axis x in the original tensor,\n `length_original` as the length of the original tensor in axis x,\n `length_resized` as the length of the resized tensor in axis x,\n `scale = length_resized / length_original`,\n `output_width` the target length on the axis x which can be a fractional number when it is calculated out of a scale factor,\n and `output_width_int` the effective output width as an integer.\n\nif coordinate_transformation_mode is `\"half_pixel\"`,\n```\nx_original = (x_resized + 0.5) / scale - 0.5\n```\n\nif coordinate_transformation_mode is `\"half_pixel_symmetric\"`,\n```\nadjustment = output_width_int / output_width\ncenter = length_original / 2\noffset = center * (1 - adjustment)\nx_original = offset + (x_resized + 0.5) / scale - 0.5\n```\n\nif coordinate_transformation_mode is `\"pytorch_half_pixel\"`,\n```\nx_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0\n```\n\nif coordinate_transformation_mode is `\"align_corners\"`,\n```\nx_original = x_resized * (length_original - 1) / (length_resized - 1)\n```\n\nif coordinate_transformation_mode is `\"asymmetric\"`,\n```\nx_original = x_resized / scale\n```\n\nif coordinate_transformation_mode is `\"tf_crop_and_resize\"`,\n```\nx_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)\n```\n." }, + { + "name": "cubic_coeff_a", + "type": "float32", + "required": false, + "default": -0.75, + "description": "The coefficient 'a' used in cubic interpolation. Two common choices are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if mode is \"cubic\"." + }, + { + "name": "exclude_outside", + "type": "int64", + "required": false, + "description": "If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0." + }, + { + "name": "extrapolation_value", + "type": "float32", + "required": false, + "description": "When coordinate_transformation_mode is \"tf_crop_and_resize\" and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f." 
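The `"half_pixel_symmetric"` mapping above is the one mode in this version's list with no counterpart in the version-18 entry. A minimal numeric sketch (illustrative helper, assuming `output_width` comes from a fractional scale factor):

```python
def half_pixel_symmetric(x_resized, scale, length_original, output_width_int):
    # Follows the pseudocode above: center the (possibly truncated) output
    # window symmetrically inside the input before the half_pixel mapping.
    output_width = scale * length_original
    adjustment = output_width_int / output_width
    center = length_original / 2
    offset = center * (1 - adjustment)
    return offset + (x_resized + 0.5) / scale - 0.5

# Length-4 axis at scale 0.6: output_width = 2.4, truncated to 2 output pixels.
# x_original is ~0.6667 and ~2.3333, which is what produces the
# [1.6666667, 3.3333333] row in the half_pixel_symmetric example below.
print(half_pixel_symmetric(0, 0.6, 4, 2), half_pixel_symmetric(1, 0.6, 4, 2))
```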
+ }, + { + "name": "keep_aspect_ratio_policy", + "type": "string", + "required": false, + "default": "stretch", + "description": "\nThis attribute describes how to interpret the `sizes` input with regard to keeping the original aspect ratio of the input, and it is not applicable when\nthe `scales` input is used.\n\nGiven a set of `sizes`, associated with a subset of `axes` (explicitly provided or default), and assuming `d = axes[i]`, with `i` being the index of the provided `sizes`.\n\nIf `keep_aspect_ratio_policy` is `\"stretch\"`, the original aspect ratio is disregarded, and the input is resized to the specified size:\n`out_size[d] = sizes[i]`\n\nIf `keep_aspect_ratio_policy` is `\"not_larger\"`, the sizes are adjusted so that no extent of the output is larger than the specified size, while keeping the original aspect ratio:\n```\nscale = Min(sizes[i] / in_size[d])\nout_size[d] = round_int(scale * in_size[i])\n```\n\nIf `keep_aspect_ratio_policy` is `\"not_smaller\"`, the sizes are adjusted so that no extent of the output is smaller than the specified size, while keeping the original aspect ratio:\n```\nscale = Max(sizes[i] / in_size[d])\nout_size[d] = round_int(scale * in_size[i])\n```\n\nFor non-resizable axes (those not specified in `axes`), the output size will be equal to the input size.\n\nNote: `round_int` stands for computing the nearest integer value, rounding halfway cases up." + }, + { + "name": "mode", + "type": "string", + "required": false, + "default": "nearest", + "description": "Three interpolation modes: \"nearest\" (default), \"linear\" and \"cubic\". The \"linear\" mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The \"cubic\" mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor)." + }, + { + "name": "nearest_mode", + "type": "string", + "required": false, + "default": "round_prefer_floor", + "description": "Four modes: \"round_prefer_floor\" (default, as known as round half down), \"round_prefer_ceil\" (as known as round half up), \"floor\", \"ceil\". Only used by nearest interpolation. It indicates how to get \"nearest\" pixel in input tensor from x_original, so this attribute is valid only if \"mode\" is \"nearest\"." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "N-D tensor" + }, + { + "name": "roi", + "type": "T2", + "option": "optional", + "description": "1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X or the length of axes, if provided. The RoIs' coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is \"tf_crop_and_resize\"" + }, + { + "name": "scales", + "type": "tensor(float)", + "option": "optional", + "description": "The scale array along each dimension. It takes value greater than 0. If it's less than 1, it's sampling down, otherwise, it's upsampling. The number of elements of 'scales' should be the same as the rank of input 'X' or the length of 'axes', if provided. One of 'scales' and 'sizes' MUST be specified and it is an error if both are specified. If 'sizes' is needed, the user can use an empty string as the name of 'scales' in this operator's input list." + }, + { + "name": "sizes", + "type": "tensor(int64)", + "option": "optional", + "description": "Target size of the output tensor. 
Its interpretation depends on the 'keep_aspect_ratio_policy' value. The number of elements of 'sizes' should be the same as the rank of input 'X', or the length of 'axes', if provided. Only one of 'scales' and 'sizes' can be specified. " + } + ], + "min_input": 1, + "max_input": 4, + "outputs": [ + { + "name": "Y", + "type": "T1", + "description": "N-D tensor after resizing" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 4", + "type_constraints": [ + { + "description": "Constrain input 'X' and output 'Y' to all tensor types.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain roi type to float or double.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "resize_downsample_scales_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.47119141 2.78125 4.08251953]\n# [ 6.71142578 8.02148438 9.32275391]\n# [11.91650391 13.2265625 14.52783203]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_A_n0p5_exclude_outside", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n cubic_coeff_a=-0.5,\n exclude_outside=True,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.36812675 2.6695014 4.0133367 ]\n# [ 6.57362535 7.875 9.2188353 ]\n# [11.94896657 13.25034122 14.59417652]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.5),\n scale_factors=scales,\n exclude_outside=True,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_A_n0p5_exclude_outside\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1. 
2.39519159 3.79038317]\n# [ 6.58076634 7.97595793 9.37114951]\n# [12.16153268 13.55672427 14.95191585]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_align_corners\",\n)" + }, + { + "summary": "resize_downsample_scales_cubic_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[ 2.5180721 4.2858863]\n# [ 9.589329 11.357142 ]]]]\noutput = interpolate_nd(\n data, cubic_coeffs_antialias, scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_cubic_antialias\",\n)" + }, + { + "summary": "resize_downsample_scales_linear", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[2.6666665 4.3333331]]]]\noutput = interpolate_nd(\n data, lambda x, _: linear_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.142857]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_align_corners\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[ 2.875 4.5 ]\n# [ 9.375 11. 
]]]]\noutput = interpolate_nd(\n data, linear_coeffs_antialias, scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_antialias\",\n)" + }, + { + "summary": "resize_downsample_scales_linear_half_pixel_symmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n)\n\ndata = np.array([[[[1, 2, 3, 4]]]], dtype=np.float32)\nscales = np.array([1.0, 1.0, 1.0, 0.6], dtype=np.float32)\n\n# [[[[1.6666667, 3.3333333]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_linear_half_pixel_symmetric\",\n)" + }, + { + "summary": "resize_downsample_scales_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_downsample_scales_nearest\",\n)" + }, + { + "summary": "resize_downsample_sizes_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.63078704 3.00462963 4.37847222]\n# [ 7.12615741 8.5 9.87384259]\n# [12.62152778 13.99537037 15.36921296]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_cubic\",\n)" + }, + { + "summary": "resize_downsample_sizes_cubic_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.7750092 3.1200073 4.4650054]\n# [ 7.1550016 8.5 9.844998 ]\n# [12.534994 13.8799925 15.224991 ]]]]\noutput = interpolate_nd(data, cubic_coeffs_antialias, output_size=sizes).astype(\n np.float32\n)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_cubic_antialias\",\n)" + }, + { + "summary": "resize_downsample_sizes_linear_antialias", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n antialias=1,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 2.3636363 3.590909 4.818182 ]\n# [ 7.2727275 8.5 9.727273 ]\n# [12.181818 13.409091 14.636364 ]]]]\noutput = 
interpolate_nd(\n data, linear_coeffs_antialias, output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_linear_antialias\",\n)" + }, + { + "summary": "resize_downsample_sizes_linear_pytorch_half_pixel", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"pytorch_half_pixel\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 3, 1], dtype=np.int64)\n\n# [[[[ 1.6666666]\n# [ 7. ]\n# [12.333333 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n coordinate_transformation_mode=\"pytorch_half_pixel\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_linear_pytorch_half_pixel\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 1, 3], dtype=np.int64)\n\n# [[[[1. 2. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest_not_larger", + "code": "keep_aspect_ratio_policy = \"not_larger\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 3], dtype=np.int64) # Results in 1x2\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest_not_larger\",\n)" + }, + { + "summary": "resize_downsample_sizes_nearest_not_smaller", + "code": "keep_aspect_ratio_policy = \"not_smaller\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 3], dtype=np.int64) # Results in 2x3\n\n# [[[[1. 2. 4.]\n# [5. 6. 
8.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_downsample_sizes_nearest_not_smaller\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 0.6, 0.8], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0.4, 0.6, 0.6, 0.8], dtype=np.float32)\nsizes = np.array([3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n axes=axes,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_axes_2_3\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0.6, 0.4, 0.8, 0.6], dtype=np.float32)\nsizes = np.array([3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 
10.3 10.6 ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n axes=axes,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_axes_3_2\",\n)" + }, + { + "summary": "resize_tf_crop_and_resize_extrapolation_value", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"roi\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n extrapolation_value=10.0,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 1.2, 1.7], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 10. 10. ]\n# [12.400001 10. 10. ]\n# [10. 10. 10. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n output_size=sizes,\n roi=roi,\n coordinate_transformation_mode=\"tf_crop_and_resize\",\n extrapolation_value=10.0,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, roi, sizes],\n outputs=[output],\n name=\"test_resize_tf_crop_and_resize_extrapolation_value\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.47265625 0.76953125 1.24609375 1.875 2.28125\n# 2.91015625 3.38671875 3.68359375]\n# [ 1.66015625 1.95703125 2.43359375 3.0625 3.46875\n# 4.09765625 4.57421875 4.87109375]\n# [ 3.56640625 3.86328125 4.33984375 4.96875 5.375\n# 6.00390625 6.48046875 6.77734375]\n# [ 6.08203125 6.37890625 6.85546875 7.484375 7.890625\n# 8.51953125 8.99609375 9.29296875]\n# [ 7.70703125 8.00390625 8.48046875 9.109375 9.515625\n# 10.14453125 10.62109375 10.91796875]\n# [10.22265625 10.51953125 10.99609375 11.625 12.03125\n# 12.66015625 13.13671875 13.43359375]\n# [12.12890625 12.42578125 12.90234375 13.53125 13.9375\n# 14.56640625 15.04296875 15.33984375]\n# [13.31640625 13.61328125 14.08984375 14.71875 15.125\n# 15.75390625 16.23046875 16.52734375]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_A_n0p5_exclude_outside", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n cubic_coeff_a=-0.5,\n exclude_outside=True,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.55882353 0.81494204 1.35698249 1.89705882 2.39705882\n# 2.93713516 3.47917561 3.73529412]\n# [ 1.58329755 1.83941606 2.38145651 2.92153285 3.42153285\n# 3.96160918 4.50364964 4.75976814]\n# [ 3.75145936 4.00757787 4.54961832 5.08969466 5.58969466\n# 6.12977099 6.67181144 6.92792995]\n# [ 5.91176471 6.16788321 6.70992366 7.25 7.75\n# 8.29007634 
8.83211679 9.08823529]\n# [ 7.91176471 8.16788321 8.70992366 9.25 9.75\n# 10.29007634 10.83211679 11.08823529]\n# [10.07207005 10.32818856 10.87022901 11.41030534 11.91030534\n# 12.45038168 12.99242213 13.24854064]\n# [12.24023186 12.49635036 13.03839082 13.57846715 14.07846715\n# 14.61854349 15.16058394 15.41670245]\n# [13.26470588 13.52082439 14.06286484 14.60294118 15.10294118\n# 15.64301751 16.18505796 16.44117647]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.5),\n scale_factors=scales,\n exclude_outside=True,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_A_n0p5_exclude_outside\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.34110787 1.80029155 2.32944606 2.67055394\n# 3.19970845 3.65889213 4. ]\n# [ 2.36443149 2.70553936 3.16472303 3.69387755 4.03498542\n# 4.56413994 5.02332362 5.36443149]\n# [ 4.20116618 4.54227405 5.00145773 5.53061224 5.87172012\n# 6.40087464 6.86005831 7.20116618]\n# [ 6.31778426 6.65889213 7.1180758 7.64723032 7.98833819\n# 8.51749271 8.97667638 9.31778426]\n# [ 7.68221574 8.02332362 8.48250729 9.01166181 9.35276968\n# 9.8819242 10.34110787 10.68221574]\n# [ 9.79883382 10.13994169 10.59912536 11.12827988 11.46938776\n# 11.99854227 12.45772595 12.79883382]\n# [11.63556851 11.97667638 12.43586006 12.96501458 13.30612245\n# 13.83527697 14.29446064 14.63556851]\n# [13. 13.34110787 13.80029155 14.32944606 14.67055394\n# 15.19970845 15.65889213 16. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_align_corners\",\n)" + }, + { + "summary": "resize_upsample_scales_cubic_asymmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n coordinate_transformation_mode=\"asymmetric\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.40625 2. 2.5 3. 3.59375 4.\n# 4.09375]\n# [ 2.625 3.03125 3.625 4.125 4.625 5.21875 5.625\n# 5.71875]\n# [ 5. 5.40625 6. 6.5 7. 7.59375 8.\n# 8.09375]\n# [ 7. 7.40625 8. 8.5 9. 9.59375 10.\n# 10.09375]\n# [ 9. 9.40625 10. 10.5 11. 11.59375 12.\n# 12.09375]\n# [11.375 11.78125 12.375 12.875 13.375 13.96875 14.375\n# 14.46875]\n# [13. 13.40625 14. 14.5 15. 
15.59375 16.\n# 16.09375]\n# [13.375 13.78125 14.375 14.875 15.375 15.96875 16.375\n# 16.46875]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: cubic_coeffs(x, A=-0.75),\n scale_factors=scales,\n coordinate_transformation_mode=\"asymmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_cubic_asymmetric\",\n)" + }, + { + "summary": "resize_upsample_scales_linear", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.25 1.75 2. ]\n# [1.5 1.75 2.25 2.5 ]\n# [2.5 2.75 3.25 3.5 ]\n# [3. 3.25 3.75 4. ]]]]\noutput = interpolate_nd(\n data, lambda x, _: linear_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear\",\n)" + }, + { + "summary": "resize_upsample_scales_linear_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"align_corners\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.33333333 1.66666667 2. ]\n# [1.66666667 2. 2.33333333 2.66666667]\n# [2.33333333 2.66666667 3. 3.33333333]\n# [3. 3.33333333 3.66666667 4. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear_align_corners\",\n)" + }, + { + "summary": "resize_upsample_scales_linear_half_pixel_symmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"linear\",\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n)\n\ndata = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.3, 2.94], dtype=np.float32)\n\n# [[[[1. , 1.15986395, 1.5 , 1.84013605, 2. ],\n# [1.56521738, 1.72508133, 2.06521738, 2.40535343, 2.56521738],\n# [2.43478262, 2.59464657, 2.93478262, 3.27491867, 3.43478262],\n# [3. , 3.15986395, 3.5 , 3.84013605, 4. ]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: linear_coeffs(x),\n scale_factors=scales,\n coordinate_transformation_mode=\"half_pixel_symmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_linear_half_pixel_symmetric\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 
4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest_axes_2_3\",\n)" + }, + { + "summary": "resize_upsample_scales_nearest_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([3.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_resize_upsample_scales_nearest_axes_3_2\",\n)" + }, + { + "summary": "resize_upsample_sizes_cubic", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"cubic\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 9, 10], dtype=np.int64)\n\n# [[[[ 0.45507922 0.64057922 0.97157922 1.42257922 1.90732922\n# 2.22332922 2.70807922 3.15907922 3.49007922 3.67557922]\n# [ 1.39437963 1.57987963 1.91087963 2.36187963 2.84662963\n# 3.16262963 3.64737963 4.09837963 4.42937963 4.61487963]\n# [ 2.95130693 3.13680693 3.46780693 3.91880693 4.40355693\n# 4.71955693 5.20430693 5.65530693 5.98630693 6.17180693]\n# [ 5.20525069 5.39075069 5.72175069 6.17275069 6.65750069\n# 6.97350069 7.45825069 7.90925069 8.24025069 8.42575069]\n# [ 6.88975 7.07525 7.40625 7.85725 8.342\n# 8.658 9.14275 9.59375 9.92475 10.11025 ]\n# [ 8.57424931 8.75974931 9.09074931 9.54174931 10.02649931\n# 10.34249931 10.82724931 11.27824931 11.60924931 11.79474931]\n# [10.82819307 11.01369307 11.34469307 11.79569307 12.28044307\n# 12.59644307 13.08119307 13.53219307 13.86319307 14.04869307]\n# [12.38512037 12.57062037 12.90162037 13.35262037 13.83737037\n# 14.15337037 14.63812037 15.08912037 15.42012037 15.60562037]\n# [13.32442078 13.50992078 13.84092078 14.29192078 14.77667078\n# 15.09267078 15.57742078 16.02842078 16.35942078 16.54492078]]]]\noutput = interpolate_nd(\n data, lambda x, _: cubic_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_cubic\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n 
[1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_axes_2_3", + "code": "axes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_axes_2_3\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_axes_3_2", + "code": "axes = [3, 2]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([8, 7], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_axes_3_2\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_ceil_half_pixel", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"half_pixel\",\n nearest_mode=\"ceil\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 
16.]]]]\noutput = interpolate_nd(\n data, lambda x, _: nearest_coeffs(x, mode=\"ceil\"), output_size=sizes\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_ceil_half_pixel\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_floor_align_corners", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"align_corners\",\n nearest_mode=\"floor\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [13. 13. 13. 14. 14. 15. 15. 16.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x, mode=\"floor\"),\n output_size=sizes,\n coordinate_transformation_mode=\"align_corners\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_floor_align_corners\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_not_larger", + "code": "keep_aspect_ratio_policy = \"not_larger\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64) # Results in 7x7\n\n# [[[[1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_not_larger\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_not_smaller", + "code": "keep_aspect_ratio_policy = \"not_smaller\"\naxes = [2, 3]\nnode = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([7, 8], dtype=np.int64) # Results in 8x8\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 
4.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x),\n output_size=sizes,\n axes=axes,\n keep_aspect_ratio_policy=keep_aspect_ratio_policy,\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_not_smaller\",\n)" + }, + { + "summary": "resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric", + "code": "node = onnx.helper.make_node(\n \"Resize\",\n inputs=[\"X\", \"\", \"\", \"sizes\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n coordinate_transformation_mode=\"asymmetric\",\n nearest_mode=\"round_prefer_ceil\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]]]]\noutput = interpolate_nd(\n data,\n lambda x, _: nearest_coeffs(x, mode=\"round_prefer_ceil\"),\n output_size=sizes,\n coordinate_transformation_mode=\"asymmetric\",\n).astype(np.float32)\n\nexpect(\n node,\n inputs=[data, sizes],\n outputs=[output],\n name=\"test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric\",\n)" + } + ] + }, + { + "name": "ReverseSequence", + "module": "ai.onnx", + "version": 10, + "description": "Reverse batch of sequences having different lengths specified by `sequence_lens`.\n\nFor each slice i iterating on the batch axis, the operator reverses the first sequence_lens[i] elements on the time axis,\nand copies elements whose indices are beyond sequence_lens[i] to the output. So the output slice i contains the reversed\nsequence in its first sequence_lens[i] elements, with the original values copied for the other elements.\n\nExample 1:\n input = [[0.0, 4.0, 8.0, 12.0],\n [1.0, 5.0, 9.0, 13.0],\n [2.0, 6.0, 10.0, 14.0],\n [3.0, 7.0, 11.0, 15.0]]\n sequence_lens = [4, 3, 2, 1]\n time_axis = 0\n batch_axis = 1\n\n output = [[3.0, 6.0, 9.0, 12.0],\n [2.0, 5.0, 8.0, 13.0],\n [1.0, 4.0, 10.0, 14.0],\n [0.0, 7.0, 11.0, 15.0]]\n\nExample 2:\n input = [[0.0, 1.0, 2.0, 3.0 ],\n [4.0, 5.0, 6.0, 7.0 ],\n [8.0, 9.0, 10.0, 11.0],\n [12.0, 13.0, 14.0, 15.0]]\n sequence_lens = [1, 2, 3, 4]\n time_axis = 1\n batch_axis = 0\n\n output = [[0.0, 1.0, 2.0, 3.0 ],\n [5.0, 4.0, 6.0, 7.0 ],\n [10.0, 9.0, 8.0, 11.0],\n [15.0, 14.0, 13.0, 12.0]]\n", + "attributes": [ + { + "name": "batch_axis", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) Specify which axis is the batch axis. Must be one of 1 (default), or 0." + }, + { + "name": "time_axis", + "type": "int64", + "required": false, + "default": 0, + "description": "(Optional) Specify which axis is the time axis. Must be one of 0 (default), or 1." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Tensor of rank r >= 2." + }, + { + "name": "sequence_lens", + "type": "tensor(int64)", + "description": "Tensor specifying lengths of the sequences in a batch. It has shape `[batch_size]`." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Tensor with the same shape as the input." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input and output types can be of any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "reversesequence_batch", + "code": "node = onnx.helper.make_node(\n \"ReverseSequence\",\n inputs=[\"x\", \"sequence_lens\"],\n outputs=[\"y\"],\n time_axis=1,\n batch_axis=0,\n)\nx = np.array(\n [\n [0.0, 1.0, 2.0, 3.0],\n [4.0, 5.0, 6.0, 7.0],\n [8.0, 9.0, 10.0, 11.0],\n [12.0, 13.0, 14.0, 15.0],\n ],\n dtype=np.float32,\n)\nsequence_lens = np.array([1, 2, 3, 4], dtype=np.int64)\n\ny = np.array(\n [\n [0.0, 1.0, 2.0, 3.0],\n [5.0, 4.0, 6.0, 7.0],\n [10.0, 9.0, 8.0, 11.0],\n [15.0, 14.0, 13.0, 12.0],\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[x, sequence_lens],\n outputs=[y],\n name=\"test_reversesequence_batch\",\n)" + }, + { + "summary": "reversesequence_time", + "code": "node = onnx.helper.make_node(\n \"ReverseSequence\",\n inputs=[\"x\", \"sequence_lens\"],\n outputs=[\"y\"],\n time_axis=0,\n batch_axis=1,\n)\nx = np.array(\n [\n [0.0, 4.0, 8.0, 12.0],\n [1.0, 5.0, 9.0, 13.0],\n [2.0, 6.0, 10.0, 14.0],\n [3.0, 7.0, 11.0, 15.0],\n ],\n dtype=np.float32,\n)\nsequence_lens = np.array([4, 3, 2, 1], dtype=np.int64)\n\ny = np.array(\n [\n [3.0, 6.0, 9.0, 12.0],\n [2.0, 5.0, 8.0, 13.0],\n [1.0, 4.0, 10.0, 14.0],\n [0.0, 7.0, 11.0, 15.0],\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[x, sequence_lens],\n outputs=[y],\n name=\"test_reversesequence_time\",\n)" + } + ] + }, + { + "name": "RoiAlign", + "module": "ai.onnx", + "version": 10, + "description": "Region of Interest (RoI) align operation described in the\n[Mask R-CNN paper](https://arxiv.org/abs/1703.06870).\nRoiAlign consumes an input tensor X and region of interests (rois)\nto apply pooling across each RoI; it produces a 4-D tensor of shape\n(num_rois, C, output_height, output_width).\n\nRoiAlign is proposed to avoid the misalignment by removing\nquantizations while converting from original image into feature\nmap and from feature map into RoI feature; in each ROI bin,\nthe value of the sampled locations are computed directly\nthrough bilinear interpolation.\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "avg", + "description": "The pooling method. Two modes are supported: 'avg' and 'max'. Default is 'avg'." + }, + { + "name": "output_height", + "type": "int64", + "required": false, + "default": 1, + "description": "default 1; Pooled output Y's height." + }, + { + "name": "output_width", + "type": "int64", + "required": false, + "default": 1, + "description": "default 1; Pooled output Y's width." + }, + { + "name": "sampling_ratio", + "type": "int64", + "required": false, + "description": "Number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If == 0, then an adaptive number of grid points are used (computed as ceil(roi_width / output_width), and likewise for height). Default is 0." 
+ }, + { + "name": "spatial_scale", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling, i.e., spatial scale of the input feature map X relative to the input image. E.g.; default is 1.0f. " + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input data tensor from the previous operator; 4-D feature map of shape (N, C, H, W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data." + }, + { + "name": "rois", + "type": "T1", + "description": "RoIs (Regions of Interest) to pool over; rois is 2-D input of shape (num_rois, 4) given as [[x1, y1, x2, y2], ...]. The RoIs' coordinates are in the coordinate system of the input image. Each coordinate set has a 1:1 correspondence with the 'batch_indices' input." + }, + { + "name": "batch_indices", + "type": "T2", + "description": "1-D tensor of shape (num_rois,) with each element denoting the index of the corresponding image in the batch." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T1", + "description": "RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, output_width). The r-th batch element Y[r-1] is a pooled feature map corresponding to the r-th RoI X[r-1]." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain types to int tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "roialign_aligned_false", + "code": "node = onnx.helper.make_node(\n \"RoiAlign\",\n inputs=[\"X\", \"rois\", \"batch_indices\"],\n outputs=[\"Y\"],\n spatial_scale=1.0,\n output_height=5,\n output_width=5,\n sampling_ratio=2,\n coordinate_transformation_mode=\"output_half_pixel\",\n)\n\nX, batch_indices, rois = get_roi_align_input_values()\n# (num_rois, C, output_height, output_width)\nY = np.array(\n [\n [\n [\n [0.4664, 0.4466, 0.3405, 0.5688, 0.6068],\n [0.3714, 0.4296, 0.3835, 0.5562, 0.3510],\n [0.2768, 0.4883, 0.5222, 0.5528, 0.4171],\n [0.4713, 0.4844, 0.6904, 0.4920, 0.8774],\n [0.6239, 0.7125, 0.6289, 0.3355, 0.3495],\n ]\n ],\n [\n [\n [0.3022, 0.4305, 0.4696, 0.3978, 0.5423],\n [0.3656, 0.7050, 0.5165, 0.3172, 0.7015],\n [0.2912, 0.5059, 0.6476, 0.6235, 0.8299],\n [0.5916, 0.7389, 0.7048, 0.8372, 0.8893],\n [0.6227, 0.6153, 0.7097, 0.6154, 0.4585],\n ]\n ],\n [\n [\n [0.2384, 0.3379, 0.3717, 0.6100, 0.7601],\n [0.3767, 0.3785, 0.7147, 0.9243, 0.9727],\n [0.5749, 0.5826, 0.5709, 0.7619, 0.8770],\n [0.5355, 0.2566, 0.2141, 0.2796, 0.3600],\n [0.4365, 0.3504, 0.2887, 0.3661, 0.2349],\n ]\n ],\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, rois, batch_indices],\n outputs=[Y],\n name=\"test_roialign_aligned_false\",\n)" + }, + { + "summary": "roialign_aligned_true", + "code": "node = onnx.helper.make_node(\n \"RoiAlign\",\n inputs=[\"X\", \"rois\", \"batch_indices\"],\n outputs=[\"Y\"],\n spatial_scale=1.0,\n output_height=5,\n output_width=5,\n sampling_ratio=2,\n coordinate_transformation_mode=\"half_pixel\",\n)\n\nX, batch_indices, rois = get_roi_align_input_values()\n# (num_rois, C, output_height, output_width)\nY = np.array(\n [\n [\n [\n [0.5178, 0.3434, 0.3229, 0.4474, 
0.6344],\n [0.4031, 0.5366, 0.4428, 0.4861, 0.4023],\n [0.2512, 0.4002, 0.5155, 0.6954, 0.3465],\n [0.3350, 0.4601, 0.5881, 0.3439, 0.6849],\n [0.4932, 0.7141, 0.8217, 0.4719, 0.4039],\n ]\n ],\n [\n [\n [0.3070, 0.2187, 0.3337, 0.4880, 0.4870],\n [0.1871, 0.4914, 0.5561, 0.4192, 0.3686],\n [0.1433, 0.4608, 0.5971, 0.5310, 0.4982],\n [0.2788, 0.4386, 0.6022, 0.7000, 0.7524],\n [0.5774, 0.7024, 0.7251, 0.7338, 0.8163],\n ]\n ],\n [\n [\n [0.2393, 0.4075, 0.3379, 0.2525, 0.4743],\n [0.3671, 0.2702, 0.4105, 0.6419, 0.8308],\n [0.5556, 0.4543, 0.5564, 0.7502, 0.9300],\n [0.6626, 0.5617, 0.4813, 0.4954, 0.6663],\n [0.6636, 0.3721, 0.2056, 0.1928, 0.2478],\n ]\n ],\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, rois, batch_indices],\n outputs=[Y],\n name=\"test_roialign_aligned_true\",\n)" + }, + { + "summary": "roialign_mode_max", + "code": "X = np.array(\n [\n [\n [\n [\n 0.2764,\n 0.715,\n 0.1958,\n 0.3416,\n 0.4638,\n 0.0259,\n 0.2963,\n 0.6518,\n 0.4856,\n 0.725,\n ],\n [\n 0.9637,\n 0.0895,\n 0.2919,\n 0.6753,\n 0.0234,\n 0.6132,\n 0.8085,\n 0.5324,\n 0.8992,\n 0.4467,\n ],\n [\n 0.3265,\n 0.8479,\n 0.9698,\n 0.2471,\n 0.9336,\n 0.1878,\n 0.4766,\n 0.4308,\n 0.34,\n 0.2162,\n ],\n [\n 0.0206,\n 0.172,\n 0.2155,\n 0.4394,\n 0.0653,\n 0.3406,\n 0.7724,\n 0.3921,\n 0.2541,\n 0.5799,\n ],\n [\n 0.4062,\n 0.2194,\n 0.4473,\n 0.4687,\n 0.7109,\n 0.9327,\n 0.9815,\n 0.632,\n 0.1728,\n 0.6119,\n ],\n [\n 0.3097,\n 0.1283,\n 0.4984,\n 0.5068,\n 0.4279,\n 0.0173,\n 0.4388,\n 0.043,\n 0.4671,\n 0.7119,\n ],\n [\n 0.1011,\n 0.8477,\n 0.4726,\n 0.1777,\n 0.9923,\n 0.4042,\n 0.1869,\n 0.7795,\n 0.9946,\n 0.9689,\n ],\n [\n 0.1366,\n 0.3671,\n 0.7011,\n 0.6234,\n 0.9867,\n 0.5585,\n 0.6985,\n 0.5609,\n 0.8788,\n 0.9928,\n ],\n [\n 0.5697,\n 0.8511,\n 0.6711,\n 0.9406,\n 0.8751,\n 0.7496,\n 0.165,\n 0.1049,\n 0.1559,\n 0.2514,\n ],\n [\n 0.7012,\n 0.4056,\n 0.7879,\n 0.3461,\n 0.0415,\n 0.2998,\n 0.5094,\n 0.3727,\n 0.5482,\n 0.0502,\n ],\n ]\n ]\n ],\n dtype=np.float32,\n)\nrois = np.array(\n [[0.0, 0.0, 9.0, 9.0], [0.0, 5.0, 4.0, 9.0], [5.0, 5.0, 9.0, 9.0]],\n dtype=np.float32,\n)\nbatch_indices = np.array([0, 0, 0], dtype=np.int64)\n\nY = np.array(\n [\n [\n [\n [0.3445228, 0.37310338, 0.37865096, 0.446696, 0.37991184],\n [0.4133513, 0.5455125, 0.6651902, 0.55805874, 0.27110294],\n [0.21223956, 0.40924096, 0.8417618, 0.792561, 0.37196714],\n [0.46835402, 0.39741728, 0.8012819, 0.4969306, 0.5495158],\n [0.3595896, 0.5196813, 0.5403741, 0.23814403, 0.19992709],\n ]\n ],\n [\n [\n [0.30517197, 0.5086199, 0.3189761, 0.4054401, 0.47630402],\n [0.50862, 0.8477, 0.37808004, 0.24936005, 0.79384017],\n [0.17620805, 0.29368007, 0.44870415, 0.4987201, 0.63148826],\n [0.51066005, 0.8511, 0.5368801, 0.9406, 0.70008016],\n [0.4487681, 0.51066035, 0.5042561, 0.5643603, 0.42004836],\n ]\n ],\n [\n [\n [0.21062402, 0.3510401, 0.37416005, 0.5967599, 0.46507207],\n [0.32336006, 0.31180006, 0.6236001, 0.9946, 0.7751202],\n [0.35744014, 0.5588001, 0.35897616, 0.7030401, 0.6353923],\n [0.5996801, 0.27940005, 0.17948808, 0.35152006, 0.31769615],\n [0.3598083, 0.40752012, 0.2385281, 0.43856013, 0.26313624],\n ]\n ],\n ],\n dtype=np.float32,\n)\n\nnode = onnx.helper.make_node(\n \"RoiAlign\",\n inputs=[\"X\", \"rois\", \"batch_indices\"],\n mode=\"max\",\n outputs=[\"Y\"],\n spatial_scale=1.0,\n output_height=5,\n output_width=5,\n sampling_ratio=2,\n coordinate_transformation_mode=\"output_half_pixel\",\n)\n\nexpect(\n node,\n inputs=[X, rois, batch_indices],\n outputs=[Y],\n name=\"test_roialign_mode_max\",\n)" 
+ } + ] + }, + { + "name": "RoiAlign", + "module": "ai.onnx", + "version": 16, + "description": "Region of Interest (RoI) align operation described in the\n[Mask R-CNN paper](https://arxiv.org/abs/1703.06870).\nRoiAlign consumes an input tensor X and region of interests (rois)\nto apply pooling across each RoI; it produces a 4-D tensor of shape\n(num_rois, C, output_height, output_width).\n\nRoiAlign is proposed to avoid the misalignment by removing\nquantizations while converting from original image into feature\nmap and from feature map into RoI feature; in each ROI bin,\nthe value of the sampled locations are computed directly\nthrough bilinear interpolation.\n", + "attributes": [ + { + "name": "coordinate_transformation_mode", + "type": "string", + "required": false, + "default": "half_pixel", + "description": "Allowed values are 'half_pixel' and 'output_half_pixel'. Use the value 'half_pixel' to pixel shift the input coordinates by -0.5 (the recommended behavior). Use the value 'output_half_pixel' to omit the pixel shift for the input (use this for a backward-compatible behavior)." + }, + { + "name": "mode", + "type": "string", + "required": false, + "default": "avg", + "description": "The pooling method. Two modes are supported: 'avg' and 'max'. Default is 'avg'." + }, + { + "name": "output_height", + "type": "int64", + "required": false, + "default": 1, + "description": "default 1; Pooled output Y's height." + }, + { + "name": "output_width", + "type": "int64", + "required": false, + "default": 1, + "description": "default 1; Pooled output Y's width." + }, + { + "name": "sampling_ratio", + "type": "int64", + "required": false, + "description": "Number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If == 0, then an adaptive number of grid points are used (computed as ceil(roi_width / output_width), and likewise for height). Default is 0." + }, + { + "name": "spatial_scale", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling, i.e., spatial scale of the input feature map X relative to the input image. E.g.; default is 1.0f. " + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input data tensor from the previous operator; 4-D feature map of shape (N, C, H, W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data." + }, + { + "name": "rois", + "type": "T1", + "description": "RoIs (Regions of Interest) to pool over; rois is 2-D input of shape (num_rois, 4) given as [[x1, y1, x2, y2], ...]. The RoIs' coordinates are in the coordinate system of the input image. Each coordinate set has a 1:1 correspondence with the 'batch_indices' input." + }, + { + "name": "batch_indices", + "type": "T2", + "description": "1-D tensor of shape (num_rois,) with each element denoting the index of the corresponding image in the batch." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "Y", + "type": "T1", + "description": "RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, output_width). The r-th batch element Y[r-1] is a pooled feature map corresponding to the r-th RoI X[r-1]." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain types to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain types to int tensors.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "roialign_aligned_false", + "code": "node = onnx.helper.make_node(\n \"RoiAlign\",\n inputs=[\"X\", \"rois\", \"batch_indices\"],\n outputs=[\"Y\"],\n spatial_scale=1.0,\n output_height=5,\n output_width=5,\n sampling_ratio=2,\n coordinate_transformation_mode=\"output_half_pixel\",\n)\n\nX, batch_indices, rois = get_roi_align_input_values()\n# (num_rois, C, output_height, output_width)\nY = np.array(\n [\n [\n [\n [0.4664, 0.4466, 0.3405, 0.5688, 0.6068],\n [0.3714, 0.4296, 0.3835, 0.5562, 0.3510],\n [0.2768, 0.4883, 0.5222, 0.5528, 0.4171],\n [0.4713, 0.4844, 0.6904, 0.4920, 0.8774],\n [0.6239, 0.7125, 0.6289, 0.3355, 0.3495],\n ]\n ],\n [\n [\n [0.3022, 0.4305, 0.4696, 0.3978, 0.5423],\n [0.3656, 0.7050, 0.5165, 0.3172, 0.7015],\n [0.2912, 0.5059, 0.6476, 0.6235, 0.8299],\n [0.5916, 0.7389, 0.7048, 0.8372, 0.8893],\n [0.6227, 0.6153, 0.7097, 0.6154, 0.4585],\n ]\n ],\n [\n [\n [0.2384, 0.3379, 0.3717, 0.6100, 0.7601],\n [0.3767, 0.3785, 0.7147, 0.9243, 0.9727],\n [0.5749, 0.5826, 0.5709, 0.7619, 0.8770],\n [0.5355, 0.2566, 0.2141, 0.2796, 0.3600],\n [0.4365, 0.3504, 0.2887, 0.3661, 0.2349],\n ]\n ],\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, rois, batch_indices],\n outputs=[Y],\n name=\"test_roialign_aligned_false\",\n)" + }, + { + "summary": "roialign_aligned_true", + "code": "node = onnx.helper.make_node(\n \"RoiAlign\",\n inputs=[\"X\", \"rois\", \"batch_indices\"],\n outputs=[\"Y\"],\n spatial_scale=1.0,\n output_height=5,\n output_width=5,\n sampling_ratio=2,\n coordinate_transformation_mode=\"half_pixel\",\n)\n\nX, batch_indices, rois = get_roi_align_input_values()\n# (num_rois, C, output_height, output_width)\nY = np.array(\n [\n [\n [\n [0.5178, 0.3434, 0.3229, 0.4474, 0.6344],\n [0.4031, 0.5366, 0.4428, 0.4861, 0.4023],\n [0.2512, 0.4002, 0.5155, 0.6954, 0.3465],\n [0.3350, 0.4601, 0.5881, 0.3439, 0.6849],\n [0.4932, 0.7141, 0.8217, 0.4719, 0.4039],\n ]\n ],\n [\n [\n [0.3070, 0.2187, 0.3337, 0.4880, 0.4870],\n [0.1871, 0.4914, 0.5561, 0.4192, 0.3686],\n [0.1433, 0.4608, 0.5971, 0.5310, 0.4982],\n [0.2788, 0.4386, 0.6022, 0.7000, 0.7524],\n [0.5774, 0.7024, 0.7251, 0.7338, 0.8163],\n ]\n ],\n [\n [\n [0.2393, 0.4075, 0.3379, 0.2525, 0.4743],\n [0.3671, 0.2702, 0.4105, 0.6419, 0.8308],\n [0.5556, 0.4543, 0.5564, 0.7502, 0.9300],\n [0.6626, 0.5617, 0.4813, 0.4954, 0.6663],\n [0.6636, 0.3721, 0.2056, 0.1928, 0.2478],\n ]\n ],\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[X, rois, batch_indices],\n outputs=[Y],\n name=\"test_roialign_aligned_true\",\n)" + }, + { + "summary": "roialign_mode_max", + "code": "X = np.array(\n [\n [\n [\n [\n 0.2764,\n 0.715,\n 0.1958,\n 0.3416,\n 0.4638,\n 0.0259,\n 0.2963,\n 0.6518,\n 0.4856,\n 0.725,\n ],\n [\n 0.9637,\n 0.0895,\n 0.2919,\n 0.6753,\n 0.0234,\n 0.6132,\n 0.8085,\n 0.5324,\n 0.8992,\n 0.4467,\n ],\n [\n 0.3265,\n 0.8479,\n 0.9698,\n 0.2471,\n 0.9336,\n 0.1878,\n 0.4766,\n 0.4308,\n 0.34,\n 0.2162,\n ],\n [\n 0.0206,\n 0.172,\n 0.2155,\n 0.4394,\n 0.0653,\n 0.3406,\n 0.7724,\n 0.3921,\n 0.2541,\n 0.5799,\n ],\n [\n 0.4062,\n 0.2194,\n 0.4473,\n 0.4687,\n 0.7109,\n 0.9327,\n 0.9815,\n 0.632,\n 
0.1728,\n 0.6119,\n ],\n [\n 0.3097,\n 0.1283,\n 0.4984,\n 0.5068,\n 0.4279,\n 0.0173,\n 0.4388,\n 0.043,\n 0.4671,\n 0.7119,\n ],\n [\n 0.1011,\n 0.8477,\n 0.4726,\n 0.1777,\n 0.9923,\n 0.4042,\n 0.1869,\n 0.7795,\n 0.9946,\n 0.9689,\n ],\n [\n 0.1366,\n 0.3671,\n 0.7011,\n 0.6234,\n 0.9867,\n 0.5585,\n 0.6985,\n 0.5609,\n 0.8788,\n 0.9928,\n ],\n [\n 0.5697,\n 0.8511,\n 0.6711,\n 0.9406,\n 0.8751,\n 0.7496,\n 0.165,\n 0.1049,\n 0.1559,\n 0.2514,\n ],\n [\n 0.7012,\n 0.4056,\n 0.7879,\n 0.3461,\n 0.0415,\n 0.2998,\n 0.5094,\n 0.3727,\n 0.5482,\n 0.0502,\n ],\n ]\n ]\n ],\n dtype=np.float32,\n)\nrois = np.array(\n [[0.0, 0.0, 9.0, 9.0], [0.0, 5.0, 4.0, 9.0], [5.0, 5.0, 9.0, 9.0]],\n dtype=np.float32,\n)\nbatch_indices = np.array([0, 0, 0], dtype=np.int64)\n\nY = np.array(\n [\n [\n [\n [0.3445228, 0.37310338, 0.37865096, 0.446696, 0.37991184],\n [0.4133513, 0.5455125, 0.6651902, 0.55805874, 0.27110294],\n [0.21223956, 0.40924096, 0.8417618, 0.792561, 0.37196714],\n [0.46835402, 0.39741728, 0.8012819, 0.4969306, 0.5495158],\n [0.3595896, 0.5196813, 0.5403741, 0.23814403, 0.19992709],\n ]\n ],\n [\n [\n [0.30517197, 0.5086199, 0.3189761, 0.4054401, 0.47630402],\n [0.50862, 0.8477, 0.37808004, 0.24936005, 0.79384017],\n [0.17620805, 0.29368007, 0.44870415, 0.4987201, 0.63148826],\n [0.51066005, 0.8511, 0.5368801, 0.9406, 0.70008016],\n [0.4487681, 0.51066035, 0.5042561, 0.5643603, 0.42004836],\n ]\n ],\n [\n [\n [0.21062402, 0.3510401, 0.37416005, 0.5967599, 0.46507207],\n [0.32336006, 0.31180006, 0.6236001, 0.9946, 0.7751202],\n [0.35744014, 0.5588001, 0.35897616, 0.7030401, 0.6353923],\n [0.5996801, 0.27940005, 0.17948808, 0.35152006, 0.31769615],\n [0.3598083, 0.40752012, 0.2385281, 0.43856013, 0.26313624],\n ]\n ],\n ],\n dtype=np.float32,\n)\n\nnode = onnx.helper.make_node(\n \"RoiAlign\",\n inputs=[\"X\", \"rois\", \"batch_indices\"],\n mode=\"max\",\n outputs=[\"Y\"],\n spatial_scale=1.0,\n output_height=5,\n output_width=5,\n sampling_ratio=2,\n coordinate_transformation_mode=\"output_half_pixel\",\n)\n\nexpect(\n node,\n inputs=[X, rois, batch_indices],\n outputs=[Y],\n name=\"test_roialign_mode_max\",\n)" + } + ] + }, + { + "name": "Round", + "module": "ai.onnx", + "version": 11, + "description": "Round takes one input Tensor and rounds the values, element-wise, meaning\nit finds the nearest integer for each value.\nIn case of halves, the rule is to round them to the nearest even integer.\nIf input x is integral, +0, -0, NaN, or infinite, x itself is returned.\nThe output tensor has the same shape and type as the input.\n\nExamples:\n```\nround([0.9]) = [1.0]\nround([2.5]) = [2.0]\nround([2.3]) = [2.0]\nround([1.5]) = [2.0]\nround([-4.5]) = [-4.0]\n```\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "round", + "code": "node = onnx.helper.make_node(\n \"Round\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n 0.1,\n 0.5,\n 0.9,\n 1.2,\n 1.5,\n 1.8,\n 2.3,\n 2.5,\n 2.7,\n -1.1,\n -1.5,\n -1.9,\n -2.2,\n -2.5,\n -2.8,\n ]\n).astype(np.float32)\ny = np.array(\n [\n 0.0,\n 0.0,\n 1.0,\n 1.0,\n 2.0,\n 2.0,\n 2.0,\n 
2.0,\n 3.0,\n -1.0,\n -2.0,\n -2.0,\n -2.0,\n -2.0,\n -3.0,\n ]\n).astype(\n np.float32\n) # expected output\nexpect(node, inputs=[x], outputs=[y], name=\"test_round\")" + } + ] + }, + { + "name": "STFT", + "module": "ai.onnx", + "version": 17, + "description": "Computes the Short-time Fourier Transform of the signal.", + "attributes": [ + { + "name": "onesided", + "type": "int64", + "required": false, + "default": 1, + "description": "If onesided is 1, only values for w in [0, 1, 2, ..., floor(n_fft/2) + 1] are returned because the real-to-complex Fourier transform satisfies the conjugate symmetry, i.e., X[m, w] = X[m, n_fft-w]*. Note if the input or window tensors are complex, then onesided output is not possible. Enabling onesided with real inputs performs a Real-valued fast Fourier transform (RFFT). When invoked with real or complex valued input, the default value is 1. Values can be 0 or 1." + } + ], + "inputs": [ + { + "name": "signal", + "type": "T1", + "description": "Input tensor representing a real or complex valued signal. For real input, the following shape is expected: [batch_size][signal_length][1]. For complex input, the following shape is expected: [batch_size][signal_length][2], where [batch_size][signal_length][0] represents the real component and [batch_size][signal_length][1] represents the imaginary component of the signal." + }, + { + "name": "frame_step", + "type": "T2", + "description": "The number of samples to step between successive DFTs." + }, + { + "name": "window", + "type": "T1", + "option": "optional", + "description": "A tensor representing the window that will be slid over the signal. The window must have rank 1 with shape: [window_shape]. It's an optional value. " + }, + { + "name": "frame_length", + "type": "T2", + "option": "optional", + "description": "A scalar representing the size of the DFT. It's an optional value." + } + ], + "min_input": 2, + "max_input": 4, + "outputs": [ + { + "name": "output", + "type": "T1", + "description": "The Short-time Fourier Transform of the signals. If onesided is 1, the output has the shape: [batch_size][frames][dft_unique_bins][2], where dft_unique_bins is frame_length // 2 + 1 (the unique components of the DFT). If onesided is 0, the output has the shape: [batch_size][frames][frame_length][2], where frame_length is the length of the DFT." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 4", + "type_constraints": [ + { + "description": "Constrain signal and output to float tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(float16)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain scalar length types to int64_t.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "stft", + "code": "signal = np.arange(0, 128, dtype=np.float32).reshape(1, 128, 1)\nlength = np.array(16).astype(np.int64)\nonesided_length = (length >> 1) + 1\nstep = np.array(8).astype(np.int64)\n\nno_window = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"STFT\",\n inputs=[\"signal\", \"frame_step\", no_window, \"frame_length\"],\n outputs=[\"output\"],\n)\n\nnstfts = ((signal.shape[1] - length) // step) + 1\n# [batch_size][frames][frame_length][2]\noutput = np.empty([1, nstfts, onesided_length, 2], dtype=np.float32)\nfor i in range(nstfts):\n start = i * step\n stop = i * step + length\n complex_out = np.fft.fft(signal[0, start:stop, 0])[0:onesided_length]\n output[0, i] = np.stack((complex_out.real, complex_out.imag), axis=1)\n\nexpect(node, inputs=[signal, step, length], outputs=[output], name=\"test_stft\")\n\nnode = onnx.helper.make_node(\n \"STFT\",\n inputs=[\"signal\", \"frame_step\", \"window\"],\n outputs=[\"output\"],\n)\n\n# Test with window\na0 = 0.5\na1 = 0.5\nwindow = a0 + a1 * np.cos(\n 2 * np.pi * np.arange(0, length, 1, dtype=np.float32) / length\n)\nnstfts = 1 + (signal.shape[1] - window.shape[0]) // step\n\n# [batch_size][frames][frame_length][2]\noutput = np.empty([1, nstfts, onesided_length, 2], dtype=np.float32)\nfor i in range(nstfts):\n start = i * step\n stop = i * step + length\n complex_out = np.fft.fft(signal[0, start:stop, 0] * window)[\n 0:onesided_length\n ]\n output[0, i] = np.stack((complex_out.real, complex_out.imag), axis=1)\nexpect(\n node,\n inputs=[signal, step, window],\n outputs=[output],\n name=\"test_stft_with_window\",\n)" + } + ] + }, + { + "name": "SVMClassifier", + "module": "ai.onnx.ml", + "version": 1, + "description": "Support Vector Machine classifier\n", + "attributes": [ + { + "name": "classlabels_ints", + "type": "int64[]", + "required": false, + "description": "Class labels if using integer labels.
One and only one of the 'classlabels_*' attributes must be defined." + }, + { + "name": "classlabels_strings", + "type": "string[]", + "required": false, + "description": "Class labels if using string labels.
One and only one of the 'classlabels_*' attributes must be defined." + }, + { + "name": "coefficients", + "type": "float32[]", + "required": false, + "description": "" + }, + { + "name": "kernel_params", + "type": "float32[]", + "required": false, + "description": "List of 3 elements containing gamma, coef0, and degree, in that order. Zero if unused for the kernel." + }, + { + "name": "kernel_type", + "type": "string", + "required": false, + "default": "LINEAR", + "description": "The kernel type, one of 'LINEAR,' 'POLY,' 'RBF,' 'SIGMOID'." + }, + { + "name": "post_transform", + "type": "string", + "required": false, + "default": "NONE", + "description": "Indicates the transform to apply to the score.
One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT'" + }, + { + "name": "prob_a", + "type": "float32[]", + "required": false, + "description": "First set of probability coefficients." + }, + { + "name": "prob_b", + "type": "float32[]", + "required": false, + "description": "Second set of probability coefficients. This array must be same size as prob_a.
If these are provided then the output Z contains probability estimates, otherwise they are raw scores." + }, + { + "name": "rho", + "type": "float32[]", + "required": false, + "description": "" + }, + { + "name": "support_vectors", + "type": "float32[]", + "required": false, + "description": "" + }, + { + "name": "vectors_per_class", + "type": "int64[]", + "required": false, + "description": "" + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Data to be classified." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "Classification outputs (one class per example)." + }, + { + "name": "Z", + "type": "tensor(float)", + "description": "Class scores (one per class per example); if prob_a and prob_b are provided they are probabilities for each class, otherwise they are raw scores." + } + ], + "min_output": 2, + "max_output": 2, + "type_constraints": [ + { + "description": "The input must be a tensor of a numeric type, either [C] or [N,C].", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + }, + { + "description": "The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used. Its size will match the batch size of the input.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ] + } + ] + }, + { + "name": "SVMRegressor", + "module": "ai.onnx.ml", + "version": 1, + "description": "Support Vector Machine regression prediction and one-class SVM anomaly detection.\n", + "attributes": [ + { + "name": "coefficients", + "type": "float32[]", + "required": false, + "description": "Support vector coefficients." + }, + { + "name": "kernel_params", + "type": "float32[]", + "required": false, + "description": "List of 3 elements containing gamma, coef0, and degree, in that order. Zero if unused for the kernel." + }, + { + "name": "kernel_type", + "type": "string", + "required": false, + "default": "LINEAR", + "description": "The kernel type, one of 'LINEAR,' 'POLY,' 'RBF,' 'SIGMOID'." + }, + { + "name": "n_supports", + "type": "int64", + "required": false, + "description": "The number of support vectors." + }, + { + "name": "one_class", + "type": "int64", + "required": false, + "description": "Flag indicating whether the regression is a one-class SVM or not." + }, + { + "name": "post_transform", + "type": "string", + "required": false, + "default": "NONE", + "description": "Indicates the transform to apply to the score.
One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.'" + }, + { + "name": "rho", + "type": "float32[]", + "required": false, + "description": "" + }, + { + "name": "support_vectors", + "type": "float32[]", + "required": false, + "description": "Chosen support vectors" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Data to be regressed." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "tensor(float)", + "description": "Regression outputs (one score per target per example)." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input type must be a tensor of a numeric type, either [C] or [N,C].", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + } + ] + }, + { + "name": "Scaler", + "module": "ai.onnx.ml", + "version": 1, + "description": "Rescale input data, for example to standardize features by removing the mean and scaling to unit variance.\n", + "attributes": [ + { + "name": "offset", + "type": "float32[]", + "required": false, + "description": "First, offset by this.
Can be length of features in an [N,F] tensor or length 1, in which case it applies to all features, regardless of dimension count." + }, + { + "name": "scale", + "type": "float32[]", + "required": false, + "description": "Second, multiply by this.
Can be length of features in an [N,F] tensor or length 1, in which case it applies to all features, regardless of dimension count.
Must be same length as 'offset'" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Data to be scaled." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "tensor(float)", + "description": "Scaled output data." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input must be a tensor of a numeric type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + } + ] + }, + { + "name": "Scan", + "module": "ai.onnx", + "version": 8, + "description": "Scan can be used to iterate over one or more scan_input tensors,\nconstructing zero or more scan_output tensors. It combines ideas from general recurrences,\nfunctional programming constructs such as scan, fold, map, and zip, and is intended to enable\ngeneralizations of RNN-like constructs for sequence-to-sequence processing.\nOther tensors (referred to as state_variables here) can be used to carry a state\nwhen iterating from one element to another (similar to hidden-state in RNNs, also referred\nto as loop-carried dependences in the context of loops). All these tensors are required to\nhave the same shape in each iteration of the loop (a restriction imposed to enable efficient\nmemory allocation). Many common usages involve a single scan_input tensor (where functionality\nsimilar to scan, fold and map can be obtained). When more than one scan_input is used,\na behavior similar to zip is obtained.\n\nThe attribute body must be a graph, specifying the computation to be performed in\nevery iteration. It takes as input the current values of the state_variables and\nthe current iterated element of the scan_inputs. It must return the (updated) values\nof the state_variables and zero or more scan_output_element tensors. The values of the\nscan_output_element tensors are concatenated over all the iterations to produce the\nscan_output values of the scan construct (similar to the concatenated intermediate\nhidden-state values of RNN-like constructs).\n\nThe scan operation returns the final values of the state_variables as well as the\nscan_outputs.\n\nThe operation supports batching, and the batch-axis is required to be 0.\nWhen multiple scan_input tensors are used, they must all have the same batch-size,\nand they must all have the same maximum-sequence-length (the dimensionality of the\nsequence axis or scan axis). The sequence axis or scan axis is required to be 1.\n\nThe operation has an optional sequence_lens input (of shape [BATCH_SIZE]) to\nallow variable length sequences of length <= the maximum-sequence-length. If this\ninput is not specified, all sequences are assumed to be of length equal to\nmaximum-sequence-length. 
For variable length input sequences, the scan_outputs\nwill consist of a sequence of the same length as the input, padded to the\nmaximum-sequence-length.\n\nThe optional attribute directions can be used to scan a sequence in the reverse direction.\nIf this attribute is omitted, all sequences are scanned in the forward direction.\nA bidirectional scan may be performed by specifying the same tensor input twice in the\nscan_inputs, once with a forward direction, and once with a backward direction.\n\nNote that because of the ONNX restriction that only the last parameter of an operator can\nbe variadic, the initial-states and scan-inputs are listed together as one input parameter.\nSimilarly, the final-states and scan-outputs are listed together as one output parameter.\nThe attribute num_scan_inputs indicates the number M of scan-inputs.\n\nThe behavior of\n\n Scan <\n num_scan_inputs = m,\n body = loop-body\n > (sequence_lengths, init_1, ..., init_n, scan_1, ..., scan_m)\n\nis equivalent to the following pseudo-code:\n\n // T.shape[0] denotes the batch-size of T\n // The batch-size of scan_1, ..., scan_m are all required to be equal\n batch_size = scan_1.shape[0];\n\n // scan_i.shape[1] denotes the (max) sequence-length of scan_i\n // scan_i.shape[1] is required to be equal to scan_j.shape[1] for all i,j.\n max_sequence_length = scan_1.shape[1];\n\n for (int batch = 0; batch < batch_size; ++batch) {\n // initialize state-variables\n st_1 = init_1; ... st_n = init_n;\n // initialize scan-output variables: [] denotes an empty tensor\n scan_out_1 = []; ...; scan_out_k = [];\n // identify number of iterations:\n N = (sequence_lengths specified) ? sequence_lengths[batch] : max_sequence_length;\n\n // execute loop\n for (int t = 0; t < N; ++t) {\n // generate the scan-input elements: the notation T[t] indicates the sub-tensor\n // of rank one less than T obtained by indexing T at position t along axis k.\n si_1 = (scan_1[batch])[t];\n ... ;\n si_m = (scan_m[batch])[t];\n // execute loop-body\n st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m)\n // accumulate the scan-output elements\n scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k);\n }\n // accumulate the outputs for this batch:\n bst_1[batch] = st_1; ..., bst_n[batch] = st_n;\n // Note scan-outputs will have size max_sequence_length, but only the first N values will be meaningful.\n // The remaining values have an undefined value.\n b_scan_out_1[batch] = scan_out_1; ...; b_scan_out_k[batch] = scan_out_k;\n }\n return bst_1, ..., bst_n, b_scan_out_1, ..., b_scan_out_k;\n\n\n\n*Sample usage: Encoding RNN using a Scan*\n\nThe following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi,\nrecurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can\nbe encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes\n%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). 
If these\nvalues are computed in the outer graph, they need to be passed in as extra state_variables.\n\n graph rnn-encoding {\n %H_0 = ...\n %X = ...\n %Y_h, %Y = Scan[body = , num_scan_inputs=1](\"\", %H_0, %X)\n return %Y, %Y_h\n }\n\n graph rnn-cell-1 (\n %H_tminus1[FLOAT, tensor]\n %X_t[FLOAT, tensor]\n ) {\n %Wi = ...\n %Ri = ...\n %Wbi = ...\n %Rbi = ...\n %t1 = X_t * (Wi^T)\n %t2 = H_tminus1*(Ri^T)\n %t3 = Add(%t1, %t2)\n %t4 = Add(%t3, %Wbi)\n %t5 = Add(%t4, %Rbi)\n %Ht = Tanh(%t5)\n %Accumulate = Identity(%Ht)\n return %Ht, %Accumulate\n }\n\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has N+M inputs: (loop state variables..., scan_input_elts...). It has N+K outputs: (loop state variables..., scan_output_elts...). Each scan_output is created by concatenating the value of the specified scan_output_elt value at the end of each iteration of the loop. It is an error if the dimensions of these values change across loop iterations." + }, + { + "name": "directions", + "type": "int64[]", + "required": false, + "description": "An optional list of M flags. The i-th element of the list specifies the direction to be scanned for the i-th scan_input tensor: 0 indicates forward direction and 1 indicates reverse direction. If omitted, all scan_input tensors will be scanned in the forward direction." + }, + { + "name": "num_scan_inputs", + "type": "int64", + "required": true, + "description": "An attribute specifying the number of scan_inputs M. " + } + ], + "inputs": [ + { + "name": "sequence_lens", + "type": "I", + "option": "optional", + "description": "Optional tensor specifying lengths of the sequences in a batch. If this input is not specified, all sequences are assumed to be of the maximum sequence length (the dimension of the sequence axis of the scan_input tensors)." 
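The batched Scan-8 pseudo-code above, including the sequence_lens padding rule, can be transcribed almost line for line into numpy. The sketch below is editorial (the function name scan8_sum is illustrative) and assumes a single scan_input, forward direction, and an Add loop-body; the spec leaves padded tail values undefined, so zeros are one valid choice.

import numpy as np

def scan8_sum(init, scan_in, sequence_lens=None):
    batch_size, max_seq_len = scan_in.shape[0], scan_in.shape[1]
    bst = np.empty_like(init)            # final states, one row per batch
    b_scan_out = np.zeros_like(scan_in)  # padded to max_seq_len (tail undefined per spec)
    for batch in range(batch_size):
        st = init[batch]
        n = max_seq_len if sequence_lens is None else sequence_lens[batch]
        for t in range(n):
            st = st + scan_in[batch, t]  # loop-body: Add
            b_scan_out[batch, t] = st    # scan-output element (Identity of the state)
        bst[batch] = st
    return bst, b_scan_out

init = np.zeros((1, 2), dtype=np.float32)
x = np.arange(1, 7, dtype=np.float32).reshape(1, 3, 2)
y, z = scan8_sum(init, x)
assert np.array_equal(y, [[9.0, 12.0]])
assert np.array_equal(z[0], [[1.0, 2.0], [4.0, 6.0], [9.0, 12.0]])
# With sequence_lens = [2], only the first two elements are scanned:
y2, z2 = scan8_sum(init, x, sequence_lens=np.array([2]))
assert np.array_equal(y2, [[4.0, 6.0]])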
+ }, + { + "name": "initial_state_and_scan_inputs", + "type": "V", + "list": true, + "description": "Initial values of the loop's N state variables followed by M scan_inputs" + } + ], + "min_input": 2, + "max_input": 2147483647, + "outputs": [ + { + "name": "final_state_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final values of the loop's N state variables followed by K scan_outputs" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "2 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Int64 tensor", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + }, + { + "description": "All Tensor types", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "scan_8", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"sum_in\", \"next\"], outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"sum_out\"], outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nno_sequence_lens = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[no_sequence_lens, \"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for batch-size 1, sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((1, 2))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((1, 3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((1, 2))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((1, 3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 8)],\n)" + }, + { + "summary": "scan_9", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"sum_in\", \"next\"], 
outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"sum_out\"], outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[\"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((2,))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((2,))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan9_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 9)],\n)" + } + ] + }, + { + "name": "Scan", + "module": "ai.onnx", + "version": 9, + "description": "Scan can be used to iterate over one or more scan_input tensors,\nconstructing zero or more scan_output tensors. It combines ideas from general recurrences,\nfunctional programming constructs such as scan, fold, map, and zip, and is intended to enable\ngeneralizations of RNN-like constructs for sequence-to-sequence processing.\nOther tensors (referred to as state_variables here) can be used to carry a state\nwhen iterating from one element to another (similar to hidden-state in RNNs, also referred\nto as loop-carried dependences in the context of loops).\nMany common usages involve a single scan_input tensor (where functionality\nsimilar to scan, fold and map can be obtained). When more than one scan_input is used,\na behavior similar to zip is obtained.\n\nThe attribute body must be a graph, specifying the computation to be performed in\nevery iteration. It takes as input the current values of the state_variables and\nthe current iterated element of the scan_inputs. It must return the (updated) values\nof the state_variables and zero or more scan_output_element tensors. The values of the\nscan_output_element tensors are concatenated over all the iterations to produce the\nscan_output values of the scan construct (similar to the concatenated intermediate\nhidden-state values of RNN-like constructs). All the output tensors (state_variables as\nwell as scan_output_element tensors) are required to have the same shape in each iteration\nof the loop (a restriction imposed to enable efficient memory allocation).\n\nNote that the iterated element passed to the body subgraph does not have a sequence\naxis. It will have a rank one less than the rank of the corresponding scan_input.\n\nThe scan operation returns the final values of the state_variables as well as the\nscan_outputs.\n\nThe optional attribute scan_input_directions specifies the direction (forward or backward)\nfor each scan input. If this attribute is omitted, all sequences are scanned in the forward\ndirection. A bidirectional scan may be performed by specifying the same tensor input twice\nin the scan_inputs, once with a forward direction, and once with a backward direction.\n\nThe scan_output of the operation is produced by concatenating the scan_output_element\nvalues produced by the body in each iteration. The optional attribute scan_output_directions\nspecifies the direction in which scan_output is constructed (by appending or prepending the\nscan_output_element to scan_output in each iteration) for each scan_output. 
If this attribute\nis omitted, the scan_output_element is appended to the scan_output in each iteration.\n\nThe optional attribute scan_input_axes specifies the axis to be scanned for each scan_input.\nIf omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the\nbatch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1.\nNote that scanning a non-zero axis may be less efficient than scanning axis zero.\n\nThe optional attribute scan_output_axes specifies the axis along which the scan_outputs\nare accumulated for each scan_output. For example, if axis 1 is the time axis (to be\nscanned) for both inputs and outputs, specify a scan_input axis and scan_output axis\nvalue of 1.\n\nNote that because of the ONNX restriction that only the last parameter of an operator can\nbe variadic, the initial-states and scan-inputs are listed together as one input parameter.\nSimilarly, the final-states and scan-outputs are listed together as one output parameter.\nThe attribute num_scan_inputs indicates the number M of scan-inputs.\n\nThe behavior of\n\n Scan <\n num_scan_inputs = m,\n body = loop-body,\n scan_input_axes = [axis_1, ..., axis_m]\n > (init_1, ..., init_n, scan_1, ..., scan_m)\n\nis equivalent to the following pseudo-code:\n\n // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i\n // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.\n sequence_length = scan_1.shape[axis_1];\n\n // initialize state-variables\n st_1 = init_1; ... st_n = init_n;\n // initialize scan-output variables: [] denotes an empty tensor\n scan_out_1 = []; ...; scan_out_k = [];\n // identify number of iterations:\n\n // execute loop\n for (int t = 0; t < sequence_length; ++t) {\n // generate the scan-input elements: the notation T[t] indicates the sub-tensor\n // of rank one less than T obtained by indexing T at position t along axis k.\n si_1 = scan_1[t];\n ... ;\n si_m = scan_m[t];\n // execute loop-body\n st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m)\n // accumulate the scan-output elements\n scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k);\n }\n\n return st_1, ..., st_n, scan_out_1, ..., scan_out_k;\n\n*Sample usage: Encoding RNN using a Scan*\n\nThe following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi,\nrecurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can\nbe encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes\n%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these\nvalues are computed in the outer graph, they need to be passed in as extra state_variables.\n\n graph rnn-encoding {\n %H_0 = ...\n %X = ...\n %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X)\n return %Y, %Y_h\n }\n\n graph rnn-cell-1 (\n %H_tminus1[FLOAT, tensor]\n %X_t[FLOAT, tensor]\n ) {\n %Wi = ...\n %Ri = ...\n %Wbi = ...\n %Rbi = ...\n %t1 = X_t * (Wi^T)\n %t2 = H_tminus1*(Ri^T)\n %t3 = Add(%t1, %t2)\n %t4 = Add(%t3, %Wbi)\n %t5 = Add(%t4, %Rbi)\n %Ht = Tanh(%t5)\n %Accumulate = Identity(%Ht)\n return %Ht, %Accumulate\n }\n\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has N+M inputs: (loop state variables..., scan_input_elts...). It has N+K outputs: (loop state variables..., scan_output_elts...). 
Each scan_output is created by concatenating the value of the specified scan_output_elt value at the end of each iteration of the loop. It is an error if the dimensions of these values change across loop iterations." + }, + { + "name": "num_scan_inputs", + "type": "int64", + "required": true, + "description": "An attribute specifying the number of scan_inputs M. " + }, + { + "name": "scan_input_axes", + "type": "int64[]", + "required": false, + "description": "An optional list of M flags. The i-th element of the list specifies the axis to be scanned (the sequence axis) for the i-th scan_input. If omitted, 0 will be used as the scan axis for every scan_input." + }, + { + "name": "scan_input_directions", + "type": "int64[]", + "required": false, + "description": "An optional list of M flags. The i-th element of the list specifies the direction to be scanned for the i-th scan_input tensor: 0 indicates forward direction and 1 indicates reverse direction. If omitted, all scan_input tensors will be scanned in the forward direction." + }, + { + "name": "scan_output_axes", + "type": "int64[]", + "required": false, + "description": "An optional list of K flags. The i-th element of the list specifies the axis for the i-th scan_output. The scan outputs are accumulated along the specified axis. If omitted, 0 will be used as the scan axis for every scan_output." + }, + { + "name": "scan_output_directions", + "type": "int64[]", + "required": false, + "description": "An optional list of K flags, one for each scan_output. The i-th element of the list specifies whether the i-th scan_output should be constructed by appending or prepending a new value in each iteration: 0 indicates appending and 1 indicates prepending. If omitted, all scan_output tensors will be produced by appending a value in each iteration." 
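The effect of scan_input_directions can be seen end to end with onnx's reference evaluator (available in onnx >= 1.13). The sketch below mirrors the scan_9 example further down but scans x in reverse; it is illustrative, not an upstream test case.

import numpy as np
import onnx
from onnx.reference import ReferenceEvaluator

# Same Add/Identity loop-body as the scan_9 example.
sum_in = onnx.helper.make_tensor_value_info("sum_in", onnx.TensorProto.FLOAT, [2])
nxt = onnx.helper.make_tensor_value_info("nxt", onnx.TensorProto.FLOAT, [2])
sum_out = onnx.helper.make_tensor_value_info("sum_out", onnx.TensorProto.FLOAT, [2])
scan_out = onnx.helper.make_tensor_value_info("scan_out", onnx.TensorProto.FLOAT, [2])
body = onnx.helper.make_graph(
    [
        onnx.helper.make_node("Add", ["sum_in", "nxt"], ["sum_out"]),
        onnx.helper.make_node("Identity", ["sum_out"], ["scan_out"]),
    ],
    "scan_body",
    [sum_in, nxt],
    [sum_out, scan_out],
)
node = onnx.helper.make_node(
    "Scan",
    inputs=["initial", "x"],
    outputs=["y", "z"],
    num_scan_inputs=1,
    body=body,
    scan_input_directions=[1],  # 1 = scan the (single) scan_input in reverse
)
graph = onnx.helper.make_graph(
    [node],
    "scan_reverse",
    [
        onnx.helper.make_tensor_value_info("initial", onnx.TensorProto.FLOAT, [2]),
        onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [3, 2]),
    ],
    [
        onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [2]),
        onnx.helper.make_tensor_value_info("z", onnx.TensorProto.FLOAT, [3, 2]),
    ],
)
model = onnx.helper.make_model(graph, opset_imports=[onnx.helper.make_opsetid("", 9)])

initial = np.zeros(2, dtype=np.float32)
x = np.arange(1, 7, dtype=np.float32).reshape(3, 2)
y, z = ReferenceEvaluator(model).run(None, {"initial": initial, "x": x})
# The reverse scan visits [5, 6], [3, 4], [1, 2]; outputs are still appended:
assert np.array_equal(y, [9.0, 12.0])
assert np.array_equal(z, [[5.0, 6.0], [8.0, 10.0], [9.0, 12.0]])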
+ } + ], + "inputs": [ + { + "name": "initial_state_and_scan_inputs", + "type": "V", + "list": true, + "description": "Initial values of the loop's N state variables followed by M scan_inputs" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "final_state_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final values of the loop's N state variables followed by K scan_outputs" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "1 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor types", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "scan_8", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"sum_in\", \"next\"], outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"sum_out\"], outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nno_sequence_lens = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[no_sequence_lens, \"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for batch-size 1, sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((1, 2))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((1, 3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((1, 2))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((1, 3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 8)],\n)" + }, + { + "summary": "scan_9", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"sum_in\", \"next\"], outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"sum_out\"], 
outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[\"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((2,))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((2,))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan9_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 9)],\n)" + } + ] + }, + { + "name": "Scan", + "module": "ai.onnx", + "version": 11, + "description": "Scan can be used to iterate over one or more scan_input tensors,\nconstructing zero or more scan_output tensors. It combines ideas from general recurrences,\nfunctional programming constructs such as scan, fold, map, and zip, and is intended to enable\ngeneralizations of RNN-like constructs for sequence-to-sequence processing.\nOther tensors (referred to as state_variables here) can be used to carry a state\nwhen iterating from one element to another (similar to hidden-state in RNNs, also referred\nto as loop-carried dependences in the context of loops).\nMany common usages involve a single scan_input tensor (where functionality\nsimilar to scan, fold and map can be obtained). When more than one scan_input is used,\na behavior similar to zip is obtained.\n\nThe attribute body must be a graph, specifying the computation to be performed in\nevery iteration. It takes as input the current values of the state_variables and\nthe current iterated element of the scan_inputs. It must return the (updated) values\nof the state_variables and zero or more scan_output_element tensors. The values of the\nscan_output_element tensors are concatenated over all the iterations to produce the\nscan_output values of the scan construct (similar to the concatenated intermediate\nhidden-state values of RNN-like constructs). All the output tensors (state_variables as\nwell as scan_output_element tensors) are required to have the same shape in each iteration\nof the loop (a restriction imposed to enable efficient memory allocation).\n\nNote that the iterated element passed to the body subgraph does not have a sequence\naxis. It will have a rank one less than the rank of the corresponding scan_input.\n\nThe scan operation returns the final values of the state_variables as well as the\nscan_outputs.\n\nThe optional attribute scan_input_directions specifies the direction (forward or backward)\nfor each scan input. If this attribute is omitted, all sequences are scanned in the forward\ndirection. A bidirectional scan may be performed by specifying the same tensor input twice\nin the scan_inputs, once with a forward direction, and once with a backward direction.\n\nThe scan_output of the operation is produced by concatenating the scan_output_element\nvalues produced by the body in each iteration. The optional attribute scan_output_directions\nspecifies the direction in which scan_output is constructed (by appending or prepending the\nscan_output_element to scan_output in each iteration) for each scan_output. 
If this attribute\nis omitted, the scan_output_element is appended to the scan_output in each iteration.\n\nThe optional attribute scan_input_axes specifies the axis to be scanned for each scan_input.\nIf omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the\nbatch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1.\nNote that scanning a non-zero axis may be less efficient than scanning axis zero.\n\nThe optional attribute scan_output_axes specifies the axis along which the scan_outputs\nare accumulated for each scan_output. For example, if axis 1 is the time axis (to be\nscanned) for both inputs and outputs, specify a scan_input axis and scan_output axis\nvalue of 1.\n\nNote that because of the ONNX restriction that only the last parameter of an operator can\nbe variadic, the initial-states and scan-inputs are listed together as one input parameter.\nSimilarly, the final-states and scan-outputs are listed together as one output parameter.\nThe attribute num_scan_inputs indicates the number M of scan-inputs.\n\nThe behavior of\n\n Scan <\n num_scan_inputs = m,\n body = loop-body,\n scan_input_axes = [axis_1, ..., axis_m]\n > (init_1, ..., init_n, scan_1, ..., scan_m)\n\nis equivalent to the following pseudo-code:\n\n // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i\n // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.\n sequence_length = scan_1.shape[axis_1];\n\n // initialize state-variables\n st_1 = init_1; ... st_n = init_n;\n // initialize scan-output variables: [] denotes an empty tensor\n scan_out_1 = []; ...; scan_out_k = [];\n // identify number of iterations:\n\n // execute loop\n for (int t = 0; t < sequence_length; ++t) {\n // generate the scan-input elements: the notation T[t] indicates the sub-tensor\n // of rank one less than T obtained by indexing T at position t along axis k.\n si_1 = scan_1[t];\n ... ;\n si_m = scan_m[t];\n // execute loop-body\n st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m)\n // accumulate the scan-output elements\n scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k);\n }\n\n return st_1, ..., st_n, scan_out_1, ..., scan_out_k;\n\n*Sample usage: Encoding RNN using a Scan*\n\nThe following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi,\nrecurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can\nbe encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes\n%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these\nvalues are computed in the outer graph, they need to be passed in as extra state_variables.\n\n graph rnn-encoding {\n %H_0 = ...\n %X = ...\n %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X)\n return %Y, %Y_h\n }\n\n graph rnn-cell-1 (\n %H_tminus1[FLOAT, tensor]\n %X_t[FLOAT, tensor]\n ) {\n %Wi = ...\n %Ri = ...\n %Wbi = ...\n %Rbi = ...\n %t1 = X_t * (Wi^T)\n %t2 = H_tminus1*(Ri^T)\n %t3 = Add(%t1, %t2)\n %t4 = Add(%t3, %Wbi)\n %t5 = Add(%t4, %Rbi)\n %Ht = Tanh(%t5)\n %Accumulate = Identity(%Ht)\n return %Ht, %Accumulate\n }\n\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has N+M inputs: (loop state variables..., scan_input_elts...). It has N+K outputs: (loop state variables..., scan_output_elts...). 
Each scan_output is created by concatenating the value of the specified scan_output_elt value at the end of each iteration of the loop. It is an error if the dimensions of these values change across loop iterations." + }, + { + "name": "num_scan_inputs", + "type": "int64", + "required": true, + "description": "An attribute specifying the number of scan_inputs M. " + }, + { + "name": "scan_input_axes", + "type": "int64[]", + "required": false, + "description": "An optional list of M flags. The i-th element of the list specifies the axis to be scanned (the sequence axis) for the i-th scan_input. If omitted, 0 will be used as the scan axis for every scan_input. Negative value for an axis means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + }, + { + "name": "scan_input_directions", + "type": "int64[]", + "required": false, + "description": "An optional list of M flags. The i-th element of the list specifies the direction to be scanned for the i-th scan_input tensor: 0 indicates forward direction and 1 indicates reverse direction. If omitted, all scan_input tensors will be scanned in the forward direction." + }, + { + "name": "scan_output_axes", + "type": "int64[]", + "required": false, + "description": "An optional list of K flags. The i-th element of the list specifies the axis for the i-th scan_output. The scan outputs are accumulated along the specified axis. If omitted, 0 will be used as the scan axis for every scan_output. Negative value for an axis means counting dimensions from the back. Accepted range is [-r, r-1]." + }, + { + "name": "scan_output_directions", + "type": "int64[]", + "required": false, + "description": "An optional list of K flags, one for each scan_output. The i-th element of the list specifies whether the i-th scan_output should be constructed by appending or prepending a new value in each iteration: 0 indicates appending and 1 indicates prepending. If omitted, all scan_output tensors will be produced by appending a value in each iteration." 
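The axis attributes can be paraphrased in plain numpy. An editorial sketch of scanning axis 1 of a rank-2 input, which from this version may equivalently be written as axis -1:

import numpy as np

x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32)
st = np.zeros(2, dtype=np.float32)  # one state variable
elts = []
for t in range(x.shape[1]):         # scan_input_axes = [1] (or [-1] for rank 2)
    st = st + x[:, t]               # iterated element x[:, t] has rank one less
    elts.append(st)
z = np.stack(elts, axis=1)          # scan_output_axes = [1]
assert np.array_equal(st, [6.0, 15.0])
assert np.array_equal(z, [[1.0, 3.0, 6.0], [4.0, 9.0, 15.0]])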
+ } + ], + "inputs": [ + { + "name": "initial_state_and_scan_inputs", + "type": "V", + "list": true, + "description": "Initial values of the loop's N state variables followed by M scan_inputs" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "final_state_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final values of the loop's N state variables followed by K scan_outputs" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "1 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor types", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "scan_8", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"sum_in\", \"next\"], outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"sum_out\"], outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nno_sequence_lens = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[no_sequence_lens, \"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for batch-size 1, sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((1, 2))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((1, 3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((1, 2))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((1, 3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 8)],\n)" + }, + { + "summary": "scan_9", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"sum_in\", \"next\"], outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"sum_out\"], 
outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[\"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((2,))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((2,))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan9_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 9)],\n)" + } + ] + }, + { + "name": "Scan", + "module": "ai.onnx", + "version": 16, + "description": "Scan can be used to iterate over one or more scan_input tensors,\nconstructing zero or more scan_output tensors. It combines ideas from general recurrences,\nfunctional programming constructs such as scan, fold, map, and zip, and is intended to enable\ngeneralizations of RNN-like constructs for sequence-to-sequence processing.\nOther tensors (referred to as state_variables here) can be used to carry a state\nwhen iterating from one element to another (similar to hidden-state in RNNs, also referred\nto as loop-carried dependences in the context of loops).\nMany common usages involve a single scan_input tensor (where functionality\nsimilar to scan, fold and map can be obtained). When more than one scan_input is used,\na behavior similar to zip is obtained.\n\nThe attribute body must be a graph, specifying the computation to be performed in\nevery iteration. It takes as input the current values of the state_variables and\nthe current iterated element of the scan_inputs. It must return the (updated) values\nof the state_variables and zero or more scan_output_element tensors. The values of the\nscan_output_element tensors are concatenated over all the iterations to produce the\nscan_output values of the scan construct (similar to the concatenated intermediate\nhidden-state values of RNN-like constructs). All the output tensors (state_variables as\nwell as scan_output_element tensors) are required to have the same shape in each iteration\nof the loop (a restriction imposed to enable efficient memory allocation).\n\nNote that the iterated element passed to the body subgraph does not have a sequence\naxis. It will have a rank one less than the rank of the corresponding scan_input.\n\nThe scan operation returns the final values of the state_variables as well as the\nscan_outputs.\n\nThe optional attribute scan_input_directions specifies the direction (forward or backward)\nfor each scan input. If this attribute is omitted, all sequences are scanned in the forward\ndirection. A bidirectional scan may be performed by specifying the same tensor input twice\nin the scan_inputs, once with a forward direction, and once with a backward direction.\n\nThe scan_output of the operation is produced by concatenating the scan_output_element\nvalues produced by the body in each iteration. The optional attribute scan_output_directions\nspecifies the direction in which scan_output is constructed (by appending or prepending the\nscan_output_element to scan_output in each iteration) for each scan_output. 
If this attribute\nis omitted, the scan_output_element is appended to the scan_output in each iteration.\n\nThe optional attribute scan_input_axes specifies the axis to be scanned for each scan_input.\nIf omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the\nbatch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1.\nNote that scanning a non-zero axis may be less efficient than scanning axis zero.\n\nThe optional attribute scan_output_axes specifies the axis along which the scan_outputs\nare accumulated for each scan_output. For example, if axis 1 is the time axis (to be\nscanned) for both inputs and outputs, specify a scan_input axis and scan_output axis\nvalue of 1.\n\nNote that because of the ONNX restriction that only the last parameter of an operator can\nbe variadic, the initial-states and scan-inputs are listed together as one input parameter.\nSimilarly, the final-states and scan-outputs are listed together as one output parameter.\nThe attribute num_scan_inputs indicates the number M of scan-inputs.\n\nThe behavior of\n\n Scan <\n num_scan_inputs = m,\n body = loop-body,\n scan_input_axes = [axis_1, ..., axis_m]\n > (init_1, ..., init_n, scan_1, ..., scan_m)\n\nis equivalent to the following pseudo-code:\n\n // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i\n // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.\n sequence_length = scan_1.shape[axis_1];\n\n // initialize state-variables\n st_1 = init_1; ... st_n = init_n;\n // initialize scan-output variables: [] denotes an empty tensor\n scan_out_1 = []; ...; scan_out_k = [];\n // identify number of iterations:\n\n // execute loop\n for (int t = 0; t < sequence_length; ++t) {\n // generate the scan-input elements: the notation T[t] indicates the sub-tensor\n // of rank one less than T obtained by indexing T at position t along axis k.\n si_1 = scan_1[t];\n ... ;\n si_m = scan_m[t];\n // execute loop-body\n st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m)\n // accumulate the scan-output elements\n scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k);\n }\n\n return st_1, ..., st_n, scan_out_1, ..., scan_out_k;\n\n*Sample usage: Encoding RNN using a Scan*\n\nThe following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi,\nrecurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can\nbe encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes\n%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these\nvalues are computed in the outer graph, they need to be passed in as extra state_variables.\n\n graph rnn-encoding {\n %H_0 = ...\n %X = ...\n %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X)\n return %Y, %Y_h\n }\n\n graph rnn-cell-1 (\n %H_tminus1[FLOAT, tensor]\n %X_t[FLOAT, tensor]\n ) {\n %Wi = ...\n %Ri = ...\n %Wbi = ...\n %Rbi = ...\n %t1 = X_t * (Wi^T)\n %t2 = H_tminus1*(Ri^T)\n %t3 = Add(%t1, %t2)\n %t4 = Add(%t3, %Wbi)\n %t5 = Add(%t4, %Rbi)\n %Ht = Tanh(%t5)\n %Accumulate = Identity(%Ht)\n return %Ht, %Accumulate\n }\n\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has N+M inputs: (loop state variables..., scan_input_elts...). It has N+K outputs: (loop state variables..., scan_output_elts...). 
Each scan_output is created by concatenating the value of the specified scan_output_elt value at the end of each iteration of the loop. It is an error if the dimensions of these values change across loop iterations." + }, + { + "name": "num_scan_inputs", + "type": "int64", + "required": true, + "description": "An attribute specifying the number of scan_inputs M. " + }, + { + "name": "scan_input_axes", + "type": "int64[]", + "required": false, + "description": "An optional list of M flags. The i-th element of the list specifies the axis to be scanned (the sequence axis) for the i-th scan_input. If omitted, 0 will be used as the scan axis for every scan_input. Negative value for an axis means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + }, + { + "name": "scan_input_directions", + "type": "int64[]", + "required": false, + "description": "An optional list of M flags. The i-th element of the list specifies the direction to be scanned for the i-th scan_input tensor: 0 indicates forward direction and 1 indicates reverse direction. If omitted, all scan_input tensors will be scanned in the forward direction." + }, + { + "name": "scan_output_axes", + "type": "int64[]", + "required": false, + "description": "An optional list of K flags. The i-th element of the list specifies the axis for the i-th scan_output. The scan outputs are accumulated along the specified axis. If omitted, 0 will be used as the scan axis for every scan_output. Negative value for an axis means counting dimensions from the back. Accepted range is [-r, r-1]." + }, + { + "name": "scan_output_directions", + "type": "int64[]", + "required": false, + "description": "An optional list of K flags, one for each scan_output. The i-th element of the list specifies whether the i-th scan_output should be constructed by appending or prepending a new value in each iteration: 0 indicates appending and 1 indicates prepending. If omitted, all scan_output tensors will be produced by appending a value in each iteration." 
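scan_output_directions is easiest to see in isolation. A small editorial numpy sketch of prepending (direction 1), as opposed to the default appending:

import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
st = np.zeros(2, dtype=np.float32)
out = []
for t in range(x.shape[0]):
    st = st + x[t]
    out.insert(0, st)  # scan_output_directions = [1]: prepend each element
z = np.stack(out)
# The last element produced comes first; appending would give the reverse order.
assert np.array_equal(z, [[9.0, 12.0], [4.0, 6.0], [1.0, 2.0]])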
+ } + ], + "inputs": [ + { + "name": "initial_state_and_scan_inputs", + "type": "V", + "list": true, + "description": "Initial values of the loop's N state variables followed by M scan_inputs" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "final_state_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final values of the loop's N state variables followed by K scan_outputs" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "1 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor types up to IRv4.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "scan_8", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"sum_in\", \"next\"], outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"sum_out\"], outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nno_sequence_lens = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[no_sequence_lens, \"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for batch-size 1, sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((1, 2))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((1, 3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((1, 2))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((1, 3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 8)],\n)" + }, + { + "summary": "scan_9", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"sum_in\", \"next\"], outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", 
inputs=[\"sum_out\"], outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[\"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((2,))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((2,))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan9_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 9)],\n)" + } + ] + }, + { + "name": "Scan", + "module": "ai.onnx", + "version": 19, + "description": "Scan can be used to iterate over one or more scan_input tensors,\nconstructing zero or more scan_output tensors. It combines ideas from general recurrences,\nfunctional programming constructs such as scan, fold, map, and zip, and is intended to enable\ngeneralizations of RNN-like constructs for sequence-to-sequence processing.\nOther tensors (referred to as state_variables here) can be used to carry a state\nwhen iterating from one element to another (similar to hidden-state in RNNs, also referred\nto as loop-carried dependences in the context of loops).\nMany common usages involve a single scan_input tensor (where functionality\nsimilar to scan, fold and map can be obtained). When more than one scan_input is used,\na behavior similar to zip is obtained.\n\nThe attribute body must be a graph, specifying the computation to be performed in\nevery iteration. It takes as input the current values of the state_variables and\nthe current iterated element of the scan_inputs. It must return the (updated) values\nof the state_variables and zero or more scan_output_element tensors. The values of the\nscan_output_element tensors are concatenated over all the iterations to produce the\nscan_output values of the scan construct (similar to the concatenated intermediate\nhidden-state values of RNN-like constructs). All the output tensors (state_variables as\nwell as scan_output_element tensors) are required to have the same shape in each iteration\nof the loop (a restriction imposed to enable efficient memory allocation).\n\nNote that the iterated element passed to the body subgraph does not have a sequence\naxis. It will have a rank one less than the rank of the corresponding scan_input.\n\nThe scan operation returns the final values of the state_variables as well as the\nscan_outputs.\n\nThe optional attribute scan_input_directions specifies the direction (forward or backward)\nfor each scan input. If this attribute is omitted, all sequences are scanned in the forward\ndirection. A bidirectional scan may be performed by specifying the same tensor input twice\nin the scan_inputs, once with a forward direction, and once with a backward direction.\n\nThe scan_output of the operation is produced by concatenating the scan_output_element\nvalues produced by the body in each iteration. The optional attribute scan_output_directions\nspecifies the direction in which scan_output is constructed (by appending or prepending the\nscan_output_element to scan_output in each iteration) for each scan_output. 
If this attribute\nis omitted, the scan_output_element is appended to the scan_output in each iteration.\n\nThe optional attribute scan_input_axes specifies the axis to be scanned for each scan_input.\nIf omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the\nbatch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1.\nNote that scanning a non-zero axis may be less efficient than scanning axis zero.\n\nThe optional attribute scan_output_axes specifies the axis along which the scan_outputs\nare accumulated for each scan_output. For example, if axis 1 is the time axis (to be\nscanned) for both inputs and outputs, specify a scan_input axis and scan_output axis\nvalue of 1.\n\nNote that because of the ONNX restriction that only the last parameter of an operator can\nbe variadic, the initial-states and scan-inputs are listed together as one input parameter.\nSimilarly, the final-states and scan-outputs are listed together as one output parameter.\nThe attribute num_scan_inputs indicates the number M of scan-inputs.\n\nThe behavior of\n\n Scan <\n num_scan_inputs = m,\n body = loop-body,\n scan_input_axes = [axis_1, ..., axis_m]\n > (init_1, ..., init_n, scan_1, ..., scan_m)\n\nis equivalent to the following pseudo-code:\n\n // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i\n // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.\n sequence_length = scan_1.shape[axis_1];\n\n // initialize state-variables\n st_1 = init_1; ... st_n = init_n;\n // initialize scan-output variables: [] denotes an empty tensor\n scan_out_1 = []; ...; scan_out_k = [];\n // identify number of iterations:\n\n // execute loop\n for (int t = 0; t < sequence_length; ++t) {\n // generate the scan-input elements: the notation T[t] indicates the sub-tensor\n // of rank one less than T obtained by indexing T at position t along axis k.\n si_1 = scan_1[t];\n ... ;\n si_m = scan_m[t];\n // execute loop-body\n st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m)\n // accumulate the scan-output elements\n scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k);\n }\n\n return st_1, ..., st_n, scan_out_1, ..., scan_out_k;\n\n*Sample usage: Encoding RNN using a Scan*\n\nThe following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi,\nrecurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can\nbe encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes\n%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these\nvalues are computed in the outer graph, they need to be passed in as extra state_variables.\n\n graph rnn-encoding {\n %H_0 = ...\n %X = ...\n %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X)\n return %Y, %Y_h\n }\n\n graph rnn-cell-1 (\n %H_tminus1[FLOAT, tensor]\n %X_t[FLOAT, tensor]\n ) {\n %Wi = ...\n %Ri = ...\n %Wbi = ...\n %Rbi = ...\n %t1 = X_t * (Wi^T)\n %t2 = H_tminus1*(Ri^T)\n %t3 = Add(%t1, %t2)\n %t4 = Add(%t3, %Wbi)\n %t5 = Add(%t4, %Rbi)\n %Ht = Tanh(%t5)\n %Accumulate = Identity(%Ht)\n return %Ht, %Accumulate\n }\n\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has N+M inputs: (loop state variables..., scan_input_elts...). It has N+K outputs: (loop state variables..., scan_output_elts...). 
Each scan_output is created by concatenating the value of the specified scan_output_elt value at the end of each iteration of the loop. It is an error if the dimensions of these values change across loop iterations." + }, + { + "name": "num_scan_inputs", + "type": "int64", + "required": true, + "description": "An attribute specifying the number of scan_inputs M. " + }, + { + "name": "scan_input_axes", + "type": "int64[]", + "required": false, + "description": "An optional list of M flags. The i-th element of the list specifies the axis to be scanned (the sequence axis) for the i-th scan_input. If omitted, 0 will be used as the scan axis for every scan_input. Negative value for an axis means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + }, + { + "name": "scan_input_directions", + "type": "int64[]", + "required": false, + "description": "An optional list of M flags. The i-th element of the list specifies the direction to be scanned for the i-th scan_input tensor: 0 indicates forward direction and 1 indicates reverse direction. If omitted, all scan_input tensors will be scanned in the forward direction." + }, + { + "name": "scan_output_axes", + "type": "int64[]", + "required": false, + "description": "An optional list of K flags. The i-th element of the list specifies the axis for the i-th scan_output. The scan outputs are accumulated along the specified axis. If omitted, 0 will be used as the scan axis for every scan_output. Negative value for an axis means counting dimensions from the back. Accepted range is [-r, r-1]." + }, + { + "name": "scan_output_directions", + "type": "int64[]", + "required": false, + "description": "An optional list of K flags, one for each scan_output. The i-th element of the list specifies whether the i-th scan_output should be constructed by appending or prepending a new value in each iteration: 0 indicates appending and 1 indicates prepending. If omitted, all scan_output tensors will be produced by appending a value in each iteration." 
+ } + ], + "inputs": [ + { + "name": "initial_state_and_scan_inputs", + "type": "V", + "list": true, + "description": "Initial values of the loop's N state variables followed by M scan_inputs" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "final_state_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final values of the loop's N state variables followed by K scan_outputs" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "1 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor types up to IRv9.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + } + ], + "examples": [ + { + "summary": "scan_8", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"sum_in\", \"next\"], outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"sum_out\"], outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nno_sequence_lens = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[no_sequence_lens, \"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for batch-size 1, sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((1, 2))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((1, 3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((1, 2))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((1, 3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 8)],\n)" + }, + { + "summary": "scan_9", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = onnx.helper.make_node(\n \"Add\", 
inputs=[\"sum_in\", \"next\"], outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"sum_out\"], outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[\"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((2,))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((2,))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan9_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 9)],\n)" + } + ] + }, + { + "name": "Scan", + "module": "ai.onnx", + "version": 21, + "description": "Scan can be used to iterate over one or more scan_input tensors,\nconstructing zero or more scan_output tensors. It combines ideas from general recurrences,\nfunctional programming constructs such as scan, fold, map, and zip, and is intended to enable\ngeneralizations of RNN-like constructs for sequence-to-sequence processing.\nOther tensors (referred to as state_variables here) can be used to carry a state\nwhen iterating from one element to another (similar to hidden-state in RNNs, also referred\nto as loop-carried dependences in the context of loops).\nMany common usages involve a single scan_input tensor (where functionality\nsimilar to scan, fold and map can be obtained). When more than one scan_input is used,\na behavior similar to zip is obtained.\n\nThe attribute body must be a graph, specifying the computation to be performed in\nevery iteration. It takes as input the current values of the state_variables and\nthe current iterated element of the scan_inputs. It must return the (updated) values\nof the state_variables and zero or more scan_output_element tensors. The values of the\nscan_output_element tensors are concatenated over all the iterations to produce the\nscan_output values of the scan construct (similar to the concatenated intermediate\nhidden-state values of RNN-like constructs). All the output tensors (state_variables as\nwell as scan_output_element tensors) are required to have the same shape in each iteration\nof the loop (a restriction imposed to enable efficient memory allocation).\n\nNote that the iterated element passed to the body subgraph does not have a sequence\naxis. It will have a rank one less than the rank of the corresponding scan_input.\n\nThe scan operation returns the final values of the state_variables as well as the\nscan_outputs.\n\nThe optional attribute scan_input_directions specifies the direction (forward or backward)\nfor each scan input. If this attribute is omitted, all sequences are scanned in the forward\ndirection. A bidirectional scan may be performed by specifying the same tensor input twice\nin the scan_inputs, once with a forward direction, and once with a backward direction.\n\nThe scan_output of the operation is produced by concatenating the scan_output_element\nvalues produced by the body in each iteration. 
The optional attribute scan_output_directions\nspecifies the direction in which scan_output is constructed (by appending or prepending the\nscan_output_element to scan_output in each iteration) for each scan_output. If this attribute\nis omitted, the scan_output_element is appended to the scan_output in each iteration.\n\nThe optional attribute scan_input_axes specifies the axis to be scanned for each scan_input.\nIf omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the\nbatch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1.\nNote that scanning a non-zero axis may be less efficient than scanning axis zero.\n\nThe optional attribute scan_output_axes specifies the axis along which the scan_outputs\nare accumulated for each scan_output. For example, if axis 1 is the time axis (to be\nscanned) for both inputs and outputs, specify a scan_input axis and scan_output axis\nvalue of 1.\n\nNote that because of the ONNX restriction that only the last parameter of an operator can\nbe variadic, the initial-states and scan-inputs are listed together as one input parameter.\nSimilarly, the final-states and scan-outputs are listed together as one output parameter.\nThe attribute num_scan_inputs indicates the number M of scan-inputs.\n\nThe behavior of\n\n Scan <\n num_scan_inputs = m,\n body = loop-body,\n scan_input_axes = [axis_1, ..., axis_m]\n > (init_1, ..., init_n, scan_1, ..., scan_m)\n\nis equivalent to the following pseudo-code:\n\n // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i\n // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.\n sequence_length = scan_1.shape[axis_1];\n\n // initialize state-variables\n st_1 = init_1; ... st_n = init_n;\n // initialize scan-output variables: [] denotes an empty tensor\n scan_out_1 = []; ...; scan_out_k = [];\n // identify number of iterations:\n\n // execute loop\n for (int t = 0; t < sequence_length; ++t) {\n // generate the scan-input elements: the notation T[t] indicates the sub-tensor\n // of rank one less than T obtained by indexing T at position t along axis k.\n si_1 = scan_1[t];\n ... ;\n si_m = scan_m[t];\n // execute loop-body\n st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m)\n // accumulate the scan-output elements\n scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k);\n }\n\n return st_1, ..., st_n, scan_out_1, ..., scan_out_k;\n\n*Sample usage: Encoding RNN using a Scan*\n\nThe following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi,\nrecurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can\nbe encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes\n%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). 
If these\nvalues are computed in the outer graph, they need to be passed in as extra state_variables.\n\n graph rnn-encoding {\n %H_0 = ...\n %X = ...\n %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X)\n return %Y, %Y_h\n }\n\n graph rnn-cell-1 (\n %H_tminus1[FLOAT, tensor]\n %X_t[FLOAT, tensor]\n ) {\n %Wi = ...\n %Ri = ...\n %Wbi = ...\n %Rbi = ...\n %t1 = X_t * (Wi^T)\n %t2 = H_tminus1*(Ri^T)\n %t3 = Add(%t1, %t2)\n %t4 = Add(%t3, %Wbi)\n %t5 = Add(%t4, %Rbi)\n %Ht = Tanh(%t5)\n %Accumulate = Identity(%Ht)\n return %Ht, %Accumulate\n }\n\n", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph run each iteration. It has N+M inputs: (loop state variables..., scan_input_elts...). It has N+K outputs: (loop state variables..., scan_output_elts...). Each scan_output is created by concatenating the value of the specified scan_output_elt value at the end of each iteration of the loop. It is an error if the dimensions of these values change across loop iterations." + }, + { + "name": "num_scan_inputs", + "type": "int64", + "required": true, + "description": "An attribute specifying the number of scan_inputs M. " + }, + { + "name": "scan_input_axes", + "type": "int64[]", + "required": false, + "description": "An optional list of M flags. The i-th element of the list specifies the axis to be scanned (the sequence axis) for the i-th scan_input. If omitted, 0 will be used as the scan axis for every scan_input. Negative value for an axis means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + }, + { + "name": "scan_input_directions", + "type": "int64[]", + "required": false, + "description": "An optional list of M flags. The i-th element of the list specifies the direction to be scanned for the i-th scan_input tensor: 0 indicates forward direction and 1 indicates reverse direction. If omitted, all scan_input tensors will be scanned in the forward direction." + }, + { + "name": "scan_output_axes", + "type": "int64[]", + "required": false, + "description": "An optional list of K flags. The i-th element of the list specifies the axis for the i-th scan_output. The scan outputs are accumulated along the specified axis. If omitted, 0 will be used as the scan axis for every scan_output. Negative value for an axis means counting dimensions from the back. Accepted range is [-r, r-1]." + }, + { + "name": "scan_output_directions", + "type": "int64[]", + "required": false, + "description": "An optional list of K flags, one for each scan_output. The i-th element of the list specifies whether the i-th scan_output should be constructed by appending or prepending a new value in each iteration: 0 indicates appending and 1 indicates prepending. If omitted, all scan_output tensors will be produced by appending a value in each iteration." 
+ } + ], + "inputs": [ + { + "name": "initial_state_and_scan_inputs", + "type": "V", + "list": true, + "description": "Initial values of the loop's N state variables followed by M scan_inputs" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "final_state_and_scan_outputs", + "type": "V", + "list": true, + "description": "Final values of the loop's N state variables followed by K scan_outputs" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "1 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "All Tensor types up to IRv10.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + } + ], + "examples": [ + { + "summary": "scan_8", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = onnx.helper.make_node(\n \"Add\", inputs=[\"sum_in\", \"next\"], outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"sum_out\"], outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nno_sequence_lens = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[no_sequence_lens, \"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for batch-size 1, sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((1, 2))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((1, 3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((1, 2))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((1, 3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 8)],\n)" + }, + { + "summary": "scan_9", + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info(\n \"sum_in\", onnx.TensorProto.FLOAT, [2]\n)\nnext = onnx.helper.make_tensor_value_info( # noqa: A001\n \"next\", onnx.TensorProto.FLOAT, [2]\n)\nsum_out = onnx.helper.make_tensor_value_info(\n \"sum_out\", onnx.TensorProto.FLOAT, [2]\n)\nscan_out = onnx.helper.make_tensor_value_info(\n \"scan_out\", onnx.TensorProto.FLOAT, [2]\n)\nadd_node = 
onnx.helper.make_node(\n \"Add\", inputs=[\"sum_in\", \"next\"], outputs=[\"sum_out\"]\n)\nid_node = onnx.helper.make_node(\n \"Identity\", inputs=[\"sum_out\"], outputs=[\"scan_out\"]\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node], \"scan_body\", [sum_in, next], [sum_out, scan_out]\n)\n# create scan op node\nnode = onnx.helper.make_node(\n \"Scan\",\n inputs=[\"initial\", \"x\"],\n outputs=[\"y\", \"z\"],\n num_scan_inputs=1,\n body=scan_body,\n)\n# create inputs for sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((2,))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((2,))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))\n\nexpect(\n node,\n inputs=[initial, x],\n outputs=[y, z],\n name=\"test_scan9_sum\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 9)],\n)" + } + ] + }, + { + "name": "Scatter", + "module": "ai.onnx", + "version": 9, + "description": "Given `data`, `updates` and `indices` input tensors of rank r >= 1, write the values provided by `updates`\ninto the first input, `data`, along the `axis` dimension of `data` (by default the outer-most one, axis=0) at the corresponding `indices`.\nFor each entry in `updates`, the target index in `data` is specified by the corresponding entry in `indices`\nfor dimension = axis, and by the index in the source for dimension != axis. For instance, in a 2-D tensor case,\ndata[indices[i][j]][j] = updates[i][j] if axis = 0, or data[i][indices[i][j]] = updates[i][j] if axis = 1,\nwhere i and j are loop counters from 0 up to the respective size in `updates` - 1.\nExample 1:\n data = [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n indices = [\n [1, 0, 2],\n [0, 2, 1],\n ]\n updates = [\n [1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2],\n ]\n output = [\n [2.0, 1.1, 0.0]\n [1.0, 0.0, 2.2]\n [0.0, 2.1, 1.2]\n ]\nExample 2:\n data = [[1.0, 2.0, 3.0, 4.0, 5.0]]\n indices = [[1, 3]]\n updates = [[1.1, 2.1]]\n axis = 1\n output = [[1.0, 1.1, 3.0, 2.1, 5.0]]\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1]" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "Tind", + "description": "Tensor of int32/int64 indices, of rank r >= 1 (same rank as input)." + }, + { + "name": "updates", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank and shape as indices)" + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank as input)." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input and output types can be of any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "scatter_with_axis", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"Scatter\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter(data, indices, updates, axis=axis)\n# print(y) produces\n# [[1.0, 1.1, 3.0, 2.1, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_with_axis\",\n opset_imports=[helper.make_opsetid(\"\", 10)],\n)" + }, + { + "summary": "scatter_without_axis", + "code": "node = onnx.helper.make_node(\n \"Scatter\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n)\ndata = np.zeros((3, 3), dtype=np.float32)\nindices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)\nupdates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)\n\ny = scatter(data, indices, updates)\n# print(y) produces\n# [[2.0, 1.1, 0.0],\n# [1.0, 0.0, 2.2],\n# [0.0, 2.1, 1.2]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_without_axis\",\n opset_imports=[helper.make_opsetid(\"\", 10)],\n)" + } + ] + }, + { + "name": "Scatter", + "module": "ai.onnx", + "version": 11, + "description": "This operator is deprecated. Please use ScatterElements, which provides the same functionality.\n\nScatter takes three inputs `data`, `updates`, and `indices` of the same\nrank r >= 1 and an optional attribute axis that identifies an axis of `data`\n(by default, the outer-most axis, that is axis 0). The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value\nto values specified by `updates` at specific index positions specified by\n`indices`. Its output shape is the same as the shape of `data`.\n\nFor each entry in `updates`, the target index in `data` is obtained by combining\nthe corresponding entry in `indices` with the index of the entry itself: the\nindex-value for dimension = axis is obtained from the value of the corresponding\nentry in `indices` and the index-value for dimension != axis is obtained from the\nindex of the entry itself.\n\nFor instance, in a 2-D tensor case, the update corresponding to the [i][j] entry\nis performed as below:\n```\n output[indices[i][j]][j] = updates[i][j] if axis = 0,\n output[i][indices[i][j]] = updates[i][j] if axis = 1,\n```\n\nThis operator is the inverse of GatherElements. 
It is similar to Torch's Scatter operation.\n\nExample 1:\n```\n data = [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n indices = [\n [1, 0, 2],\n [0, 2, 1],\n ]\n updates = [\n [1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2],\n ]\n output = [\n [2.0, 1.1, 0.0]\n [1.0, 0.0, 2.2]\n [0.0, 2.1, 1.2]\n ]\n```\nExample 2:\n```\n data = [[1.0, 2.0, 3.0, 4.0, 5.0]]\n indices = [[1, 3]]\n updates = [[1.1, 2.1]]\n axis = 1\n output = [[1.0, 1.1, 3.0, 2.1, 5.0]]\n```\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "Tind", + "description": "Tensor of int32/int64 indices, of rank r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds." + }, + { + "name": "updates", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank and shape as indices)" + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank as input)." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input and output types can be of any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "scatter_with_axis", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"Scatter\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter(data, indices, updates, axis=axis)\n# print(y) produces\n# [[1.0, 1.1, 3.0, 2.1, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_with_axis\",\n opset_imports=[helper.make_opsetid(\"\", 10)],\n)" + }, + { + "summary": "scatter_without_axis", + "code": "node = onnx.helper.make_node(\n \"Scatter\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n)\ndata = np.zeros((3, 3), dtype=np.float32)\nindices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)\nupdates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)\n\ny = scatter(data, indices, updates)\n# print(y) produces\n# [[2.0, 1.1, 0.0],\n# [1.0, 0.0, 2.2],\n# [0.0, 2.1, 1.2]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_without_axis\",\n opset_imports=[helper.make_opsetid(\"\", 10)],\n)" + } + ] + }, + { + "name": "ScatterElements", + "module": "ai.onnx", + "version": 11, + "description": "ScatterElements takes three inputs `data`, `updates`, and `indices` of the same\nrank r >= 1 and an optional attribute axis that identifies an axis of `data`\n(by default, the 
outer-most axis, that is axis 0). The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value\nto values specified by `updates` at specific index positions specified by\n`indices`. Its output shape is the same as the shape of `data`.\n\nFor each entry in `updates`, the target index in `data` is obtained by combining\nthe corresponding entry in `indices` with the index of the entry itself: the\nindex-value for dimension = axis is obtained from the value of the corresponding\nentry in `indices` and the index-value for dimension != axis is obtained from the\nindex of the entry itself.\n\nFor instance, in a 2-D tensor case, the update corresponding to the [i][j] entry\nis performed as below:\n```\n output[indices[i][j]][j] = updates[i][j] if axis = 0,\n output[i][indices[i][j]] = updates[i][j] if axis = 1,\n```\n\nThis operator is the inverse of GatherElements. It is similar to Torch's Scatter operation.\n\nExample 1:\n```\n data = [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n indices = [\n [1, 0, 2],\n [0, 2, 1],\n ]\n updates = [\n [1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2],\n ]\n output = [\n [2.0, 1.1, 0.0]\n [1.0, 0.0, 2.2]\n [0.0, 2.1, 1.2]\n ]\n```\nExample 2:\n```\n data = [[1.0, 2.0, 3.0, 4.0, 5.0]]\n indices = [[1, 3]]\n updates = [[1.1, 2.1]]\n axis = 1\n output = [[1.0, 1.1, 3.0, 2.1, 5.0]]\n```\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "Tind", + "description": "Tensor of int32/int64 indices, of rank r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds." + }, + { + "name": "updates", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank and shape as indices)" + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank as input)." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input and output types can be of any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "scatter_elements_with_axis", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis)\n# print(y) produces\n# [[1.0, 1.1, 3.0, 2.1, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_axis\",\n)" + }, + { + "summary": "scatter_elements_with_duplicate_indices", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"add\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"add\")\n# print(y) produces\n# [[1.0, 5.2, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_duplicate_indices\",\n)" + }, + { + "summary": "scatter_elements_with_negative_indices", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, -3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis)\n# print(y) produces\n# [[1.0, 1.1, 2.1, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_negative_indices\",\n)" + }, + { + "summary": "scatter_elements_with_reduction_max", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"max\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"max\")\n# print(y) produces\n# [[1.0, 2.1, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_reduction_max\",\n)" + }, + { + "summary": "scatter_elements_with_reduction_min", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"min\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], dtype=np.int64)\nupdates = 
np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"min\")\n# print(y) produces\n# [[1.0, 1.1, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_reduction_min\",\n)" + }, + { + "summary": "scatter_elements_without_axis", + "code": "node = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n)\ndata = np.zeros((3, 3), dtype=np.float32)\nindices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)\nupdates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates)\n# print(y) produces\n# [[2.0, 1.1, 0.0],\n# [1.0, 0.0, 2.2],\n# [0.0, 2.1, 1.2]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_without_axis\",\n)" + } + ] + }, + { + "name": "ScatterElements", + "module": "ai.onnx", + "version": 13, + "description": "ScatterElements takes three inputs `data`, `updates`, and `indices` of the same\nrank r >= 1 and an optional attribute axis that identifies an axis of `data`\n(by default, the outer-most axis, that is axis 0). The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value\nto values specified by `updates` at specific index positions specified by\n`indices`. Its output shape is the same as the shape of `data`.\n\nFor each entry in `updates`, the target index in `data` is obtained by combining\nthe corresponding entry in `indices` with the index of the entry itself: the\nindex-value for dimension = axis is obtained from the value of the corresponding\nentry in `indices` and the index-value for dimension != axis is obtained from the\nindex of the entry itself.\n\nFor instance, in a 2-D tensor case, the update corresponding to the [i][j] entry\nis performed as below:\n```\n output[indices[i][j]][j] = updates[i][j] if axis = 0,\n output[i][indices[i][j]] = updates[i][j] if axis = 1,\n```\n\nThis operator is the inverse of GatherElements. It is similar to Torch's Scatter operation.\n\nExample 1:\n```\n data = [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n indices = [\n [1, 0, 2],\n [0, 2, 1],\n ]\n updates = [\n [1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2],\n ]\n output = [\n [2.0, 1.1, 0.0]\n [1.0, 0.0, 2.2]\n [0.0, 2.1, 1.2]\n ]\n```\nExample 2:\n```\n data = [[1.0, 2.0, 3.0, 4.0, 5.0]]\n indices = [[1, 3]]\n updates = [[1.1, 2.1]]\n axis = 1\n output = [[1.0, 1.1, 3.0, 2.1, 5.0]]\n```\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "Tind", + "description": "Tensor of int32/int64 indices, of rank r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds." + }, + { + "name": "updates", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank and shape as indices)" + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank as input)." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input and output types can be of any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "scatter_elements_with_axis", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis)\n# print(y) produces\n# [[1.0, 1.1, 3.0, 2.1, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_axis\",\n)" + }, + { + "summary": "scatter_elements_with_duplicate_indices", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"add\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"add\")\n# print(y) produces\n# [[1.0, 5.2, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_duplicate_indices\",\n)" + }, + { + "summary": "scatter_elements_with_negative_indices", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, -3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis)\n# print(y) produces\n# [[1.0, 1.1, 2.1, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_negative_indices\",\n)" + }, + { + "summary": "scatter_elements_with_reduction_max", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"max\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"max\")\n# print(y) produces\n# [[1.0, 2.1, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_reduction_max\",\n)" + }, + { + "summary": "scatter_elements_with_reduction_min", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"min\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], 
dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"min\")\n# print(y) produces\n# [[1.0, 1.1, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_reduction_min\",\n)" + }, + { + "summary": "scatter_elements_without_axis", + "code": "node = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n)\ndata = np.zeros((3, 3), dtype=np.float32)\nindices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)\nupdates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates)\n# print(y) produces\n# [[2.0, 1.1, 0.0],\n# [1.0, 0.0, 2.2],\n# [0.0, 2.1, 1.2]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_without_axis\",\n)" + } + ] + }, + { + "name": "ScatterElements", + "module": "ai.onnx", + "version": 16, + "description": "ScatterElements takes three inputs `data`, `updates`, and `indices` of the same\nrank r >= 1 and an optional attribute axis that identifies an axis of `data`\n(by default, the outer-most axis, that is axis 0). The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value\nto values specified by `updates` at specific index positions specified by\n`indices`. Its output shape is the same as the shape of `data`.\nFor each entry in `updates`, the target index in `data` is obtained by combining\nthe corresponding entry in `indices` with the index of the entry itself: the\nindex-value for dimension = axis is obtained from the value of the corresponding\nentry in `indices` and the index-value for dimension != axis is obtained from the\nindex of the entry itself.\n`reduction` allows specification of an optional reduction operation, which is applied to all values in `updates`\ntensor into `output` at the specified `indices`.\nIn cases where `reduction` is set to \"none\", indices should not have duplicate entries: that is, if idx1 != idx2,\nthen indices[idx1] != indices[idx2]. For instance, in a 2-D tensor case, the update\ncorresponding to the [i][j] entry is performed as below:\n```\n output[indices[i][j]][j] = updates[i][j] if axis = 0,\n output[i][indices[i][j]] = updates[i][j] if axis = 1,\n```\nWhen `reduction` is set to \"add\", the update corresponding to the [i][j] entry is performed as below:\n```\n output[indices[i][j]][j] += updates[i][j] if axis = 0,\n output[i][indices[i][j]] += updates[i][j] if axis = 1,\n```\nWhen `reduction` is set to \"mul\", the update corresponding to the [i][j] entry is performed as below:\n```\n output[indices[i][j]][j] *= updates[i][j] if axis = 0,\n output[i][indices[i][j]] *= updates[i][j] if axis = 1,\n```\nThis operator is the inverse of GatherElements. It is similar to Torch's Scatter operation.\nExample 1:\n```\n data = [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n indices = [\n [1, 0, 2],\n [0, 2, 1],\n ]\n updates = [\n [1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2],\n ]\n output = [\n [2.0, 1.1, 0.0]\n [1.0, 0.0, 2.2]\n [0.0, 2.1, 1.2]\n ]\n```\nExample 2:\n```\n data = [[1.0, 2.0, 3.0, 4.0, 5.0]]\n indices = [[1, 3]]\n updates = [[1.1, 2.1]]\n axis = 1\n output = [[1.0, 1.1, 3.0, 2.1, 5.0]]\n```\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to scatter on. Negative value means counting dimensions from the back. 
Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "reduction", + "type": "string", + "required": false, + "default": "none", + "description": "Type of reduction to apply: none (default), add, mul. 'none': no reduction applied. 'add': reduction using the addition operation. 'mul': reduction using the multiplication operation." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "Tind", + "description": "Tensor of int32/int64 indices, of rank r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds." + }, + { + "name": "updates", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank and shape as indices)" + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank as input)." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input and output types can be of any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "scatter_elements_with_axis", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis)\n# print(y) produces\n# [[1.0, 1.1, 3.0, 2.1, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_axis\",\n)" + }, + { + "summary": "scatter_elements_with_duplicate_indices", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"add\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"add\")\n# print(y) produces\n# [[1.0, 5.2, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_duplicate_indices\",\n)" + }, + { + "summary": "scatter_elements_with_negative_indices", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, -3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis)\n# print(y) produces\n# [[1.0, 1.1, 2.1, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n 
name=\"test_scatter_elements_with_negative_indices\",\n)" + }, + { + "summary": "scatter_elements_with_reduction_max", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"max\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"max\")\n# print(y) produces\n# [[1.0, 2.1, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_reduction_max\",\n)" + }, + { + "summary": "scatter_elements_with_reduction_min", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"min\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"min\")\n# print(y) produces\n# [[1.0, 1.1, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_reduction_min\",\n)" + }, + { + "summary": "scatter_elements_without_axis", + "code": "node = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n)\ndata = np.zeros((3, 3), dtype=np.float32)\nindices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)\nupdates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates)\n# print(y) produces\n# [[2.0, 1.1, 0.0],\n# [1.0, 0.0, 2.2],\n# [0.0, 2.1, 1.2]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_without_axis\",\n)" + } + ] + }, + { + "name": "ScatterElements", + "module": "ai.onnx", + "version": 18, + "description": "ScatterElements takes three inputs `data`, `updates`, and `indices` of the same\nrank r >= 1 and an optional attribute axis that identifies an axis of `data`\n(by default, the outer-most axis, that is axis 0). The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value\nto values specified by `updates` at specific index positions specified by\n`indices`. Its output shape is the same as the shape of `data`.\n\nFor each entry in `updates`, the target index in `data` is obtained by combining\nthe corresponding entry in `indices` with the index of the entry itself: the\nindex-value for dimension = axis is obtained from the value of the corresponding\nentry in `indices` and the index-value for dimension != axis is obtained from the\nindex of the entry itself.\n\n`reduction` allows specification of an optional reduction operation, which is applied to all values in `updates`\ntensor into `output` at the specified `indices`.\nIn cases where `reduction` is set to \"none\", indices should not have duplicate entries: that is, if idx1 != idx2,\nthen indices[idx1] != indices[idx2]. 
For instance, in a 2-D tensor case, the update\ncorresponding to the [i][j] entry is performed as below:\n```\noutput[indices[i][j]][j] = updates[i][j] if axis = 0,\noutput[i][indices[i][j]] = updates[i][j] if axis = 1,\n```\nWhen `reduction` is set to some reduction function `f`, the update corresponding to the [i][j] entry is performed as below:\n```\noutput[indices[i][j]][j] = f(output[indices[i][j]][j], updates[i][j]) if axis = 0,\noutput[i][indices[i][j]] = f(output[i][indices[i][j]], updates[i][j]) if axis = 1,\n```\nwhere `f` is `+`, `*`, `max` or `min` as specified.\n\nThis operator is the inverse of GatherElements. It is similar to Torch's Scatter operation.\n\n(Opset 18 change): Adds max/min to the set of allowed reduction ops.\n\nExample 1:\n```\ndata = [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n]\nindices = [\n [1, 0, 2],\n [0, 2, 1],\n]\nupdates = [\n [1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2],\n]\noutput = [\n [2.0, 1.1, 0.0]\n [1.0, 0.0, 2.2]\n [0.0, 2.1, 1.2]\n]\n```\nExample 2:\n```\ndata = [[1.0, 2.0, 3.0, 4.0, 5.0]]\nindices = [[1, 3]]\nupdates = [[1.1, 2.1]]\naxis = 1\noutput = [[1.0, 1.1, 3.0, 2.1, 5.0]]\n```\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "reduction", + "type": "string", + "required": false, + "default": "none", + "description": "Type of reduction to apply: none (default), add, mul, max, min. 'none': no reduction applied. 'add': reduction using the addition operation. 'mul': reduction using the multiplication operation. 'max': reduction using the maximum operation. 'min': reduction using the minimum operation." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "Tind", + "description": "Tensor of int32/int64 indices, of rank r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds." + }, + { + "name": "updates", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank and shape as indices)" + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r >= 1 (same rank as input)." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input and output types can be of any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "scatter_elements_with_axis", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis)\n# print(y) produces\n# [[1.0, 1.1, 3.0, 2.1, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_axis\",\n)" + }, + { + "summary": "scatter_elements_with_duplicate_indices", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"add\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"add\")\n# print(y) produces\n# [[1.0, 5.2, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_duplicate_indices\",\n)" + }, + { + "summary": "scatter_elements_with_negative_indices", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, -3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis)\n# print(y) produces\n# [[1.0, 1.1, 2.1, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_negative_indices\",\n)" + }, + { + "summary": "scatter_elements_with_reduction_max", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"max\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"max\")\n# print(y) produces\n# [[1.0, 2.1, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_reduction_max\",\n)" + }, + { + "summary": "scatter_elements_with_reduction_min", + "code": "axis = 1\nnode = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n axis=axis,\n reduction=\"min\",\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 1]], 
dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis, reduction=\"min\")\n# print(y) produces\n# [[1.0, 1.1, 3.0, 4.0, 5.0]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_with_reduction_min\",\n)" + }, + { + "summary": "scatter_elements_without_axis", + "code": "node = onnx.helper.make_node(\n \"ScatterElements\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n)\ndata = np.zeros((3, 3), dtype=np.float32)\nindices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)\nupdates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates)\n# print(y) produces\n# [[2.0, 1.1, 0.0],\n# [1.0, 0.0, 2.2],\n# [0.0, 2.1, 1.2]]\n\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[y],\n name=\"test_scatter_elements_without_axis\",\n)" + } + ] + }, + { + "name": "ScatterND", + "module": "ai.onnx", + "version": 11, + "description": "ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1,\nand `updates` tensor of rank q + r - indices.shape[-1] - 1. The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value to values\nspecified by `updates` at specific index positions specified by `indices`. Its output shape\nis the same as the shape of `data`. Note that `indices` should not have duplicate entries.\nThat is, two or more `updates` for the same index-location are not supported.\n\n`indices` is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of `indices`.\n`indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`.\nHence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an\nupdate to a single element of the tensor. When k is less than rank(data) each update entry specifies an\nupdate to a slice of the tensor. Index values are allowed to be negative, as per the usual\nconvention for counting backwards from the end, but are expected in the valid range.\n\n`updates` is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the\nfirst (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape.\nThe remaining dimensions of `updates` correspond to the dimensions of the\nreplacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor,\ncorresponding to the trailing (r-k) dimensions of `data`.
Thus, the shape of `updates`\nmust equal indices.shape[0:q-1] ++ data.shape[k:r], where ++ denotes the concatenation\nof shapes.\n\nThe `output` is calculated via the following equation:\n\n output = np.copy(data)\n update_indices = indices.shape[:-1]\n for idx in np.ndindex(update_indices):\n output[indices[idx]] = updates[idx]\n\nThe order of iteration in the above loop is not specified.\nIn particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2].\nThis ensures that the output value does not depend on the iteration order.\n\nThis operator is the inverse of GatherND.\n\nExample 1:\n```\n data = [1, 2, 3, 4, 5, 6, 7, 8]\n indices = [[4], [3], [1], [7]]\n updates = [9, 10, 11, 12]\n output = [1, 11, 3, 10, 9, 6, 7, 12]\n```\n\nExample 2:\n```\n data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]\n indices = [[0], [2]]\n updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]\n output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]\n```\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "tensor(int64)", + "description": "Tensor of rank q >= 1." + }, + { + "name": "updates", + "type": "T", + "description": "Tensor of rank q + r - indices_shape[-1] - 1." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r >= 1."
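As with ScatterElements, the examples that follow call a `scatter_nd_impl` helper that this file never defines. A direct transcription of the update equation above works as a sketch (hypothetical, not the official ONNX helper; the `reduction` argument anticipates the opset 16 and 18 entries further down):

```python
import numpy as np

def scatter_nd_impl(data, indices, updates, reduction="none"):
    # Transcribes the equation in the description; `key` is the k-tuple
    # partial index into `data`, so `output[key]` may be a slice.
    output = np.copy(data)
    for idx in np.ndindex(*indices.shape[:-1]):
        key = tuple(indices[idx])
        if reduction == "add":
            output[key] += updates[idx]
        elif reduction == "mul":
            output[key] *= updates[idx]
        elif reduction == "max":
            output[key] = np.maximum(output[key], updates[idx])
        elif reduction == "min":
            output[key] = np.minimum(output[key], updates[idx])
        else:  # "none": overwrite; only valid when indices has no duplicates
            output[key] = updates[idx]
    return output
```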
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "scatternd", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [2]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates)\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd\",\n)" + }, + { + "summary": "scatternd_add", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"add\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[7, 8, 9, 10], [13, 14, 15, 16], [18, 17, 16, 15], [16, 15, 14, 13]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"add\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_add\",\n)" + }, + { + "summary": "scatternd_max", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"max\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as 
np.array(\n# [[[5, 5, 5, 5], [6, 6, 7, 8], [8, 7, 7, 7], [8, 8, 8, 8]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"max\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_max\",\n)" + }, + { + "summary": "scatternd_min", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"min\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 3, 2, 1]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"min\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_min\",\n)" + }, + { + "summary": "scatternd_multiply", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"mul\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[5, 10, 15, 20], [60, 72, 84, 96], [168, 147, 126, 105], [128, 96, 64, 32]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"mul\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_multiply\",\n)" + } + ] + }, + { + "name": "ScatterND", + "module": "ai.onnx", + "version": 13, + "description": "ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1,\nand `updates` tensor of rank q + r - indices.shape[-1] - 1. The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value to values\nspecified by `updates` at specific index positions specified by `indices`. Its output shape\nis the same as the shape of `data`. Note that `indices` should not have duplicate entries.\nThat is, two or more `updates` for the same index-location are not supported.\n\n`indices` is an integer tensor.
Let k denote indices.shape[-1], the last dimension in the shape of `indices`.\n`indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`.\nHence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an\nupdate to a single element of the tensor. When k is less than rank(data) each update entry specifies an\nupdate to a slice of the tensor. Index values are allowed to be negative, as per the usual\nconvention for counting backwards from the end, but are expected in the valid range.\n\n`updates` is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the\nfirst (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape.\nThe remaining dimensions of `updates` correspond to the dimensions of the\nreplacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor,\ncorresponding to the trailing (r-k) dimensions of `data`. Thus, the shape of `updates`\nmust equal indices.shape[0:q-1] ++ data.shape[k:r], where ++ denotes the concatenation\nof shapes.\n\nThe `output` is calculated via the following equation:\n\n output = np.copy(data)\n update_indices = indices.shape[:-1]\n for idx in np.ndindex(update_indices):\n output[indices[idx]] = updates[idx]\n\nThe order of iteration in the above loop is not specified.\nIn particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2].\nThis ensures that the output value does not depend on the iteration order.\n\nThis operator is the inverse of GatherND.\n\nExample 1:\n```\n data = [1, 2, 3, 4, 5, 6, 7, 8]\n indices = [[4], [3], [1], [7]]\n updates = [9, 10, 11, 12]\n output = [1, 11, 3, 10, 9, 6, 7, 12]\n```\n\nExample 2:\n```\n data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]\n indices = [[0], [2]]\n updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]\n output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]\n```\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "tensor(int64)", + "description": "Tensor of rank q >= 1." + }, + { + "name": "updates", + "type": "T", + "description": "Tensor of rank q + r - indices_shape[-1] - 1." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r >= 1."
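The shape constraint restated here is easy to misread; in Python slice notation it says `updates.shape == indices.shape[:-1] + data.shape[k:]`. A small validator (a hypothetical helper, shown only for illustration) makes the rule concrete against Example 2, where r = 3, q = 2 and k = 1:

```python
import numpy as np

def check_scatternd_shapes(data, indices, updates):
    # updates.shape must equal indices.shape[0:q-1] ++ data.shape[k:r].
    k = indices.shape[-1]
    if k > data.ndim:
        raise ValueError("k can be at most rank(data)")
    expected = indices.shape[:-1] + data.shape[k:]
    if updates.shape != expected:
        raise ValueError(f"updates.shape {updates.shape} != {expected}")

# Example 2: expected shape is (2,) + (4, 4) = (2, 4, 4).
check_scatternd_shapes(
    data=np.zeros((4, 4, 4), dtype=np.float32),
    indices=np.zeros((2, 1), dtype=np.int64),
    updates=np.zeros((2, 4, 4), dtype=np.float32),
)
```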
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "scatternd", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [2]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates)\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd\",\n)" + }, + { + "summary": "scatternd_add", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"add\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[7, 8, 9, 10], [13, 14, 15, 16], [18, 17, 16, 15], [16, 15, 14, 13]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"add\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_add\",\n)" + }, + { + "summary": "scatternd_max", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"max\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# 
Expecting output as np.array(\n# [[[5, 5, 5, 5], [6, 6, 7, 8], [8, 7, 7, 7], [8, 8 ,8, 8]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"max\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_max\",\n)" + }, + { + "summary": "scatternd_min", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"min\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 3, 2, 1]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"min\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_min\",\n)" + }, + { + "summary": "scatternd_multiply", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"mul\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[5, 10, 15, 20], [60, 72, 84, 96], [168, 147, 126, 105], [128, 96, 64, 32]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"mul\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_multiply\",\n)" + } + ] + }, + { + "name": "ScatterND", + "module": "ai.onnx", + "version": 16, + "description": "ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1,\nand `updates` tensor of rank q + r - indices.shape[-1] - 1. The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value to values\nspecified by `updates` at specific index positions specified by `indices`. Its output shape\nis the same as the shape of `data`.\n\n`indices` is an integer tensor. 
Let k denote indices.shape[-1], the last dimension in the shape of `indices`.\n`indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`.\nHence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an\nupdate to a single element of the tensor. When k is less than rank(data) each update entry specifies an\nupdate to a slice of the tensor. Index values are allowed to be negative, as per the usual\nconvention for counting backwards from the end, but are expected in the valid range.\n\n`updates` is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the\nfirst (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape.\nThe remaining dimensions of `updates` correspond to the dimensions of the\nreplacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor,\ncorresponding to the trailing (r-k) dimensions of `data`. Thus, the shape of `updates`\nmust equal indices.shape[0:q-1] ++ data.shape[k:r], where ++ denotes the concatenation\nof shapes.\n\nThe `output` is calculated via the following equation:\n output = np.copy(data)\n update_indices = indices.shape[:-1]\n for idx in np.ndindex(update_indices):\n output[indices[idx]] = updates[idx]\nThe order of iteration in the above loop is not specified.\nIn particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2].\nThis ensures that the output value does not depend on the iteration order.\n\n`reduction` allows specification of an optional reduction operation, which is applied to all values in `updates`\ntensor into `output` at the specified `indices`.\nIn cases where `reduction` is set to \"none\", indices should not have duplicate entries: that is, if idx1 != idx2,\nthen indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order.\nWhen `reduction` is set to \"add\", `output` is calculated as follows:\n output = np.copy(data)\n update_indices = indices.shape[:-1]\n for idx in np.ndindex(update_indices):\n output[indices[idx]] += updates[idx]\nWhen `reduction` is set to \"mul\", `output` is calculated as follows:\n output = np.copy(data)\n update_indices = indices.shape[:-1]\n for idx in np.ndindex(update_indices):\n output[indices[idx]] *= updates[idx]\nThis operator is the inverse of GatherND.\nExample 1:\n```\n data = [1, 2, 3, 4, 5, 6, 7, 8]\n indices = [[4], [3], [1], [7]]\n updates = [9, 10, 11, 12]\n output = [1, 11, 3, 10, 9, 6, 7, 12]\n```\nExample 2:\n```\n data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]\n indices = [[0], [2]]\n updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]\n output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]\n```\n", + "attributes": [ + { + "name": "reduction", + "type": "string", + "required": false, + "default": "none", + "description": "Type of reduction to apply: none (default), add, mul. 'none': no reduction applied. 'add': reduction using the addition operation.
'mul': reduction using the multiplication operation." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "tensor(int64)", + "description": "Tensor of rank q >= 1." + }, + { + "name": "updates", + "type": "T", + "description": "Tensor of rank q + r - indices_shape[-1] - 1." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r >= 1." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "scatternd", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [2]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates)\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd\",\n)" + }, + { + "summary": "scatternd_add", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"add\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[7, 8, 9, 10], [13, 14, 15, 16], [18, 17, 16, 15], [16, 15, 14, 13]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"add\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_add\",\n)" + }, + { + "summary": "scatternd_max", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"max\",\n)\ndata = 
np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[5, 5, 5, 5], [6, 6, 7, 8], [8, 7, 7, 7], [8, 8 ,8, 8]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"max\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_max\",\n)" + }, + { + "summary": "scatternd_min", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"min\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 3, 2, 1]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"min\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_min\",\n)" + }, + { + "summary": "scatternd_multiply", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"mul\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[5, 10, 15, 20], [60, 72, 84, 96], [168, 147, 126, 105], [128, 96, 64, 32]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"mul\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_multiply\",\n)" + } + ] + }, + { + "name": "ScatterND", + "module": "ai.onnx", + "version": 18, + "description": "ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1,\nand `updates` 
tensor of rank q + r - indices.shape[-1] - 1. The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value to values\nspecified by `updates` at specific index positions specified by `indices`. Its output shape\nis the same as the shape of `data`.\n\n`indices` is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of `indices`.\n`indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`.\nHence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an\nupdate to a single element of the tensor. When k is less than rank(data) each update entry specifies an\nupdate to a slice of the tensor. Index values are allowed to be negative, as per the usual\nconvention for counting backwards from the end, but are expected in the valid range.\n\n`updates` is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the\nfirst (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape.\nThe remaining dimensions of `updates` correspond to the dimensions of the\nreplacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor,\ncorresponding to the trailing (r-k) dimensions of `data`. Thus, the shape of `updates`\nmust equal indices.shape[0:q-1] ++ data.shape[k:r], where ++ denotes the concatenation\nof shapes.\n\nThe `output` is calculated via the following equation:\n\n```\noutput = np.copy(data)\nupdate_indices = indices.shape[:-1]\nfor idx in np.ndindex(update_indices):\n output[indices[idx]] = updates[idx]\n```\n\nThe order of iteration in the above loop is not specified.\nIn particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2].\nThis ensures that the output value does not depend on the iteration order.\n\n`reduction` allows specification of an optional reduction operation, which is applied to all values in `updates`\ntensor into `output` at the specified `indices`.\nIn cases where `reduction` is set to \"none\", indices should not have duplicate entries: that is, if idx1 != idx2,\nthen indices[idx1] != indices[idx2].
This ensures that the output value does not depend on the iteration order.\nWhen `reduction` is set to some reduction function `f`, `output` is calculated as follows:\n\n```\noutput = np.copy(data)\nupdate_indices = indices.shape[:-1]\nfor idx in np.ndindex(update_indices):\n output[indices[idx]] = f(output[indices[idx]], updates[idx])\n```\n\nwhere `f` is `+`, `*`, `max` or `min` as specified.\n\nThis operator is the inverse of GatherND.\n\n(Opset 18 change): Adds max/min to the set of allowed reduction ops.\n\nExample 1:\n```\ndata = [1, 2, 3, 4, 5, 6, 7, 8]\nindices = [[4], [3], [1], [7]]\nupdates = [9, 10, 11, 12]\noutput = [1, 11, 3, 10, 9, 6, 7, 12]\n```\n\nExample 2:\n```\ndata = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]\nindices = [[0], [2]]\nupdates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]\noutput = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]\n```\n", + "attributes": [ + { + "name": "reduction", + "type": "string", + "required": false, + "default": "none", + "description": "Type of reduction to apply: none (default), add, mul, max, min. 'none': no reduction applied. 'add': reduction using the addition operation. 'mul': reduction using the multiplication operation. 'max': reduction using the maximum operation. 'min': reduction using the minimum operation." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of rank r >= 1." + }, + { + "name": "indices", + "type": "tensor(int64)", + "description": "Tensor of rank q >= 1." + }, + { + "name": "updates", + "type": "T", + "description": "Tensor of rank q + r - indices_shape[-1] - 1." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of rank r >= 1."
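The `expect(...)` calls in these examples exist only inside the ONNX test generator. To actually run one of the opset 18 nodes end to end, the node can be wrapped in a model and fed to the reference evaluator; the sketch below assumes onnx >= 1.13, which ships `onnx.reference.ReferenceEvaluator`, and reuses the data from Example 1 with `reduction="add"`:

```python
import numpy as np
import onnx
from onnx import TensorProto, helper
from onnx.reference import ReferenceEvaluator  # assumes onnx >= 1.13

node = helper.make_node(
    "ScatterND", ["data", "indices", "updates"], ["y"], reduction="add"
)
graph = helper.make_graph(
    [node], "scatternd_demo",
    [helper.make_tensor_value_info("data", TensorProto.FLOAT, [8]),
     helper.make_tensor_value_info("indices", TensorProto.INT64, [4, 1]),
     helper.make_tensor_value_info("updates", TensorProto.FLOAT, [4])],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, [8])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 18)])

sess = ReferenceEvaluator(model)
data = np.arange(1.0, 9.0, dtype=np.float32)             # [1. ... 8.]
indices = np.array([[4], [3], [1], [7]], dtype=np.int64)
updates = np.array([9, 10, 11, 12], dtype=np.float32)
(y,) = sess.run(None, {"data": data, "indices": indices, "updates": updates})
print(y)  # [ 1. 13.  3. 14. 14.  6.  7. 20.] -- updates added in place
```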
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "scatternd", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [2]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates)\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd\",\n)" + }, + { + "summary": "scatternd_add", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"add\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[7, 8, 9, 10], [13, 14, 15, 16], [18, 17, 16, 15], [16, 15, 14, 13]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"add\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_add\",\n)" + }, + { + "summary": "scatternd_max", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"max\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# 
Expecting output as np.array(\n# [[[5, 5, 5, 5], [6, 6, 7, 8], [8, 7, 7, 7], [8, 8 ,8, 8]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"max\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_max\",\n)" + }, + { + "summary": "scatternd_min", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"min\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 3, 2, 1]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"min\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_min\",\n)" + }, + { + "summary": "scatternd_multiply", + "code": "node = onnx.helper.make_node(\n \"ScatterND\",\n inputs=[\"data\", \"indices\", \"updates\"],\n outputs=[\"y\"],\n reduction=\"mul\",\n)\ndata = np.array(\n [\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n ],\n dtype=np.float32,\n)\nindices = np.array([[0], [0]], dtype=np.int64)\nupdates = np.array(\n [\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ],\n dtype=np.float32,\n)\n# Expecting output as np.array(\n# [[[5, 10, 15, 20], [60, 72, 84, 96], [168, 147, 126, 105], [128, 96, 64, 32]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates, reduction=\"mul\")\nexpect(\n node,\n inputs=[data, indices, updates],\n outputs=[output],\n name=\"test_scatternd_multiply\",\n)" + } + ] + }, + { + "name": "Selu", + "module": "ai.onnx", + "version": 1, + "description": "Selu takes one input data (Tensor) and produces one output data\n(Tensor) where the scaled exponential linear unit function,\n`y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`,\nis applied to the tensor elementwise.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.673200011253357, + "description": "Coefficient of SELU default to 1.6732." + }, + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." 
+ }, + { + "name": "gamma", + "type": "float32", + "required": false, + "default": 1.0506999492645264, + "description": "Coefficient of SELU default to 1.0507." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "selu", + "code": "node = onnx.helper.make_node(\n \"Selu\", inputs=[\"x\"], outputs=[\"y\"], alpha=2.0, gamma=3.0\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-3.79272318, 0., 3.]\ny = (\n np.clip(x, 0, np.inf) * 3.0\n + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_selu_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = (\n np.clip(x, 0, np.inf) * 3.0\n + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_selu\")" + }, + { + "summary": "selu_default", + "code": "default_alpha = 1.67326319217681884765625\ndefault_gamma = 1.05070102214813232421875\nnode = onnx.helper.make_node(\n \"Selu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = (\n np.clip(x, 0, np.inf) * default_gamma\n + (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha * default_gamma\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_selu_default\")" + } + ], + "category": "Activation" + }, + { + "name": "Selu", + "module": "ai.onnx", + "version": 6, + "description": "Selu takes one input data (Tensor) and produces one output data\n(Tensor) where the scaled exponential linear unit function,\n`y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`,\nis applied to the tensor elementwise.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.6732631921768188, + "description": "Coefficient of SELU default to 1.67326319217681884765625 (i.e., float32 approximation of 1.6732632423543772848170429916717)." + }, + { + "name": "gamma", + "type": "float32", + "required": false, + "default": 1.0507010221481323, + "description": "Coefficient of SELU default to 1.05070102214813232421875 (i.e., float32 approximation of 1.0507009873554804934193349852946)." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "selu", + "code": "node = onnx.helper.make_node(\n \"Selu\", inputs=[\"x\"], outputs=[\"y\"], alpha=2.0, gamma=3.0\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-3.79272318, 0., 3.]\ny = (\n np.clip(x, 0, np.inf) * 3.0\n + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_selu_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = (\n np.clip(x, 0, np.inf) * 3.0\n + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_selu\")" + }, + { + "summary": "selu_default", + "code": "default_alpha = 1.67326319217681884765625\ndefault_gamma = 1.05070102214813232421875\nnode = onnx.helper.make_node(\n \"Selu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = (\n np.clip(x, 0, np.inf) * default_gamma\n + (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha * default_gamma\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_selu_default\")" + } + ], + "category": "Activation" + }, + { + "name": "SequenceAt", + "module": "ai.onnx", + "version": 11, + "description": "Outputs a tensor copy from the tensor at 'position' in 'input_sequence'.\nAccepted range for 'position' is in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'.\nNegative value means counting positions from the back.\n", + "inputs": [ + { + "name": "input_sequence", + "type": "S", + "description": "Input sequence." + }, + { + "name": "position", + "type": "I", + "description": "Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape)." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "tensor", + "type": "T", + "description": "Output tensor at the specified position in the input sequence." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain to any tensor type.", + "type_param_str": "S", + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + }, + { + "description": "Constrain to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain position to integral tensor. It must be a scalar(tensor of empty shape).", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ] + }, + { + "name": "SequenceConstruct", + "module": "ai.onnx", + "version": 11, + "description": "Construct a tensor sequence containing 'inputs' tensors.\nAll tensors in 'inputs' must have the same data type.\n", + "inputs": [ + { + "name": "inputs", + "type": "T", + "list": true, + "description": "Tensors." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "output_sequence", + "type": "S", + "description": "Sequence enclosing the input tensors." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input types to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain output types to any tensor type.", + "type_param_str": "S", + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + } + ] + }, + { + "name": "SequenceEmpty", + "module": "ai.onnx", + "version": 11, + "description": "Construct an empty tensor sequence, with given data type.\n", + "attributes": [ + { + "name": "dtype", + "type": "int64", + "required": false, + "description": "(Optional) The data type of the tensors in the output sequence. The default type is 'float'." + } + ], + "min_input": 0, + "max_input": 0, + "outputs": [ + { + "name": "output", + "type": "S", + "description": "Empty sequence." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain output types to any tensor type.", + "type_param_str": "S", + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + } + ] + }, + { + "name": "SequenceErase", + "module": "ai.onnx", + "version": 11, + "description": "Outputs a tensor sequence that removes the tensor at 'position' from 'input_sequence'.\nAccepted range for 'position' is in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'.\nNegative value means counting positions from the back.\n'position' is optional, by default it erases the last tensor from 'input_sequence'.\n", + "inputs": [ + { + "name": "input_sequence", + "type": "S", + "description": "Input sequence." + }, + { + "name": "position", + "type": "I", + "option": "optional", + "description": "Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "output_sequence", + "type": "S", + "description": "Output sequence that has the tensor at the specified position removed." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain to any tensor type.", + "type_param_str": "S", + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + }, + { + "description": "Constrain position to integral tensor. It must be a scalar(tensor of empty shape).", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ] + }, + { + "name": "SequenceInsert", + "module": "ai.onnx", + "version": 11, + "description": "Outputs a tensor sequence that inserts 'tensor' into 'input_sequence' at 'position'.\n'tensor' must have the same data type as 'input_sequence'.\nAccepted range for 'position' is in `[-n, n]`, where `n` is the number of tensors in 'input_sequence'.\nNegative value means counting positions from the back.\n'position' is optional, by default it inserts 'tensor' to the back of 'input_sequence'.\n", + "inputs": [ + { + "name": "input_sequence", + "type": "S", + "description": "Input sequence." + }, + { + "name": "tensor", + "type": "T", + "description": "Input tensor to be inserted into the input sequence." + }, + { + "name": "position", + "type": "I", + "option": "optional", + "description": "Position in the sequence where the new tensor is inserted. It is optional and default is to insert to the back of the sequence. Negative value means counting positions from the back. Accepted range in `[-n, n]`, where `n` is the number of tensors in 'input_sequence'. 
It is an error if any of the index values are out of bounds. It must be a scalar (tensor of empty shape)." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "output_sequence", + "type": "S", + "description": "Output sequence that contains the inserted tensor at the given position." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "2 - 3", + "type_constraints": [ + { + "description": "Constrain to any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain to any tensor type.", + "type_param_str": "S", + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + }, + { + "description": "Constrain position to integral tensor. It must be a scalar (tensor of empty shape).", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "sequenceinsert", + "code": "test_cases = {\n \"at_back\": [np.array([10, 11, 12]).astype(np.int64)],\n \"at_front\": [np.array([-2, -1, 0]), np.array([0]).astype(np.int64)],\n}\nsequence = [\n np.array([1, 2, 3, 4]).astype(np.int64),\n np.array([5, 6, 7]).astype(np.int64),\n np.array([8, 9]).astype(np.int64),\n]\n\nfor test_name, test_inputs in test_cases.items():\n tensor = test_inputs[0].astype(np.int64)\n\n if len(test_inputs) > 1:\n node = onnx.helper.make_node(\n \"SequenceInsert\",\n inputs=[\"sequence\", \"tensor\", \"position\"],\n outputs=[\"output_sequence\"],\n )\n position = test_inputs[1]\n inserted = sequence_insert_reference_implementation(\n sequence, tensor, position\n )\n expect(\n node,\n inputs=[sequence, tensor, position],\n outputs=[inserted],\n name=\"test_sequence_insert_\" + test_name,\n )\n else:\n node = onnx.helper.make_node(\n \"SequenceInsert\",\n inputs=[\"sequence\", \"tensor\"],\n outputs=[\"output_sequence\"],\n )\n inserted = sequence_insert_reference_implementation(sequence, tensor)\n expect(\n node,\n inputs=[sequence, tensor],\n outputs=[inserted],\n name=\"test_sequence_insert_\" + test_name,\n )" + } + ] + }, + { + "name": "SequenceLength", + "module": "ai.onnx", + "version": 11, + "description": "Produces a scalar (tensor of empty shape) containing the number of tensors in 'input_sequence'.\n", + "inputs": [ + { + "name": "input_sequence", + "type": "S", + "description": "Input sequence." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "length", + "type": "I", + "description": "Length of input sequence. It must be a scalar (tensor of empty shape)."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain to any tensor type.", + "type_param_str": "S", + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + }, + { + "description": "Constrain output to integral tensor. It must be a scalar (tensor of empty shape).", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ] + }, + { + "name": "SequenceMap", + "module": "ai.onnx", + "version": 17, + "description": "Applies a sub-graph to each sample in the input sequence(s).\n\nInputs can be either tensors or sequences, with the exception of the first input, which must\nbe a sequence. The length of the first input sequence will determine the number of samples in the\noutputs. Any other sequence inputs should have the same number of samples. The number of inputs\nand outputs should match those of the subgraph.\n\nFor each i-th element in the output, a sample will be extracted from the input sequence(s) at\nthe i-th position and the sub-graph will be applied to it.\nThe outputs will contain the outputs of the sub-graph for each sample, in the same order as in\nthe input.\n\nThis operator assumes that processing each sample is independent and could be executed in parallel\nor in any order. Users cannot expect any specific ordering in which each subgraph is computed.", + "attributes": [ + { + "name": "body", + "type": "graph", + "required": true, + "description": "The graph to be run for each sample in the sequence(s). It should have as many inputs and outputs as inputs and outputs to the SequenceMap function." + } + ], + "inputs": [ + { + "name": "input_sequence", + "type": "S", + "description": "Input sequence."
+ }, + { + "name": "additional_inputs", + "type": "V", + "list": true, + "description": "Additional inputs to the graph" + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "out_sequence", + "type": "S", + "list": true, + "description": "Output sequence(s)" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "1 - ∞", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input types to any sequence type.", + "type_param_str": "S", + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + }, + { + "description": "Constrain to any tensor or sequence type.", + "type_param_str": "V", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + } + ], + "examples": [ + { + "summary": "sequence_map_add_1_sequence_1_tensor", + "code": "body = onnx.helper.make_graph(\n [onnx.helper.make_node(\"Add\", [\"in0\", \"in1\"], [\"out0\"])],\n \"seq_map_body\",\n [\n onnx.helper.make_tensor_value_info(\n \"in0\", onnx.TensorProto.FLOAT, [\"N\"]\n ),\n onnx.helper.make_tensor_value_info(\n \"in1\", onnx.TensorProto.FLOAT, [\"N\"]\n ),\n ],\n [onnx.helper.make_tensor_value_info(\"out0\", onnx.TensorProto.FLOAT, [\"N\"])],\n)\n\nnode = onnx.helper.make_node(\n \"SequenceMap\", inputs=[\"x0\", \"x1\"], outputs=[\"y0\"], body=body\n)\n\nx0 = [np.random.uniform(0.0, 1.0, 10).astype(np.float32) for k in range(3)]\nx1 = np.random.uniform(0.0, 1.0, 10).astype(np.float32)\ny0 = [x0[i] + x1 for i in range(3)]\ninput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"])\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"]),\n]\noutput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"])\n ),\n]\nexpect(\n node,\n inputs=[x0, x1],\n outputs=[y0],\n input_type_protos=input_type_protos,\n output_type_protos=output_type_protos,\n name=\"test_sequence_map_add_1_sequence_1_tensor\",\n)" + }, + { + "summary": "sequence_map_add_2_sequences", + "code": "body = onnx.helper.make_graph(\n [onnx.helper.make_node(\"Add\", [\"in0\", \"in1\"], [\"out0\"])],\n \"seq_map_body\",\n [\n onnx.helper.make_tensor_value_info(\n \"in0\", onnx.TensorProto.FLOAT, [\"N\"]\n ),\n onnx.helper.make_tensor_value_info(\n \"in1\", onnx.TensorProto.FLOAT, [\"N\"]\n ),\n ],\n [onnx.helper.make_tensor_value_info(\"out0\", onnx.TensorProto.FLOAT, [\"N\"])],\n)\n\nnode = onnx.helper.make_node(\n \"SequenceMap\", inputs=[\"x0\", \"x1\"], 
outputs=[\"y0\"], body=body\n)\n\nN = [np.random.randint(1, 10) for _ in range(3)]\nx0 = [np.random.uniform(0.0, 1.0, N[k]).astype(np.float32) for k in range(3)]\nx1 = [np.random.uniform(0.0, 1.0, N[k]).astype(np.float32) for k in range(3)]\ny0 = [x0[k] + x1[k] for k in range(3)]\ninput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"])\n ),\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"])\n ),\n]\noutput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"])\n ),\n]\nexpect(\n node,\n inputs=[x0, x1],\n outputs=[y0],\n input_type_protos=input_type_protos,\n output_type_protos=output_type_protos,\n name=\"test_sequence_map_add_2_sequences\",\n)" + }, + { + "summary": "sequence_map_extract_shapes", + "code": "body = onnx.helper.make_graph(\n [onnx.helper.make_node(\"Shape\", [\"x\"], [\"shape\"])],\n \"seq_map_body\",\n [\n onnx.helper.make_tensor_value_info(\n \"x\", onnx.TensorProto.FLOAT, [\"H\", \"W\", \"C\"]\n )\n ],\n [onnx.helper.make_tensor_value_info(\"shape\", onnx.TensorProto.INT64, [3])],\n)\n\nnode = onnx.helper.make_node(\n \"SequenceMap\", inputs=[\"in_seq\"], outputs=[\"shapes\"], body=body\n)\n\nshapes = [\n np.array([40, 30, 3], dtype=np.int64),\n np.array([20, 10, 3], dtype=np.int64),\n np.array([10, 5, 3], dtype=np.int64),\n]\nx0 = [np.zeros(shape, dtype=np.float32) for shape in shapes]\ninput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(\n onnx.TensorProto.FLOAT, [\"H\", \"W\", \"C\"]\n )\n ),\n]\noutput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.INT64, [3])\n ),\n]\nexpect(\n node,\n inputs=[x0],\n outputs=[shapes],\n input_type_protos=input_type_protos,\n output_type_protos=output_type_protos,\n name=\"test_sequence_map_extract_shapes\",\n)" + }, + { + "summary": "sequence_map_identity_1_sequence", + "code": "body = onnx.helper.make_graph(\n [onnx.helper.make_node(\"Identity\", [\"in0\"], [\"out0\"])],\n \"seq_map_body\",\n [onnx.helper.make_tensor_value_info(\"in0\", onnx.TensorProto.FLOAT, [\"N\"])],\n [onnx.helper.make_tensor_value_info(\"out0\", onnx.TensorProto.FLOAT, [\"M\"])],\n)\n\nnode = onnx.helper.make_node(\n \"SequenceMap\", inputs=[\"x\"], outputs=[\"y\"], body=body\n)\n\nx = [np.random.uniform(0.0, 1.0, 10).astype(np.float32) for _ in range(3)]\ny = x\ninput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"])\n ),\n]\noutput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"])\n ),\n]\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n input_type_protos=input_type_protos,\n output_type_protos=output_type_protos,\n name=\"test_sequence_map_identity_1_sequence\",\n)" + }, + { + "summary": "sequence_map_identity_1_sequence_1_tensor", + "code": "body = onnx.helper.make_graph(\n [\n onnx.helper.make_node(\"Identity\", [\"in0\"], [\"out0\"]),\n onnx.helper.make_node(\"Identity\", [\"in1\"], [\"out1\"]),\n ],\n \"seq_map_body\",\n [\n onnx.helper.make_tensor_value_info(\n \"in0\", onnx.TensorProto.FLOAT, [\"N\"]\n ),\n onnx.helper.make_tensor_value_info(\n \"in1\", onnx.TensorProto.FLOAT, [\"M\"]\n ),\n ],\n [\n onnx.helper.make_tensor_value_info(\n \"out0\", onnx.TensorProto.FLOAT, [\"N\"]\n ),\n 
onnx.helper.make_tensor_value_info(\n \"out1\", onnx.TensorProto.FLOAT, [\"M\"]\n ),\n ],\n)\n\nnode = onnx.helper.make_node(\n \"SequenceMap\", inputs=[\"x0\", \"x1\"], outputs=[\"y0\", \"y1\"], body=body\n)\n\nx0 = [\n np.random.uniform(0.0, 1.0, np.random.randint(1, 10)).astype(np.float32)\n for _ in range(3)\n]\nx1 = np.random.uniform(0.0, 1.0, np.random.randint(1, 10)).astype(np.float32)\ny0 = x0\ny1 = [x1 for _ in range(3)]\ninput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"])\n ),\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"M\"]),\n]\noutput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"])\n ),\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"M\"])\n ),\n]\nexpect(\n node,\n inputs=[x0, x1],\n outputs=[y0, y1],\n input_type_protos=input_type_protos,\n output_type_protos=output_type_protos,\n name=\"test_sequence_map_identity_1_sequence_1_tensor\",\n)" + }, + { + "summary": "sequence_map_identity_2_sequences", + "code": "body = onnx.helper.make_graph(\n [\n onnx.helper.make_node(\"Identity\", [\"in0\"], [\"out0\"]),\n onnx.helper.make_node(\"Identity\", [\"in1\"], [\"out1\"]),\n ],\n \"seq_map_body\",\n [\n onnx.helper.make_tensor_value_info(\n \"in0\", onnx.TensorProto.FLOAT, [\"N\"]\n ),\n onnx.helper.make_tensor_value_info(\n \"in1\", onnx.TensorProto.FLOAT, [\"M\"]\n ),\n ],\n [\n onnx.helper.make_tensor_value_info(\n \"out0\", onnx.TensorProto.FLOAT, [\"N\"]\n ),\n onnx.helper.make_tensor_value_info(\n \"out1\", onnx.TensorProto.FLOAT, [\"M\"]\n ),\n ],\n)\n\nnode = onnx.helper.make_node(\n \"SequenceMap\", inputs=[\"x0\", \"x1\"], outputs=[\"y0\", \"y1\"], body=body\n)\n\nx0 = [\n np.random.uniform(0.0, 1.0, np.random.randint(1, 10)).astype(np.float32)\n for _ in range(3)\n]\nx1 = [\n np.random.uniform(0.0, 1.0, np.random.randint(1, 10)).astype(np.float32)\n for _ in range(3)\n]\ny0 = x0\ny1 = x1\ninput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"])\n ),\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"M\"])\n ),\n]\noutput_type_protos = [\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"N\"])\n ),\n onnx.helper.make_sequence_type_proto(\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [\"M\"])\n ),\n]\nexpect(\n node,\n inputs=[x0, x1],\n outputs=[y0, y1],\n input_type_protos=input_type_protos,\n output_type_protos=output_type_protos,\n name=\"test_sequence_map_identity_2_sequences\",\n)" + } + ] + }, + { + "name": "Shape", + "module": "ai.onnx", + "version": 1, + "description": "Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor.\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." 
+ } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "shape", + "type": "T1", + "description": "Shape of the input tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input tensor can be of arbitrary type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain output to int64 tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "shape", + "code": "x = np.array(\n [\n [1, 2, 3],\n [4, 5, 6],\n ]\n).astype(np.float32)\ntest_shape(\"_example\", x) # preserve names of original test cases\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\n\ntest_shape(\"\", x) # preserve names of original test cases\n\ntest_shape(\"_start_1\", x, start=1)\n\ntest_shape(\"_end_1\", x, end=1)\n\ntest_shape(\"_start_negative_1\", x, start=-1)\n\ntest_shape(\"_end_negative_1\", x, end=-1)\n\ntest_shape(\"_start_1_end_negative_1\", x, start=1, end=-1)\n\ntest_shape(\"_start_1_end_2\", x, start=1, end=2)\n\ntest_shape(\"_clip_start\", x, start=-10)\n\ntest_shape(\"_clip_end\", x, end=10)" + } + ] + }, + { + "name": "Shape", + "module": "ai.onnx", + "version": 13, + "description": "Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor.\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "shape", + "type": "T1", + "description": "Shape of the input tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input tensor can be of arbitrary type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain output to int64 tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "shape", + "code": "x = np.array(\n [\n [1, 2, 3],\n [4, 5, 6],\n ]\n).astype(np.float32)\ntest_shape(\"_example\", x) # preserve names of original test cases\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\n\ntest_shape(\"\", x) # preserve names of original test cases\n\ntest_shape(\"_start_1\", x, start=1)\n\ntest_shape(\"_end_1\", x, end=1)\n\ntest_shape(\"_start_negative_1\", x, start=-1)\n\ntest_shape(\"_end_negative_1\", x, end=-1)\n\ntest_shape(\"_start_1_end_negative_1\", x, start=1, end=-1)\n\ntest_shape(\"_start_1_end_2\", x, start=1, end=2)\n\ntest_shape(\"_clip_start\", x, start=-10)\n\ntest_shape(\"_clip_end\", x, end=10)" + } + ] + }, + { + "name": "Shape", + "module": "ai.onnx", + "version": 15, + "description": "Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor.\nOptional attributes start and end can be used to compute a slice of the input tensor's shape.\nIf start axis is omitted, the slice starts from axis 
0.\nThe end axis, if specified, is exclusive (and the returned value will not include the size of that axis).\nIf the end axis is omitted, the axes upto the last one will be included.\nNegative axes indicate counting back from the last axis.\nNote that axes will be clamped to the range [0, r-1], where r is the\nrank of the input tensor if they are out-of-range (after adding r in the case of\nnegative axis). Thus, specifying any end value > r is equivalent to specifying an end\nvalue of r, and specifying any start value < -r is equivalent to specifying a start\nvalue of 0.\n\nExamples:\n\n```\nInput tensor with shape: [2, 3, 4]\nNo attributes specified.\nOutput: [2, 3, 4]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nstart: -1\nOutput: [4]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nend: -1\nOutput: [2, 3]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nstart: 1\nend: 2\nOutput: [3]\n```\n", + "attributes": [ + { + "name": "end", + "type": "int64", + "required": false, + "description": "(Optional) Ending axis for slicing the shape. Negative value means counting dimensions from the back. If omitted, sizes of all axes upto (including) the last one will be included." + }, + { + "name": "start", + "type": "int64", + "required": false, + "description": "(Optional) Starting axis for slicing the shape. Default value is 0.Negative value means counting dimensions from the back." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "shape", + "type": "T1", + "description": "Shape of the input tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input tensor can be of arbitrary type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain output to int64 tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "shape", + "code": "x = np.array(\n [\n [1, 2, 3],\n [4, 5, 6],\n ]\n).astype(np.float32)\ntest_shape(\"_example\", x) # preserve names of original test cases\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\n\ntest_shape(\"\", x) # preserve names of original test cases\n\ntest_shape(\"_start_1\", x, start=1)\n\ntest_shape(\"_end_1\", x, end=1)\n\ntest_shape(\"_start_negative_1\", x, start=-1)\n\ntest_shape(\"_end_negative_1\", x, end=-1)\n\ntest_shape(\"_start_1_end_negative_1\", x, start=1, end=-1)\n\ntest_shape(\"_start_1_end_2\", x, start=1, end=2)\n\ntest_shape(\"_clip_start\", x, start=-10)\n\ntest_shape(\"_clip_end\", x, end=10)" + } + ] + }, + { + "name": "Shape", + "module": "ai.onnx", + "version": 19, + "description": "Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor.\nOptional attributes start and end can be used to compute a slice of the input tensor's shape.\nIf start axis is omitted, the slice starts from axis 0.\nThe end axis, if specified, is exclusive (and the returned value will not include the size of that axis).\nIf the end axis is omitted, the axes upto the last one will be included.\nNegative axes indicate counting back from the last 
axis.\nNote that axes will be clamped to the range [0, r-1], where r is the\nrank of the input tensor if they are out-of-range (after adding r in the case of\nnegative axis). Thus, specifying any end value > r is equivalent to specifying an end\nvalue of r, and specifying any start value < -r is equivalent to specifying a start\nvalue of 0.\n\nExamples:\n\n```\nInput tensor with shape: [2, 3, 4]\nNo attributes specified.\nOutput: [2, 3, 4]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nstart: -1\nOutput: [4]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nend: -1\nOutput: [2, 3]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nstart: 1\nend: 2\nOutput: [3]\n```\n", + "attributes": [ + { + "name": "end", + "type": "int64", + "required": false, + "description": "(Optional) Ending axis for slicing the shape. Negative value means counting dimensions from the back. If omitted, sizes of all axes upto (including) the last one will be included." + }, + { + "name": "start", + "type": "int64", + "required": false, + "description": "(Optional) Starting axis for slicing the shape. Default value is 0.Negative value means counting dimensions from the back." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "shape", + "type": "T1", + "description": "Shape of the input tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input tensor can be of arbitrary type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + }, + { + "description": "Constrain output to int64 tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "shape", + "code": "x = np.array(\n [\n [1, 2, 3],\n [4, 5, 6],\n ]\n).astype(np.float32)\ntest_shape(\"_example\", x) # preserve names of original test cases\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\n\ntest_shape(\"\", x) # preserve names of original test cases\n\ntest_shape(\"_start_1\", x, start=1)\n\ntest_shape(\"_end_1\", x, end=1)\n\ntest_shape(\"_start_negative_1\", x, start=-1)\n\ntest_shape(\"_end_negative_1\", x, end=-1)\n\ntest_shape(\"_start_1_end_negative_1\", x, start=1, end=-1)\n\ntest_shape(\"_start_1_end_2\", x, start=1, end=2)\n\ntest_shape(\"_clip_start\", x, start=-10)\n\ntest_shape(\"_clip_end\", x, end=10)" + } + ] + }, + { + "name": "Shape", + "module": "ai.onnx", + "version": 21, + "description": "Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor.\nOptional attributes start and end can be used to compute a slice of the input tensor's shape.\nIf start axis is omitted, the slice starts from axis 0.\nThe end axis, if specified, is exclusive (and the returned value will not include the size of that axis).\nIf the end axis is omitted, the axes upto the last one will be included.\nNegative axes indicate counting back from the last axis.\nNote that axes will be clamped to the range [0, r-1], where r is the\nrank of the input tensor if they are out-of-range (after 
adding r in the case of\nnegative axis). Thus, specifying any end value > r is equivalent to specifying an end\nvalue of r, and specifying any start value < -r is equivalent to specifying a start\nvalue of 0.\n\nExamples:\n\n```\nInput tensor with shape: [2, 3, 4]\nNo attributes specified.\nOutput: [2, 3, 4]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nstart: -1\nOutput: [4]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nend: -1\nOutput: [2, 3]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nstart: 1\nend: 2\nOutput: [3]\n```\n", + "attributes": [ + { + "name": "end", + "type": "int64", + "required": false, + "description": "(Optional) Ending axis for slicing the shape. Negative value means counting dimensions from the back. If omitted, sizes of all axes upto (including) the last one will be included." + }, + { + "name": "start", + "type": "int64", + "required": false, + "description": "(Optional) Starting axis for slicing the shape. Default value is 0.Negative value means counting dimensions from the back." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "shape", + "type": "T1", + "description": "Shape of the input tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input tensor can be of arbitrary type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + }, + { + "description": "Constrain output to int64 tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "shape", + "code": "x = np.array(\n [\n [1, 2, 3],\n [4, 5, 6],\n ]\n).astype(np.float32)\ntest_shape(\"_example\", x) # preserve names of original test cases\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\n\ntest_shape(\"\", x) # preserve names of original test cases\n\ntest_shape(\"_start_1\", x, start=1)\n\ntest_shape(\"_end_1\", x, end=1)\n\ntest_shape(\"_start_negative_1\", x, start=-1)\n\ntest_shape(\"_end_negative_1\", x, end=-1)\n\ntest_shape(\"_start_1_end_negative_1\", x, start=1, end=-1)\n\ntest_shape(\"_start_1_end_2\", x, start=1, end=2)\n\ntest_shape(\"_clip_start\", x, start=-10)\n\ntest_shape(\"_clip_end\", x, end=10)" + } + ] + }, + { + "name": "Shrink", + "module": "ai.onnx", + "version": 9, + "description": "Shrink takes one input data (Tensor) and produces one Tensor output,\nhaving same datatype and shape with input. It has two attributes, lambd and\nbias. The formula of this operator is: If x < -lambd, y = x + bias;\nIf x > lambd, y = x - bias; Otherwise, y = 0.\n", + "attributes": [ + { + "name": "bias", + "type": "float32", + "required": false, + "description": "The bias value added to output. Default is 0." + }, + { + "name": "lambd", + "type": "float32", + "required": false, + "default": 0.5, + "description": "The lambd value for the Shrink formulation. Default is 0.5." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The input data as Tensor." 
+ } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to only numeric types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "hard_shrink", + "code": "node = onnx.helper.make_node(\n \"Shrink\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n lambd=1.5,\n)\nX = np.arange(-2.0, 2.1, dtype=np.float32)\nY = np.array([-2, 0, 0, 0, 2], dtype=np.float32)\nexpect(node, inputs=[X], outputs=[Y], name=\"test_shrink_hard\")" + }, + { + "summary": "soft_shrink", + "code": "node = onnx.helper.make_node(\n \"Shrink\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n lambd=1.5,\n bias=1.5,\n)\nX = np.arange(-2.0, 2.1, dtype=np.float32)\nY = np.array([-0.5, 0, 0, 0, 0.5], dtype=np.float32)\nexpect(node, inputs=[X], outputs=[Y], name=\"test_shrink_soft\")" + } + ] + }, + { + "name": "Sigmoid", + "module": "ai.onnx", + "version": 1, + "description": "Sigmoid takes one input data (Tensor) and produces one output data\n(Tensor) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the\ntensor elementwise.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sigmoid", + "code": "node = onnx.helper.make_node(\n \"Sigmoid\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = 1.0 / (\n 1.0 + np.exp(np.negative(x))\n) # expected output [0.26894143, 0.5, 0.7310586]\nexpect(node, inputs=[x], outputs=[y], name=\"test_sigmoid_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = 1.0 / (1.0 + np.exp(np.negative(x)))\nexpect(node, inputs=[x], outputs=[y], name=\"test_sigmoid\")" + } + ], + "category": "Activation" + }, + { + "name": "Sigmoid", + "module": "ai.onnx", + "version": 6, + "description": "Sigmoid takes one input data (Tensor) and produces one output data\n(Tensor) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the\ntensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sigmoid", + "code": "node = onnx.helper.make_node(\n \"Sigmoid\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = 1.0 / (\n 1.0 
+ np.exp(np.negative(x))\n) # expected output [0.26894143, 0.5, 0.7310586]\nexpect(node, inputs=[x], outputs=[y], name=\"test_sigmoid_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = 1.0 / (1.0 + np.exp(np.negative(x)))\nexpect(node, inputs=[x], outputs=[y], name=\"test_sigmoid\")" + } + ], + "category": "Activation" + }, + { + "name": "Sigmoid", + "module": "ai.onnx", + "version": 13, + "description": "Sigmoid takes one input data (Tensor) and produces one output data\n(Tensor) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the\ntensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "sigmoid", + "code": "node = onnx.helper.make_node(\n \"Sigmoid\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = 1.0 / (\n 1.0 + np.exp(np.negative(x))\n) # expected output [0.26894143, 0.5, 0.7310586]\nexpect(node, inputs=[x], outputs=[y], name=\"test_sigmoid_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = 1.0 / (1.0 + np.exp(np.negative(x)))\nexpect(node, inputs=[x], outputs=[y], name=\"test_sigmoid\")" + } + ], + "category": "Activation" + }, + { + "name": "Sign", + "module": "ai.onnx", + "version": 9, + "description": "Calculate the sign of the given input tensor element-wise.\nIf input > 0, output 1. If input < 0, output -1. If input == 0, output 0.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The sign of the input tensor computed element-wise. It has the same shape and type as the input." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sign", + "code": "node = onnx.helper.make_node(\n \"Sign\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array(range(-5, 6)).astype(np.float32)\ny = np.sign(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_sign\")" + } + ] + }, + { + "name": "Sign", + "module": "ai.onnx", + "version": 13, + "description": "Calculate the sign of the given input tensor element-wise.\nIf input > 0, output 1. If input < 0, output -1. If input == 0, output 0.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The sign of the input tensor computed element-wise. It has the same shape and type as the input."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "sign", + "code": "node = onnx.helper.make_node(\n \"Sign\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array(range(-5, 6)).astype(np.float32)\ny = np.sign(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_sign\")" + } + ] + }, + { + "name": "Sin", + "module": "ai.onnx", + "version": 7, + "description": "Calculates the sine of the given input tensor, element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The sine of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sin", + "code": "node = onnx.helper.make_node(\n \"Sin\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.sin(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_sin_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.sin(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_sin\")" + } + ] + }, + { + "name": "Sinh", + "module": "ai.onnx", + "version": 9, + "description": "Calculates the hyperbolic sine of the given input tensor element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The hyperbolic sine values of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sinh", + "code": "node = onnx.helper.make_node(\n \"Sinh\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.sinh(x) # expected output [-1.17520118, 0., 1.17520118]\nexpect(node, inputs=[x], outputs=[y], name=\"test_sinh_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.sinh(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_sinh\")" + } + ] + }, + { + "name": "Size", + "module": "ai.onnx", + "version": 1, + "description": "Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor.\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." 
+ } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "size", + "type": "T1", + "description": "Total number of elements of the input tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input tensor can be of arbitrary type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain output to int64 tensor, which should be a scalar though.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "size", + "code": "node = onnx.helper.make_node(\n \"Size\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n [1, 2, 3],\n [4, 5, 6],\n ]\n).astype(np.float32)\ny = np.array(6).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_size_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.array(x.size).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_size\")" + } + ] + }, + { + "name": "Size", + "module": "ai.onnx", + "version": 13, + "description": "Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor.\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "size", + "type": "T1", + "description": "Total number of elements of the input tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input tensor can be of arbitrary type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain output to int64 tensor, which should be a scalar though.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "size", + "code": "node = onnx.helper.make_node(\n \"Size\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n [1, 2, 3],\n [4, 5, 6],\n ]\n).astype(np.float32)\ny = np.array(6).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_size_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.array(x.size).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_size\")" + } + ] + }, + { + "name": "Size", + "module": "ai.onnx", + "version": 19, + "description": "Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor.\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." 
+ } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "size", + "type": "T1", + "description": "Total number of elements of the input tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input tensor can be of arbitrary type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)" + ] + }, + { + "description": "Constrain output to int64 tensor, which should be a scalar though.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "size", + "code": "node = onnx.helper.make_node(\n \"Size\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n [1, 2, 3],\n [4, 5, 6],\n ]\n).astype(np.float32)\ny = np.array(6).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_size_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.array(x.size).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_size\")" + } + ] + }, + { + "name": "Size", + "module": "ai.onnx", + "version": 21, + "description": "Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor.\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "size", + "type": "T1", + "description": "Total number of elements of the input tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input tensor can be of arbitrary type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + }, + { + "description": "Constrain output to int64 tensor, which should be a scalar though.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "size", + "code": "node = onnx.helper.make_node(\n \"Size\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array(\n [\n [1, 2, 3],\n [4, 5, 6],\n ]\n).astype(np.float32)\ny = np.array(6).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_size_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.array(x.size).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_size\")" + } + ] + }, + { + "name": "Slice", + "module": "ai.onnx", + "version": 1, + "description": "Produces a slice of the input tensor along multiple axes. 
Similar to numpy:\nhttps://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\nSlice uses the `axes`, `starts` and `ends` attributes to specify the start and end\ndimension for each axis in the list of axes; it uses this information to\nslice the input `data` tensor. If a negative value is passed for any of the\nstart or end indices, it represents the number of elements before the end of that\ndimension. If the value passed to start or end is larger than `n` (the\nnumber of elements in this dimension), it represents `n`. For slicing to the\nend of a dimension with unknown size, it is recommended to pass in `INT_MAX`.\nIf `axes` are omitted, they are set to `[0, ..., ndim-1]`.\nExample 1:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n axes = [0, 1]\n starts = [1, 0]\n ends = [2, 3]\n result = [\n [5, 6, 7],\n ]\nExample 2:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n starts = [0, 1]\n ends = [-1, 1000]\n result = [\n [2, 3, 4],\n ]\n", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "Axes that `starts` and `ends` apply to. It is optional. If not present, it will be treated as [0, 1, ..., len(`starts`) - 1]." + }, + { + "name": "ends", + "type": "int64[]", + "required": true, + "description": "Ending indices (exclusive) of corresponding axis in `axes`" + }, + { + "name": "starts", + "type": "int64[]", + "required": true, + "description": "Starting indices of corresponding axis in `axes`" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of data to extract slices from." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Sliced data tensor." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "slice", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\ny = x[0:3, 0:10]\nstarts = np.array([0, 0], dtype=np.int64)\nends = np.array([3, 10], dtype=np.int64)\naxes = np.array([0, 1], dtype=np.int64)\nsteps = np.array([1, 1], dtype=np.int64)\n\nexpect(\n node, inputs=[x, starts, ends, axes, steps], outputs=[y], name=\"test_slice\"\n)" + }, + { + "summary": "slice_default_axes", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n node, inputs=[x, starts, ends], outputs=[y], name=\"test_slice_default_axes\"\n)" + }, + { + "summary": "slice_default_steps", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n
node,\n inputs=[x, starts, ends, axes],\n outputs=[y],\n name=\"test_slice_default_steps\",\n)" + }, + { + "summary": "slice_end_out_of_bounds", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([1], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1:1000]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_end_out_of_bounds\",\n)" + }, + { + "summary": "slice_neg", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0], dtype=np.int64)\nends = np.array([-1], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 0:-1]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_neg\",\n)" + }, + { + "summary": "slice_neg_steps", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([20, 10, 4], dtype=np.int64)\nends = np.array([0, 0, 1], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\nsteps = np.array([-1, -3, -2]).astype(np.int64)\ny = x[20:0:-1, 10:0:-3, 4:1:-2]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_neg_steps\",\n)" + }, + { + "summary": "slice_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, -2, -1], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes],\n outputs=[y],\n name=\"test_slice_negative_axes\",\n)" + }, + { + "summary": "slice_start_out_of_bounds", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([1000], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1000:1000]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_start_out_of_bounds\",\n)" + } + ], + "category": "Tensor" + }, + { + "name": "Slice", + "module": "ai.onnx", + "version": 10, + "description": "Produces a slice of the input tensor along multiple axes. Similar to numpy:\nhttps://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\nSlices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end\ndimension and step for each axis in the list of axes, it uses this information to\nslice the input `data` tensor. If a negative value is passed for any of the\nstart or end indices, it represent number of elements before the end of that\ndimension. If the value passed to start or end is larger than the `n` (the\nnumber of elements in this dimension), it represents `n`. 
For slicing to the\nend of a dimension with unknown size, it is recommended to pass in `INT_MAX`.\nIf a negative value is passed for step, it represents slicing backward.\nIf `axes` are omitted, they are set to `[0, ..., ndim-1]`.\nIf `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`\nExample 1:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n axes = [0, 1]\n starts = [1, 0]\n ends = [2, 3]\n steps = [1, 2]\n result = [\n [5, 7],\n ]\nExample 2:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n starts = [0, 1]\n ends = [-1, 1000]\n result = [\n [2, 3, 4],\n ]\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of data to extract slices from." + }, + { + "name": "starts", + "type": "Tind", + "description": "1-D tensor of starting indices of corresponding axis in `axes`" + }, + { + "name": "ends", + "type": "Tind", + "description": "1-D tensor of ending indices (exclusive) of corresponding axis in `axes`" + }, + { + "name": "axes", + "type": "Tind", + "option": "optional", + "description": "1-D tensor of axes that `starts` and `ends` apply to." + }, + { + "name": "steps", + "type": "Tind", + "option": "optional", + "description": "1-D tensor of slice step of corresponding axis in `axes`. Default to 1. " + } + ], + "min_input": 3, + "max_input": 5, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Sliced data tensor." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "3 - 5", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "slice", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\ny = x[0:3, 0:10]\nstarts = np.array([0, 0], dtype=np.int64)\nends = np.array([3, 10], dtype=np.int64)\naxes = np.array([0, 1], dtype=np.int64)\nsteps = np.array([1, 1], dtype=np.int64)\n\nexpect(\n node, inputs=[x, starts, ends, axes, steps], outputs=[y], name=\"test_slice\"\n)" + }, + { + "summary": "slice_default_axes", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n node, inputs=[x, starts, ends], outputs=[y], name=\"test_slice_default_axes\"\n)" + }, + { + "summary": "slice_default_steps", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes],\n outputs=[y],\n name=\"test_slice_default_steps\",\n)" + }, + { + "summary": 
"slice_end_out_of_bounds", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([1], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1:1000]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_end_out_of_bounds\",\n)" + }, + { + "summary": "slice_neg", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0], dtype=np.int64)\nends = np.array([-1], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 0:-1]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_neg\",\n)" + }, + { + "summary": "slice_neg_steps", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([20, 10, 4], dtype=np.int64)\nends = np.array([0, 0, 1], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\nsteps = np.array([-1, -3, -2]).astype(np.int64)\ny = x[20:0:-1, 10:0:-3, 4:1:-2]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_neg_steps\",\n)" + }, + { + "summary": "slice_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, -2, -1], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes],\n outputs=[y],\n name=\"test_slice_negative_axes\",\n)" + }, + { + "summary": "slice_start_out_of_bounds", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([1000], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1000:1000]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_start_out_of_bounds\",\n)" + } + ], + "category": "Tensor" + }, + { + "name": "Slice", + "module": "ai.onnx", + "version": 11, + "description": "Produces a slice of the input tensor along multiple axes. Similar to numpy:\nhttps://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\nSlices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end\ndimension and step for each axis in the list of axes, it uses this information to\nslice the input `data` tensor. If a negative value is passed for any of the\nstart or end indices, it represents number of elements before the end of that\ndimension. If the value passed to start or end is larger than the `n` (the\nnumber of elements in this dimension), it represents `n`. 
For slicing to the\nend of a dimension with unknown size, it is recommended to pass in `INT_MAX`\nwhen slicing forward and `INT_MIN` when slicing backward.\nIf a negative value is passed for step, it represents slicing backward.\nHowever, the step value cannot be 0.\nIf `axes` are omitted, they are set to `[0, ..., ndim-1]`.\nIf `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`.\nExample 1:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n axes = [0, 1]\n starts = [1, 0]\n ends = [2, 3]\n steps = [1, 2]\n result = [\n [5, 7],\n ]\nExample 2:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n starts = [0, 1]\n ends = [-1, 1000]\n result = [\n [2, 3, 4],\n ]\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of data to extract slices from." + }, + { + "name": "starts", + "type": "Tind", + "description": "1-D tensor of starting indices of corresponding axis in `axes`" + }, + { + "name": "ends", + "type": "Tind", + "description": "1-D tensor of ending indices (exclusive) of corresponding axis in `axes`" + }, + { + "name": "axes", + "type": "Tind", + "option": "optional", + "description": "1-D tensor of axes that `starts` and `ends` apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)." + }, + { + "name": "steps", + "type": "Tind", + "option": "optional", + "description": "1-D tensor of slice step of corresponding axis in `axes`. Negative value means slicing backward. `steps` cannot be 0. Defaults to 1." + } + ], + "min_input": 3, + "max_input": 5, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Sliced data tensor." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "3 - 5", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "slice", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\ny = x[0:3, 0:10]\nstarts = np.array([0, 0], dtype=np.int64)\nends = np.array([3, 10], dtype=np.int64)\naxes = np.array([0, 1], dtype=np.int64)\nsteps = np.array([1, 1], dtype=np.int64)\n\nexpect(\n node, inputs=[x, starts, ends, axes, steps], outputs=[y], name=\"test_slice\"\n)" + }, + { + "summary": "slice_default_axes", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n node, inputs=[x, starts, ends], outputs=[y], name=\"test_slice_default_axes\"\n)" + }, + { + "summary": "slice_default_steps", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3],
dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes],\n outputs=[y],\n name=\"test_slice_default_steps\",\n)" + }, + { + "summary": "slice_end_out_of_bounds", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([1], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1:1000]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_end_out_of_bounds\",\n)" + }, + { + "summary": "slice_neg", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0], dtype=np.int64)\nends = np.array([-1], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 0:-1]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_neg\",\n)" + }, + { + "summary": "slice_neg_steps", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([20, 10, 4], dtype=np.int64)\nends = np.array([0, 0, 1], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\nsteps = np.array([-1, -3, -2]).astype(np.int64)\ny = x[20:0:-1, 10:0:-3, 4:1:-2]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_neg_steps\",\n)" + }, + { + "summary": "slice_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, -2, -1], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes],\n outputs=[y],\n name=\"test_slice_negative_axes\",\n)" + }, + { + "summary": "slice_start_out_of_bounds", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([1000], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1000:1000]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_start_out_of_bounds\",\n)" + } + ], + "category": "Tensor" + }, + { + "name": "Slice", + "module": "ai.onnx", + "version": 13, + "description": "Produces a slice of the input tensor along multiple axes. Similar to numpy:\nhttps://numpy.org/doc/stable/user/basics.indexing.html?highlight=slice#slicing-and-striding\n\nSlice uses the `starts`, `ends`, `axes` and `steps` inputs to select a sub-tensor\nof its input `data` tensor.\n\nAn effective `starts[i]`, `ends[i]`, and `steps[i]` must be computed for each `i`\nin `[0, ... 
r-1]` where `r = rank(input)` as follows:\n\nIf `axes` are omitted, they are set to `[0, ..., r-1]`.\nIf `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`.\n\nThe effective values are initialized as `start[i] = 0`, `ends[i] = dims[i]` where\n`dims` are the dimensions of `input` and `steps[i] = 1`.\n\nAll negative elements of `axes` are made non-negative by adding `r` to them, where\n`r = rank(input)`.\n\nAll negative values in `starts[i]` and `ends[i]` have `dims[axes[i]]` added to them,\nwhere `dims` are the dimensions of `input`. Then `start[axes[i]]` is the adjusted\n`starts[i]`, clamped into the range `[0, dims[axes[i]]]` for positive stepping\nand `[0, dims[axes[i]]-1]` for negative stepping.\n\nThe clamping for the adjusted `ends[i]` depends on the sign of `steps[i]` and must\naccommodate copying 0 through `dims[axes[i]]` elements, so for positive stepping\n`ends[axes[i]]` is clamped to `[0, dims[axes[i]]]`, while for negative stepping it\nis clamped to `[-1, dims[axes[i]]-1]`.\n\nFinally, `steps[axes[i]] = steps[i]`.\n\nFor slicing to the end of a dimension with unknown size, it is recommended to pass\nin `INT_MAX` when slicing forward and `INT_MIN` when slicing backward.\n\nExample 1:\n\n```\ndata = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]\naxes = [0, 1]\nstarts = [1, 0]\nends = [2, 3]\nsteps = [1, 2]\nresult = [\n [5, 7],\n]\n```\n\nExample 2:\n\n```\ndata = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]\nstarts = [0, 1]\nends = [-1, 1000]\nresult = [\n [2, 3, 4],\n]\n```\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensor of data to extract slices from." + }, + { + "name": "starts", + "type": "Tind", + "description": "1-D tensor of starting indices of corresponding axis in `axes`" + }, + { + "name": "ends", + "type": "Tind", + "description": "1-D tensor of ending indices (exclusive) of corresponding axis in `axes`" + }, + { + "name": "axes", + "type": "Tind", + "option": "optional", + "description": "1-D tensor of axes that `starts` and `ends` apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined if an axis is repeated." + }, + { + "name": "steps", + "type": "Tind", + "option": "optional", + "description": "1-D tensor of slice step of corresponding axis in `axes`. Negative value means slicing backward. `steps` cannot be 0. Defaults to 1s." + } + ], + "min_input": 3, + "max_input": 5, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Sliced data tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "3 - 5", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain indices to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "slice", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\ny = x[0:3, 0:10]\nstarts = np.array([0, 0], dtype=np.int64)\nends = np.array([3, 10], dtype=np.int64)\naxes = np.array([0, 1], dtype=np.int64)\nsteps = np.array([1, 1], dtype=np.int64)\n\nexpect(\n node, inputs=[x, starts, ends, axes, steps], outputs=[y], name=\"test_slice\"\n)" + }, + { + "summary": "slice_default_axes", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n node, inputs=[x, starts, ends], outputs=[y], name=\"test_slice_default_axes\"\n)" + }, + { + "summary": "slice_default_steps", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes],\n outputs=[y],\n name=\"test_slice_default_steps\",\n)" + }, + { + "summary": "slice_end_out_of_bounds", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([1], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1:1000]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_end_out_of_bounds\",\n)" + }, + { + "summary": "slice_neg", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0], dtype=np.int64)\nends = np.array([-1], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 0:-1]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_neg\",\n)" + }, + { + "summary": "slice_neg_steps", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([20, 10, 4], dtype=np.int64)\nends = np.array([0, 0, 1], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\nsteps = 
np.array([-1, -3, -2]).astype(np.int64)\ny = x[20:0:-1, 10:0:-3, 4:1:-2]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_neg_steps\",\n)" + }, + { + "summary": "slice_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, -2, -1], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes],\n outputs=[y],\n name=\"test_slice_negative_axes\",\n)" + }, + { + "summary": "slice_start_out_of_bounds", + "code": "node = onnx.helper.make_node(\n \"Slice\",\n inputs=[\"x\", \"starts\", \"ends\", \"axes\", \"steps\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([1000], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1000:1000]\n\nexpect(\n node,\n inputs=[x, starts, ends, axes, steps],\n outputs=[y],\n name=\"test_slice_start_out_of_bounds\",\n)" + } + ], + "category": "Tensor" + }, + { + "name": "Softmax", + "module": "ai.onnx", + "version": 1, + "description": "The operator computes the softmax (normalized exponential) values for each layer in the batch\n of the given input. The input is a 2-D tensor (Tensor<float>) of size\n(batch_size x input_feature_dimensions). The output tensor has the same shape\nand contains the softmax values of the corresponding input.\n\nInput does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}], where k is\nthe axis provided, the input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size" + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output values with the same shape as input tensor (the original size without coercion)."
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "softmax", + "code": "node = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.array([[-1, 0, 1]]).astype(np.float32)\n# expected output [[0.09003058, 0.24472848, 0.66524094]]\ny = softmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_example\")" + }, + { + "summary": "softmax_axis", + "code": "x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)\n# expected output\n# [[0.032058604 0.08714432 0.23688284 0.6439143 ]\n# [0.032058604 0.08714432 0.23688284 0.6439143 ]]\ny = softmax(x)\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_large_number\")\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=0,\n)\ny = softmax(x, axis=0)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_axis_0\")\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=1,\n)\ny = softmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_axis_1\")\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=2,\n)\ny = softmax(x, axis=2)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_axis_2\")\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=-1,\n)\ny = softmax(x, axis=-1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_negative_axis\")\n\n# default axis is -1\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_default_axis\")" + } + ], + "category": "Activation" + }, + { + "name": "Softmax", + "module": "ai.onnx", + "version": 11, + "description": "The operator computes the softmax (normalized exponential) values for each layer in the batch\n of the given input.\n\nThe input does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}], where k is\nthe axis provided, the input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors. The output tensor has the same shape\nand contains the softmax values of the corresponding input.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)."
+ } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output values with the same shape as input tensor (the original size without coercion)." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "softmax", + "code": "node = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.array([[-1, 0, 1]]).astype(np.float32)\n# expected output [[0.09003058, 0.24472848, 0.66524094]]\ny = softmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_example\")" + }, + { + "summary": "softmax_axis", + "code": "x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)\n# expected output\n# [[0.032058604 0.08714432 0.23688284 0.6439143 ]\n# [0.032058604 0.08714432 0.23688284 0.6439143 ]]\ny = softmax(x)\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_large_number\")\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=0,\n)\ny = softmax(x, axis=0)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_axis_0\")\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=1,\n)\ny = softmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_axis_1\")\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=2,\n)\ny = softmax(x, axis=2)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_axis_2\")\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=-1,\n)\ny = softmax(x, axis=-1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_negative_axis\")\n\n# default axis is -1\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_default_axis\")" + } + ], + "category": "Activation" + }, + { + "name": "Softmax", + "module": "ai.onnx", + "version": 13, + "description": "The operator computes the normalized exponential values for the given input:\n\n Softmax(input, axis) = Exp(input) / ReduceSum(Exp(input), axis=axis, keepdims=1) \n\nThe \"axis\" attribute indicates the dimension along which Softmax\nwill be performed. The output tensor has the same shape\nand contains the Softmax values of the corresponding input.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": -1, + "description": "\nDescribes the dimension Softmax will be performed on.\nNegative value means counting dimensions\nfrom the back. Accepted range is [-r, r-1] where r = rank(input).\n" + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The input tensor of rank >= axis." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The output values with the same shape as the input tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "softmax", + "code": "node = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nx = np.array([[-1, 0, 1]]).astype(np.float32)\n# expected output [[0.09003058, 0.24472848, 0.66524094]]\ny = softmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_example\")" + }, + { + "summary": "softmax_axis", + "code": "x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)\n# expected output\n# [[0.032058604 0.08714432 0.23688284 0.6439143 ]\n# [0.032058604 0.08714432 0.23688284 0.6439143 ]]\ny = softmax(x)\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_large_number\")\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=0,\n)\ny = softmax(x, axis=0)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_axis_0\")\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=1,\n)\ny = softmax(x, axis=1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_axis_1\")\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=2,\n)\ny = softmax(x, axis=2)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_axis_2\")\n\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n axis=-1,\n)\ny = softmax(x, axis=-1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_negative_axis\")\n\n# default axis is -1\nnode = onnx.helper.make_node(\n \"Softmax\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softmax_default_axis\")" + } + ], + "category": "Activation" + }, + { + "name": "SoftmaxCrossEntropyLoss", + "module": "ai.onnx", + "version": 12, + "description": "Loss function that measures the softmax cross entropy\nbetween 'scores' and 'labels'.\nThis operator first computes a loss tensor whose shape is identical to the labels input.\nIf the input is 2-D with shape (N, C), the loss tensor may be an N-element vector L = (l_1, l_2, ..., l_N).\nIf the input is an N-D tensor with shape (N, C, D1, D2, ..., Dk),\nthe loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L.\nAfter L is available, this operator can optionally apply a reduction.\n\nshape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\nshape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n\nThe loss for one sample, l_i, can be calculated as follows:\n l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where c is the class index.\nor\n l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided.\n\nThe loss is zero for the case when the label value equals ignore_index.\n l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index\n\nwhere:\n p = Softmax(scores)\n y = Log(p)\n c = labels[i][d1][d2]...[dk]\n\nFinally, L is optionally reduced:\nIf reduction = 'none', the output is L with
shape (N, D1, D2, ..., Dk).\nIf reduction = 'sum', the output is scalar: Sum(L).\nIf reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W),\nwhere tensor W is of shape (N, D1, D2, ..., Dk) and W[n][d1][d2]...[dk] = weights[labels[n][d1][d2]...[dk]].\n", + "attributes": [ + { + "name": "ignore_index", + "type": "int64", + "required": false, + "description": "Specifies a target value that is ignored and does not contribute to the input gradient. It's an optional value." + }, + { + "name": "reduction", + "type": "string", + "required": false, + "default": "mean", + "description": "Type of reduction to apply to loss: none, sum, mean (default). 'none': no reduction will be applied, 'sum': the output will be summed. 'mean': the sum of the output will be divided by the number of elements in the output." + } + ], + "inputs": [ + { + "name": "scores", + "type": "T", + "description": "The predicted outputs with shape [batch_size, class_size], or [batch_size, class_size, D1, D2, ..., Dk], where K is the number of dimensions." + }, + { + "name": "labels", + "type": "Tind", + "description": "The ground truth output tensor, with shape [batch_size], or [batch_size, D1, D2, ..., Dk], where K is the number of dimensions. Labels element value shall be in the range [0, C). If ignore_index is specified, it may have a value outside [0, C) and the label values should either be in the range [0, C) or have the value ignore_index." + }, + { + "name": "weights", + "type": "T", + "option": "optional", + "description": "A manual rescaling weight given to each class. If given, it has to be a 1D Tensor assigning weight to each of the classes. Otherwise, it is treated as if having all ones." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Weighted loss float Tensor. If reduction is 'none', this has the shape of [batch_size], or [batch_size, D1, D2, ..., Dk] in case of K-dimensional loss. Otherwise, it is a scalar." + }, + { + "name": "log_prob", + "type": "T", + "option": "optional", + "description": "Log probability tensor. If the output of softmax is prob, its value is log(prob)."
+ } + ], + "min_output": 1, + "max_output": 2, + "inputs_range": "2 - 3", + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain target to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "input_shape_is_NCd1_mean_weight_negative_ii", + "code": "reduction = \"mean\"\nignore_index = np.int64(-1)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1 = 3, 5, 6\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1)).astype(np.int64)\nlabels[0][0] = -1\nweight = np.random.rand(C).astype(np.float32)\n\nsce = softmaxcrossentropy(\n x, labels, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[sce],\n name=\"test_sce_NCd1_mean_weight_negative_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1_mean_weight_negative_ii_log_prob", + "code": "reduction = \"mean\"\nignore_index = np.int64(-1)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1 = 3, 5, 6\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1)).astype(np.int64)\nlabels[0][0] = -1\nweight = np.random.rand(C).astype(np.float32)\n\nloss, log_prob = softmaxcrossentropy(\n x,\n labels,\n weight=weight,\n reduction=reduction,\n ignore_index=ignore_index,\n get_log_prob=True,\n)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[loss, log_prob],\n name=\"test_sce_NCd1_mean_weight_negative_ii_log_prob\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_none_no_weight_negative_ii", + "code": "reduction = \"none\"\nignore_index = np.int64(-5)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3)).astype(\n np.int64\n)\nlabels[0][0][0][0] = -5\n\nsce = softmaxcrossentropy(\n x, labels, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[x, labels],\n outputs=[sce],\n name=\"test_sce_NCd1d2d3_none_no_weight_negative_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_none_no_weight_negative_ii_log_prob", + "code": "reduction = \"none\"\nignore_index = np.int64(-5)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3)).astype(\n np.int64\n)\nlabels[0][0][0][0] = -5\n\nloss, log_prob = softmaxcrossentropy(\n x, labels, reduction=reduction, ignore_index=ignore_index, get_log_prob=True\n)\n\nexpect(\n node,\n 
inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_sum_weight_high_ii", + "code": "reduction = \"sum\"\nignore_index = np.int64(10)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C = 3, 5\nnp.random.seed(0)\nx = np.random.rand(N, C).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N)).astype(np.int64)\nlabels[0] = 10\nweight = np.random.rand(C).astype(np.float32)\n\nsce = softmaxcrossentropy(\n x, labels, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[sce],\n name=\"test_sce_NCd1d2d3_sum_weight_high_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_sum_weight_high_ii_log_prob", + "code": "reduction = \"sum\"\nignore_index = np.int64(10)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C = 3, 5\nnp.random.seed(0)\nx = np.random.rand(N, C).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N)).astype(np.int64)\nlabels[0] = 10\nweight = np.random.rand(C).astype(np.float32)\n\nloss, log_prob = softmaxcrossentropy(\n x,\n labels,\n weight=weight,\n reduction=reduction,\n ignore_index=ignore_index,\n get_log_prob=True,\n)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[loss, log_prob],\n name=\"test_sce_NCd1d2d3_sum_weight_high_ii_log_prob\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_mean_weight", + "code": "reduction = \"mean\"\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nsce = softmaxcrossentropy(x, labels, weight=weight, reduction=reduction)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[sce],\n name=\"test_sce_NCd1d2d3d4d5_mean_weight\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_mean_weight_log_prob", + "code": "reduction = \"mean\"\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nloss, log_prob = softmaxcrossentropy(\n x, labels, weight=weight, reduction=reduction, get_log_prob=True\n)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[loss, log_prob],\n name=\"test_sce_NCd1d2d3d4d5_mean_weight_log_prob\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_none_no_weight", + "code": "reduction = \"none\"\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, 
dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\n\nsce = softmaxcrossentropy(x, labels, reduction=reduction)\n\nexpect(\n node,\n inputs=[x, labels],\n outputs=[sce],\n name=\"test_sce_NCd1d2d3d4d5_none_no_weight\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_none_no_weight_log_prob", + "code": "reduction = \"none\"\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\n\nloss, log_prob = softmaxcrossentropy(\n x, labels, reduction=reduction, get_log_prob=True\n)\n\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_NCd1d2d3d4d5_none_no_weight_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels)\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[sce], name=\"test_sce_mean\")" + }, + { + "summary": "softmaxcrossentropy_mean_3d", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\ny = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, y)\n\n# Check results\nexpect(node, inputs=[x, y], outputs=[sce], name=\"test_sce_mean_3d\")" + }, + { + "summary": "softmaxcrossentropy_mean_3d_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\ny = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, y, get_log_prob=True)\n\n# Check results\nexpect(\n node,\n inputs=[x, y],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_3d_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, get_log_prob=True)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, 
log_prob],\n name=\"test_sce_mean_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nlabels[0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, ignore_index=ignore_index)\n\n# Check results\nexpect(\n node, inputs=[x, labels], outputs=[sce], name=\"test_sce_mean_no_weight_ii\"\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii_3d", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\nlabels[0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, ignore_index=ignore_index)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[sce],\n name=\"test_sce_mean_no_weight_ii_3d\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii_3d_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\nlabels[0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, ignore_index=ignore_index, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_no_weight_ii_3d_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii_4d", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)\nlabels[0][0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(\n x, labels, reduction=reduction, ignore_index=ignore_index\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[sce],\n name=\"test_sce_mean_no_weight_ii_4d\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii_4d_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator 
inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)\nlabels[0][0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, reduction=reduction, ignore_index=ignore_index, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_no_weight_ii_4d_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nlabels[0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, ignore_index=ignore_index, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_no_weight_ii_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, weight=weights)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[sce],\n name=\"test_sce_mean_weight\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(0)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nlabels[0] = np.int64(0)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, weight=weights, ignore_index=ignore_index)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[sce],\n name=\"test_sce_mean_weight_ii\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii_3d", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(1)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\nlabels[0][0] = np.int64(1)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, weight=weights, 
ignore_index=ignore_index)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[sce],\n name=\"test_sce_mean_weight_ii_3d\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii_3d_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(1)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\nlabels[0][0] = np.int64(1)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, weight=weights, ignore_index=ignore_index, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_weight_ii_3d_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii_4d", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)\nlabels[0][0][0] = np.int64(2)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(\n x, labels, reduction=reduction, weight=weights, ignore_index=ignore_index\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[sce],\n name=\"test_sce_mean_weight_ii_4d\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii_4d_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)\nlabels[0][0][0] = np.int64(2)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x,\n labels,\n reduction=reduction,\n weight=weights,\n ignore_index=ignore_index,\n get_log_prob=True,\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_weight_ii_4d_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(0)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nlabels[0] = np.int64(0)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# 
Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, weight=weights, ignore_index=ignore_index, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_weight_ii_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, weight=weights, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_weight_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_none", + "code": "# Define operator attributes.\nreduction = \"none\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, reduction=\"none\")\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[sce], name=\"test_sce_none\")" + }, + { + "summary": "softmaxcrossentropy_none_log_prob", + "code": "# Define operator attributes.\nreduction = \"none\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, reduction=\"none\", get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_none_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_none_weights", + "code": "# Define operator attributes.\nreduction = \"none\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, weight=weights, reduction=\"none\")\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[sce],\n name=\"test_sce_none_weights\",\n)" + }, + { + "summary": "softmaxcrossentropy_none_weights_log_prob", + "code": "# Define operator attributes.\nreduction = \"none\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels 
= np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, weight=weights, reduction=\"none\", get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[loss, log_prob],\n name=\"test_sce_none_weights_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_sum", + "code": "# Define operator attributes.\nreduction = \"sum\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, reduction=\"sum\")\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[sce], name=\"test_sce_sum\")" + }, + { + "summary": "softmaxcrossentropy_sum_log_prob", + "code": "# Define operator attributes.\nreduction = \"sum\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, reduction=\"sum\", get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_sum_log_prob\",\n)" + } + ] + }, + { + "name": "SoftmaxCrossEntropyLoss", + "module": "ai.onnx", + "version": 13, + "description": "Loss function that measures the softmax cross entropy\nbetween 'scores' and 'labels'.\nThis operator first computes a loss tensor whose shape is identical to the labels input.\nIf the input is 2-D with shape (N, C), the loss tensor may be an N-element vector L = (l_1, l_2, ..., l_N).\nIf the input is an N-D tensor with shape (N, C, D1, D2, ..., Dk),\nthe loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L.\nAfter L is available, this operator can optionally apply a reduction.\n\n* shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n* shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n\nThe loss for one sample, l_i, can be calculated as follows:\n```\nl[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where c is the class index.\n```\nor\n```\nl[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided.\n```\n\nThe loss is zero for the case when the label value equals ignore_index.\n```\nl[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index\n```\n\nwhere:\n```\np = Softmax(scores)\ny = Log(p)\nc = labels[i][d1][d2]...[dk]\n```\n\nFinally, L is optionally reduced:\n\n* If reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk).\n* If reduction = 'sum', the output is scalar: Sum(L).\n* If reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: `ReduceSum(L) / ReduceSum(W)`,\n where tensor W is of shape `(N, D1, D2, ..., Dk)` and `W[n][d1][d2]...[dk] = weights[labels[n][d1][d2]...[dk]]`.\n", + "attributes":
[ + { + "name": "ignore_index", + "type": "int64", + "required": false, + "description": "Specifies a target value that is ignored and does not contribute to the input gradient. It's an optional value." + }, + { + "name": "reduction", + "type": "string", + "required": false, + "default": "mean", + "description": "Type of reduction to apply to loss: none, sum, mean(default). 'none': no reduction will be applied, 'sum': the output will be summed. 'mean': the sum of the output will be divided by the number of elements in the output." + } + ], + "inputs": [ + { + "name": "scores", + "type": "T", + "description": "The predicted outputs with shape [batch_size, class_size], or [batch_size, class_size, D1, D2 , ..., Dk], where K is the number of dimensions." + }, + { + "name": "labels", + "type": "Tind", + "description": "The ground truth output tensor, with shape [batch_size], or [batch_size, D1, D2, ..., Dk], where K is the number of dimensions. Labels element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the label values should either be in the range [0, C) or have the value ignore_index." + }, + { + "name": "weights", + "type": "T", + "option": "optional", + "description": "A manual rescaling weight given to each class. If given, it has to be a 1D Tensor assigning weight to each of the classes. Otherwise, it is treated as if having all ones." + } + ], + "min_input": 2, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Weighted loss float Tensor. If reduction is 'none', this has the shape of [batch_size], or [batch_size, D1, D2, ..., Dk] in case of K-dimensional loss. Otherwise, it is a scalar." + }, + { + "name": "log_prob", + "type": "T", + "option": "optional", + "description": "Log probability tensor. If the output of softmax is prob, its value is log(prob)." 
+ } + ], + "min_output": 1, + "max_output": 2, + "inputs_range": "2 - 3", + "outputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + }, + { + "description": "Constrain target to integer types", + "type_param_str": "Tind", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "input_shape_is_NCd1_mean_weight_negative_ii", + "code": "reduction = \"mean\"\nignore_index = np.int64(-1)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1 = 3, 5, 6\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1)).astype(np.int64)\nlabels[0][0] = -1\nweight = np.random.rand(C).astype(np.float32)\n\nsce = softmaxcrossentropy(\n x, labels, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[sce],\n name=\"test_sce_NCd1_mean_weight_negative_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1_mean_weight_negative_ii_log_prob", + "code": "reduction = \"mean\"\nignore_index = np.int64(-1)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1 = 3, 5, 6\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1)).astype(np.int64)\nlabels[0][0] = -1\nweight = np.random.rand(C).astype(np.float32)\n\nloss, log_prob = softmaxcrossentropy(\n x,\n labels,\n weight=weight,\n reduction=reduction,\n ignore_index=ignore_index,\n get_log_prob=True,\n)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[loss, log_prob],\n name=\"test_sce_NCd1_mean_weight_negative_ii_log_prob\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_none_no_weight_negative_ii", + "code": "reduction = \"none\"\nignore_index = np.int64(-5)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3)).astype(\n np.int64\n)\nlabels[0][0][0][0] = -5\n\nsce = softmaxcrossentropy(\n x, labels, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[x, labels],\n outputs=[sce],\n name=\"test_sce_NCd1d2d3_none_no_weight_negative_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_none_no_weight_negative_ii_log_prob", + "code": "reduction = \"none\"\nignore_index = np.int64(-5)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3)).astype(\n np.int64\n)\nlabels[0][0][0][0] = -5\n\nloss, log_prob = softmaxcrossentropy(\n x, labels, reduction=reduction, ignore_index=ignore_index, 
get_log_prob=True\n)\n\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_sum_weight_high_ii", + "code": "reduction = \"sum\"\nignore_index = np.int64(10)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C = 3, 5\nnp.random.seed(0)\nx = np.random.rand(N, C).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N)).astype(np.int64)\nlabels[0] = 10\nweight = np.random.rand(C).astype(np.float32)\n\nsce = softmaxcrossentropy(\n x, labels, weight=weight, reduction=reduction, ignore_index=ignore_index\n)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[sce],\n name=\"test_sce_NCd1d2d3_sum_weight_high_ii\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3_sum_weight_high_ii_log_prob", + "code": "reduction = \"sum\"\nignore_index = np.int64(10)\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\nN, C = 3, 5\nnp.random.seed(0)\nx = np.random.rand(N, C).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N)).astype(np.int64)\nlabels[0] = 10\nweight = np.random.rand(C).astype(np.float32)\n\nloss, log_prob = softmaxcrossentropy(\n x,\n labels,\n weight=weight,\n reduction=reduction,\n ignore_index=ignore_index,\n get_log_prob=True,\n)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[loss, log_prob],\n name=\"test_sce_NCd1d2d3_sum_weight_high_ii_log_prob\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_mean_weight", + "code": "reduction = \"mean\"\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nsce = softmaxcrossentropy(x, labels, weight=weight, reduction=reduction)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[sce],\n name=\"test_sce_NCd1d2d3d4d5_mean_weight\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_mean_weight_log_prob", + "code": "reduction = \"mean\"\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\nweight = np.random.rand(C).astype(np.float32)\n\nloss, log_prob = softmaxcrossentropy(\n x, labels, weight=weight, reduction=reduction, get_log_prob=True\n)\n\nexpect(\n node,\n inputs=[x, labels, weight],\n outputs=[loss, log_prob],\n name=\"test_sce_NCd1d2d3d4d5_mean_weight_log_prob\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_none_no_weight", + "code": "reduction = \"none\"\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 
4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\n\nsce = softmaxcrossentropy(x, labels, reduction=reduction)\n\nexpect(\n node,\n inputs=[x, labels],\n outputs=[sce],\n name=\"test_sce_NCd1d2d3d4d5_none_no_weight\",\n)" + }, + { + "summary": "input_shape_is_NCd1d2d3d4d5_none_no_weight_log_prob", + "code": "reduction = \"none\"\n\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(\n 0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)\n).astype(np.int64)\n\nloss, log_prob = softmaxcrossentropy(\n x, labels, reduction=reduction, get_log_prob=True\n)\n\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_NCd1d2d3d4d5_none_no_weight_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels)\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[sce], name=\"test_sce_mean\")" + }, + { + "summary": "softmaxcrossentropy_mean_3d", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\ny = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, y)\n\n# Check results\nexpect(node, inputs=[x, y], outputs=[sce], name=\"test_sce_mean_3d\")" + }, + { + "summary": "softmaxcrossentropy_mean_3d_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\ny = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, y, get_log_prob=True)\n\n# Check results\nexpect(\n node,\n inputs=[x, y],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_3d_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, get_log_prob=True)\n\n# Check results\nexpect(\n 
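# 'expect' is the ONNX backend-test helper used throughout these snippets; it records the node, inputs, and expected outputs as a named test case\n 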
node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nlabels[0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, ignore_index=ignore_index)\n\n# Check results\nexpect(\n node, inputs=[x, labels], outputs=[sce], name=\"test_sce_mean_no_weight_ii\"\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii_3d", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\nlabels[0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, ignore_index=ignore_index)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[sce],\n name=\"test_sce_mean_no_weight_ii_3d\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii_3d_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\nlabels[0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, ignore_index=ignore_index, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_no_weight_ii_3d_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii_4d", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)\nlabels[0][0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(\n x, labels, reduction=reduction, ignore_index=ignore_index\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[sce],\n name=\"test_sce_mean_no_weight_ii_4d\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii_4d_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n 
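# labels equal to ignore_index contribute zero loss and are excluded from the 'mean' reduction\n 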
ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)\nlabels[0][0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, reduction=reduction, ignore_index=ignore_index, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_no_weight_ii_4d_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_no_weights_ii_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nlabels[0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, ignore_index=ignore_index, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_no_weight_ii_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, weight=weights)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[sce],\n name=\"test_sce_mean_weight\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(0)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nlabels[0] = np.int64(0)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, weight=weights, ignore_index=ignore_index)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[sce],\n name=\"test_sce_mean_weight_ii\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii_3d", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(1)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\nlabels[0][0] = np.int64(1)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = 
softmaxcrossentropy(x, labels, weight=weights, ignore_index=ignore_index)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[sce],\n name=\"test_sce_mean_weight_ii_3d\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii_3d_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(1)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)\nlabels[0][0] = np.int64(1)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, weight=weights, ignore_index=ignore_index, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_weight_ii_3d_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii_4d", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)\nlabels[0][0][0] = np.int64(2)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(\n x, labels, reduction=reduction, weight=weights, ignore_index=ignore_index\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[sce],\n name=\"test_sce_mean_weight_ii_4d\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii_4d_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)\nlabels[0][0][0] = np.int64(2)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x,\n labels,\n reduction=reduction,\n weight=weights,\n ignore_index=ignore_index,\n get_log_prob=True,\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_weight_ii_4d_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_ii_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\nignore_index = np.int64(0)\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n ignore_index=ignore_index,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nlabels[0] = np.int64(0)\nweights = np.array([0.9, 
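\n # per-class rescaling weights; the class-0 entry is unused here because ignore_index=0\n 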
0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, weight=weights, ignore_index=ignore_index, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_weight_ii_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_mean_weights_log_prob", + "code": "# Define operator attributes.\nreduction = \"mean\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, weight=weights, get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[loss, log_prob],\n name=\"test_sce_mean_weight_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_none", + "code": "# Define operator attributes.\nreduction = \"none\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, reduction=\"none\")\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[sce], name=\"test_sce_none\")" + }, + { + "summary": "softmaxcrossentropy_none_log_prob", + "code": "# Define operator attributes.\nreduction = \"none\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, reduction=\"none\", get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_none_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_none_weights", + "code": "# Define operator attributes.\nreduction = \"none\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, weight=weights, reduction=\"none\")\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[sce],\n name=\"test_sce_none_weights\",\n)" + }, + { + "summary": "softmaxcrossentropy_none_weights_log_prob", + "code": "# Define operator attributes.\nreduction = \"none\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\", \"w\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = 
np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, weight=weights, reduction=\"none\", get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels, weights],\n outputs=[loss, log_prob],\n name=\"test_sce_none_weights_log_prob\",\n)" + }, + { + "summary": "softmaxcrossentropy_sum", + "code": "# Define operator attributes.\nreduction = \"sum\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, reduction=\"sum\")\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[sce], name=\"test_sce_sum\")" + }, + { + "summary": "softmaxcrossentropy_sum_log_prob", + "code": "# Define operator attributes.\nreduction = \"sum\"\n\n# Create operator.\nnode = onnx.helper.make_node(\n \"SoftmaxCrossEntropyLoss\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\", \"log_prob\"],\n reduction=reduction,\n)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(\n x, labels, reduction=\"sum\", get_log_prob=True\n)\n\n# Check results\nexpect(\n node,\n inputs=[x, labels],\n outputs=[loss, log_prob],\n name=\"test_sce_sum_log_prob\",\n)" + } + ] + }, + { + "name": "Softplus", + "module": "ai.onnx", + "version": 1, + "description": "Softplus takes one input data (Tensor) and produces one output data\n(Tensor) where the softplus function, y = ln(exp(x) + 1), is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "1D input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "1D output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "softplus", + "code": "node = onnx.helper.make_node(\n \"Softplus\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.log(\n np.exp(x) + 1\n) # expected output [0.31326166, 0.69314718, 1.31326163]\nexpect(node, inputs=[x], outputs=[y], name=\"test_softplus_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.log(np.exp(x) + 1)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softplus\")" + } + ], + "category": "Activation" + }, + { + "name": "Softsign", + "module": "ai.onnx", + "version": 1, + "description": "Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The softsign (x/(1+|x|)) values of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + 
"type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "softsign", + "code": "node = onnx.helper.make_node(\n \"Softsign\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-0.5, 0, 0.5]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_softsign_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x / (1 + np.abs(x))\nexpect(node, inputs=[x], outputs=[y], name=\"test_softsign\")" + } + ], + "category": "Activation" + }, + { + "name": "SpaceToDepth", + "module": "ai.onnx", + "version": 1, + "description": "SpaceToDepth rearranges blocks of spatial data into depth. More specifically,\nthis op outputs a copy of the input tensor where values from the height and width dimensions\nare moved to the depth dimension.\n", + "attributes": [ + { + "name": "blocksize", + "type": "int64", + "required": true, + "description": "Blocks of [blocksize, blocksize] are moved." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor of [N, C * blocksize * blocksize, H/blocksize, W/blocksize]." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "example", + "code": "node = onnx.helper.make_node(\n \"SpaceToDepth\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n blocksize=2,\n)\n\n# (1, 1, 4, 6) input tensor\nx = np.array(\n [\n [\n [\n [0, 6, 1, 7, 2, 8],\n [12, 18, 13, 19, 14, 20],\n [3, 9, 4, 10, 5, 11],\n [15, 21, 16, 22, 17, 23],\n ]\n ]\n ]\n).astype(np.float32)\n\n# (1, 4, 2, 3) output tensor\ny = np.array(\n [\n [\n [[0, 1, 2], [3, 4, 5]],\n [[6, 7, 8], [9, 10, 11]],\n [[12, 13, 14], [15, 16, 17]],\n [[18, 19, 20], [21, 22, 23]],\n ]\n ]\n).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_spacetodepth_example\")" + }, + { + "summary": "spacetodepth", + "code": "b, c, h, w = shape = (2, 2, 6, 6)\nblocksize = 2\nnode = onnx.helper.make_node(\n \"SpaceToDepth\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n blocksize=blocksize,\n)\nx = np.random.random_sample(shape).astype(np.float32)\ntmp = np.reshape(\n x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize]\n)\ntmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])\ny = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])\nexpect(node, inputs=[x], outputs=[y], name=\"test_spacetodepth\")" + } + ] + }, + { + "name": "SpaceToDepth", + "module": "ai.onnx", + "version": 13, + "description": "SpaceToDepth rearranges blocks of spatial data into depth. 
More specifically,\nthis op outputs a copy of the input tensor where values from the height and width dimensions\nare moved to the depth dimension.\n", + "attributes": [ + { + "name": "blocksize", + "type": "int64", + "required": true, + "description": "Blocks of [blocksize, blocksize] are moved." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor of [N, C * blocksize * blocksize, H/blocksize, W/blocksize]." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "example", + "code": "node = onnx.helper.make_node(\n \"SpaceToDepth\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n blocksize=2,\n)\n\n# (1, 1, 4, 6) input tensor\nx = np.array(\n [\n [\n [\n [0, 6, 1, 7, 2, 8],\n [12, 18, 13, 19, 14, 20],\n [3, 9, 4, 10, 5, 11],\n [15, 21, 16, 22, 17, 23],\n ]\n ]\n ]\n).astype(np.float32)\n\n# (1, 4, 2, 3) output tensor\ny = np.array(\n [\n [\n [[0, 1, 2], [3, 4, 5]],\n [[6, 7, 8], [9, 10, 11]],\n [[12, 13, 14], [15, 16, 17]],\n [[18, 19, 20], [21, 22, 23]],\n ]\n ]\n).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name=\"test_spacetodepth_example\")" + }, + { + "summary": "spacetodepth", + "code": "b, c, h, w = shape = (2, 2, 6, 6)\nblocksize = 2\nnode = onnx.helper.make_node(\n \"SpaceToDepth\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n blocksize=blocksize,\n)\nx = np.random.random_sample(shape).astype(np.float32)\ntmp = np.reshape(\n x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize]\n)\ntmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])\ny = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])\nexpect(node, inputs=[x], outputs=[y], name=\"test_spacetodepth\")" + } + ] + }, + { + "name": "Split", + "module": "ai.onnx", + "version": 1, + "description": "Split a tensor into a list of tensors, along the specified\n'axis'. The lengths of the split can be specified using argument 'split' or the\noptional second input of the operator. 
Otherwise, the tensor is split\nto equal sized parts.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to split on" + }, + { + "name": "split", + "type": "int64[]", + "required": false, + "description": "length of each output" + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The tensor to split" + }, + { + "name": "split", + "type": "T", + "option": "optional", + "description": "Optional list of output lengths (see also arg 'split')" + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "outputs...", + "type": "T", + "list": true, + "description": "One or more outputs forming list of tensors after splitting" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "1 - 2", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "1d_opset13", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_1d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_1d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "1d_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=0,\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_1d_opset18\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_1d_opset18\",\n)" + }, + { + "summary": "1d_uneven_split_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\", \"output_4\"],\n num_outputs=4,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 
4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n np.array([7.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_1d_uneven_split_opset18\",\n)" + }, + { + "summary": "2d_opset13", + "code": "node_input = np.array(\n [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\"], outputs=[\"output_1\", \"output_2\"], axis=1\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_2d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),\n np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(\n np.float32\n ),\n]\n\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_2d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "2d_opset18", + "code": "node_input = np.array(\n [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n num_outputs=2,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_2d\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),\n np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(\n np.float32\n ),\n]\n\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_2d_opset18\",\n)" + }, + { + "summary": "2d_uneven_split_opset18", + "code": "node_input = np.array(\n [\n [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],\n [9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0],\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=1,\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [9.0, 10.0, 11.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [12.0, 13.0, 14.0]]).astype(np.float32),\n np.array([[7.0, 8.0], [15.0, 16.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_2d_uneven_split_opset18\",\n)" + }, + { + "summary": "default_values_opset13", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\"], outputs=[\"output_1\", \"output_2\", \"output_3\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n 
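# three outputs requested with no 'split' input, so each part gets 6/3 = 2 elements\n 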
np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_default_axis_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\", \"split\"], outputs=[\"output_1\", \"output_2\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_default_axis_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "default_values_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_default_axis_opset18\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\", \"split\"], outputs=[\"output_1\", \"output_2\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_default_axis_opset18\",\n)" + }, + { + "summary": "zero_size_splits_opset13", + "code": "# 1-dimensional tensor with dimension_size=0\nnode_input = np.array([]).astype(np.float32)\n\n# Split an empty tensor into tensors of size zero\nsplit = np.array([0, 0, 0]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n)\n\nexpected_outputs = [\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_zero_size_splits_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "zero_size_splits_opset18", + "code": "# 1-dimensional tensor with dimension_size=0\nnode_input = np.array([]).astype(np.float32)\n\n# Split an empty tensor into tensors of size zero\nsplit = np.array([0, 0, 0]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n)\n\nexpected_outputs = [\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_zero_size_splits_opset18\",\n)" + } + ], + "category": "Tensor" + }, + { + "name": "Split", + "module": "ai.onnx", + "version": 2, + "description": "Split a tensor into a list of tensors, along the specified\n'axis'. Lengths of the parts can be specified using argument 'split'.\nOtherwise, the tensor is split to equal sized parts.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to split on. 
" + }, + { + "name": "split", + "type": "int64[]", + "required": false, + "description": "length of each output" + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The tensor to split" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "outputs", + "type": "T", + "list": true, + "description": "One or more outputs forming list of tensors after splitting" + } + ], + "min_output": 1, + "max_output": 2147483647, + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "1d_opset13", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_1d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_1d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "1d_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=0,\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_1d_opset18\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_1d_opset18\",\n)" + }, + { + "summary": "1d_uneven_split_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\", \"output_4\"],\n num_outputs=4,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n 
np.array([7.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_1d_uneven_split_opset18\",\n)" + }, + { + "summary": "2d_opset13", + "code": "node_input = np.array(\n [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\"], outputs=[\"output_1\", \"output_2\"], axis=1\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_2d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),\n np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(\n np.float32\n ),\n]\n\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_2d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "2d_opset18", + "code": "node_input = np.array(\n [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n num_outputs=2,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_2d\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),\n np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(\n np.float32\n ),\n]\n\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_2d_opset18\",\n)" + }, + { + "summary": "2d_uneven_split_opset18", + "code": "node_input = np.array(\n [\n [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],\n [9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0],\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=1,\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [9.0, 10.0, 11.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [12.0, 13.0, 14.0]]).astype(np.float32),\n np.array([[7.0, 8.0], [15.0, 16.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_2d_uneven_split_opset18\",\n)" + }, + { + "summary": "default_values_opset13", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\"], outputs=[\"output_1\", \"output_2\", \"output_3\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n 
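# make_opsetid(\"\", 13) pins this test case to opset 13 of the default ONNX domain\n 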
inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_default_axis_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\", \"split\"], outputs=[\"output_1\", \"output_2\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_default_axis_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "default_values_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_default_axis_opset18\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\", \"split\"], outputs=[\"output_1\", \"output_2\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_default_axis_opset18\",\n)" + }, + { + "summary": "zero_size_splits_opset13", + "code": "# 1-dimensional tensor with dimension_size=0\nnode_input = np.array([]).astype(np.float32)\n\n# Split an empty tensor into tensors of size zero\nsplit = np.array([0, 0, 0]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n)\n\nexpected_outputs = [\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_zero_size_splits_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "zero_size_splits_opset18", + "code": "# 1-dimensional tensor with dimension_size=0\nnode_input = np.array([]).astype(np.float32)\n\n# Split an empty tensor into tensors of size zero\nsplit = np.array([0, 0, 0]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n)\n\nexpected_outputs = [\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_zero_size_splits_opset18\",\n)" + } + ], + "category": "Tensor" + }, + { + "name": "Split", + "module": "ai.onnx", + "version": 11, + "description": "Split a tensor into a list of tensors, along the specified\n'axis'. Lengths of the parts can be specified using argument 'split'.\nOtherwise, the tensor is split to equal sized parts.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)."
+ }, + { + "name": "split", + "type": "int64[]", + "required": false, + "description": "length of each output. Values should be >= 0." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The tensor to split" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "outputs", + "type": "T", + "list": true, + "description": "One or more outputs forming list of tensors after splitting" + } + ], + "min_output": 1, + "max_output": 2147483647, + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "1d_opset13", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_1d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_1d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "1d_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=0,\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_1d_opset18\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_1d_opset18\",\n)" + }, + { + "summary": "1d_uneven_split_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\", \"output_4\"],\n num_outputs=4,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n 
np.array([7.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_1d_uneven_split_opset18\",\n)" + }, + { + "summary": "2d_opset13", + "code": "node_input = np.array(\n [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\"], outputs=[\"output_1\", \"output_2\"], axis=1\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_2d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),\n np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(\n np.float32\n ),\n]\n\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_2d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "2d_opset18", + "code": "node_input = np.array(\n [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n num_outputs=2,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_2d\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),\n np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(\n np.float32\n ),\n]\n\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_2d_opset18\",\n)" + }, + { + "summary": "2d_uneven_split_opset18", + "code": "node_input = np.array(\n [\n [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],\n [9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0],\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=1,\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [9.0, 10.0, 11.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [12.0, 13.0, 14.0]]).astype(np.float32),\n np.array([[7.0, 8.0], [15.0, 16.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_2d_uneven_split_opset18\",\n)" + }, + { + "summary": "default_values_opset13", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\"], outputs=[\"output_1\", \"output_2\", \"output_3\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n 
inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_default_axis_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\", \"split\"], outputs=[\"output_1\", \"output_2\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_default_axis_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "default_values_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_default_axis_opset18\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\", \"split\"], outputs=[\"output_1\", \"output_2\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_default_axis_opset18\",\n)" + }, + { + "summary": "zero_size_splits_opset13", + "code": "# 1-dimensional tensor with dimension_size=0\nnode_input = np.array([]).astype(np.float32)\n\n# Split empty tensor to tensors of size zero\nsplit = np.array([0, 0, 0]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n)\n\nexpected_outputs = [\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_zero_size_splits_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "zero_size_splits_opset18", + "code": "# 1-dimensional tensor with dimension_size=0\nnode_input = np.array([]).astype(np.float32)\n\n# Split empty tensor to tensors of size zero\nsplit = np.array([0, 0, 0]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n)\n\nexpected_outputs = [\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_zero_size_splits_opset18\",\n)" + } + ], + "category": "Tensor" + }, + { + "name": "Split", + "module": "ai.onnx", + "version": 13, + "description": "Split a tensor into a list of tensors, along the specified\n'axis'. Lengths of the parts can be specified using input 'split'.\nOtherwise, the tensor is split into equal sized parts.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)."
+ } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The tensor to split" + }, + { + "name": "split", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional length of each output. Values should be >= 0. Sum of the values must be equal to the dim value at 'axis' specified." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "outputs", + "type": "T", + "list": true, + "description": "One or more outputs forming list of tensors after splitting" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "1 - 2", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "1d_opset13", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_1d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_1d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "1d_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=0,\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_1d_opset18\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_1d_opset18\",\n)" + }, + { + "summary": "1d_uneven_split_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\", \"output_4\"],\n num_outputs=4,\n)\n\nexpected_outputs = [\n 
np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n np.array([7.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_1d_uneven_split_opset18\",\n)" + }, + { + "summary": "2d_opset13", + "code": "node_input = np.array(\n [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\"], outputs=[\"output_1\", \"output_2\"], axis=1\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_2d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),\n np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(\n np.float32\n ),\n]\n\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_2d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "2d_opset18", + "code": "node_input = np.array(\n [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n num_outputs=2,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_2d\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),\n np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(\n np.float32\n ),\n]\n\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_2d_opset18\",\n)" + }, + { + "summary": "2d_uneven_split_opset18", + "code": "node_input = np.array(\n [\n [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],\n [9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0],\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=1,\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [9.0, 10.0, 11.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [12.0, 13.0, 14.0]]).astype(np.float32),\n np.array([[7.0, 8.0], [15.0, 16.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_2d_uneven_split_opset18\",\n)" + }, + { + "summary": "default_values_opset13", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\"], outputs=[\"output_1\", \"output_2\", \"output_3\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 
2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_default_axis_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\", \"split\"], outputs=[\"output_1\", \"output_2\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_default_axis_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "default_values_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_default_axis_opset18\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\", \"split\"], outputs=[\"output_1\", \"output_2\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_default_axis_opset18\",\n)" + }, + { + "summary": "zero_size_splits_opset13", + "code": "# 1-dimensional tensor with dimension_size=0\nnode_input = np.array([]).astype(np.float32)\n\n# Split empty tensor to tensors of size zero\nsplit = np.array([0, 0, 0]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n)\n\nexpected_outputs = [\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_zero_size_splits_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "zero_size_splits_opset18", + "code": "# 1-dimensional tensor with dimension_size=0\nnode_input = np.array([]).astype(np.float32)\n\n# Split empty tensor to tensors of size zero\nsplit = np.array([0, 0, 0]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n)\n\nexpected_outputs = [\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_zero_size_splits_opset18\",\n)" + } + ], + "category": "Tensor" + }, + { + "name": "Split", + "module": "ai.onnx", + "version": 18, + "description": "Split a tensor into a list of tensors, along the specified 'axis'.\nEither input 'split' or the attribute 'num_outputs' should be specified, but not both.\nIf the attribute 'num_outputs' is specified, then the tensor is split into equal sized parts.\nIf the tensor is not evenly splittable 
into `num_outputs`, the last chunk will be smaller.\nIf the input 'split' is specified, it indicates the sizes of each output in the split.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + }, + { + "name": "num_outputs", + "type": "int64", + "required": false, + "description": "Number of outputs to split parts of the tensor into. If the tensor is not evenly splittable, the last chunk will be smaller." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The tensor to split" + }, + { + "name": "split", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional length of each output. Values should be >= 0. Sum of the values must be equal to the dim value at 'axis' specified." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "outputs", + "type": "T", + "list": true, + "description": "One or more outputs forming list of tensors after splitting" + } + ], + "min_output": 1, + "max_output": 2147483647, + "inputs_range": "1 - 2", + "outputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "1d_opset13", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_1d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_1d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "1d_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=0,\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_1d_opset18\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=0,\n)\n\nexpected_outputs = [\n np.array([1.0, 
2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_1d_opset18\",\n)" + }, + { + "summary": "1d_uneven_split_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\", \"output_4\"],\n num_outputs=4,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n np.array([7.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_1d_uneven_split_opset18\",\n)" + }, + { + "summary": "2d_opset13", + "code": "node_input = np.array(\n [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\"], outputs=[\"output_1\", \"output_2\"], axis=1\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_2d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),\n np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(\n np.float32\n ),\n]\n\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_2d_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "2d_opset18", + "code": "node_input = np.array(\n [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n num_outputs=2,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_2d\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\"],\n axis=1,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),\n np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(\n np.float32\n ),\n]\n\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_2d_opset18\",\n)" + }, + { + "summary": "2d_uneven_split_opset18", + "code": "node_input = np.array(\n [\n [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],\n [9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0],\n ]\n).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n axis=1,\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([[1.0, 2.0, 3.0], [9.0, 10.0, 11.0]]).astype(np.float32),\n np.array([[4.0, 5.0, 6.0], 
[12.0, 13.0, 14.0]]).astype(np.float32),\n np.array([[7.0, 8.0], [15.0, 16.0]]).astype(np.float32),\n]\n\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_2d_uneven_split_opset18\",\n)" + }, + { + "summary": "default_values_opset13", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\"], outputs=[\"output_1\", \"output_2\", \"output_3\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_default_axis_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\", \"split\"], outputs=[\"output_1\", \"output_2\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_default_axis_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "default_values_opset18", + "code": "node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n num_outputs=3,\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0]).astype(np.float32),\n np.array([5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input],\n outputs=expected_outputs,\n name=\"test_split_equal_parts_default_axis_opset18\",\n)\n\nsplit = np.array([2, 4]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\", inputs=[\"input\", \"split\"], outputs=[\"output_1\", \"output_2\"]\n)\n\nexpected_outputs = [\n np.array([1.0, 2.0]).astype(np.float32),\n np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_variable_parts_default_axis_opset18\",\n)" + }, + { + "summary": "zero_size_splits_opset13", + "code": "# 1-dimensional tensor with dimension_size=0\nnode_input = np.array([]).astype(np.float32)\n\n# Split empty tensor to tensors of size zero\nsplit = np.array([0, 0, 0]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n)\n\nexpected_outputs = [\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_zero_size_splits_opset13\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 13)],\n)" + }, + { + "summary": "zero_size_splits_opset18", + "code": "# 1-dimensional tensor with dimension_size=0\nnode_input = np.array([]).astype(np.float32)\n\n# Split empty tensor to tensors of size zero\nsplit = np.array([0, 0, 0]).astype(np.int64)\nnode = onnx.helper.make_node(\n \"Split\",\n inputs=[\"input\", \"split\"],\n outputs=[\"output_1\", \"output_2\", \"output_3\"],\n)\n\nexpected_outputs = [\n np.array([]).astype(np.float32),\n 
np.array([]).astype(np.float32),\n np.array([]).astype(np.float32),\n]\nexpect(\n node,\n inputs=[node_input, split],\n outputs=expected_outputs,\n name=\"test_split_zero_size_splits_opset18\",\n)" + } + ], + "category": "Tensor" + }, + { + "name": "SplitToSequence", + "module": "ai.onnx", + "version": 11, + "description": "Split a tensor into a sequence of tensors, along the specified 'axis'.\nLengths of the parts can be specified using the optional argument 'split'.\nIf the argument 'split' is not specified, a default scalar value of 1\nis used as the value of 'split'.\n'split' must contain only positive numbers.\n'split' is either a scalar (tensor of empty shape), or a 1-D tensor.\nIf 'split' is a scalar, then 'input' will be split into chunks all of size 'split'\nif possible. The last chunk alone may be smaller than 'split' if the 'input' size\nalong the given axis 'axis' is not divisible by 'split'.\nIf 'split' is a 1-dimensional tensor, the input tensor is split into 'size(split)' chunks,\nwith lengths of the parts on 'axis' specified in 'split'. In this scenario, the sum of entries\nin 'split' must be equal to the dimension size of input tensor on 'axis'.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1]." + }, + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the split dimension or not. Default 1, which means we keep the split dimension. If input 'split' is specified, this attribute is ignored." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "The tensor to split" + }, + { + "name": "split", + "type": "I", + "option": "optional", + "description": "Length of each output. It can be either a scalar (tensor of empty shape), or a 1-D tensor. All values must be >= 0. 
" + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "output_sequence", + "type": "S", + "description": "One or more outputs forming a sequence of tensors after splitting" + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain split size to integral tensor.", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ] + }, + { + "description": "Constrain output types to all tensor types.", + "type_param_str": "S", + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ] + } + ], + "examples": [ + { + "summary": "nokeepdims", + "code": "data = np.arange(18).reshape((3, 6)).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"SplitToSequence\",\n [\"data\"],\n [\"seq\"],\n axis=1,\n keepdims=0,\n)\n\nexpected_outputs = [[data[:, i] for i in range(data.shape[1])]]\n\nexpect(\n node,\n inputs=[data],\n outputs=expected_outputs,\n name=\"test_split_to_sequence_nokeepdims\",\n)" + }, + { + "summary": "with_split_1", + "code": "data = np.arange(18).reshape((3, 6)).astype(np.float32)\nsplit = np.array(2, dtype=np.int64)\n\nnode = onnx.helper.make_node(\n \"SplitToSequence\", [\"data\", \"split\"], [\"seq\"], axis=1\n)\n\nexpected_outputs = [\n [\n np.array([[0.0, 1.0], [6.0, 7.0], [12.0, 13.0]], dtype=np.float32),\n np.array([[2.0, 3.0], [8.0, 9.0], [14.0, 15.0]], dtype=np.float32),\n np.array([[4.0, 5.0], [10.0, 11.0], [16.0, 17.0]], dtype=np.float32),\n ]\n]\n\nexpect(\n node,\n inputs=[data, split],\n outputs=expected_outputs,\n name=\"test_split_to_sequence_1\",\n)" + }, + { + "summary": "with_split_2", + "code": "data = np.arange(18).reshape((3, 6)).astype(np.float32)\nsplit = np.array([1, 2], dtype=np.int64)\n\nnode = onnx.helper.make_node(\n \"SplitToSequence\", [\"data\", \"split\"], [\"seq\"], axis=0\n)\n\nexpected_outputs = [\n [\n data[:1],\n data[1:],\n ]\n]\n\nexpect(\n node,\n inputs=[data, split],\n outputs=expected_outputs,\n name=\"test_split_to_sequence_2\",\n)" + } + ] + }, + { + "name": "Sqrt", + "module": "ai.onnx", + "version": 1, + "description": "Square root takes one input data (Tensor) and produces one output data\n(Tensor) where the square root is, y = x^0.5, is applied to\nthe tensor elementwise. If x is negative, then it will return NaN.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sqrt", + "code": "node = onnx.helper.make_node(\n \"Sqrt\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([1, 4, 9]).astype(np.float32)\ny = np.sqrt(x) # expected output [1., 2., 3.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_sqrt_example\")\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\ny = np.sqrt(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_sqrt\")" + } + ] + }, + { + "name": "Sqrt", + "module": "ai.onnx", + "version": 6, + "description": "Square root takes one input data (Tensor) and produces one output data\n(Tensor) where the square root, y = x^0.5, is applied to\nthe tensor elementwise. If x is negative, then it will return NaN.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sqrt", + "code": "node = onnx.helper.make_node(\n \"Sqrt\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([1, 4, 9]).astype(np.float32)\ny = np.sqrt(x) # expected output [1., 2., 3.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_sqrt_example\")\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\ny = np.sqrt(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_sqrt\")" + } + ] + }, + { + "name": "Sqrt", + "module": "ai.onnx", + "version": 13, + "description": "Square root takes one input data (Tensor) and produces one output data\n(Tensor) where the square root, y = x^0.5, is applied to\nthe tensor elementwise. 
If x is negative, then it will return NaN.\n", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "sqrt", + "code": "node = onnx.helper.make_node(\n \"Sqrt\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([1, 4, 9]).astype(np.float32)\ny = np.sqrt(x) # expected output [1., 2., 3.]\nexpect(node, inputs=[x], outputs=[y], name=\"test_sqrt_example\")\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\ny = np.sqrt(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_sqrt\")" + } + ] + }, + { + "name": "Squeeze", + "module": "ai.onnx", + "version": 1, + "description": "Remove single-dimensional entries from the shape of a tensor.\nTakes a parameter `axes` with a list of axes to squeeze.\nIf `axes` is not provided, all the single dimensions will be removed from\nthe shape. If an axis is selected with shape entry not equal to one, an error is raised.\n", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "List of non-negative integers, indicating the dimensions to squeeze." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensors with at least max(dims) dimensions." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "squeezed", + "type": "T", + "description": "Reshaped tensor with same data as input." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "squeeze", + "code": "node = onnx.helper.make_node(\n \"Squeeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\naxes = np.array([0], dtype=np.int64)\ny = np.squeeze(x, axis=0)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_squeeze\")" + }, + { + "summary": "squeeze_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Squeeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\naxes = np.array([-2], dtype=np.int64)\ny = np.squeeze(x, axis=-2)\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_squeeze_negative_axes\")" + } + ], + "category": "Transform" + }, + { + "name": "Squeeze", + "module": "ai.onnx", + "version": 11, + "description": "Remove single-dimensional entries from the shape of a tensor.\nTakes a parameter `axes` with a list of axes to squeeze.\nIf `axes` is not provided, all the single dimensions will be removed from\nthe shape. 
If an axis is selected with shape entry not equal to one, an error is raised.\n", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": false, + "description": "List of integers indicating the dimensions to squeeze. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensors with at least max(dims) dimensions." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "squeezed", + "type": "T", + "description": "Reshaped tensor with same data as input." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "squeeze", + "code": "node = onnx.helper.make_node(\n \"Squeeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\naxes = np.array([0], dtype=np.int64)\ny = np.squeeze(x, axis=0)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_squeeze\")" + }, + { + "summary": "squeeze_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Squeeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\naxes = np.array([-2], dtype=np.int64)\ny = np.squeeze(x, axis=-2)\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_squeeze_negative_axes\")" + } + ], + "category": "Transform" + }, + { + "name": "Squeeze", + "module": "ai.onnx", + "version": 13, + "description": "Remove single-dimensional entries from the shape of a tensor.\nTakes an input `axes` with a list of axes to squeeze.\nIf `axes` is not provided, all the single dimensions will be removed from\nthe shape. If an axis is selected with shape entry not equal to one, an error is raised.\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensors with at least max(dims) dimensions." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "List of integers indicating the dimensions to squeeze. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "squeezed", + "type": "T", + "description": "Reshaped tensor with same data as input." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "squeeze", + "code": "node = onnx.helper.make_node(\n \"Squeeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\naxes = np.array([0], dtype=np.int64)\ny = np.squeeze(x, axis=0)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_squeeze\")" + }, + { + "summary": "squeeze_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Squeeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\naxes = np.array([-2], dtype=np.int64)\ny = np.squeeze(x, axis=-2)\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_squeeze_negative_axes\")" + } + ], + "category": "Transform" + }, + { + "name": "Squeeze", + "module": "ai.onnx", + "version": 21, + "description": "Remove single-dimensional entries from the shape of a tensor.\nTakes an input `axes` with a list of axes to squeeze.\nIf `axes` is not provided, all the single dimensions will be removed from\nthe shape. If an axis is selected with shape entry not equal to one, an error is raised.\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Tensors with at least max(dims) dimensions." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "List of integers indicating the dimensions to squeeze. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "squeezed", + "type": "T", + "description": "Reshaped tensor with same data as input." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types up to IRv10.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + } + ], + "examples": [ + { + "summary": "squeeze", + "code": "node = onnx.helper.make_node(\n \"Squeeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\naxes = np.array([0], dtype=np.int64)\ny = np.squeeze(x, axis=0)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_squeeze\")" + }, + { + "summary": "squeeze_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Squeeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\naxes = np.array([-2], dtype=np.int64)\ny = np.squeeze(x, axis=-2)\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_squeeze_negative_axes\")" + } + ], + "category": "Transform" + }, + { + "name": "StringConcat", + "module": "ai.onnx", + "version": 20, + "description": "StringConcat concatenates string tensors elementwise (with NumPy-style broadcasting support)", + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Tensor to prepend in concatenation" + }, + { + "name": "Y", + "type": "T", + "description": "Tensor to append in concatenation" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Z", + "type": "T", + "description": "Concatenated string tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Inputs and outputs must be UTF-8 strings", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(string)" + ] + } + ], + "examples": [ + { + "summary": "stringconcat", + "code": "node = onnx.helper.make_node(\n \"StringConcat\",\n inputs=[\"x\", \"y\"],\n outputs=[\"result\"],\n)\nx = np.array([\"abc\", \"def\"]).astype(\"object\")\ny = np.array([\".com\", \".net\"]).astype(\"object\")\nresult = np.array([\"abc.com\", \"def.net\"]).astype(\"object\")\n\nexpect(node, inputs=[x, y], outputs=[result], name=\"test_string_concat\")\n\nx = np.array([\"cat\", \"dog\", \"snake\"]).astype(\"object\")\ny = np.array([\"s\"]).astype(\"object\")\nresult = np.array([\"cats\", \"dogs\", \"snakes\"]).astype(\"object\")\n\nexpect(\n node,\n inputs=[x, y],\n outputs=[result],\n name=\"test_string_concat_broadcasting\",\n)\n\nx = np.array(\"cat\").astype(\"object\")\ny = np.array(\"s\").astype(\"object\")\nresult = np.array(\"cats\").astype(\"object\")\n\nexpect(\n node,\n inputs=[x, y],\n outputs=[result],\n name=\"test_string_concat_zero_dimensional\",\n)\n\nx = np.array([\"abc\", \"\"]).astype(\"object\")\ny = np.array([\"\", \"abc\"]).astype(\"object\")\nresult = np.array([\"abc\", \"abc\"]).astype(\"object\")\n\nexpect(\n node,\n inputs=[x, y],\n outputs=[result],\n name=\"test_string_concat_empty_string\",\n)\n\nx = np.array([\"\u7684\", \"\u4e2d\"]).astype(\"object\")\ny = np.array([\"\u7684\", \"\u4e2d\"]).astype(\"object\")\nresult = np.array([\"\u7684\u7684\", 
\"\u4e2d\u4e2d\"]).astype(\"object\")\n\nexpect(\n node,\n inputs=[x, y],\n outputs=[result],\n name=\"test_string_concat_utf8\",\n)" + } + ] + }, + { + "name": "StringNormalizer", + "module": "ai.onnx", + "version": 10, + "description": "StringNormalization performs string operations for basic cleaning.\nThis operator has only one input (denoted by X) and only one output\n(denoted by Y). This operator first examines the elements in the X,\nand removes elements specified in \"stopwords\" attribute.\nAfter removing stop words, the intermediate result can be further lowercased,\nuppercased, or just returned depending the \"case_change_action\" attribute.\nThis operator only accepts [C]- and [1, C]-tensor.\nIf all elements in X are dropped, the output will be the empty value of string tensor with shape [1]\nif input shape is [C] and shape [1, 1] if input shape is [1, C].\n", + "attributes": [ + { + "name": "case_change_action", + "type": "string", + "required": false, + "default": "NONE", + "description": "string enum that cases output to be lowercased/uppercases/unchanged. Valid values are \"LOWER\", \"UPPER\", \"NONE\". Default is \"NONE\"" + }, + { + "name": "is_case_sensitive", + "type": "int64", + "required": false, + "description": "Boolean. Whether the identification of stop words in X is case-sensitive. Default is false" + }, + { + "name": "locale", + "type": "string", + "required": false, + "description": "Environment dependent string that denotes the locale according to which output strings needs to be upper/lowercased.Default en_US or platform specific equivalent as decided by the implementation." + }, + { + "name": "stopwords", + "type": "string[]", + "required": false, + "description": "List of stop words. If not set, no word would be removed from X." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "tensor(string)", + "description": "UTF-8 strings to normalize" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "tensor(string)", + "description": "UTF-8 Normalized strings" + } + ], + "min_output": 1, + "max_output": 1, + "examples": [ + { + "summary": "monday_casesensintive_lower", + "code": "input = np.array([\"monday\", \"tuesday\", \"wednesday\", \"thursday\"]).astype(object)\noutput = np.array([\"tuesday\", \"wednesday\", \"thursday\"]).astype(object)\nstopwords = [\"monday\"]\n\nnode = onnx.helper.make_node(\n \"StringNormalizer\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n case_change_action=\"LOWER\",\n is_case_sensitive=1,\n stopwords=stopwords,\n)\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_strnormalizer_export_monday_casesensintive_lower\",\n)" + }, + { + "summary": "monday_casesensintive_nochangecase", + "code": "input = np.array([\"monday\", \"tuesday\", \"wednesday\", \"thursday\"]).astype(object)\noutput = np.array([\"tuesday\", \"wednesday\", \"thursday\"]).astype(object)\nstopwords = [\"monday\"]\n\nnode = onnx.helper.make_node(\n \"StringNormalizer\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n is_case_sensitive=1,\n stopwords=stopwords,\n)\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_strnormalizer_export_monday_casesensintive_nochangecase\",\n)" + }, + { + "summary": "monday_casesensintive_upper", + "code": "input = np.array([\"monday\", \"tuesday\", \"wednesday\", \"thursday\"]).astype(object)\noutput = np.array([\"TUESDAY\", \"WEDNESDAY\", \"THURSDAY\"]).astype(object)\nstopwords = [\"monday\"]\n\nnode = onnx.helper.make_node(\n \"StringNormalizer\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n case_change_action=\"UPPER\",\n is_case_sensitive=1,\n stopwords=stopwords,\n)\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_strnormalizer_export_monday_casesensintive_upper\",\n)" + }, + { + "summary": "monday_empty_output", + "code": "input = np.array([\"monday\", \"monday\"]).astype(object)\noutput = np.array([\"\"]).astype(object)\nstopwords = [\"monday\"]\n\nnode = onnx.helper.make_node(\n \"StringNormalizer\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n case_change_action=\"UPPER\",\n is_case_sensitive=1,\n stopwords=stopwords,\n)\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_strnormalizer_export_monday_empty_output\",\n)" + }, + { + "summary": "monday_insensintive_upper_twodim", + "code": "input = (\n np.array(\n [\"Monday\", \"tuesday\", \"wednesday\", \"Monday\", \"tuesday\", \"wednesday\"]\n )\n .astype(object)\n .reshape([1, 6])\n)\n\n# It upper-cases c cedilla, accented E\n# and German umlaut but fails\n# with German eszett\noutput = (\n np.array([\"TUESDAY\", \"WEDNESDAY\", \"TUESDAY\", \"WEDNESDAY\"])\n .astype(object)\n .reshape([1, 4])\n)\nstopwords = [\"monday\"]\n\nnode = onnx.helper.make_node(\n \"StringNormalizer\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n case_change_action=\"UPPER\",\n stopwords=stopwords,\n)\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_strnormalizer_export_monday_insensintive_upper_twodim\",\n)" + }, + { + "summary": "nostopwords_nochangecase", + "code": "input = np.array([\"monday\", \"tuesday\"]).astype(object)\noutput = input\n\n# No stopwords. 
This is a NOOP\nnode = onnx.helper.make_node(\n \"StringNormalizer\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n is_case_sensitive=1,\n)\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_strnormalizer_nostopwords_nochangecase\",\n)" + } + ] + }, + { + "name": "StringSplit", + "module": "ai.onnx", + "version": 20, + "description": "StringSplit splits a string tensor's elements into substrings based on a delimiter attribute and a maxsplit attribute.\n\nThe first output of this operator is a tensor of strings representing the substrings from splitting each input string on the `delimiter` substring. This tensor has one additional rank compared to the input tensor in order to store the substrings for each input element (where the input tensor is not empty). Note that, in order to ensure the same number of elements are present in the final dimension, this tensor will pad empty strings as illustrated in the examples below. Consecutive delimiters are not grouped together and are deemed to delimit empty strings, except if the `delimiter` is unspecified or is the empty string (\"\"). In the case where the `delimiter` is unspecified or the empty string, consecutive whitespace characters are regarded as a single separator and leading or trailing whitespace is removed in the output.\n\nThe second output tensor represents the number of substrings generated. `maxsplit` can be used to limit the number of splits performed - after the `maxsplit`th split, if the string is not fully split, the trailing suffix of the input string after the final split point is also added. For elements where fewer splits are possible than specified in `maxsplit`, it has no effect.", + "attributes": [ + { + "name": "delimiter", + "type": "string", + "required": false, + "description": "Delimiter to split on. If left unset or set to the empty string (\"\"), the input is split on consecutive whitespace." + }, + { + "name": "maxsplit", + "type": "int64", + "required": false, + "description": "Maximum number of splits (from left to right). If left unset (or if the number of possible splits is less than maxsplit), it will make as many splits as possible. Note that the maximum possible number of substrings returned with `maxsplit` specified is `maxsplit+1` since the remaining suffix after the `maxsplit`th split is included in the output." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Tensor of strings to split." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "Tensor of substrings representing the outcome of splitting the strings in the input on the delimiter. Note that to ensure the same number of elements are present in the final dimension, this tensor will pad any necessary empty strings." + }, + { + "name": "Z", + "type": "T3", + "description": "The number of substrings generated for each input element." 
+ } + ], + "min_output": 2, + "max_output": 2, + "type_constraints": [ + { + "description": "The input must be a UTF-8 string tensor", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(string)" + ] + }, + { + "description": "Tensor of substrings.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(string)" + ] + }, + { + "description": "The number of substrings generated.", + "type_param_str": "T3", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "basic", + "code": "node = onnx.helper.make_node(\n \"StringSplit\",\n inputs=[\"x\"],\n outputs=[\"substrings\", \"length\"],\n delimiter=\".\",\n maxsplit=None,\n)\n\nx = np.array([\"abc.com\", \"def.net\"]).astype(object)\n\nsubstrings = np.array([[\"abc\", \"com\"], [\"def\", \"net\"]]).astype(object)\n\nlength = np.array([2, 2], dtype=np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[substrings, length],\n name=\"test_string_split_basic\",\n)" + }, + { + "summary": "consecutive_delimiters", + "code": "node = onnx.helper.make_node(\n \"StringSplit\",\n inputs=[\"x\"],\n outputs=[\"substrings\", \"length\"],\n delimiter=\"-\",\n maxsplit=None,\n)\n\nx = np.array([\"o-n-n--x-\", \"o-n----nx\"]).astype(object)\n\nsubstrings = np.array(\n [[\"o\", \"n\", \"n\", \"\", \"x\", \"\"], [\"o\", \"n\", \"\", \"\", \"\", \"nx\"]]\n).astype(object)\n\nlength = np.array([6, 6], dtype=np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[substrings, length],\n name=\"test_string_split_consecutive_delimiters\",\n)" + }, + { + "summary": "empty_string_delimiter", + "code": "for delimiter, test_name in (\n (\"\", \"test_string_split_empty_string_delimiter\"),\n (None, \"test_string_split_no_delimiter\"),\n):\n node = onnx.helper.make_node(\n \"StringSplit\",\n inputs=[\"x\"],\n outputs=[\"substrings\", \"length\"],\n delimiter=delimiter,\n maxsplit=None,\n )\n\n x = np.array(\n [\"hello world !\", \" hello world !\", \" hello world ! 
\"]\n ).astype(object)\n\n substrings = np.array(\n [\n [\"hello\", \"world\", \"!\"],\n [\"hello\", \"world\", \"!\"],\n [\"hello\", \"world\", \"!\"],\n ]\n ).astype(object)\n\n length = np.array([3, 3, 3], dtype=np.int64)\n\n expect(\n node,\n inputs=[x],\n outputs=[substrings, length],\n name=test_name,\n )" + }, + { + "summary": "empty_string_split", + "code": "node = onnx.helper.make_node(\n \"StringSplit\",\n inputs=[\"x\"],\n outputs=[\"substrings\", \"length\"],\n delimiter=None,\n maxsplit=None,\n)\n\nx = np.array([]).astype(object)\n\nsubstrings = np.array([]).astype(object).reshape(0, 0)\n\nlength = np.array([], dtype=np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[substrings, length],\n name=\"test_string_split_empty_tensor\",\n output_type_protos=[\n onnx.helper.make_tensor_type_proto(onnx.TensorProto.STRING, (0, None)),\n None,\n ],\n)" + }, + { + "summary": "maxsplit", + "code": "node = onnx.helper.make_node(\n \"StringSplit\",\n inputs=[\"x\"],\n outputs=[\"substrings\", \"length\"],\n maxsplit=2,\n)\n\nx = np.array(\n [[\"hello world\", \"def.net\"], [\"o n n x\", \"the quick brown fox\"]]\n).astype(object)\n\nsubstrings = np.array(\n [\n [[\"hello\", \"world\", \"\"], [\"def.net\", \"\", \"\"]],\n [[\"o\", \"n\", \"n x\"], [\"the\", \"quick\", \"brown fox\"]],\n ]\n).astype(object)\n\nlength = np.array([[2, 1], [3, 3]], np.int64)\n\nexpect(\n node,\n inputs=[x],\n outputs=[substrings, length],\n name=\"test_string_split_maxsplit\",\n)" + } + ] + }, + { + "name": "Sub", + "module": "ai.onnx", + "version": 1, + "description": "Performs element-wise binary subtraction (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions. See doc for details." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Pass 1 to enable broadcasting" + }, + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand, should share the type with the second operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same dimensions and type as A" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sub", + "code": "node = onnx.helper.make_node(\n \"Sub\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([3, 2, 1]).astype(np.float32)\nz = x - y # expected output [-2., 0., 2.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub\")\n\nx = np.random.randint(12, 24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(12, size=(3, 4, 5), dtype=np.uint8)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_uint8\")" + }, + { + "summary": "sub_broadcast", + "code": "node = onnx.helper.make_node(\n \"Sub\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_bcast\")" + } + ] + }, + { + "name": "Sub", + "module": "ai.onnx", + "version": 6, + "description": "Performs element-wise binary subtraction (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions. See doc for details." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Pass 1 to enable broadcasting" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand, should share the type with the second operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same dimensions and type as A" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sub", + "code": "node = onnx.helper.make_node(\n \"Sub\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([3, 2, 1]).astype(np.float32)\nz = x - y # expected output [-2., 0., 2.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub\")\n\nx = np.random.randint(12, 24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(12, size=(3, 4, 5), dtype=np.uint8)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_uint8\")" + }, + { + "summary": "sub_broadcast", + "code": "node = onnx.helper.make_node(\n \"Sub\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_bcast\")" + } + ] + }, + { + "name": "Sub", + "module": "ai.onnx", + "version": 7, + "description": "Performs element-wise binary subtraction (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sub", + "code": "node = onnx.helper.make_node(\n \"Sub\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([3, 2, 1]).astype(np.float32)\nz = x - y # expected output [-2., 0., 2.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub\")\n\nx = np.random.randint(12, 24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(12, size=(3, 4, 5), dtype=np.uint8)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_uint8\")" + }, + { + "summary": "sub_broadcast", + "code": "node = onnx.helper.make_node(\n \"Sub\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_bcast\")" + } + ] + }, + { + "name": "Sub", + "module": "ai.onnx", + "version": 13, + "description": "Performs element-wise binary subtraction (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "sub", + "code": "node = onnx.helper.make_node(\n \"Sub\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([3, 2, 1]).astype(np.float32)\nz = x - y # expected output [-2., 0., 2.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub\")\n\nx = np.random.randint(12, 24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(12, size=(3, 4, 5), dtype=np.uint8)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_uint8\")" + }, + { + "summary": "sub_broadcast", + "code": "node = onnx.helper.make_node(\n \"Sub\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_bcast\")" + } + ] + }, + { + "name": "Sub", + "module": "ai.onnx", + "version": 14, + "description": "Performs element-wise binary subtraction (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n\n(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First operand." + }, + { + "name": "B", + "type": "T", + "description": "Second operand." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T", + "description": "Result, has same element type as two inputs" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "sub", + "code": "node = onnx.helper.make_node(\n \"Sub\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([3, 2, 1]).astype(np.float32)\nz = x - y # expected output [-2., 0., 2.]\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub\")\n\nx = np.random.randint(12, 24, size=(3, 4, 5), dtype=np.uint8)\ny = np.random.randint(12, size=(3, 4, 5), dtype=np.uint8)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_uint8\")" + }, + { + "summary": "sub_broadcast", + "code": "node = onnx.helper.make_node(\n \"Sub\",\n inputs=[\"x\", \"y\"],\n outputs=[\"z\"],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_sub_bcast\")" + } + ] + }, + { + "name": "Sum", + "module": "ai.onnx", + "version": 1, + "description": "Element-wise sum of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." + } + ], + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for Sum." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "sum", + "type": "T", + "description": "Output tensor. Same dimension as inputs." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sum", + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([6, 9, 12]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_sum_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_sum_one_input\")\n\nresult = np.add(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_sum_two_inputs\"\n)" + } + ] + }, + { + "name": "Sum", + "module": "ai.onnx", + "version": 6, + "description": "Element-wise sum of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for Sum." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "sum", + "type": "T", + "description": "Output tensor. Same dimension as inputs." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sum", + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([6, 9, 12]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_sum_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_sum_one_input\")\n\nresult = np.add(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_sum_two_inputs\"\n)" + } + ] + }, + { + "name": "Sum", + "module": "ai.onnx", + "version": 8, + "description": "Element-wise sum of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for sum." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "sum", + "type": "T", + "description": "Output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "sum", + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([6, 9, 12]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_sum_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_sum_one_input\")\n\nresult = np.add(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_sum_two_inputs\"\n)" + } + ] + }, + { + "name": "Sum", + "module": "ai.onnx", + "version": 13, + "description": "Element-wise sum of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "data_0", + "type": "T", + "list": true, + "description": "List of tensors for sum." + } + ], + "min_input": 1, + "max_input": 2147483647, + "outputs": [ + { + "name": "sum", + "type": "T", + "description": "Output tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - ∞", + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "sum", + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([6, 9, 12]).astype(np.float32)\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\", \"data_1\", \"data_2\"],\n outputs=[\"result\"],\n)\nexpect(\n node,\n inputs=[data_0, data_1, data_2],\n outputs=[result],\n name=\"test_sum_example\",\n)\n\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\"],\n outputs=[\"result\"],\n)\nexpect(node, inputs=[data_0], outputs=[data_0], name=\"test_sum_one_input\")\n\nresult = np.add(data_0, data_1)\nnode = onnx.helper.make_node(\n \"Sum\",\n inputs=[\"data_0\", \"data_1\"],\n outputs=[\"result\"],\n)\nexpect(\n node, inputs=[data_0, data_1], outputs=[result], name=\"test_sum_two_inputs\"\n)" + } + ] + }, + { + "name": "Tan", + "module": "ai.onnx", + "version": 7, + "description": "Calculates the tangent of the given input tensor, element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The tangent of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "tan", + "code": "node = onnx.helper.make_node(\n \"Tan\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.tan(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_tan_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.tan(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_tan\")" + } + ] + }, + { + "name": "Tanh", + "module": "ai.onnx", + "version": 1, + "description": "Calculates the hyperbolic tangent of the given input tensor element-wise.\n", + "attributes": [ + { + "name": "consumed_inputs", + "type": "int64[]", + "required": false, + "description": "legacy optimization attribute." 
+ } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "1-D input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The hyperbolic tangent values of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "tanh", + "code": "node = onnx.helper.make_node(\n \"Tanh\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.tanh(x) # expected output [-0.76159418, 0., 0.76159418]\nexpect(node, inputs=[x], outputs=[y], name=\"test_tanh_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.tanh(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_tanh\")" + } + ], + "category": "Activation" + }, + { + "name": "Tanh", + "module": "ai.onnx", + "version": 6, + "description": "Calculates the hyperbolic tangent of the given input tensor element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The hyperbolic tangent values of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "tanh", + "code": "node = onnx.helper.make_node(\n \"Tanh\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.tanh(x) # expected output [-0.76159418, 0., 0.76159418]\nexpect(node, inputs=[x], outputs=[y], name=\"test_tanh_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.tanh(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_tanh\")" + } + ], + "category": "Activation" + }, + { + "name": "Tanh", + "module": "ai.onnx", + "version": 13, + "description": "Calculates the hyperbolic tangent of the given input tensor element-wise.\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "The hyperbolic tangent values of the input tensor computed element-wise" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)" + ] + } + ], + "examples": [ + { + "summary": "tanh", + "code": "node = onnx.helper.make_node(\n \"Tanh\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.tanh(x) # expected output [-0.76159418, 0., 0.76159418]\nexpect(node, inputs=[x], outputs=[y], name=\"test_tanh_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.tanh(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_tanh\")" + } + ], + "category": "Activation" + }, + { + "name": "TfIdfVectorizer", + "module": "ai.onnx", + "version": 9, + "description": "This transform extracts n-grams from the input 
sequence and saves them as a vector. Input can\nbe either a 1-D or 2-D tensor. For 1-D input, output is the n-gram representation of that input.\nFor 2-D input, the output is also a 2-D tensor whose i-th row is the n-gram representation of the i-th input row.\nMore specifically, if input shape is [C], the corresponding output shape would be [max(ngram_indexes) + 1].\nIf input shape is [N, C], this operator produces a [N, max(ngram_indexes) + 1]-tensor.\n\nIn contrast to standard n-gram extraction, here, the indexes of extracting an n-gram from the original\nsequence are not necessarily consecutive numbers. The discontinuity between indexes is controlled by the number of skips.\nIf the number of skips is 2, we should skip two tokens when scanning through the original sequence.\nLet's consider an example. Assume that input sequence is [94, 17, 36, 12, 28] and the number of skips is 2.\nThe associated 2-grams are [94, 12] and [17, 28] respectively indexed by [0, 3] and [1, 4].\nIf the number of skips becomes 0, the 2-grams generated are [94, 17], [17, 36], [36, 12], [12, 28]\nindexed by [0, 1], [1, 2], [2, 3], [3, 4], respectively.\n\nThe output vector (denoted by Y) stores the count of each n-gram;\nY[ngram_indexes[i]] indicates the number of times that the i-th n-gram is found. The attribute ngram_indexes is used to determine the mapping\nbetween index i and the corresponding n-gram's output coordinate. If pool_int64s is [94, 17, 17, 36], ngram_indexes is [1, 0],\nngram_counts=[0, 0], then Y[0] (first element in Y) and Y[1] (second element in Y) are the counts of [17, 36] and [94, 17],\nrespectively. An n-gram which cannot be found in pool_strings/pool_int64s is ignored and has no effect on the output.\nNote that all skips up to max_skip_count are considered when generating the n-grams.\n\nThe examples used above apply when mode is \"TF\". If mode is \"IDF\", all counts larger than 1 are truncated to 1 and\nthe i-th element in weights is used to scale (by multiplication) the count of the i-th n-gram in pool. If mode is \"TFIDF\",\nthis operator first computes the counts of all n-grams and then scales them by the associated values in the weights attribute.\n\nOnly one of pool_strings and pool_int64s can be set. If pool_int64s is set, the input should be an integer tensor.\nIf pool_strings is set, the input must be a string tensor.\n", + "attributes": [ + { + "name": "max_gram_length", + "type": "int64", + "required": true, + "description": "Maximum n-gram length. If this value is 3, 3-grams will be used to generate the output." + }, + { + "name": "max_skip_count", + "type": "int64", + "required": true, + "description": "Maximum number of items (integers/strings) to be skipped when constructing an n-gram from X. If max_skip_count=1, min_gram_length=2, max_gram_length=3, this operator may generate 2-grams with skip_count=0 and skip_count=1, and 3-grams with skip_count=0 and skip_count=1." + }, + { + "name": "min_gram_length", + "type": "int64", + "required": true, + "description": "Minimum n-gram length. If this value is 2 and max_gram_length is 3, output may contain counts of 2-grams and 3-grams." + }, + { + "name": "mode", + "type": "string", + "required": true, + "description": "The weighting criteria. It can be one of \"TF\" (term frequency), \"IDF\" (inverse document frequency), and \"TFIDF\" (the combination of TF and IDF)" + }, + { + "name": "ngram_counts", + "type": "int64[]", + "required": true, + "description": "The starting indexes of 1-grams, 2-grams, and so on in pool. 
It is useful when determining the boundary between two consecutive collections of n-grams. For example, if ngram_counts is [0, 17, 36], the first (zero-based) indexes of 1-grams, 2-grams, and 3-grams in pool are 0, 17, and 36, respectively. This format is essentially identical to the CSR (or CSC) sparse matrix format, and we choose to use it due to its popularity." + }, + { + "name": "ngram_indexes", + "type": "int64[]", + "required": true, + "description": "list of int64s (type: AttributeProto::INTS). This list is parallel to the specified 'pool_*' attribute. The i-th element in ngram_indexes indicates the coordinate of the i-th n-gram in the output tensor." + }, + { + "name": "pool_int64s", + "type": "int64[]", + "required": false, + "description": "List of int64 n-grams learned from the training set. Either this or the pool_strings attribute must be present, but not both. It's a 1-D tensor starting with the collections of all 1-grams and ending with the collections of n-grams. The i-th element in pool stores the n-gram that should be mapped to coordinate ngram_indexes[i] in the output vector." + }, + { + "name": "pool_strings", + "type": "string[]", + "required": false, + "description": "List of string n-grams learned from the training set. Either this or the pool_int64s attribute must be present, but not both. It's a 1-D tensor starting with the collections of all 1-grams and ending with the collections of n-grams. The i-th element in pool stores the n-gram that should be mapped to coordinate ngram_indexes[i] in the output vector." + }, + { + "name": "weights", + "type": "float32[]", + "required": false, + "description": "list of floats. This attribute stores the weight of each n-gram in pool. The i-th element in weights is the weight of the i-th n-gram in pool. Its length equals the size of ngram_indexes. By default, weights is an all-one tensor. This attribute is used when mode is \"IDF\" or \"TFIDF\" to scale the associated word counts." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input for n-gram extraction" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T1", + "description": "Ngram results" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Input is ether string UTF-8 or int32/int64", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int32)", + "tensor(int64)" + ] + }, + { + "description": "1-D tensor of floats", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)" + ] + } + ], + "examples": [ + { + "summary": "tf_batch_onlybigrams_skip0", + "code": "input = np.array([[1, 1, 3, 3, 3, 7], [8, 6, 7, 5, 6, 8]]).astype(np.int32)\noutput = np.array(\n [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0]]\n).astype(np.float32)\n\nngram_counts = np.array([0, 4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype( # unigrams\n np.int64\n) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode=\"TF\",\n min_gram_length=2,\n max_gram_length=2,\n max_skip_count=0,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s,\n)\nnode = helper.make_node_noweights()\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_tfidfvectorizer_tf_batch_onlybigrams_skip0\",\n)" + }, + { + "summary": "tf_batch_onlybigrams_skip5", + "code": "input = np.array([[1, 1, 3, 3, 3, 7], [8, 6, 7, 5, 6, 8]]).astype(np.int32)\noutput = np.array(\n [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]]\n).astype(np.float32)\n\nngram_counts = np.array([0, 4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype( # unigrams\n np.int64\n) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode=\"TF\",\n min_gram_length=2,\n max_gram_length=2,\n max_skip_count=5,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s,\n)\nnode = helper.make_node_noweights()\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_tfidfvectorizer_tf_batch_onlybigrams_skip5\",\n)" + }, + { + "summary": "tf_batch_uniandbigrams_skip5", + "code": "input = np.array([[1, 1, 3, 3, 3, 7], [8, 6, 7, 5, 6, 8]]).astype(np.int32)\noutput = np.array(\n [[0.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0]]\n).astype(np.float32)\n\nngram_counts = np.array([0, 4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype( # unigrams\n np.int64\n) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode=\"TF\",\n min_gram_length=1,\n max_gram_length=2,\n max_skip_count=5,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s,\n)\nnode = helper.make_node_noweights()\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_tfidfvectorizer_tf_batch_uniandbigrams_skip5\",\n)" + }, + { + "summary": "tf_only_bigrams_skip0", + "code": "input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)\noutput = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]).astype(np.float32)\n\nngram_counts = np.array([0, 4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype( # unigrams\n np.int64\n) # 
bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode=\"TF\",\n min_gram_length=2,\n max_gram_length=2,\n max_skip_count=0,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s,\n)\nnode = helper.make_node_noweights()\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_tfidfvectorizer_tf_only_bigrams_skip0\",\n)" + }, + { + "summary": "tf_onlybigrams_levelempty", + "code": "input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)\noutput = np.array([1.0, 1.0, 1.0]).astype(np.float32)\n\nngram_counts = np.array([0, 0]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2]).astype(np.int64)\npool_int64s = np.array([5, 6, 7, 8, 6, 7]).astype( # unigrams none\n np.int64\n) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode=\"TF\",\n min_gram_length=2,\n max_gram_length=2,\n max_skip_count=0,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s,\n)\nnode = helper.make_node_noweights()\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_tfidfvectorizer_tf_onlybigrams_levelempty\",\n)" + }, + { + "summary": "tf_onlybigrams_skip5", + "code": "input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)\noutput = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 3.0, 1.0]).astype(np.float32)\n\nngram_counts = np.array([0, 4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype( # unigrams\n np.int64\n) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode=\"TF\",\n min_gram_length=2,\n max_gram_length=2,\n max_skip_count=5,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s,\n)\nnode = helper.make_node_noweights()\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_tfidfvectorizer_tf_onlybigrams_skip5\",\n)" + }, + { + "summary": "tf_uniandbigrams_skip5", + "code": "input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)\noutput = np.array([0.0, 3.0, 1.0, 0.0, 1.0, 3.0, 1.0]).astype(np.float32)\n\nngram_counts = np.array([0, 4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype( # unigrams\n np.int64\n) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode=\"TF\",\n min_gram_length=1,\n max_gram_length=2,\n max_skip_count=5,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s,\n)\nnode = helper.make_node_noweights()\nexpect(\n node,\n inputs=[input],\n outputs=[output],\n name=\"test_tfidfvectorizer_tf_uniandbigrams_skip5\",\n)" + },
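+ { + "summary": "skipgram_extraction_sketch", + "code": "# Illustrative sketch, not an official ONNX backend test: it reproduces the\n# worked skip-gram example from the description (sequence [94, 17, 36, 12, 28]\n# with 2 skips) in plain Python. The helper name skip_bigrams is hypothetical.\ndef skip_bigrams(seq, skips):\n    step = skips + 1\n    return [(seq[i], seq[i + step]) for i in range(len(seq) - step)]\n\nsequence = [94, 17, 36, 12, 28]\nassert skip_bigrams(sequence, 2) == [(94, 12), (17, 28)]\nassert skip_bigrams(sequence, 0) == [(94, 17), (17, 36), (36, 12), (12, 28)]" + } + ] + }, + { + "name": "ThresholdedRelu", + "module": "ai.onnx", + "version": 10, + "description": "ThresholdedRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = x for x > alpha, y = 0 otherwise,\nis applied to the tensor elementwise.\n", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "required": false, + "default": 1.0, + "description": "Threshold value" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "Output tensor" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)",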
"tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "default", + "code": "default_alpha = 1.0\nnode = onnx.helper.make_node(\"ThresholdedRelu\", inputs=[\"x\"], outputs=[\"y\"])\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, default_alpha, np.inf)\ny[y == default_alpha] = 0\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_thresholdedrelu_default\")" + }, + { + "summary": "thresholdedrelu", + "code": "alpha = 2.0\nnode = onnx.helper.make_node(\n \"ThresholdedRelu\", inputs=[\"x\"], outputs=[\"y\"], alpha=alpha\n)\n\nx = np.array([-1.5, 0.0, 1.2, 2.0, 2.2]).astype(np.float32)\ny = np.clip(x, alpha, np.inf) # expected output [0., 0., 0., 0., 2.2]\ny[y == alpha] = 0\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_thresholdedrelu_example\")\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, alpha, np.inf)\ny[y == alpha] = 0\n\nexpect(node, inputs=[x], outputs=[y], name=\"test_thresholdedrelu\")" + } + ], + "category": "Activation" + }, + { + "name": "Tile", + "module": "ai.onnx", + "version": 1, + "description": "Repeat the elements of a tensor along an axis.", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor of any shape." + }, + { + "name": "tiles", + "type": "T", + "description": "Number of repeated copies to make of the input tensor." + }, + { + "name": "axis", + "type": "T", + "description": "Axis along which to repeat." + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor of same shape and type as input." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain tiles and axis's type to int64 tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "tile", + "code": "node = onnx.helper.make_node(\"Tile\", inputs=[\"x\", \"y\"], outputs=[\"z\"])\n\nx = np.random.rand(2, 3, 4, 5).astype(np.float32)\n\nrepeats = np.random.randint(low=1, high=10, size=(np.ndim(x),)).astype(np.int64)\n\nz = np.tile(x, repeats)\n\nexpect(node, inputs=[x, repeats], outputs=[z], name=\"test_tile\")" + }, + { + "summary": "tile_precomputed", + "code": "node = onnx.helper.make_node(\"Tile\", inputs=[\"x\", \"y\"], outputs=[\"z\"])\n\nx = np.array([[0, 1], [2, 3]], dtype=np.float32)\n\nrepeats = np.array([2, 2], dtype=np.int64)\n\nz = np.array(\n [[0, 1, 0, 1], [2, 3, 2, 3], [0, 1, 0, 1], [2, 3, 2, 3]], dtype=np.float32\n)\n\nexpect(node, inputs=[x, repeats], outputs=[z], name=\"test_tile_precomputed\")" + } + ], + "category": "Shape" + }, + { + "name": "Tile", + "module": "ai.onnx", + "version": 6, + "description": "Constructs a tensor by tiling a given tensor.\nThis is the same as function `tile` in Numpy, but no broadcast.\nFor example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]]\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor of any shape." + }, + { + "name": "repeats", + "type": "T1", + "description": "1D int64 tensor of the same length as input's dimension number, includes numbers of repeated copies along input's dimensions." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor of the same dimensions and type as tensor input. output_dim[i] = input_dim[i] * repeats[i]" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain repeat's type to int64 tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "tile", + "code": "node = onnx.helper.make_node(\"Tile\", inputs=[\"x\", \"y\"], outputs=[\"z\"])\n\nx = np.random.rand(2, 3, 4, 5).astype(np.float32)\n\nrepeats = np.random.randint(low=1, high=10, size=(np.ndim(x),)).astype(np.int64)\n\nz = np.tile(x, repeats)\n\nexpect(node, inputs=[x, repeats], outputs=[z], name=\"test_tile\")" + }, + { + "summary": "tile_precomputed", + "code": "node = onnx.helper.make_node(\"Tile\", inputs=[\"x\", \"y\"], outputs=[\"z\"])\n\nx = np.array([[0, 1], [2, 3]], dtype=np.float32)\n\nrepeats = np.array([2, 2], dtype=np.int64)\n\nz = np.array(\n [[0, 1, 0, 1], [2, 3, 2, 3], [0, 1, 0, 1], [2, 3, 2, 3]], dtype=np.float32\n)\n\nexpect(node, inputs=[x, repeats], outputs=[z], name=\"test_tile_precomputed\")" + } + ], + "category": "Shape" + }, + { + "name": "Tile", + "module": "ai.onnx", + "version": 13, + "description": "Constructs a tensor by tiling a given tensor.\nThis is the same as function `tile` in Numpy, but no broadcast.\nFor example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]]\n", + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor of any shape." + }, + { + "name": "repeats", + "type": "T1", + "description": "1D int64 tensor of the same length as input's dimension number, includes numbers of repeated copies along input's dimensions." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor of the same dimensions and type as tensor input. 
output_dim[i] = input_dim[i] * repeats[i]" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + }, + { + "description": "Constrain repeats' type to int64 tensors.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "tile", + "code": "node = onnx.helper.make_node(\"Tile\", inputs=[\"x\", \"y\"], outputs=[\"z\"])\n\nx = np.random.rand(2, 3, 4, 5).astype(np.float32)\n\nrepeats = np.random.randint(low=1, high=10, size=(np.ndim(x),)).astype(np.int64)\n\nz = np.tile(x, repeats)\n\nexpect(node, inputs=[x, repeats], outputs=[z], name=\"test_tile\")" + }, + { + "summary": "tile_precomputed", + "code": "node = onnx.helper.make_node(\"Tile\", inputs=[\"x\", \"y\"], outputs=[\"z\"])\n\nx = np.array([[0, 1], [2, 3]], dtype=np.float32)\n\nrepeats = np.array([2, 2], dtype=np.int64)\n\nz = np.array(\n [[0, 1, 0, 1], [2, 3, 2, 3], [0, 1, 0, 1], [2, 3, 2, 3]], dtype=np.float32\n)\n\nexpect(node, inputs=[x, repeats], outputs=[z], name=\"test_tile_precomputed\")" + } + ], + "category": "Shape" + }, + { + "name": "TopK", + "module": "ai.onnx", + "version": 1, + "description": "Retrieve the top-K elements along a specified axis. Given an input tensor of\nshape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs:\n -Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n]\n which contains the values of the top k elements along the specified axis\n -Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which\n contains the indices of the top k elements (original indices from the input\n tensor).\nGiven two equivalent values, this operator uses the indices along the axis as\n a tiebreaker. That is, the element with the lower index will appear first.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": -1, + "description": "Dimension on which to do the sort." + }, + { + "name": "k", + "type": "int64", + "required": true, + "description": "Number of top elements to retrieve" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Tensor of shape [a_1, a_2, ..., a_n, r]" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Values", + "type": "T", + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] containing top K values from the input tensor" + }, + { + "name": "Indices", + "type": "I", + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] containing the corresponding input tensor indices for the top K values." 
+ } + ], + "min_output": 2, + "max_output": 2, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain index tensor to int64", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "top_k", + "code": "axis = 1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n \"TopK\", inputs=[\"x\", \"k\"], outputs=[\"values\", \"indices\"], axis=axis\n)\nX = np.array(\n [\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n ],\n dtype=np.float32,\n)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n# [[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 9.]]\n# print(indices_ref)\n# [[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(\n node, inputs=[X, K], outputs=[values_ref, indices_ref], name=\"test_top_k\"\n)" + }, + { + "summary": "top_k_negative_axis", + "code": "axis = -1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n \"TopK\", inputs=[\"x\", \"k\"], outputs=[\"values\", \"indices\"], axis=axis\n)\nX = np.array(\n [\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n ],\n dtype=np.float32,\n)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n# [[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 9.]]\n# print(indices_ref)\n# [[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(\n node,\n inputs=[X, K],\n outputs=[values_ref, indices_ref],\n name=\"test_top_k_negative_axis\",\n)" + }, + { + "summary": "top_k_smallest", + "code": "axis = 1\nlargest = 0\nsorted = 1 # noqa: A001\nk = 3\n\nnode = onnx.helper.make_node(\n \"TopK\",\n inputs=[\"x\", \"k\"],\n outputs=[\"values\", \"indices\"],\n axis=axis,\n largest=largest,\n sorted=sorted,\n)\n\nX = np.array(\n [\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [11, 10, 9, 8],\n ],\n dtype=np.float32,\n)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n# [[ 0. 1. 2.]\n# [ 4. 5. 6.]\n# [ 8. 9. 10.]]\n# print(indices_ref)\n# [[0 1 2]\n# [0 1 2]\n# [3 2 1]]\n\nexpect(\n node,\n inputs=[X, K],\n outputs=[values_ref, indices_ref],\n name=\"test_top_k_smallest\",\n)" + } + ] + }, + { + "name": "TopK", + "module": "ai.onnx", + "version": 10, + "description": "Retrieve the top-K elements along a specified axis. Given an input tensor of\nshape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs:\n -Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n]\n which contains the values of the top k elements along the specified axis\n -Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which\n contains the indices of the top k elements (original indices from the input\n tensor).\n\nGiven two equivalent values, this operator uses the indices along the axis as\n a tiebreaker. That is, the element with the lower index will appear first.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": -1, + "description": "Dimension on which to do the sort." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Tensor of shape [a_1, a_2, ..., a_n, r]" + }, + { + "name": "K", + "type": "tensor(int64)", + "description": "A 1-D tensor containing a single positive value corresponding to the number of top elements to retrieve" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Values", + "type": "T", + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] containing top K values from the input tensor" + }, + { + "name": "Indices", + "type": "I", + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] containing the corresponding input tensor indices for the top K values." + } + ], + "min_output": 2, + "max_output": 2, + "type_constraints": [ + { + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain index tensor to int64", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "top_k", + "code": "axis = 1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n \"TopK\", inputs=[\"x\", \"k\"], outputs=[\"values\", \"indices\"], axis=axis\n)\nX = np.array(\n [\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n ],\n dtype=np.float32,\n)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n# [[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 9.]]\n# print(indices_ref)\n# [[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(\n node, inputs=[X, K], outputs=[values_ref, indices_ref], name=\"test_top_k\"\n)" + }, + { + "summary": "top_k_negative_axis", + "code": "axis = -1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n \"TopK\", inputs=[\"x\", \"k\"], outputs=[\"values\", \"indices\"], axis=axis\n)\nX = np.array(\n [\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n ],\n dtype=np.float32,\n)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n# [[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 9.]]\n# print(indices_ref)\n# [[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(\n node,\n inputs=[X, K],\n outputs=[values_ref, indices_ref],\n name=\"test_top_k_negative_axis\",\n)" + }, + { + "summary": "top_k_smallest", + "code": "axis = 1\nlargest = 0\nsorted = 1 # noqa: A001\nk = 3\n\nnode = onnx.helper.make_node(\n \"TopK\",\n inputs=[\"x\", \"k\"],\n outputs=[\"values\", \"indices\"],\n axis=axis,\n largest=largest,\n sorted=sorted,\n)\n\nX = np.array(\n [\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [11, 10, 9, 8],\n ],\n dtype=np.float32,\n)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n# [[ 0. 1. 2.]\n# [ 4. 5. 6.]\n# [ 8. 9. 10.]]\n# print(indices_ref)\n# [[0 1 2]\n# [0 1 2]\n# [3 2 1]]\n\nexpect(\n node,\n inputs=[X, K],\n outputs=[values_ref, indices_ref],\n name=\"test_top_k_smallest\",\n)" + } + ] + }, + { + "name": "TopK", + "module": "ai.onnx", + "version": 11, + "description": "Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of\nshape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs:\n\n* Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... 
a_n]\n which contains the values of the top k elements along the specified axis\n* Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which\n contains the indices of the top k elements (original indices from the input\n tensor).\n\n* If \"largest\" is 1 (the default value) then the k largest elements are returned.\n* If \"sorted\" is 1 (the default value) then the resulting k elements will be sorted.\n* If \"sorted\" is 0, order of returned 'Values' and 'Indices' are undefined.\n\nGiven two equivalent values, this operator uses the indices along the axis as\na tiebreaker. That is, the element with the lower index will appear first.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "default": -1, + "description": "Dimension on which to do the sort. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + }, + { + "name": "largest", + "type": "int64", + "required": false, + "default": 1, + "description": "Whether to return the top-K largest or smallest elements." + }, + { + "name": "sorted", + "type": "int64", + "required": false, + "default": 1, + "description": "Whether to return the elements in sorted order." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Tensor of shape [a_1, a_2, ..., a_n, r]" + }, + { + "name": "K", + "type": "tensor(int64)", + "description": "A 1-D tensor containing a single positive value corresponding to the number of top elements to retrieve" + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Values", + "type": "T", + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] containing top K values from the input tensor" + }, + { + "name": "Indices", + "type": "I", + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] containing the corresponding input tensor indices for the top K values." + } + ], + "min_output": 2, + "max_output": 2, + "type_constraints": [ + { + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + }, + { + "description": "Constrain index tensor to int64", + "type_param_str": "I", + "allowed_type_strs": [ + "tensor(int64)" + ] + } + ], + "examples": [ + { + "summary": "top_k", + "code": "axis = 1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n \"TopK\", inputs=[\"x\", \"k\"], outputs=[\"values\", \"indices\"], axis=axis\n)\nX = np.array(\n [\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n ],\n dtype=np.float32,\n)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n# [[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 
9.]]\n# print(indices_ref)\n# [[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(\n node, inputs=[X, K], outputs=[values_ref, indices_ref], name=\"test_top_k\"\n)" + }, + { + "summary": "top_k_negative_axis", + "code": "axis = -1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n \"TopK\", inputs=[\"x\", \"k\"], outputs=[\"values\", \"indices\"], axis=axis\n)\nX = np.array(\n [\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n ],\n dtype=np.float32,\n)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n# [[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 9.]]\n# print(indices_ref)\n# [[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(\n node,\n inputs=[X, K],\n outputs=[values_ref, indices_ref],\n name=\"test_top_k_negative_axis\",\n)" + }, + { + "summary": "top_k_smallest", + "code": "axis = 1\nlargest = 0\nsorted = 1 # noqa: A001\nk = 3\n\nnode = onnx.helper.make_node(\n \"TopK\",\n inputs=[\"x\", \"k\"],\n outputs=[\"values\", \"indices\"],\n axis=axis,\n largest=largest,\n sorted=sorted,\n)\n\nX = np.array(\n [\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [11, 10, 9, 8],\n ],\n dtype=np.float32,\n)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n# [[ 0. 1. 2.]\n# [ 4. 5. 6.]\n# [ 8. 9. 10.]]\n# print(indices_ref)\n# [[0 1 2]\n# [0 1 2]\n# [3 2 1]]\n\nexpect(\n node,\n inputs=[X, K],\n outputs=[values_ref, indices_ref],\n name=\"test_top_k_smallest\",\n)" + } + ] + }, + { + "name": "Transpose", + "module": "ai.onnx", + "version": 1, + "description": "Transpose the input tensor similar to numpy.transpose. For example, when\nperm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape\nwill be (2, 1, 3).\n", + "attributes": [ + { + "name": "perm", + "type": "int64[]", + "required": false, + "description": "A list of integers. By default, reverse the dimensions, otherwise permute the axes according to the values given." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "transposed", + "type": "T", + "description": "Transposed output." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "all_permutations", + "code": "shape = (2, 3, 4)\ndata = np.random.random_sample(shape).astype(np.float32)\npermutations = list(itertools.permutations(np.arange(len(shape))))\n\nfor i, permutation in enumerate(permutations):\n node = onnx.helper.make_node(\n \"Transpose\",\n inputs=[\"data\"],\n outputs=[\"transposed\"],\n perm=permutation,\n )\n transposed = np.transpose(data, permutation)\n expect(\n node,\n inputs=[data],\n outputs=[transposed],\n name=f\"test_transpose_all_permutations_{i}\",\n )" + }, + { + "summary": "default", + "code": "shape = (2, 3, 4)\ndata = np.random.random_sample(shape).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Transpose\", inputs=[\"data\"], outputs=[\"transposed\"]\n)\n\ntransposed = np.transpose(data)\nexpect(node, inputs=[data], outputs=[transposed], name=\"test_transpose_default\")" + } + ], + "category": "Transform" + }, + { + "name": "Transpose", + "module": "ai.onnx", + "version": 13, + "description": "Transpose the input tensor similar to numpy.transpose. For example, when\nperm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape\nwill be (2, 1, 3).\n", + "attributes": [ + { + "name": "perm", + "type": "int64[]", + "required": false, + "description": "A list of integers. By default, reverse the dimensions, otherwise permute the axes according to the values given." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "transposed", + "type": "T", + "description": "Transposed output." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "all_permutations", + "code": "shape = (2, 3, 4)\ndata = np.random.random_sample(shape).astype(np.float32)\npermutations = list(itertools.permutations(np.arange(len(shape))))\n\nfor i, permutation in enumerate(permutations):\n node = onnx.helper.make_node(\n \"Transpose\",\n inputs=[\"data\"],\n outputs=[\"transposed\"],\n perm=permutation,\n )\n transposed = np.transpose(data, permutation)\n expect(\n node,\n inputs=[data],\n outputs=[transposed],\n name=f\"test_transpose_all_permutations_{i}\",\n )" + }, + { + "summary": "default", + "code": "shape = (2, 3, 4)\ndata = np.random.random_sample(shape).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Transpose\", inputs=[\"data\"], outputs=[\"transposed\"]\n)\n\ntransposed = np.transpose(data)\nexpect(node, inputs=[data], outputs=[transposed], name=\"test_transpose_default\")" + } + ], + "category": "Transform" + }, + { + "name": "Transpose", + "module": "ai.onnx", + "version": 21, + "description": "Transpose the input tensor similar to numpy.transpose. For example, when\nperm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape\nwill be (2, 1, 3).\n", + "attributes": [ + { + "name": "perm", + "type": "int64[]", + "required": false, + "description": "A list of integers. By default, reverse the dimensions, otherwise permute the axes according to the values given. Its length must be equal to the rank of the input." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "transposed", + "type": "T", + "description": "Transposed output." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + } + ], + "examples": [ + { + "summary": "all_permutations", + "code": "shape = (2, 3, 4)\ndata = np.random.random_sample(shape).astype(np.float32)\npermutations = list(itertools.permutations(np.arange(len(shape))))\n\nfor i, permutation in enumerate(permutations):\n node = onnx.helper.make_node(\n \"Transpose\",\n inputs=[\"data\"],\n outputs=[\"transposed\"],\n perm=permutation,\n )\n transposed = np.transpose(data, permutation)\n expect(\n node,\n inputs=[data],\n outputs=[transposed],\n name=f\"test_transpose_all_permutations_{i}\",\n )" + }, + { + "summary": "default", + "code": "shape = (2, 3, 4)\ndata = np.random.random_sample(shape).astype(np.float32)\n\nnode = onnx.helper.make_node(\n \"Transpose\", inputs=[\"data\"], outputs=[\"transposed\"]\n)\n\ntransposed = np.transpose(data)\nexpect(node, inputs=[data], outputs=[transposed], name=\"test_transpose_default\")" + } + ], + "category": "Transform" + }, + { + "name": "TreeEnsembleClassifier", + "module": "ai.onnx.ml", + "version": 1, + "description": "Tree Ensemble classifier. Returns the top class for each of N inputs.
\n The attributes named 'nodes_X' form a sequence of tuples, associated by\n index into the sequences, which must all be of equal length. These tuples\n define the nodes.
\n Similarly, all fields prefixed with 'class_' are tuples of votes at the leaves.\n A leaf may have multiple votes, where each vote is weighted by\n the associated class_weights index.
\n One and only one of classlabels_strings or classlabels_int64s\n will be defined. The class_ids are indices into this list.\n", + "attributes": [ + { + "name": "base_values", + "type": "float32[]", + "required": false, + "description": "Base values for classification, added to final class score; the size must be the same as the classes or can be left unassigned (assumed 0)" + }, + { + "name": "class_ids", + "type": "int64[]", + "required": false, + "description": "The index of the class list that each weight is for." + }, + { + "name": "class_nodeids", + "type": "int64[]", + "required": false, + "description": "node id that this weight is for." + }, + { + "name": "class_treeids", + "type": "int64[]", + "required": false, + "description": "The id of the tree that this node is in." + }, + { + "name": "class_weights", + "type": "float32[]", + "required": false, + "description": "The weight for the class in class_id." + }, + { + "name": "classlabels_int64s", + "type": "int64[]", + "required": false, + "description": "Class labels if using integer labels.
One and only one of the 'classlabels_*' attributes must be defined." + }, + { + "name": "classlabels_strings", + "type": "string[]", + "required": false, + "description": "Class labels if using string labels.
One and only one of the 'classlabels_*' attributes must be defined." + }, + { + "name": "nodes_falsenodeids", + "type": "int64[]", + "required": false, + "description": "Child node if expression is false." + }, + { + "name": "nodes_featureids", + "type": "int64[]", + "required": false, + "description": "Feature id for each node." + }, + { + "name": "nodes_hitrates", + "type": "float32[]", + "required": false, + "description": "Popularity of each node, used for performance and may be omitted." + }, + { + "name": "nodes_missing_value_tracks_true", + "type": "int64[]", + "required": false, + "description": "For each node, define what to do in the presence of a missing value: if a value is missing (NaN), use the 'true' or 'false' branch based on the value in this array.
This attribute may be left undefined, and the default value is false (0) for all nodes." + }, + { + "name": "nodes_modes", + "type": "string[]", + "required": false, + "description": "The node kind, that is, the comparison to make at the node. There is no comparison to make at a leaf node.
One of 'BRANCH_LEQ', 'BRANCH_LT', 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', 'LEAF'" + }, + { + "name": "nodes_nodeids", + "type": "int64[]", + "required": false, + "description": "Node id for each node. Ids may restart at zero for each tree, but they are not required to." + }, + { + "name": "nodes_treeids", + "type": "int64[]", + "required": false, + "description": "Tree id for each node." + }, + { + "name": "nodes_truenodeids", + "type": "int64[]", + "required": false, + "description": "Child node if expression is true." + }, + { + "name": "nodes_values", + "type": "float32[]", + "required": false, + "description": "Thresholds to do the splitting on for each node." + }, + { + "name": "post_transform", + "type": "string", + "required": false, + "default": "NONE", + "description": "Indicates the transform to apply to the score.
One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.'" + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input of shape [N,F]" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "N, Top class for each point" + }, + { + "name": "Z", + "type": "tensor(float)", + "description": "The class score for each class, for each point, a tensor of shape [N,E]." + } + ], + "min_output": 2, + "max_output": 2, + "type_constraints": [ + { + "description": "The input type must be a tensor of a numeric type.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + }, + { + "description": "The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ] + } + ] + }, + { + "name": "TreeEnsembleClassifier", + "module": "ai.onnx.ml", + "version": 3, + "description": "Tree Ensemble classifier. Returns the top class for each of N inputs.
\n The attributes named 'nodes_X' form a sequence of tuples, associated by\n index into the sequences, which must all be of equal length. These tuples\n define the nodes.
\n Similarly, all fields prefixed with 'class_' are tuples of votes at the leaves.\n A leaf may have multiple votes, where each vote is weighted by\n the associated class_weights index.
\n One and only one of classlabels_strings or classlabels_int64s\n will be defined. The class_ids are indices into this list.\n All fields ending with _as_tensor can be used instead of the\n same parameter without the suffix if the element type is double and not float.\n", + "attributes": [ + { + "name": "base_values", + "type": "float32[]", + "required": false, + "description": "Base values for classification, added to final class score; the size must be the same as the classes or can be left unassigned (assumed 0)" + }, + { + "name": "base_values_as_tensor", + "type": "tensor", + "required": false, + "description": "Base values for classification, added to final class score; the size must be the same as the classes or can be left unassigned (assumed 0)" + }, + { + "name": "class_ids", + "type": "int64[]", + "required": false, + "description": "The index of the class list that each weight is for." + }, + { + "name": "class_nodeids", + "type": "int64[]", + "required": false, + "description": "node id that this weight is for." + }, + { + "name": "class_treeids", + "type": "int64[]", + "required": false, + "description": "The id of the tree that this node is in." + }, + { + "name": "class_weights", + "type": "float32[]", + "required": false, + "description": "The weight for the class in class_id." + }, + { + "name": "class_weights_as_tensor", + "type": "tensor", + "required": false, + "description": "The weight for the class in class_id." + }, + { + "name": "classlabels_int64s", + "type": "int64[]", + "required": false, + "description": "Class labels if using integer labels.
One and only one of the 'classlabels_*' attributes must be defined." + }, + { + "name": "classlabels_strings", + "type": "string[]", + "required": false, + "description": "Class labels if using string labels.
One and only one of the 'classlabels_*' attributes must be defined." + }, + { + "name": "nodes_falsenodeids", + "type": "int64[]", + "required": false, + "description": "Child node if expression is false." + }, + { + "name": "nodes_featureids", + "type": "int64[]", + "required": false, + "description": "Feature id for each node." + }, + { + "name": "nodes_hitrates", + "type": "float32[]", + "required": false, + "description": "Popularity of each node, used for performance and may be omitted." + }, + { + "name": "nodes_hitrates_as_tensor", + "type": "tensor", + "required": false, + "description": "Popularity of each node, used for performance and may be omitted." + }, + { + "name": "nodes_missing_value_tracks_true", + "type": "int64[]", + "required": false, + "description": "For each node, define what to do in the presence of a missing value: if a value is missing (NaN), use the 'true' or 'false' branch based on the value in this array.
This attribute may be left undefined, and the default value is false (0) for all nodes." + }, + { + "name": "nodes_modes", + "type": "string[]", + "required": false, + "description": "The node kind, that is, the comparison to make at the node. There is no comparison to make at a leaf node.
One of 'BRANCH_LEQ', 'BRANCH_LT', 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', 'LEAF'" + }, + { + "name": "nodes_nodeids", + "type": "int64[]", + "required": false, + "description": "Node id for each node. Ids may restart at zero for each tree, but they are not required to." + }, + { + "name": "nodes_treeids", + "type": "int64[]", + "required": false, + "description": "Tree id for each node." + }, + { + "name": "nodes_truenodeids", + "type": "int64[]", + "required": false, + "description": "Child node if expression is true." + }, + { + "name": "nodes_values", + "type": "float32[]", + "required": false, + "description": "Thresholds to do the splitting on for each node." + }, + { + "name": "nodes_values_as_tensor", + "type": "tensor", + "required": false, + "description": "Thresholds to do the splitting on for each node." + }, + { + "name": "post_transform", + "type": "string", + "required": false, + "default": "NONE", + "description": "Indicates the transform to apply to the score.
One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.'" + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input of shape [N,F]" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "N, Top class for each point" + }, + { + "name": "Z", + "type": "tensor(float)", + "description": "The class score for each class, for each point, a tensor of shape [N,E]." + } + ], + "min_output": 2, + "max_output": 2, + "type_constraints": [ + { + "description": "The input type must be a tensor of a numeric type.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + }, + { + "description": "The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ] + } + ] + }, + { + "name": "TreeEnsembleRegressor", + "module": "ai.onnx.ml", + "version": 1, + "description": "Tree Ensemble regressor. Returns the regressed values for each input in N.
\n    All attributes prefixed with nodes_ are parallel fields describing the tree nodes;\n    they are assumed to be the same length, and an index i decodes one node\n    across these fields. Each node id can appear only once\n    for each tree id.
\n All fields prefixed with target_ are tuples of votes at the leaves.
\n A leaf may have multiple votes, where each vote is weighted by\n the associated target_weights index.
\n All trees must have their node ids start at 0 and increment by 1.
\n Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF\n", + "attributes": [ + { + "name": "aggregate_function", + "type": "string", + "required": false, + "default": "SUM", + "description": "Defines how to aggregate leaf values within a target.
One of 'AVERAGE,' 'SUM,' 'MIN,' 'MAX.'" + }, + { + "name": "base_values", + "type": "float32[]", + "required": false, + "description": "Base values for regression, added to final prediction after applying aggregate_function; the size must be the same as the classes or can be left unassigned (assumed 0)" + }, + { + "name": "n_targets", + "type": "int64", + "required": false, + "description": "The total number of targets." + }, + { + "name": "nodes_falsenodeids", + "type": "int64[]", + "required": false, + "description": "Child node if expression is false" + }, + { + "name": "nodes_featureids", + "type": "int64[]", + "required": false, + "description": "Feature id for each node." + }, + { + "name": "nodes_hitrates", + "type": "float32[]", + "required": false, + "description": "Popularity of each node, used for performance and may be omitted." + }, + { + "name": "nodes_missing_value_tracks_true", + "type": "int64[]", + "required": false, + "description": "For each node, define what to do in the presence of a NaN: use the 'true' (if the attribute value is 1) or 'false' (if the attribute value is 0) branch based on the value in this array.
This attribute may be left undefined and the default value is false (0) for all nodes." + }, + { + "name": "nodes_modes", + "type": "string[]", + "required": false, + "description": "The node kind, that is, the comparison to make at the node. There is no comparison to make at a leaf node.
One of 'BRANCH_LEQ', 'BRANCH_LT', 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', 'LEAF'" + }, + { + "name": "nodes_nodeids", + "type": "int64[]", + "required": false, + "description": "Node id for each node. Node ids must restart at zero for each tree and increase sequentially." + }, + { + "name": "nodes_treeids", + "type": "int64[]", + "required": false, + "description": "Tree id for each node." + }, + { + "name": "nodes_truenodeids", + "type": "int64[]", + "required": false, + "description": "Child node if expression is true" + }, + { + "name": "nodes_values", + "type": "float32[]", + "required": false, + "description": "Thresholds to do the splitting on for each node." + }, + { + "name": "post_transform", + "type": "string", + "required": false, + "default": "NONE", + "description": "Indicates the transform to apply to the score.
One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT'" + }, + { + "name": "target_ids", + "type": "int64[]", + "required": false, + "description": "The index of the target that each weight is for" + }, + { + "name": "target_nodeids", + "type": "int64[]", + "required": false, + "description": "The node id of each weight" + }, + { + "name": "target_treeids", + "type": "int64[]", + "required": false, + "description": "The id of the tree that each node is in." + }, + { + "name": "target_weights", + "type": "float32[]", + "required": false, + "description": "The weight for each target" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input of shape [N,F]" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "tensor(float)", + "description": "N classes" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input type must be a tensor of a numeric type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + } + ] + }, + { + "name": "TreeEnsembleRegressor", + "module": "ai.onnx.ml", + "version": 3, + "description": "Tree Ensemble regressor. Returns the regressed values for each input in N.
\n    All attributes prefixed with nodes_ are parallel fields describing the tree nodes;\n    they are assumed to be the same length, and an index i decodes one node\n    across these fields. Each node id can appear only once\n    for each tree id.
\n All fields prefixed with target_ are tuples of votes at the leaves.
\n A leaf may have multiple votes, where each vote is weighted by\n the associated target_weights index.
\n All fields ending with _as_tensor can be used instead of the\n same parameter without the suffix if the element type is double and not float.\n All trees must have their node ids start at 0 and increment by 1.
\n Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF\n", + "attributes": [ + { + "name": "aggregate_function", + "type": "string", + "required": false, + "default": "SUM", + "description": "Defines how to aggregate leaf values within a target.
One of 'AVERAGE,' 'SUM,' 'MIN,' 'MAX.'" + }, + { + "name": "base_values", + "type": "float32[]", + "required": false, + "description": "Base values for regression, added to final prediction after applying aggregate_function; the size must be the same as the classes or can be left unassigned (assumed 0)" + }, + { + "name": "base_values_as_tensor", + "type": "tensor", + "required": false, + "description": "Base values for regression, added to final prediction after applying aggregate_function; the size must be the same as the classes or can be left unassigned (assumed 0)" + }, + { + "name": "n_targets", + "type": "int64", + "required": false, + "description": "The total number of targets." + }, + { + "name": "nodes_falsenodeids", + "type": "int64[]", + "required": false, + "description": "Child node if expression is false" + }, + { + "name": "nodes_featureids", + "type": "int64[]", + "required": false, + "description": "Feature id for each node." + }, + { + "name": "nodes_hitrates", + "type": "float32[]", + "required": false, + "description": "Popularity of each node, used for performance and may be omitted." + }, + { + "name": "nodes_hitrates_as_tensor", + "type": "tensor", + "required": false, + "description": "Popularity of each node, used for performance and may be omitted." + }, + { + "name": "nodes_missing_value_tracks_true", + "type": "int64[]", + "required": false, + "description": "For each node, define what to do in the presence of a NaN: use the 'true' (if the attribute value is 1) or 'false' (if the attribute value is 0) branch based on the value in this array.
This attribute may be left undefined and the default value is false (0) for all nodes." + }, + { + "name": "nodes_modes", + "type": "string[]", + "required": false, + "description": "The node kind, that is, the comparison to make at the node. There is no comparison to make at a leaf node.
One of 'BRANCH_LEQ', 'BRANCH_LT', 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', 'LEAF'" + }, + { + "name": "nodes_nodeids", + "type": "int64[]", + "required": false, + "description": "Node id for each node. Node ids must restart at zero for each tree and increase sequentially." + }, + { + "name": "nodes_treeids", + "type": "int64[]", + "required": false, + "description": "Tree id for each node." + }, + { + "name": "nodes_truenodeids", + "type": "int64[]", + "required": false, + "description": "Child node if expression is true" + }, + { + "name": "nodes_values", + "type": "float32[]", + "required": false, + "description": "Thresholds to do the splitting on for each node." + }, + { + "name": "nodes_values_as_tensor", + "type": "tensor", + "required": false, + "description": "Thresholds to do the splitting on for each node." + }, + { + "name": "post_transform", + "type": "string", + "required": false, + "default": "NONE", + "description": "Indicates the transform to apply to the score.
One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT'" + }, + { + "name": "target_ids", + "type": "int64[]", + "required": false, + "description": "The index of the target that each weight is for" + }, + { + "name": "target_nodeids", + "type": "int64[]", + "required": false, + "description": "The node id of each weight" + }, + { + "name": "target_treeids", + "type": "int64[]", + "required": false, + "description": "The id of the tree that each node is in." + }, + { + "name": "target_weights", + "type": "float32[]", + "required": false, + "description": "The weight for each target" + }, + { + "name": "target_weights_as_tensor", + "type": "tensor", + "required": false, + "description": "The weight for each target" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "Input of shape [N,F]" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "tensor(float)", + "description": "N classes" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input type must be a tensor of a numeric type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ] + } + ] + }, + { + "name": "Trilu", + "module": "ai.onnx", + "version": 14, + "description": "Given a 2-D matrix or batches of 2-D matrices, returns the upper or lower triangular part of the tensor(s).\nThe attribute \"upper\" determines whether the upper or lower part is retained. If set to true,\nthe upper triangular matrix is retained. Lower triangular matrix is retained otherwise.\nDefault value for the \"upper\" attribute is true.\nTrilu takes one input tensor of shape [*, N, M], where * is zero or more batch dimensions. The upper triangular part consists\nof the elements on and above the given diagonal (k). The lower triangular part consists of elements on and below the diagonal.\nAll other elements in the matrix are set to zero.\nIf k = 0, the triangular part on and above/below the main diagonal is retained.\nIf upper is set to true, a positive k retains the upper triangular matrix excluding the main diagonal and (k-1) diagonals above it.\nA negative k value retains the main diagonal and |k| diagonals below it.\nIf upper is set to false, a positive k retains the lower triangular matrix including the main diagonal and k diagonals above it.\nA negative k value excludes the main diagonal and (|k|-1) diagonals below it.\n", + "attributes": [ + { + "name": "upper", + "type": "int64", + "required": false, + "default": 1, + "description": "Boolean. Indicates whether upper or lower part of matrix is retained. Default is true." + } + ], + "inputs": [ + { + "name": "input", + "type": "T", + "description": "Input tensor of rank 2 or higher." + }, + { + "name": "k", + "type": "tensor(int64)", + "option": "optional", + "description": "A 0-D tensor containing a single value corresponding to the number of diagonals above or below the main diagonal to exclude or include. Default value is 0 if it's not specified." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Output tensor of the same type and shape as the input tensor."
+ } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "tril", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n upper=0,\n)\n\nx = np.random.randint(10, size=(4, 5)).astype(np.int64)\n# X:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 1, 8, 7],\n# [4, 3, 4, 2, 4]]\n# expect result:\n# [[4, 0, 0, 0, 0],\n# [1, 2, 0, 0, 0],\n# [9, 4, 1, 0, 0],\n# [4, 3, 4, 2, 0]]\ny = tril_reference_implementation(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_tril\")" + }, + { + "summary": "tril_neg", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n upper=0,\n)\n\nx = np.random.randint(10, size=(4, 5)).astype(np.int64)\nk = np.array(-1).astype(np.int64)\n# X:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 1, 8, 7],\n# [4, 3, 4, 2, 4]]\n# expect result:\n# [[0, 0, 0, 0, 0],\n# [1, 0, 0, 0, 0],\n# [9, 4, 0, 0, 0],\n# [4, 3, 4, 0, 0]]\ny = tril_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_tril_neg\")" + }, + { + "summary": "tril_one_row", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n upper=0,\n)\n\nx = np.random.randint(10, size=(3, 1, 5)).astype(np.int64)\n# X:\n# [[[6, 2, 4, 1, 6]],\n#\n# [[8, 3, 8, 7, 0]],\n#\n# [[2, 2, 9, 5, 9]]]\n# expect result:\n# [[[6, 0, 0, 0, 0]],\n#\n# [[8, 0, 0, 0, 0]],\n#\n# [[2, 0, 0, 0, 0]]]\ny = tril_reference_implementation(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_tril_one_row_neg\")" + }, + { + "summary": "tril_out_neg", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n upper=0,\n)\n\nx = np.random.randint(10, size=(4, 5)).astype(np.int64)\nk = np.array(-7).astype(np.int64)\n# X:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 1, 8, 7],\n# [4, 3, 4, 2, 4]]\n# expect result:\n# [[0, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0]]\ny = tril_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_tril_out_neg\")" + }, + { + "summary": "tril_out_pos", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n upper=0,\n)\nx = np.random.randint(10, size=(4, 5)).astype(np.int64)\nk = np.array(6).astype(np.int64)\n# X:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 1, 8, 7],\n# [4, 3, 4, 2, 4]]\n# expect result:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 1, 8, 7],\n# [4, 3, 4, 2, 4]]\ny = tril_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_tril_out_pos\")" + }, + { + "summary": "tril_pos", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n upper=0,\n)\n\nx = np.random.randint(10, size=(4, 5)).astype(np.int64)\nk = np.array(2).astype(np.int64)\n# X:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 1, 8, 7],\n# [4, 3, 4, 2, 4]]\n# expect result:\n# [[4, 7, 3, 0, 0],\n# [1, 2, 8, 6, 0],\n# [9, 
4, 1, 8, 7],\n# [4, 3, 4, 2, 4]]\ny = tril_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_tril_pos\")" + }, + { + "summary": "tril_square", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n upper=0,\n)\n\nx = np.random.randint(10, size=(2, 3, 3)).astype(np.int64)\n# X:\n# [[[0, 4, 3],\n# [2, 0, 9],\n# [8, 2, 5]],\n#\n# [[2, 7, 2],\n# [2, 6, 0],\n# [2, 6, 5]]]\n# expect result:\n# [[[0, 0, 0],\n# [2, 0, 0],\n# [8, 2, 5]],\n#\n# [[2, 0, 0],\n# [2, 6, 0],\n# [2, 6, 5]]]\ny = tril_reference_implementation(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_tril_square\")" + }, + { + "summary": "tril_square_neg", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n upper=0,\n)\n\nx = np.random.randint(10, size=(2, 3, 3)).astype(np.int64)\nk = np.array(-1).astype(np.int64)\n# X:\n# [[[0, 4, 3],\n# [2, 0, 9],\n# [8, 2, 5]],\n#\n# [[2, 7, 2],\n# [2, 6, 0],\n# [2, 6, 5]]]\n# expect result:\n# [[[0, 0, 0],\n# [2, 0, 0],\n# [8, 2, 0]],\n#\n# [[0, 0, 0],\n# [2, 0, 0],\n# [2, 6, 0]]]\ny = tril_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_tril_square_neg\")" + }, + { + "summary": "tril_zero", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n upper=0,\n)\n\nx = np.random.randint(10, size=(3, 0, 5)).astype(np.int64)\nk = np.array(6).astype(np.int64)\n# X:\n# []\n# expect result:\n# []\ny = tril_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_tril_zero\")" + }, + { + "summary": "triu", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randint(10, size=(4, 5)).astype(np.int64)\n# X:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 0, 8, 7],\n# [4, 3, 4, 2, 4]]\n# expect result:\n# [[4, 7, 3, 7, 9],\n# [0, 2, 8, 6, 9],\n# [0, 0, 0, 8, 7],\n# [0, 0, 0, 2, 4]]\ny = triu_reference_implementation(x)\nexpect(node, inputs=[x], outputs=[y], name=\"test_triu\")" + }, + { + "summary": "triu_neg", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randint(10, size=(4, 5)).astype(np.int64)\nk = np.array(-1).astype(np.int64)\n# X:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 0, 8, 7],\n# [4, 3, 4, 2, 4]]\n# expect result:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [0, 4, 0, 8, 7],\n# [0, 0, 4, 2, 4]]\ny = triu_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_triu_neg\")" + }, + { + "summary": "triu_one_row", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randint(10, size=(3, 1, 5)).astype(np.int64)\nk = np.array(1).astype(np.int64)\n# X:\n# [[[1, 4, 9, 7, 1]],\n#\n# [[9, 2, 8, 8, 4]],\n#\n# [[3, 9, 7, 4, 2]]]\n# expect result:\n# [[[0, 4, 9, 7, 1]],\n#\n# [[0, 2, 8, 8, 4]],\n#\n# [[0, 9, 7, 4, 2]]]\ny = triu_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_triu_one_row\")" + }, + { + "summary": "triu_out_neg_out", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randint(10, size=(4, 5)).astype(np.int64)\nk = np.array(-7).astype(np.int64)\n# X:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 0, 8, 7],\n# [4, 3, 4, 2, 4]]\n# expect result:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 0, 8, 
7],\n# [4, 3, 4, 2, 4]]\ny = triu_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_triu_out_neg_out\")" + }, + { + "summary": "triu_out_pos", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randint(10, size=(4, 5)).astype(np.int64)\nk = np.array(6).astype(np.int64)\n# X:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 0, 8, 7],\n# [4, 3, 4, 2, 4]]\n# expect result:\n# [[0, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0]]\ny = triu_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_triu_out_pos\")" + }, + { + "summary": "triu_pos", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randint(10, size=(4, 5)).astype(np.int64)\nk = np.array(2).astype(np.int64)\n# X:\n# [[4, 7, 3, 7, 9],\n# [1, 2, 8, 6, 9],\n# [9, 4, 0, 8, 7],\n# [4, 3, 4, 2, 4]]\n# expect result:\n# [[0, 0, 3, 7, 9],\n# [0, 0, 0, 6, 9],\n# [0, 0, 0, 0, 7],\n# [0, 0, 0, 0, 0]]\ny = triu_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_triu_pos\")" + }, + { + "summary": "triu_square", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randint(10, size=(2, 3, 3)).astype(np.int64)\ny = triu_reference_implementation(x)\n# X:\n# [[[4, 6, 9],\n# [7, 5, 4],\n# [8, 1, 2]],\n#\n# [[1, 4, 9],\n# [9, 6, 3],\n# [8, 9, 8]]]\n# expect result:\n# [[[4, 6, 9],\n# [0, 5, 4],\n# [0, 0, 2]],\n#\n# [[1, 4, 9],\n# [0, 6, 3],\n# [0, 0, 8]]]\nexpect(node, inputs=[x], outputs=[y], name=\"test_triu_square\")" + }, + { + "summary": "triu_square_neg", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randint(10, size=(2, 3, 3)).astype(np.int64)\nk = np.array(-1).astype(np.int64)\n# X:\n# [[[4, 6, 9],\n# [7, 5, 4],\n# [8, 1, 2]],\n#\n# [[1, 4, 9],\n# [9, 6, 3],\n# [8, 9, 8]]]\n# expect result:\n# [[[4, 6, 9],\n# [7, 5, 4],\n# [0, 1, 2]],\n#\n# [[1, 4, 9],\n# [9, 6, 3],\n# [0, 9, 8]]]\ny = triu_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_triu_square_neg\")" + }, + { + "summary": "triu_zero", + "code": "node = onnx.helper.make_node(\n \"Trilu\",\n inputs=[\"x\", \"k\"],\n outputs=[\"y\"],\n)\n\nx = np.random.randint(10, size=(0, 5)).astype(np.int64)\nk = np.array(6).astype(np.int64)\n# X:\n# []\n# expect result:\n# []\ny = triu_reference_implementation(x, int(k))\nexpect(node, inputs=[x, k], outputs=[y], name=\"test_triu_zero\")" + } + ] + }, + { + "name": "Unique", + "module": "ai.onnx", + "version": 11, + "description": "Find the unique elements of a tensor. 
When an optional attribute 'axis' is provided, unique subtensors sliced along the 'axis' are returned.\nOtherwise the input tensor is flattened and unique values of the flattened tensor are returned.\n\nThis operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs.\nThe first output tensor 'Y' contains all unique values or subtensors of the input.\nThe second optional output tensor 'indices' contains indices of 'Y' elements' first occurrence in 'X'.\nThe third optional output tensor 'inverse_indices' contains, for elements of 'X', its corresponding indices in 'Y'.\nThe fourth optional output tensor 'counts' contains the count of each element of 'Y' in the input.\n\nOutputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input.\n\nhttps://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html\n\nExample 1:\n```\ninput_X = [2, 1, 1, 3, 4, 3]\nattribute_sorted = 0\nattribute_axis = None\noutput_Y = [2, 1, 3, 4]\noutput_indices = [0, 1, 3, 4]\noutput_inverse_indices = [0, 1, 1, 2, 3, 2]\noutput_counts = [1, 2, 2, 1]\n```\n\nExample 2:\n```\ninput_X = [[1, 3], [2, 3]]\nattribute_sorted = 1\nattribute_axis = None\noutput_Y = [1, 2, 3]\noutput_indices = [0, 2, 1]\noutput_inverse_indices = [0, 2, 1, 2]\noutput_counts = [1, 1, 2]\n```\n\nExample 3:\n```\ninput_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]\nattribute_sorted = 1\nattribute_axis = 0\noutput_Y = [[1, 0, 0], [2, 3, 4]]\noutput_indices = [0, 2]\noutput_inverse_indices = [0, 0, 1]\noutput_counts = [2, 1]\n```\n\nExample 4:\n```\ninput_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],\n [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]\nattribute_sorted = 1\nattribute_axis = 1\n```\n\nintermediate data are presented below for better understanding:\nthere are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):\n```\nA: [[1, 1], [1, 1]],\n [[0, 1], [0, 1]],\n [[2, 1], [2, 1]],\n [[0, 1], [0, 1]].\n```\n\nthere are 3 unique subtensors:\n```\n[[1, 1], [1, 1]],\n[[0, 1], [0, 1]],\n[[2, 1], [2, 1]].\n```\n\nsorted unique subtensors:\n```\nB: [[0, 1], [0, 1]],\n [[1, 1], [1, 1]],\n [[2, 1], [2, 1]].\n```\n\noutput_Y is constructed from B:\n```\n[[[0. 1.], [1. 1.], [2. 1.]],\n [[0. 1.], [1. 1.], [2. 1.]]]\n```\n\noutput_indices is to map from B to A:\n```\n[1, 0, 2]\n```\n\noutput_inverse_indices is to map from A to B:\n```\n[1, 0, 2, 0]\n```\n\noutput_counts:\n```\n[2, 1, 1]\n```\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "(Optional) The dimension to apply unique. If not specified, the unique elements of the flattened input are returned. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input)." + }, + { + "name": "sorted", + "type": "int64", + "required": false, + "default": 1, + "description": "(Optional) Whether to sort the unique elements in ascending order before returning as output. Must be one of 0 or 1 (default)." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "An N-D input tensor that is to be processed."
+ } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "A tensor of the same type as 'X' containing all the unique values or subtensors sliced along a provided 'axis' in 'X', either sorted or maintained in the same order they occur in input 'X'" + }, + { + "name": "indices", + "type": "tensor(int64)", + "option": "optional", + "description": "A 1-D INT64 tensor containing indices of 'Y' elements' first occurrence in 'X'. When 'axis' is provided, it contains indices to subtensors in input 'X' on the 'axis'. When 'axis' is not provided, it contains indices to values in the flattened input tensor. " + }, + { + "name": "inverse_indices", + "type": "tensor(int64)", + "option": "optional", + "description": "A 1-D INT64 tensor containing, for elements of 'X', its corresponding indices in 'Y'. When 'axis' is provided, it contains indices to subtensors in output 'Y' on the 'axis'. When 'axis' is not provided, it contains indices to values in output 'Y'. " + }, + { + "name": "counts", + "type": "tensor(int64)", + "option": "optional", + "description": "A 1-D INT64 tensor containing the count of each element of 'Y' in input 'X'" + } + ], + "min_output": 1, + "max_output": 4, + "outputs_range": "1 - 4", + "type_constraints": [ + { + "description": "Input can be of any tensor type.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "not_sorted_without_axis", + "code": "node_not_sorted = onnx.helper.make_node(\n \"Unique\",\n inputs=[\"X\"],\n outputs=[\"Y\", \"indices\", \"inverse_indices\", \"counts\"],\n sorted=0,\n)\n# numpy unique does not retain original order (it sorts the output unique values)\n# https://github.com/numpy/numpy/issues/8621\n# we need to recover unsorted output and indices\nx = np.array([2.0, 1.0, 1.0, 3.0, 4.0, 3.0], dtype=np.float32)\ny, indices, inverse_indices, counts = np.unique(x, True, True, True)\n\n# prepare index mapping from sorted to unsorted\nargsorted_indices = np.argsort(indices)\ninverse_indices_map = dict(\n zip(argsorted_indices, np.arange(len(argsorted_indices)))\n)\n\nindices = indices[argsorted_indices]\ny = np.take(x, indices, axis=0)\ninverse_indices = np.asarray(\n [inverse_indices_map[i] for i in inverse_indices], dtype=np.int64\n)\ncounts = counts[argsorted_indices]\nindices, inverse_indices, counts = specify_int64(\n indices, inverse_indices, counts\n)\n# print(y)\n# [2.0, 1.0, 3.0, 4.0]\n# print(indices)\n# [0 1 3 4]\n# print(inverse_indices)\n# [0, 1, 1, 2, 3, 2]\n# print(counts)\n# [1, 2, 2, 1]\n\nexpect(\n node_not_sorted,\n inputs=[x],\n outputs=[y, indices, inverse_indices, counts],\n name=\"test_unique_not_sorted_without_axis\",\n)" + }, + { + "summary": "sorted_with_axis", + "code": "node_sorted = onnx.helper.make_node(\n \"Unique\",\n inputs=[\"X\"],\n outputs=[\"Y\", \"indices\", \"inverse_indices\", \"counts\"],\n sorted=1,\n axis=0,\n)\n\nx = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]], dtype=np.float32)\ny, indices, inverse_indices, counts = np.unique(x, True, True, True, axis=0)\nindices, inverse_indices, counts = specify_int64(\n indices, inverse_indices, counts\n)\n# print(y)\n# [[1. 0. 0.]\n# [2. 3. 
4.]]\n# print(indices)\n# [0 2]\n# print(inverse_indices)\n# [0 0 1]\n# print(counts)\n# [2 1]\n\nexpect(\n node_sorted,\n inputs=[x],\n outputs=[y, indices, inverse_indices, counts],\n name=\"test_unique_sorted_with_axis\",\n)" + }, + { + "summary": "sorted_with_axis_3d", + "code": "node_sorted = onnx.helper.make_node(\n \"Unique\",\n inputs=[\"X\"],\n outputs=[\"Y\", \"indices\", \"inverse_indices\", \"counts\"],\n sorted=1,\n axis=1,\n)\n\nx = np.array(\n [\n [[1.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 1.0]],\n [[1.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 1.0]],\n ],\n dtype=np.float32,\n)\ny, indices, inverse_indices, counts = np.unique(x, True, True, True, axis=1)\nindices, inverse_indices, counts = specify_int64(\n indices, inverse_indices, counts\n)\n# print(y)\n# [[[0. 1.]\n# [1. 1.]\n# [2. 1.]]\n# [[0. 1.]\n# [1. 1.]\n# [2. 1.]]]\n# print(indices)\n# [1 0 2]\n# print(inverse_indices)\n# [1 0 2 0]\n# print(counts)\n# [2 1 1]\nexpect(\n node_sorted,\n inputs=[x],\n outputs=[y, indices, inverse_indices, counts],\n name=\"test_unique_sorted_with_axis_3d\",\n)" + }, + { + "summary": "sorted_with_negative_axis", + "code": "node_sorted = onnx.helper.make_node(\n \"Unique\",\n inputs=[\"X\"],\n outputs=[\"Y\", \"indices\", \"inverse_indices\", \"counts\"],\n sorted=1,\n axis=-1,\n)\n\nx = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 3]], dtype=np.float32)\ny, indices, inverse_indices, counts = np.unique(x, True, True, True, axis=-1)\nindices, inverse_indices, counts = specify_int64(\n indices, inverse_indices, counts\n)\n# print(y)\n# [[0. 1.]\n# [0. 1.]\n# [3. 2.]]\n# print(indices)\n# [1 0]\n# print(inverse_indices)\n# [1 0 0]\n# print(counts)\n# [2 1]\n\nexpect(\n node_sorted,\n inputs=[x],\n outputs=[y, indices, inverse_indices, counts],\n name=\"test_unique_sorted_with_negative_axis\",\n)" + }, + { + "summary": "sorted_without_axis", + "code": "node_sorted = onnx.helper.make_node(\n \"Unique\",\n inputs=[\"X\"],\n outputs=[\"Y\", \"indices\", \"inverse_indices\", \"counts\"],\n)\n\nx = np.array([2.0, 1.0, 1.0, 3.0, 4.0, 3.0], dtype=np.float32)\ny, indices, inverse_indices, counts = np.unique(x, True, True, True)\nindices, inverse_indices, counts = specify_int64(\n indices, inverse_indices, counts\n)\nexpect(\n node_sorted,\n inputs=[x],\n outputs=[y, indices, inverse_indices, counts],\n name=\"test_unique_sorted_without_axis\",\n)" + } + ] + }, + { + "name": "Unsqueeze", + "module": "ai.onnx", + "version": 1, + "description": "Insert single-dimensional entries to the shape of a tensor.\nTakes one required argument `axes`, a list of dimensions that will be inserted.\nDimension indices in `axes` are as seen in the output tensor. For example:\n Given a tensor such that tensor with shape [3, 4, 5], then\n Unsqueeze(tensor, axes=[0, 4]) has shape [1, 3, 4, 5, 1]\n", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": true, + "description": "List of non-negative integers, indicate the dimensions to be inserted" + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Original tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "expanded", + "type": "T", + "description": "Reshaped tensor with same data as input." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "unsqueeze_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\naxes = np.array([-2]).astype(np.int64)\ny = np.expand_dims(x, axis=-2)\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_negative_axes\")" + }, + { + "summary": "unsqueeze_one_axis", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nfor i in range(x.ndim):\n axes = np.array([i]).astype(np.int64)\n node = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n )\n y = np.expand_dims(x, axis=i)\n\n expect(\n node,\n inputs=[x, axes],\n outputs=[y],\n name=\"test_unsqueeze_axis_\" + str(i),\n )" + }, + { + "summary": "unsqueeze_three_axes", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([2, 4, 5]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_three_axes\")" + }, + { + "summary": "unsqueeze_two_axes", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([1, 4]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=1)\ny = np.expand_dims(y, axis=4)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_two_axes\")" + }, + { + "summary": "unsqueeze_unsorted_axes", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([5, 4, 2]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_unsorted_axes\")" + } + ], + "category": "Transform" + }, + { + "name": "Unsqueeze", + "module": "ai.onnx", + "version": 11, + "description": "Insert single-dimensional entries to the shape of an input tensor (`data`).\nTakes one required argument `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`).\n\nFor example:\n Given an input tensor (`data`) of shape [3, 4, 5], then\n Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1].\n\nThe attribute `axes` should not contain any duplicate entries. 
It is an error if it contains duplicates.\nThe rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`.\nEach value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1].\nThe order of values in `axes` does not matter and can come in any order.\n\n", + "attributes": [ + { + "name": "axes", + "type": "int64[]", + "required": true, + "description": "List of integers indicating the dimensions to be inserted. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(expanded)." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Original tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "expanded", + "type": "T", + "description": "Reshaped tensor with same data as input." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "unsqueeze_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\naxes = np.array([-2]).astype(np.int64)\ny = np.expand_dims(x, axis=-2)\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_negative_axes\")" + }, + { + "summary": "unsqueeze_one_axis", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nfor i in range(x.ndim):\n axes = np.array([i]).astype(np.int64)\n node = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n )\n y = np.expand_dims(x, axis=i)\n\n expect(\n node,\n inputs=[x, axes],\n outputs=[y],\n name=\"test_unsqueeze_axis_\" + str(i),\n )" + }, + { + "summary": "unsqueeze_three_axes", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([2, 4, 5]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_three_axes\")" + }, + { + "summary": "unsqueeze_two_axes", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([1, 4]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=1)\ny = np.expand_dims(y, axis=4)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_two_axes\")" + }, + { + "summary": "unsqueeze_unsorted_axes", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([5, 4, 2]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_unsorted_axes\")" + } + ], + "category": "Transform" + }, + { + "name": "Unsqueeze", + "module": "ai.onnx", + "version": 13, + "description": "Insert 
single-dimensional entries to the shape of an input tensor (`data`).\nTakes one required input `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`).\n\nFor example, given an input tensor (`data`) of shape [3, 4, 5], then\nUnsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1].\n\nThe input `axes` should not contain any duplicate entries. It is an error if it contains duplicates.\nThe rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`.\nEach value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1].\nThe order of values in `axes` does not matter and can come in any order.\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Original tensor" + }, + { + "name": "axes", + "type": "tensor(int64)", + "description": "List of integers indicating the dimensions to be inserted. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(expanded)." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "expanded", + "type": "T", + "description": "Reshaped tensor with same data as input." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "unsqueeze_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\naxes = np.array([-2]).astype(np.int64)\ny = np.expand_dims(x, axis=-2)\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_negative_axes\")" + }, + { + "summary": "unsqueeze_one_axis", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nfor i in range(x.ndim):\n axes = np.array([i]).astype(np.int64)\n node = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n )\n y = np.expand_dims(x, axis=i)\n\n expect(\n node,\n inputs=[x, axes],\n outputs=[y],\n name=\"test_unsqueeze_axis_\" + str(i),\n )" + }, + { + "summary": "unsqueeze_three_axes", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([2, 4, 5]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_three_axes\")" + }, + { + "summary": "unsqueeze_two_axes", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([1, 4]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=1)\ny = np.expand_dims(y, axis=4)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_two_axes\")" + }, + { + "summary": "unsqueeze_unsorted_axes", + "code": "x = 
np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([5, 4, 2]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_unsorted_axes\")" + } + ], + "category": "Transform" + }, + { + "name": "Unsqueeze", + "module": "ai.onnx", + "version": 21, + "description": "Insert single-dimensional entries to the shape of an input tensor (`data`).\nTakes one required input `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`).\n\nFor example, given an input tensor (`data`) of shape [3, 4, 5], then\nUnsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1].\n\nThe input `axes` should not contain any duplicate entries. It is an error if it contains duplicates.\nThe rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`.\nEach value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1].\nThe order of values in `axes` does not matter and can come in any order.\n", + "inputs": [ + { + "name": "data", + "type": "T", + "description": "Original tensor" + }, + { + "name": "axes", + "type": "tensor(int64)", + "description": "List of integers indicating the dimensions to be inserted. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(expanded)." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "expanded", + "type": "T", + "description": "Reshaped tensor with same data as input." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types up to IRv10.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)", + "tensor(float8e4m3fn)", + "tensor(float8e4m3fnuz)", + "tensor(float8e5m2)", + "tensor(float8e5m2fnuz)", + "tensor(uint4)", + "tensor(int4)" + ] + } + ], + "examples": [ + { + "summary": "unsqueeze_negative_axes", + "code": "node = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\naxes = np.array([-2]).astype(np.int64)\ny = np.expand_dims(x, axis=-2)\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_negative_axes\")" + }, + { + "summary": "unsqueeze_one_axis", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nfor i in range(x.ndim):\n axes = np.array([i]).astype(np.int64)\n node = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n )\n y = np.expand_dims(x, axis=i)\n\n expect(\n node,\n inputs=[x, axes],\n outputs=[y],\n name=\"test_unsqueeze_axis_\" + str(i),\n )" + }, + { + "summary": "unsqueeze_three_axes", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([2, 4, 5]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_three_axes\")" + }, + { + "summary": "unsqueeze_two_axes", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([1, 4]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=1)\ny = np.expand_dims(y, axis=4)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_two_axes\")" + }, + { + "summary": "unsqueeze_unsorted_axes", + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\naxes = np.array([5, 4, 2]).astype(np.int64)\n\nnode = onnx.helper.make_node(\n \"Unsqueeze\",\n inputs=[\"x\", \"axes\"],\n outputs=[\"y\"],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x, axes], outputs=[y], name=\"test_unsqueeze_unsorted_axes\")" + } + ], + "category": "Transform" + }, + { + "name": "Upsample", + "module": "ai.onnx", + "version": 1, + "status": "experimental", + "description": "Upsample the input tensor.\nThe width and height of the output tensor are:\n output_width = floor(input_width * width_scale),\n output_height = floor(input_height * height_scale).\nExample:\n Given `data` tensor, width_scale, height_scale, mode,\n Upsample the input 4-D tensor in nearest mode:\n data = [[[\n [1, 2],\n [3, 4]\n ]]]\n width_scale = 2\n height_scale = 2\n mode = \"nearest\"\n output = [[[\n [1, 1, 2, 2],\n [1, 1, 2, 2],\n [3, 3, 4, 4],\n [3, 3, 4, 4]\n ]]]\n", + "attributes": [ + { + "name": "height_scale", + "type": "float32", + "required": true, + "description": "The scale along height dimension. It takes value greater than or equal to 1." 
+ }, + { + "name": "mode", + "type": "string", + "required": false, + "default": "nearest", + "description": "Two interpolation modes: nearest(default), bilinear" + }, + { + "name": "width_scale", + "type": "float32", + "required": true, + "description": "The scale along width dimension. It takes value greater than or equal to 1." + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "4-D tensor, [N,C,H,W]" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "4-D tensor after resizing, [N,C,H,W]" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain output types to bool, int32, int64, float16, float, double tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ] + } + ], + "examples": [ + { + "summary": "nearest", + "code": "node = onnx.helper.make_node(\n \"Upsample\",\n inputs=[\"X\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\noutput = np.array(\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_upsample_nearest\",\n opset_imports=[helper.make_opsetid(\"\", 9)],\n)" + } + ], + "category": "Data" + }, + { + "name": "Upsample", + "module": "ai.onnx", + "version": 7, + "description": "Upsample the input tensor.\nEach dimension value of the output tensor is:\n output_dimension = floor(input_dimension * scale).\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "nearest", + "description": "Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc)" + }, + { + "name": "scales", + "type": "float32[]", + "required": true, + "description": "The scale array along each dimension. It takes value greater than or equal to 1. The number of elements of 'scales' should be the same as the rank of input 'X'." 
+ } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "N-D tensor" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "N-D tensor after resizing" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "nearest", + "code": "node = onnx.helper.make_node(\n \"Upsample\",\n inputs=[\"X\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\noutput = np.array(\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_upsample_nearest\",\n opset_imports=[helper.make_opsetid(\"\", 9)],\n)" + } + ], + "category": "Data" + }, + { + "name": "Upsample", + "module": "ai.onnx", + "version": 9, + "description": "Upsample the input tensor.\nEach dimension value of the output tensor is:\n output_dimension = floor(input_dimension * scale).\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "nearest", + "description": "Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc)" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "N-D tensor" + }, + { + "name": "scales", + "type": "tensor(float)", + "description": "The scale array along each dimension. It takes value greater than or equal to 1. The number of elements of 'scales' should be the same as the rank of input 'X'." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "N-D tensor after resizing" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input 'X' and output 'Y' to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "nearest", + "code": "node = onnx.helper.make_node(\n \"Upsample\",\n inputs=[\"X\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\noutput = np.array(\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_upsample_nearest\",\n opset_imports=[helper.make_opsetid(\"\", 9)],\n)" + } + ], + "category": "Data" + }, + { + "name": "Upsample", + "module": "ai.onnx", + "version": 10, + "description": "Upsample the input tensor.\nEach dimension value of the output tensor is:\n output_dimension = floor(input_dimension * scale).\n", + "attributes": [ + { + "name": "mode", + "type": "string", + "required": false, + "default": "nearest", + "description": "Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc)" + } + ], + "inputs": [ + { + "name": "X", + "type": "T", + "description": "N-D tensor" + }, + { + "name": "scales", + "type": "tensor(float)", + "description": "The scale array along each dimension. It takes value greater than or equal to 1. The number of elements of 'scales' should be the same as the rank of input 'X'." 
+ } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "Y", + "type": "T", + "description": "N-D tensor after resizing" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input 'X' and output 'Y' to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "nearest", + "code": "node = onnx.helper.make_node(\n \"Upsample\",\n inputs=[\"X\", \"scales\"],\n outputs=[\"Y\"],\n mode=\"nearest\",\n)\n\ndata = np.array(\n [\n [\n [\n [1, 2],\n [3, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\noutput = np.array(\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ]\n ]\n ],\n dtype=np.float32,\n)\n\nexpect(\n node,\n inputs=[data, scales],\n outputs=[output],\n name=\"test_upsample_nearest\",\n opset_imports=[helper.make_opsetid(\"\", 9)],\n)" + } + ], + "category": "Data" + }, + { + "name": "Where", + "module": "ai.onnx", + "version": 9, + "description": "Return elements, either from X or Y, depending on condition.\nWhere behaves like\n[numpy.where](https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html)\nwith three parameters.\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "inputs": [ + { + "name": "condition", + "type": "B", + "description": "When True (nonzero), yield X, otherwise yield Y" + }, + { + "name": "X", + "type": "T", + "description": "values selected at indices where condition is True" + }, + { + "name": "Y", + "type": "T", + "description": "values selected at indices where condition is False" + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of shape equal to the broadcasted shape of condition, X, and Y." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain to boolean tensors.", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + }, + { + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "long", + "code": "node = onnx.helper.make_node(\n \"Where\",\n inputs=[\"condition\", \"x\", \"y\"],\n outputs=[\"z\"],\n)\n\ncondition = np.array([[1, 0], [1, 1]], dtype=bool)\nx = np.array([[1, 2], [3, 4]], dtype=np.int64)\ny = np.array([[9, 8], [7, 6]], dtype=np.int64)\nz = np.where(condition, x, y) # expected output [[1, 8], [3, 4]]\nexpect(\n node, inputs=[condition, x, y], outputs=[z], name=\"test_where_long_example\"\n)" + }, + { + "summary": "where", + "code": "node = onnx.helper.make_node(\n \"Where\",\n inputs=[\"condition\", \"x\", \"y\"],\n outputs=[\"z\"],\n)\n\ncondition = np.array([[1, 0], [1, 1]], dtype=bool)\nx = np.array([[1, 2], [3, 4]], dtype=np.float32)\ny = np.array([[9, 8], [7, 6]], dtype=np.float32)\nz = np.where(condition, x, y) # expected output [[1, 8], [3, 4]]\nexpect(node, inputs=[condition, x, y], outputs=[z], name=\"test_where_example\")" + } + ] + }, + { + "name": "Where", + "module": "ai.onnx", + "version": 16, + "description": "Return elements, either from X or Y, depending on condition.\nWhere behaves like\n[numpy.where](https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html)\nwith three parameters.\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "inputs": [ + { + "name": "condition", + "type": "B", + "description": "When True (nonzero), yield X, otherwise yield Y" + }, + { + "name": "X", + "type": "T", + "description": "values selected at indices where condition is True" + }, + { + "name": "Y", + "type": "T", + "description": "values selected at indices where condition is False" + } + ], + "min_input": 3, + "max_input": 3, + "outputs": [ + { + "name": "output", + "type": "T", + "description": "Tensor of shape equal to the broadcasted shape of condition, X, and Y." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain to boolean tensors.", + "type_param_str": "B", + "allowed_type_strs": [ + "tensor(bool)" + ] + }, + { + "description": "Constrain input and output types to all tensor types (including bfloat).", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(bfloat16)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ] + } + ], + "examples": [ + { + "summary": "long", + "code": "node = onnx.helper.make_node(\n \"Where\",\n inputs=[\"condition\", \"x\", \"y\"],\n outputs=[\"z\"],\n)\n\ncondition = np.array([[1, 0], [1, 1]], dtype=bool)\nx = np.array([[1, 2], [3, 4]], dtype=np.int64)\ny = np.array([[9, 8], [7, 6]], dtype=np.int64)\nz = np.where(condition, x, y) # expected output [[1, 8], [3, 4]]\nexpect(\n node, inputs=[condition, x, y], outputs=[z], name=\"test_where_long_example\"\n)" + }, + { + "summary": "where", + "code": "node = onnx.helper.make_node(\n \"Where\",\n inputs=[\"condition\", \"x\", \"y\"],\n outputs=[\"z\"],\n)\n\ncondition = np.array([[1, 0], [1, 1]], dtype=bool)\nx = np.array([[1, 2], [3, 4]], dtype=np.float32)\ny = np.array([[9, 8], [7, 6]], dtype=np.float32)\nz = np.where(condition, x, y) # expected output [[1, 8], [3, 4]]\nexpect(node, inputs=[condition, x, y], outputs=[z], name=\"test_where_example\")" + } + ] + }, + { + "name": "Xor", + "module": "ai.onnx", + "version": 1, + "description": "Returns the tensor resulted from performing the `xor` logical operation\nelementwise on the input tensors `A` and `B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "attributes": [ + { + "name": "axis", + "type": "int64", + "required": false, + "description": "If set, defines the broadcast dimensions." + }, + { + "name": "broadcast", + "type": "int64", + "required": false, + "description": "Enable broadcasting" + } + ], + "inputs": [ + { + "name": "A", + "type": "T", + "description": "Left input tensor for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Right input tensor for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to boolean tensor.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "xor", + "code": "node = onnx.helper.make_node(\n \"Xor\",\n inputs=[\"x\", \"y\"],\n outputs=[\"xor\"],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(bool)\ny = (np.random.randn(3, 4) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor2d\")\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor3d\")\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor4d\")" + }, + { + "summary": "xor_broadcast", + "code": "node = onnx.helper.make_node(\n \"Xor\",\n inputs=[\"x\", \"y\"],\n outputs=[\"xor\"],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(5) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor_bcast3v1d\")\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(4, 5) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor_bcast3v2d\")\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(5, 6) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor_bcast4v2d\")\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor_bcast4v3d\")\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(bool)\ny = (np.random.randn(3, 1, 5, 6) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor_bcast4v4d\")" + } + ] + }, + { + "name": "Xor", + "module": "ai.onnx", + "version": 7, + "description": "Returns the tensor resulted from performing the `xor` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "inputs": [ + { + "name": "A", + "type": "T", + "description": "First input operand for the logical operator." + }, + { + "name": "B", + "type": "T", + "description": "Second input operand for the logical operator." + } + ], + "min_input": 2, + "max_input": 2, + "outputs": [ + { + "name": "C", + "type": "T1", + "description": "Result tensor." 
+ } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "Constrain input to boolean tensor.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(bool)" + ] + }, + { + "description": "Constrain output to boolean tensor.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "xor", + "code": "node = onnx.helper.make_node(\n \"Xor\",\n inputs=[\"x\", \"y\"],\n outputs=[\"xor\"],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(bool)\ny = (np.random.randn(3, 4) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor2d\")\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor3d\")\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor4d\")" + }, + { + "summary": "xor_broadcast", + "code": "node = onnx.helper.make_node(\n \"Xor\",\n inputs=[\"x\", \"y\"],\n outputs=[\"xor\"],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(5) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor_bcast3v1d\")\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(bool)\ny = (np.random.randn(4, 5) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor_bcast3v2d\")\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(5, 6) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor_bcast4v2d\")\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor_bcast4v3d\")\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(bool)\ny = (np.random.randn(3, 1, 5, 6) > 0).astype(bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z], name=\"test_xor_bcast4v4d\")" + } + ] + }, + { + "name": "ZipMap", + "module": "ai.onnx.ml", + "version": 1, + "description": "Creates a map from the input and the attributes.
\n The values are provided by the input tensor, while the keys are specified by the attributes.\n Must provide keys in either classlabels_strings or classlabels_int64s (but not both).
\n The columns of the tensor correspond one-by-one to the keys specified by the attributes. There must be as many columns as keys.
\n", + "attributes": [ + { + "name": "classlabels_int64s", + "type": "int64[]", + "required": false, + "description": "The keys when using int keys.
One and only one of the 'classlabels_*' attributes must be defined." + }, + { + "name": "classlabels_strings", + "type": "string[]", + "required": false, + "description": "The keys when using string keys.
One and only one of the 'classlabels_*' attributes must be defined." + } + ], + "inputs": [ + { + "name": "X", + "type": "tensor(float)", + "description": "The input values" + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Z", + "type": "T", + "description": "The output map" + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The output will be a sequence of string or integer maps to float.", + "type_param_str": "T", + "allowed_type_strs": [ + "seq(map(string, float))", + "seq(map(int64, float))" + ] + } + ] + }, + { + "name": "FusedConv", + "module": "com.microsoft", + "version": 1, + "inputs": [ + { + "name": "input", + "type": "T" + }, + { + "name": "weights", + "type": "T" + }, + { + "name": "bias", + "type": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": "T" + } + ], + "category": "Layer" + } +] \ No newline at end of file diff --git a/onnx-proto.js b/onnx-proto.js new file mode 100644 index 00000000000..b30460eb213 --- /dev/null +++ b/onnx-proto.js @@ -0,0 +1,1755 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('onnx'); + +$root.onnx = {}; + +$root.onnx.Version = { + "_START_VERSION": 0, + "IR_VERSION_2017_10_10": 1, + "IR_VERSION_2017_10_30": 2, + "IR_VERSION_2017_11_3": 3, + "IR_VERSION_2019_1_22": 4, + "IR_VERSION_2019_3_18": 5, + "IR_VERSION_2019_9_19": 6, + "IR_VERSION_2020_5_8": 7, + "IR_VERSION_2021_7_30": 8, + "IR_VERSION_2023_5_5": 9, + "IR_VERSION": 10 +}; + +$root.onnx.AttributeProto = class AttributeProto { + + constructor() { + this.floats = []; + this.ints = []; + this.strings = []; + this.tensors = []; + this.graphs = []; + this.sparse_tensors = []; + this.type_protos = []; + } + + static decode(reader, length) { + const message = new $root.onnx.AttributeProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 21: + message.ref_attr_name = reader.string(); + break; + case 13: + message.doc_string = reader.string(); + break; + case 20: + message.type = reader.int32(); + break; + case 2: + message.f = reader.float(); + break; + case 3: + message.i = reader.int64(); + break; + case 4: + message.s = reader.bytes(); + break; + case 5: + message.t = $root.onnx.TensorProto.decode(reader, reader.uint32()); + break; + case 6: + message.g = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + case 22: + message.sparse_tensor = $root.onnx.SparseTensorProto.decode(reader, reader.uint32()); + break; + case 14: + message.tp = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + case 7: + message.floats = reader.floats(message.floats, tag); + break; + case 8: + message.ints = reader.array(message.ints, () => reader.int64(), tag); + break; + case 9: + message.strings.push(reader.bytes()); + break; + case 10: + message.tensors.push($root.onnx.TensorProto.decode(reader, reader.uint32())); + break; + case 11: + message.graphs.push($root.onnx.GraphProto.decode(reader, reader.uint32())); + break; + case 23: + message.sparse_tensors.push($root.onnx.SparseTensorProto.decode(reader, reader.uint32())); + break; + case 15: + message.type_protos.push($root.onnx.TypeProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.AttributeProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "ref_attr_name": + message.ref_attr_name = reader.string(); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + case "type": + message.type = reader.enum($root.onnx.AttributeProto.AttributeType); + break; + case "f": + message.f = reader.float(); + break; + case "i": + message.i = reader.int64(); + break; + case "s": + message.s = reader.bytes(); + break; + case "t": + message.t = $root.onnx.TensorProto.decodeText(reader); + break; + case "g": + message.g = $root.onnx.GraphProto.decodeText(reader); + break; + case "sparse_tensor": + message.sparse_tensor = $root.onnx.SparseTensorProto.decodeText(reader); + break; + case "tp": + message.tp = $root.onnx.TypeProto.decodeText(reader); + break; + case "floats": + reader.array(message.floats, () => reader.float()); + break; + case "ints": + reader.array(message.ints, () => reader.int64()); + break; + case "strings": + reader.array(message.strings, () => reader.bytes()); + break; + case "tensors": + message.tensors.push($root.onnx.TensorProto.decodeText(reader)); + break; + case "graphs": + message.graphs.push($root.onnx.GraphProto.decodeText(reader)); + break; + case "sparse_tensors": + message.sparse_tensors.push($root.onnx.SparseTensorProto.decodeText(reader)); + break; + case "type_protos": + message.type_protos.push($root.onnx.TypeProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.AttributeProto.prototype.name = ""; +$root.onnx.AttributeProto.prototype.ref_attr_name = ""; +$root.onnx.AttributeProto.prototype.doc_string = ""; +$root.onnx.AttributeProto.prototype.type = 0; +$root.onnx.AttributeProto.prototype.f = 0; 
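+// Note: int64-typed fields default to the protobuf.Int64 wrapper from
+// './protobuf.js' rather than a plain number, because JavaScript numbers
+// only represent integers exactly up to 2^53 - 1.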
+$root.onnx.AttributeProto.prototype.i = protobuf.Int64.create(0); +$root.onnx.AttributeProto.prototype.s = new Uint8Array([]); +$root.onnx.AttributeProto.prototype.t = null; +$root.onnx.AttributeProto.prototype.g = null; +$root.onnx.AttributeProto.prototype.sparse_tensor = null; +$root.onnx.AttributeProto.prototype.tp = null; + +$root.onnx.AttributeProto.AttributeType = { + "UNDEFINED": 0, + "FLOAT": 1, + "INT": 2, + "STRING": 3, + "TENSOR": 4, + "GRAPH": 5, + "SPARSE_TENSOR": 11, + "TYPE_PROTO": 13, + "FLOATS": 6, + "INTS": 7, + "STRINGS": 8, + "TENSORS": 9, + "GRAPHS": 10, + "SPARSE_TENSORS": 12, + "TYPE_PROTOS": 14 +}; + +$root.onnx.ValueInfoProto = class ValueInfoProto { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.onnx.ValueInfoProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + case 3: + message.doc_string = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.ValueInfoProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = $root.onnx.TypeProto.decodeText(reader); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.ValueInfoProto.prototype.name = ""; +$root.onnx.ValueInfoProto.prototype.type = null; +$root.onnx.ValueInfoProto.prototype.doc_string = ""; + +$root.onnx.NodeProto = class NodeProto { + + constructor() { + this.input = []; + this.output = []; + this.attribute = []; + } + + static decode(reader, length) { + const message = new $root.onnx.NodeProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.input.push(reader.string()); + break; + case 2: + message.output.push(reader.string()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.op_type = reader.string(); + break; + case 7: + message.domain = reader.string(); + break; + case 5: + message.attribute.push($root.onnx.AttributeProto.decode(reader, reader.uint32())); + break; + case 6: + message.doc_string = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.NodeProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "input": + reader.array(message.input, () => reader.string()); + break; + case "output": + reader.array(message.output, () => reader.string()); + break; + case "name": + message.name = reader.string(); + break; + case "op_type": + message.op_type = reader.string(); + break; + case "domain": + message.domain = reader.string(); + break; + case "attribute": + message.attribute.push($root.onnx.AttributeProto.decodeText(reader)); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.NodeProto.prototype.name = ""; +$root.onnx.NodeProto.prototype.op_type = ""; +$root.onnx.NodeProto.prototype.domain = ""; +$root.onnx.NodeProto.prototype.doc_string = ""; + +$root.onnx.TrainingInfoProto = class TrainingInfoProto { + + constructor() { + this.initialization_binding = []; + this.update_binding = []; + } + + static decode(reader, length) { + const message = new $root.onnx.TrainingInfoProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.initialization = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + case 2: + message.algorithm = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + case 3: + message.initialization_binding.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + case 4: + message.update_binding.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TrainingInfoProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "initialization": + message.initialization = $root.onnx.GraphProto.decodeText(reader); + break; + case "algorithm": + message.algorithm = $root.onnx.GraphProto.decodeText(reader); + break; + case "initialization_binding": + message.initialization_binding.push($root.onnx.StringStringEntryProto.decodeText(reader)); + break; + case "update_binding": + message.update_binding.push($root.onnx.StringStringEntryProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TrainingInfoProto.prototype.initialization = null; +$root.onnx.TrainingInfoProto.prototype.algorithm = null; + +$root.onnx.ModelProto = class ModelProto { + + constructor() { + this.opset_import = []; + this.metadata_props = []; + this.training_info = []; + this.functions = []; + } + + static decode(reader, length) { + const message = new $root.onnx.ModelProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.ir_version = reader.int64(); + break; + case 8: + message.opset_import.push($root.onnx.OperatorSetIdProto.decode(reader, reader.uint32())); + break; + case 2: + message.producer_name = reader.string(); + break; + case 3: + message.producer_version = reader.string(); + break; + case 4: + message.domain = reader.string(); + break; + case 5: + message.model_version = reader.int64(); + break; + case 6: + message.doc_string = reader.string(); + break; + case 7: + message.graph = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + case 14: + message.metadata_props.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + case 20: + message.training_info.push($root.onnx.TrainingInfoProto.decode(reader, reader.uint32())); + break; + case 25: + message.functions.push($root.onnx.FunctionProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.ModelProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "ir_version": + message.ir_version = reader.int64(); + break; + case "opset_import": + message.opset_import.push($root.onnx.OperatorSetIdProto.decodeText(reader)); + break; + case "producer_name": + message.producer_name = reader.string(); + break; + case "producer_version": + message.producer_version = reader.string(); + break; + case "domain": + message.domain = reader.string(); + break; + case "model_version": + message.model_version = reader.int64(); + break; + case "doc_string": + 
message.doc_string = reader.string(); + break; + case "graph": + message.graph = $root.onnx.GraphProto.decodeText(reader); + break; + case "metadata_props": + message.metadata_props.push($root.onnx.StringStringEntryProto.decodeText(reader)); + break; + case "training_info": + message.training_info.push($root.onnx.TrainingInfoProto.decodeText(reader)); + break; + case "functions": + message.functions.push($root.onnx.FunctionProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.ModelProto.prototype.ir_version = protobuf.Int64.create(0); +$root.onnx.ModelProto.prototype.producer_name = ""; +$root.onnx.ModelProto.prototype.producer_version = ""; +$root.onnx.ModelProto.prototype.domain = ""; +$root.onnx.ModelProto.prototype.model_version = protobuf.Int64.create(0); +$root.onnx.ModelProto.prototype.doc_string = ""; +$root.onnx.ModelProto.prototype.graph = null; + +$root.onnx.StringStringEntryProto = class StringStringEntryProto { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.onnx.StringStringEntryProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.StringStringEntryProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.string(); + break; + case "value": + message.value = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.StringStringEntryProto.prototype.key = ""; +$root.onnx.StringStringEntryProto.prototype.value = ""; + +$root.onnx.TensorAnnotation = class TensorAnnotation { + + constructor() { + this.quant_parameter_tensor_names = []; + } + + static decode(reader, length) { + const message = new $root.onnx.TensorAnnotation(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor_name = reader.string(); + break; + case 2: + message.quant_parameter_tensor_names.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TensorAnnotation(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "tensor_name": + message.tensor_name = reader.string(); + break; + case "quant_parameter_tensor_names": + message.quant_parameter_tensor_names.push($root.onnx.StringStringEntryProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TensorAnnotation.prototype.tensor_name = ""; + +$root.onnx.GraphProto = class GraphProto { + + constructor() { + this.node = []; + this.initializer = []; + this.sparse_initializer = []; + this.input = []; + this.output = []; + this.value_info = []; + this.quantization_annotation = []; + } + + static decode(reader, length) { + const message = new $root.onnx.GraphProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.node.push($root.onnx.NodeProto.decode(reader, reader.uint32())); + break; + case 2: + message.name = reader.string(); + break; + case 5: + message.initializer.push($root.onnx.TensorProto.decode(reader, reader.uint32())); + break; + case 15: + message.sparse_initializer.push($root.onnx.SparseTensorProto.decode(reader, reader.uint32())); + break; + case 10: + message.doc_string = reader.string(); + break; + case 11: + message.input.push($root.onnx.ValueInfoProto.decode(reader, reader.uint32())); + break; + case 12: + message.output.push($root.onnx.ValueInfoProto.decode(reader, reader.uint32())); + break; + case 13: + message.value_info.push($root.onnx.ValueInfoProto.decode(reader, reader.uint32())); + break; + case 14: + message.quantization_annotation.push($root.onnx.TensorAnnotation.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.GraphProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "node": + message.node.push($root.onnx.NodeProto.decodeText(reader)); + break; + case "name": + message.name = reader.string(); + break; + case "initializer": + message.initializer.push($root.onnx.TensorProto.decodeText(reader)); + break; + case "sparse_initializer": + message.sparse_initializer.push($root.onnx.SparseTensorProto.decodeText(reader)); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + case "input": + message.input.push($root.onnx.ValueInfoProto.decodeText(reader)); + break; + case "output": + message.output.push($root.onnx.ValueInfoProto.decodeText(reader)); + break; + case "value_info": + message.value_info.push($root.onnx.ValueInfoProto.decodeText(reader)); + break; + case "quantization_annotation": + message.quantization_annotation.push($root.onnx.TensorAnnotation.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.GraphProto.prototype.name = ""; +$root.onnx.GraphProto.prototype.doc_string = ""; + +$root.onnx.TensorProto = class TensorProto { + + constructor() { + this.dims = []; + this.float_data = []; + this.int32_data = []; + this.string_data = []; + this.int64_data = []; + this.external_data = []; + this.double_data = []; + this.uint64_data = []; + } + + static decode(reader, length) { + const message = new $root.onnx.TensorProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dims = reader.array(message.dims, () => reader.int64(), tag); + break; + case 2: + message.data_type = reader.int32(); + break; + case 3: + message.segment = $root.onnx.TensorProto.Segment.decode(reader, reader.uint32()); + break; + case 4: + message.float_data = reader.floats(message.float_data, tag); + break; + case 5: + message.int32_data = reader.array(message.int32_data, () => reader.int32(), tag); + break; + case 6: + message.string_data.push(reader.bytes()); + break; + case 7: + message.int64_data = reader.array(message.int64_data, () => reader.int64(), tag); + break; + case 8: + message.name = reader.string(); + break; + case 12: + message.doc_string = reader.string(); + break; + case 9: + message.raw_data = reader.bytes(); + break; + case 13: + message.external_data.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + case 14: + message.data_location = reader.int32(); + break; + case 10: + message.double_data = reader.doubles(message.double_data, tag); + break; + case 11: + message.uint64_data = reader.array(message.uint64_data, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TensorProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dims": + reader.array(message.dims, () => reader.int64()); + break; + case "data_type": + message.data_type = reader.int32(); + break; + case "segment": + message.segment = $root.onnx.TensorProto.Segment.decodeText(reader); + break; + case "float_data": + reader.array(message.float_data, () => reader.float()); + break; + case "int32_data": + reader.array(message.int32_data, () => reader.int32()); + break; + case "string_data": + reader.array(message.string_data, () => reader.bytes()); + break; + case "int64_data": + reader.array(message.int64_data, () => reader.int64()); + break; + case "name": + message.name = reader.string(); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + case "raw_data": + message.raw_data = reader.bytes(); + break; + case "external_data": + message.external_data.push($root.onnx.StringStringEntryProto.decodeText(reader)); + break; + case "data_location": + message.data_location = reader.enum($root.onnx.TensorProto.DataLocation); + break; + case "double_data": + reader.array(message.double_data, () => reader.double()); + break; + case "uint64_data": + reader.array(message.uint64_data, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TensorProto.prototype.data_type = 0; +$root.onnx.TensorProto.prototype.segment = null; +$root.onnx.TensorProto.prototype.name = ""; +$root.onnx.TensorProto.prototype.doc_string = ""; +$root.onnx.TensorProto.prototype.raw_data = new Uint8Array([]); +$root.onnx.TensorProto.prototype.data_location = 0; + +$root.onnx.TensorProto.DataType = { + "UNDEFINED": 0, + "FLOAT": 1, + "UINT8": 2, + "INT8": 3, + "UINT16": 4, + "INT16": 5, + "INT32": 6, + "INT64": 7, + "STRING": 8, + "BOOL": 9, + "FLOAT16": 10, + "DOUBLE": 11, + "UINT32": 12, + "UINT64": 13, + "COMPLEX64": 14, + "COMPLEX128": 15, + "BFLOAT16": 16, + "FLOAT8E4M3FN": 17, + "FLOAT8E4M3FNUZ": 18, + "FLOAT8E5M2": 19, + "FLOAT8E5M2FNUZ": 20, + "UINT4": 21, + "INT4": 22 +}; + 
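+// Usage sketch (illustrative comment, not part of the generated decoders):
+// a decoded TensorProto carries its payload either in the typed repeated
+// fields (float_data, int32_data, ...) or packed little-endian in raw_data,
+// with data_type selecting the element type. Assuming a FLOAT tensor stored
+// in raw_data on a little-endian host, the values could be viewed as:
+//
+//   const tensor = $root.onnx.TensorProto.decode(reader, reader.uint32());
+//   if (tensor.data_type === $root.onnx.TensorProto.DataType.FLOAT &&
+//       tensor.raw_data.length > 0) {
+//       const values = new Float32Array(tensor.raw_data.buffer,
+//           tensor.raw_data.byteOffset, tensor.raw_data.length >> 2);
+//   }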
+$root.onnx.TensorProto.Segment = class Segment { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.onnx.TensorProto.Segment(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.begin = reader.int64(); + break; + case 2: + message.end = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TensorProto.Segment(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "begin": + message.begin = reader.int64(); + break; + case "end": + message.end = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TensorProto.Segment.prototype.begin = protobuf.Int64.create(0); +$root.onnx.TensorProto.Segment.prototype.end = protobuf.Int64.create(0); + +$root.onnx.TensorProto.DataLocation = { + "DEFAULT": 0, + "EXTERNAL": 1 +}; + +$root.onnx.SparseTensorProto = class SparseTensorProto { + + constructor() { + this.dims = []; + } + + static decode(reader, length) { + const message = new $root.onnx.SparseTensorProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values = $root.onnx.TensorProto.decode(reader, reader.uint32()); + break; + case 2: + message.indices = $root.onnx.TensorProto.decode(reader, reader.uint32()); + break; + case 3: + message.dims = reader.array(message.dims, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.SparseTensorProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + message.values = $root.onnx.TensorProto.decodeText(reader); + break; + case "indices": + message.indices = $root.onnx.TensorProto.decodeText(reader); + break; + case "dims": + reader.array(message.dims, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.SparseTensorProto.prototype.values = null; +$root.onnx.SparseTensorProto.prototype.indices = null; + +$root.onnx.TensorShapeProto = class TensorShapeProto { + + constructor() { + this.dim = []; + } + + static decode(reader, length) { + const message = new $root.onnx.TensorShapeProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dim.push($root.onnx.TensorShapeProto.Dimension.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TensorShapeProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dim": + message.dim.push($root.onnx.TensorShapeProto.Dimension.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TensorShapeProto.Dimension = class Dimension { + + constructor() { + } + + get value() { + $root.onnx.TensorShapeProto.Dimension.valueSet = $root.onnx.TensorShapeProto.Dimension.valueSet || new Set([ "dim_value", "dim_param"]); + return Object.keys(this).find((key) => $root.onnx.TensorShapeProto.Dimension.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.onnx.TensorShapeProto.Dimension(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dim_value = reader.int64(); + break; + case 2: + message.dim_param = reader.string(); + break; + case 3: + message.denotation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TensorShapeProto.Dimension(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dim_value": + message.dim_value = reader.int64(); + break; + case "dim_param": + message.dim_param = reader.string(); + break; + case "denotation": + message.denotation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TensorShapeProto.Dimension.prototype.denotation = ""; + +$root.onnx.TypeProto = class TypeProto { + + constructor() { + } + + get value() { + $root.onnx.TypeProto.valueSet = $root.onnx.TypeProto.valueSet || new Set([ "tensor_type", "sequence_type", "map_type", "optional_type", "sparse_tensor_type", "opaque_type"]); + return Object.keys(this).find((key) => $root.onnx.TypeProto.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.onnx.TypeProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor_type = $root.onnx.TypeProto.Tensor.decode(reader, reader.uint32()); + break; + case 4: + message.sequence_type = $root.onnx.TypeProto.Sequence.decode(reader, reader.uint32()); + break; + case 5: + message.map_type = $root.onnx.TypeProto.Map.decode(reader, reader.uint32()); + break; + case 9: + message.optional_type = $root.onnx.TypeProto.Optional.decode(reader, reader.uint32()); + break; + case 8: + message.sparse_tensor_type = $root.onnx.TypeProto.SparseTensor.decode(reader, reader.uint32()); + break; + case 7: + message.opaque_type = $root.onnx.TypeProto.Opaque.decode(reader, reader.uint32()); + break; + case 6: + message.denotation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TypeProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "tensor_type": + message.tensor_type = $root.onnx.TypeProto.Tensor.decodeText(reader); + break; + case "sequence_type": + message.sequence_type = $root.onnx.TypeProto.Sequence.decodeText(reader); + break; + case "map_type": + message.map_type = $root.onnx.TypeProto.Map.decodeText(reader); + break; + case "optional_type": + message.optional_type = $root.onnx.TypeProto.Optional.decodeText(reader); + break; + case "sparse_tensor_type": + message.sparse_tensor_type = $root.onnx.TypeProto.SparseTensor.decodeText(reader); + break; + case "opaque_type": + message.opaque_type = $root.onnx.TypeProto.Opaque.decodeText(reader); + break; + case "denotation": + message.denotation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TypeProto.prototype.denotation = ""; + +$root.onnx.TypeProto.Tensor = class Tensor { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.onnx.TypeProto.Tensor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.elem_type = reader.int32(); + break; + case 2: + message.shape = $root.onnx.TensorShapeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TypeProto.Tensor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "elem_type": + message.elem_type = reader.int32(); + break; + case "shape": + message.shape = $root.onnx.TensorShapeProto.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TypeProto.Tensor.prototype.elem_type = 0; +$root.onnx.TypeProto.Tensor.prototype.shape = null; + +$root.onnx.TypeProto.Sequence = class Sequence { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.onnx.TypeProto.Sequence(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.elem_type = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TypeProto.Sequence(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "elem_type": + message.elem_type = $root.onnx.TypeProto.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TypeProto.Sequence.prototype.elem_type = null; + +$root.onnx.TypeProto.Map = class Map { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.onnx.TypeProto.Map(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key_type = reader.int32(); + break; + case 2: + message.value_type = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TypeProto.Map(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "key_type": + message.key_type = reader.int32(); + break; + case "value_type": + message.value_type = $root.onnx.TypeProto.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TypeProto.Map.prototype.key_type = 0; +$root.onnx.TypeProto.Map.prototype.value_type = null; + +$root.onnx.TypeProto.Optional = class Optional { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.onnx.TypeProto.Optional(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.elem_type = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TypeProto.Optional(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "elem_type": + message.elem_type = $root.onnx.TypeProto.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TypeProto.Optional.prototype.elem_type = null; + +$root.onnx.TypeProto.SparseTensor = class SparseTensor { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.onnx.TypeProto.SparseTensor(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.elem_type = reader.int32(); + break; + case 2: + message.shape = $root.onnx.TensorShapeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TypeProto.SparseTensor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "elem_type": + message.elem_type = reader.int32(); + break; + case "shape": + message.shape = $root.onnx.TensorShapeProto.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TypeProto.SparseTensor.prototype.elem_type = 0; +$root.onnx.TypeProto.SparseTensor.prototype.shape = null; + +$root.onnx.TypeProto.Opaque = class Opaque { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.onnx.TypeProto.Opaque(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.domain = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.TypeProto.Opaque(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "domain": + message.domain = reader.string(); + break; + case "name": + message.name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.TypeProto.Opaque.prototype.domain = ""; +$root.onnx.TypeProto.Opaque.prototype.name = ""; + +$root.onnx.OperatorSetIdProto = class OperatorSetIdProto { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.onnx.OperatorSetIdProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.domain = reader.string(); + break; + case 2: + message.version = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.OperatorSetIdProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "domain": + message.domain = reader.string(); + break; + case "version": + message.version = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.OperatorSetIdProto.prototype.domain = ""; +$root.onnx.OperatorSetIdProto.prototype.version = protobuf.Int64.create(0); + +$root.onnx.OperatorStatus = { + "EXPERIMENTAL": 0, + "STABLE": 1 +}; + +$root.onnx.FunctionProto = class FunctionProto { + + constructor() { + this.input = []; + this.output = []; + this.attribute = []; + this.attribute_proto = []; + this.node = []; + this.opset_import = []; + } + + static decode(reader, length) { + const message = new $root.onnx.FunctionProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 4: + message.input.push(reader.string()); + break; + case 5: + message.output.push(reader.string()); + break; + case 6: + message.attribute.push(reader.string()); + break; + case 11: + message.attribute_proto.push($root.onnx.AttributeProto.decode(reader, reader.uint32())); + break; + case 7: + message.node.push($root.onnx.NodeProto.decode(reader, reader.uint32())); + break; + case 8: + message.doc_string = reader.string(); + break; + case 9: + message.opset_import.push($root.onnx.OperatorSetIdProto.decode(reader, reader.uint32())); + break; + case 10: + message.domain = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.FunctionProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "input": + reader.array(message.input, () => reader.string()); + break; + case "output": + reader.array(message.output, () => reader.string()); + break; + case "attribute": + reader.array(message.attribute, () => reader.string()); + break; + case "attribute_proto": + message.attribute_proto.push($root.onnx.AttributeProto.decodeText(reader)); + break; + case "node": + message.node.push($root.onnx.NodeProto.decodeText(reader)); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + case "opset_import": + message.opset_import.push($root.onnx.OperatorSetIdProto.decodeText(reader)); + break; + case "domain": + message.domain = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.FunctionProto.prototype.name = ""; +$root.onnx.FunctionProto.prototype.doc_string = ""; +$root.onnx.FunctionProto.prototype.domain = ""; + +$root.onnx.OperatorProto = class OperatorProto { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.onnx.OperatorProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.op_type = reader.string(); + break; + case 2: + message.since_version = reader.int64(); + break; + case 3: + message.status = reader.int32(); + break; + case 10: + message.doc_string = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.OperatorProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "op_type": + message.op_type = reader.string(); + break; + case "since_version": + message.since_version = reader.int64(); + break; + case "status": + message.status = reader.enum($root.onnx.OperatorStatus); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.OperatorProto.prototype.op_type = ""; +$root.onnx.OperatorProto.prototype.since_version = protobuf.Int64.create(0); +$root.onnx.OperatorProto.prototype.status = 0; +$root.onnx.OperatorProto.prototype.doc_string = ""; + +$root.onnx.OperatorSetProto = class OperatorSetProto { + + constructor() { + this.operator = []; + this.functions = []; + } + + static decode(reader, length) { + const message = new $root.onnx.OperatorSetProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.magic = reader.string(); + break; + case 2: + message.ir_version = reader.int64(); + break; + case 3: + message.ir_version_prerelease = reader.string(); + break; + case 7: + message.ir_build_metadata = reader.string(); + break; + case 4: + message.domain = reader.string(); + break; + case 5: + message.opset_version = reader.int64(); + break; + case 6: + message.doc_string = reader.string(); + break; + case 8: + message.operator.push($root.onnx.OperatorProto.decode(reader, reader.uint32())); + break; + case 9: + message.functions.push($root.onnx.FunctionProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.onnx.OperatorSetProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "magic": + message.magic = reader.string(); + break; + case "ir_version": + message.ir_version = reader.int64(); + break; + case "ir_version_prerelease": + message.ir_version_prerelease = reader.string(); + break; + case "ir_build_metadata": + message.ir_build_metadata = reader.string(); + break; + case "domain": + message.domain = reader.string(); + break; + case "opset_version": + message.opset_version = reader.int64(); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + case "operator": + message.operator.push($root.onnx.OperatorProto.decodeText(reader)); + break; + case "functions": + message.functions.push($root.onnx.FunctionProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.onnx.OperatorSetProto.prototype.magic = ""; +$root.onnx.OperatorSetProto.prototype.ir_version = protobuf.Int64.create(0); +$root.onnx.OperatorSetProto.prototype.ir_version_prerelease = ""; +$root.onnx.OperatorSetProto.prototype.ir_build_metadata = ""; 
+$root.onnx.OperatorSetProto.prototype.domain = ""; +$root.onnx.OperatorSetProto.prototype.opset_version = protobuf.Int64.create(0); +$root.onnx.OperatorSetProto.prototype.doc_string = ""; diff --git a/onnx-schema.js b/onnx-schema.js new file mode 100644 index 00000000000..e9a2b850986 --- /dev/null +++ b/onnx-schema.js @@ -0,0 +1,445 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const $root = flatbuffers.get('ort'); + +$root.onnxruntime = $root.onnxruntime || {}; + +$root.onnxruntime.fbs = $root.onnxruntime.fbs || {}; + +$root.onnxruntime.fbs.AttributeType = { + UNDEFINED: 0, + FLOAT: 1, + INT: 2, + STRING: 3, + TENSOR: 4, + GRAPH: 5, + FLOATS: 6, + INTS: 7, + STRINGS: 8, + TENSORS: 9, + GRAPHS: 10, + SPARSE_TENSOR: 11, + SPARSE_TENSORS: 12 +}; + +$root.onnxruntime.fbs.Shape = class Shape { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.Shape(); + $.dim = reader.tableArray(position, 4, $root.onnxruntime.fbs.Dimension.decode); + return $; + } +}; + +$root.onnxruntime.fbs.Dimension = class Dimension { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.Dimension(); + $.value = reader.table(position, 4, $root.onnxruntime.fbs.DimensionValue.decode); + $.denotation = reader.string_(position, 6, null); + return $; + } +}; + +$root.onnxruntime.fbs.DimensionValueType = { + UNKNOWN: 0, + VALUE: 1, + PARAM: 2 +}; + +$root.onnxruntime.fbs.DimensionValue = class DimensionValue { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.DimensionValue(); + $.dim_type = reader.int8_(position, 4, 0); + $.dim_value = reader.int64_(position, 6, 0); + $.dim_param = reader.string_(position, 8, null); + return $; + } +}; + +$root.onnxruntime.fbs.TensorDataType = { + UNDEFINED: 0, + FLOAT: 1, + UINT8: 2, + INT8: 3, + UINT16: 4, + INT16: 5, + INT32: 6, + INT64: 7, + STRING: 8, + BOOL: 9, + FLOAT16: 10, + DOUBLE: 11, + UINT32: 12, + UINT64: 13, + COMPLEX64: 14, + COMPLEX128: 15, + BFLOAT16: 16, + FLOAT8E4M3FN: 17, + FLOAT8E4M3FNUZ: 18, + FLOAT8E5M2: 19, + FLOAT8E5M2FNUZ: 20 +}; + +$root.onnxruntime.fbs.TensorTypeAndShape = class TensorTypeAndShape { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.TensorTypeAndShape(); + $.elem_type = reader.int32_(position, 4, 0); + $.shape = reader.table(position, 6, $root.onnxruntime.fbs.Shape.decode); + return $; + } +}; + +$root.onnxruntime.fbs.MapType = class MapType { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.MapType(); + $.key_type = reader.int32_(position, 4, 0); + $.value_type = reader.table(position, 6, $root.onnxruntime.fbs.TypeInfo.decode); + return $; + } +}; + +$root.onnxruntime.fbs.SequenceType = class SequenceType { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.SequenceType(); + $.elem_type = reader.table(position, 4, $root.onnxruntime.fbs.TypeInfo.decode); + return $; + } +}; + +$root.onnxruntime.fbs.NodeType = { + Primitive: 0, + Fused: 1 +}; + +$root.onnxruntime.fbs.EdgeEnd = class EdgeEnd { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.EdgeEnd(); + $.node_index = reader.uint32(position + 0); + $.src_arg_index = reader.int32(position + 4); + $.dst_arg_index = reader.int32(position + 8); + return $; + } +}; + +$root.onnxruntime.fbs.NodeEdge = class NodeEdge { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.NodeEdge(); + $.node_index = reader.uint32_(position, 4, 0); + $.input_edges = reader.structArray(position, 6, 
$root.onnxruntime.fbs.EdgeEnd.decode); + $.output_edges = reader.structArray(position, 8, $root.onnxruntime.fbs.EdgeEnd.decode); + return $; + } +}; + +$root.onnxruntime.fbs.Node = class Node { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.Node(); + $.name = reader.string_(position, 4, null); + $.doc_string = reader.string_(position, 6, null); + $.domain = reader.string_(position, 8, null); + $.since_version = reader.int32_(position, 10, 0); + $.index = reader.uint32_(position, 12, 0); + $.op_type = reader.string_(position, 14, null); + $.type = reader.int32_(position, 16, 0); + $.execution_provider_type = reader.string_(position, 18, null); + $.inputs = reader.strings_(position, 20); + $.outputs = reader.strings_(position, 22); + $.attributes = reader.tableArray(position, 24, $root.onnxruntime.fbs.Attribute.decode); + $.input_arg_counts = reader.typedArray(position, 26, Int32Array); + $.implicit_inputs = reader.strings_(position, 28); + return $; + } +}; + +$root.onnxruntime.fbs.ValueInfo = class ValueInfo { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.ValueInfo(); + $.name = reader.string_(position, 4, null); + $.doc_string = reader.string_(position, 6, null); + $.type = reader.table(position, 8, $root.onnxruntime.fbs.TypeInfo.decode); + return $; + } +}; + +$root.onnxruntime.fbs.TypeInfoValue = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.onnxruntime.fbs.TensorTypeAndShape.decode(reader, position); + case 2: return $root.onnxruntime.fbs.SequenceType.decode(reader, position); + case 3: return $root.onnxruntime.fbs.MapType.decode(reader, position); + default: return undefined; + } + } +}; + +$root.onnxruntime.fbs.TypeInfo = class TypeInfo { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.TypeInfo(); + $.denotation = reader.string_(position, 4, null); + $.value = reader.union(position, 6, $root.onnxruntime.fbs.TypeInfoValue.decode); + return $; + } +}; + +$root.onnxruntime.fbs.OperatorSetId = class OperatorSetId { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.OperatorSetId(); + $.domain = reader.string_(position, 4, null); + $.version = reader.int64_(position, 6, 0); + return $; + } +}; + +$root.onnxruntime.fbs.Tensor = class Tensor { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.Tensor(); + $.name = reader.string_(position, 4, null); + $.doc_string = reader.string_(position, 6, null); + $.dims = reader.int64s_(position, 8); + $.data_type = reader.int32_(position, 10, 0); + $.raw_data = reader.typedArray(position, 12, Uint8Array); + $.string_data = reader.strings_(position, 14); + return $; + } +}; + +$root.onnxruntime.fbs.SparseTensor = class SparseTensor { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.SparseTensor(); + $.values = reader.table(position, 4, $root.onnxruntime.fbs.Tensor.decode); + $.indices = reader.table(position, 6, $root.onnxruntime.fbs.Tensor.decode); + $.dims = reader.int64s_(position, 8); + return $; + } +}; + +$root.onnxruntime.fbs.Attribute = class Attribute { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.Attribute(); + $.name = reader.string_(position, 4, null); + $.doc_string = reader.string_(position, 6, null); + $.type = reader.int32_(position, 8, 0); + $.f = reader.float32_(position, 10, 0); + $.i = reader.int64_(position, 12, 0); + $.s = reader.string_(position, 14, null); + $.t = reader.table(position, 16, 
$root.onnxruntime.fbs.Tensor.decode); + $.g = reader.table(position, 18, $root.onnxruntime.fbs.Graph.decode); + $.floats = reader.typedArray(position, 20, Float32Array); + $.ints = reader.int64s_(position, 22); + $.strings = reader.strings_(position, 24); + $.tensors = reader.tableArray(position, 26, $root.onnxruntime.fbs.Tensor.decode); + $.graphs = reader.tableArray(position, 28, $root.onnxruntime.fbs.Graph.decode); + return $; + } +}; + +$root.onnxruntime.fbs.NodesToOptimizeIndices = class NodesToOptimizeIndices { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.NodesToOptimizeIndices(); + $.node_indices = reader.typedArray(position, 4, Uint32Array); + $.num_inputs = reader.uint32_(position, 6, 0); + $.num_outputs = reader.uint32_(position, 8, 0); + $.has_variadic_input = reader.bool_(position, 10, false); + $.has_variadic_output = reader.bool_(position, 12, false); + $.num_variadic_inputs = reader.uint32_(position, 14, 0); + $.num_variadic_outputs = reader.uint32_(position, 16, 0); + return $; + } +}; + +$root.onnxruntime.fbs.DeprecatedNodeIndexAndKernelDefHash = class DeprecatedNodeIndexAndKernelDefHash { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.DeprecatedNodeIndexAndKernelDefHash(); + $.node_index = reader.uint32_(position, 4, 0); + $.kernel_def_hash = reader.uint64_(position, 6, 0); + return $; + } +}; + +$root.onnxruntime.fbs.RuntimeOptimizationRecord = class RuntimeOptimizationRecord { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.RuntimeOptimizationRecord(); + $.action_id = reader.string_(position, 4, null); + $.nodes_to_optimize_indices = reader.table(position, 6, $root.onnxruntime.fbs.NodesToOptimizeIndices.decode); + $.produced_nodes = reader.tableArray(position, 8, $root.onnxruntime.fbs.DeprecatedNodeIndexAndKernelDefHash.decode); + $.produced_op_ids = reader.strings_(position, 10); + return $; + } +}; + +$root.onnxruntime.fbs.RuntimeOptimizationRecordContainerEntry = class RuntimeOptimizationRecordContainerEntry { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.RuntimeOptimizationRecordContainerEntry(); + $.optimizer_name = reader.string_(position, 4, null); + $.runtime_optimization_records = reader.tableArray(position, 6, $root.onnxruntime.fbs.RuntimeOptimizationRecord.decode); + return $; + } +}; + +$root.onnxruntime.fbs.RuntimeOptimizations = class RuntimeOptimizations { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.RuntimeOptimizations(); + $.records = reader.tableArray(position, 4, $root.onnxruntime.fbs.RuntimeOptimizationRecordContainerEntry.decode); + return $; + } +}; + +$root.onnxruntime.fbs.Graph = class Graph { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.Graph(); + $.initializers = reader.tableArray(position, 4, $root.onnxruntime.fbs.Tensor.decode); + $.node_args = reader.tableArray(position, 6, $root.onnxruntime.fbs.ValueInfo.decode); + $.nodes = reader.tableArray(position, 8, $root.onnxruntime.fbs.Node.decode); + $.max_node_index = reader.uint32_(position, 10, 0); + $.node_edges = reader.tableArray(position, 12, $root.onnxruntime.fbs.NodeEdge.decode); + $.inputs = reader.strings_(position, 14); + $.outputs = reader.strings_(position, 16); + $.sparse_initializers = reader.tableArray(position, 18, $root.onnxruntime.fbs.SparseTensor.decode); + $.runtime_optimizations = reader.table(position, 20, $root.onnxruntime.fbs.RuntimeOptimizations.decode); + return $; + } +}; + 
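+// Decoding convention used by the table classes in this file (explanatory
+// note; the 'Example' schema below is hypothetical, not part of ort.fbs):
+// each static decode(reader, position) receives a table offset, the second
+// argument of every reader call (4, 6, 8, ...) is the flatbuffers vtable slot
+// of the field in declaration order, and the trailing argument is the default
+// returned when the field is absent. A table declared as
+//
+//     table Example { id:uint32; label:string; }
+//
+// would therefore decode as
+//
+//     $.id = reader.uint32_(position, 4, 0);
+//     $.label = reader.string_(position, 6, null);
+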
+$root.onnxruntime.fbs.StringStringEntry = class StringStringEntry { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.StringStringEntry(); + $.key = reader.string_(position, 4, null); + $.value = reader.string_(position, 6, null); + return $; + } +}; + +$root.onnxruntime.fbs.Model = class Model { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.Model(); + $.ir_version = reader.int64_(position, 4, 0); + $.opset_import = reader.tableArray(position, 6, $root.onnxruntime.fbs.OperatorSetId.decode); + $.producer_name = reader.string_(position, 8, null); + $.producer_version = reader.string_(position, 10, null); + $.domain = reader.string_(position, 12, null); + $.model_version = reader.int64_(position, 14, 0); + $.doc_string = reader.string_(position, 16, null); + $.graph = reader.table(position, 18, $root.onnxruntime.fbs.Graph.decode); + $.graph_doc_string = reader.string_(position, 20, null); + $.metadata_props = reader.tableArray(position, 22, $root.onnxruntime.fbs.StringStringEntry.decode); + return $; + } +}; + +$root.onnxruntime.fbs.DeprecatedKernelCreateInfos = class DeprecatedKernelCreateInfos { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.DeprecatedKernelCreateInfos(); + $.node_indices = reader.typedArray(position, 4, Uint32Array); + $.kernel_def_hashes = reader.uint64s_(position, 6); + return $; + } +}; + +$root.onnxruntime.fbs.DeprecatedSubGraphSessionState = class DeprecatedSubGraphSessionState { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.DeprecatedSubGraphSessionState(); + $.graph_id = reader.string_(position, 4, null); + $.session_state = reader.table(position, 6, $root.onnxruntime.fbs.DeprecatedSessionState.decode); + return $; + } +}; + +$root.onnxruntime.fbs.DeprecatedSessionState = class DeprecatedSessionState { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.DeprecatedSessionState(); + $.kernels = reader.table(position, 4, $root.onnxruntime.fbs.DeprecatedKernelCreateInfos.decode); + $.sub_graph_session_states = reader.tableArray(position, 6, $root.onnxruntime.fbs.DeprecatedSubGraphSessionState.decode); + return $; + } +}; + +$root.onnxruntime.fbs.ArgType = { + INPUT: 0, + OUTPUT: 1 +}; + +$root.onnxruntime.fbs.ArgTypeAndIndex = class ArgTypeAndIndex { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.ArgTypeAndIndex(); + $.arg_type = reader.int8_(position, 4, 0); + $.index = reader.uint32_(position, 6, 0); + return $; + } +}; + +$root.onnxruntime.fbs.KernelTypeStrArgsEntry = class KernelTypeStrArgsEntry { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.KernelTypeStrArgsEntry(); + $.kernel_type_str = reader.string_(position, 4, null); + $.args = reader.tableArray(position, 6, $root.onnxruntime.fbs.ArgTypeAndIndex.decode); + return $; + } +}; + +$root.onnxruntime.fbs.OpIdKernelTypeStrArgsEntry = class OpIdKernelTypeStrArgsEntry { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.OpIdKernelTypeStrArgsEntry(); + $.op_id = reader.string_(position, 4, null); + $.kernel_type_str_args = reader.tableArray(position, 6, $root.onnxruntime.fbs.KernelTypeStrArgsEntry.decode); + return $; + } +}; + +$root.onnxruntime.fbs.KernelTypeStrResolver = class KernelTypeStrResolver { + + static decode(reader, position) { + const $ = new $root.onnxruntime.fbs.KernelTypeStrResolver(); + $.op_kernel_type_str_args = reader.tableArray(position, 4, 
$root.onnxruntime.fbs.OpIdKernelTypeStrArgsEntry.decode);
+        return $;
+    }
+};
+
+$root.onnxruntime.fbs.InferenceSession = class InferenceSession {
+
+    static identifier(reader) {
+        return reader.identifier === 'ORTM';
+    }
+
+    static create(reader) {
+        return $root.onnxruntime.fbs.InferenceSession.decode(reader, reader.root);
+    }
+
+    static decode(reader, position) {
+        const $ = new $root.onnxruntime.fbs.InferenceSession();
+        $.ort_version = reader.string_(position, 4, null);
+        $.model = reader.table(position, 6, $root.onnxruntime.fbs.Model.decode);
+        $.session_state = reader.table(position, 8, $root.onnxruntime.fbs.DeprecatedSessionState.decode);
+        $.kernel_type_str_resolver = reader.table(position, 10, $root.onnxruntime.fbs.KernelTypeStrResolver.decode);
+        return $;
+    }
+};
diff --git a/onnx.js b/onnx.js
new file mode 100644
index 00000000000..3db9f558312
--- /dev/null
+++ b/onnx.js
@@ -0,0 +1,2832 @@
+
+import * as protobuf from './protobuf.js';
+import * as flatbuffers from './flatbuffers.js';
+import * as text from './text.js';
+
+const onnx = {};
+
+onnx.ModelFactory = class {
+
+    match(context) {
+        const identifier = context.identifier;
+        const extensions = [
+            'saved_model.pb', 'predict_net.pb', 'init_net.pb',
+            'predict_net.pbtxt', 'init_net.pbtxt', 'predict_net.prototxt', 'init_net.prototxt'
+        ];
+        if (extensions.some((extension) => identifier.endsWith(extension))) {
+            return undefined;
+        }
+        const entries = [
+            onnx.OrtReader,
+            onnx.ProtoReader,
+            onnx.TextReader,
+            onnx.JsonReader,
+            onnx.PickleReader
+        ];
+        for (const entry of entries) {
+            const reader = entry.open(context);
+            if (reader) {
+                return reader;
+            }
+        }
+        return undefined;
+    }
+
+    async open(context, target) {
+        await target.read();
+        const model = target.model;
+        const format = target.format;
+        const metadata = await onnx.Metadata.open(context);
+        const locations = new Set();
+        const location = (tensor) => {
+            if ((onnx.proto && tensor instanceof onnx.proto.SparseTensorProto) ||
+                (onnx.schema && tensor instanceof onnx.schema.SparseTensor)) {
+                // a sparse tensor stores external data on both its values and indices tensors
+                location(tensor.values);
+                location(tensor.indices);
+            } else if (tensor.data_location === onnx.DataLocation.EXTERNAL && Array.isArray(tensor.external_data)) {
+                for (const entry of tensor.external_data) {
+                    if (entry.key === 'location') {
+                        locations.add(entry.value);
+                    }
+                }
+            }
+        };
+        const queue = [ model.graph ];
+        while (queue.length > 0) {
+            const graph = queue.shift();
+            if (Array.isArray(graph.initializer)) {
+                for (const initializer of graph.initializer) {
+                    location(initializer);
+                }
+            }
+            if (Array.isArray(graph.sparse_initializer)) {
+                for (const sparse_initializer of graph.sparse_initializer) {
+                    location(sparse_initializer);
+                }
+            }
+            if (Array.isArray(graph.node)) {
+                for (const node of graph.node) {
+                    if (Array.isArray(node.attribute)) {
+                        for (const attribute of node.attribute) {
+                            if (attribute.g) {
+                                queue.push(attribute.g);
+                            } else if (attribute.t) {
+                                location(attribute.t);
+                            } else if (attribute.sparse_tensor) {
+                                location(attribute.sparse_tensor);
+                            } else if (Array.isArray(attribute.graphs) && attribute.graphs.length > 0) {
+                                for (const graph of attribute.graphs) {
+                                    queue.push(graph);
+                                }
+                            } else if (Array.isArray(attribute.tensors) && attribute.tensors.length > 0) {
+                                for (const tensor of attribute.tensors) {
+                                    location(tensor);
+                                }
+                            } else if (Array.isArray(attribute.sparse_tensors) && attribute.sparse_tensors.length > 0) {
+                                for (const tensor of attribute.sparse_tensors) {
+                                    location(tensor);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        const weights = new
Map(); + const keys = Array.from(locations); + const promises = keys.map((location) => context.fetch(location)); + const streams = await Promise.all(promises.map((promise) => promise.then((context) => context.stream).catch(() => null))); + for (let i = 0; i < keys.length; i++) { + if (streams[i] !== null) { + weights.set(keys[i], streams[i]); + } + } + return new onnx.Model(metadata, format, model, model.graph, weights); + } +}; + +onnx.Model = class { + + constructor(metadata, format, model, graph, locations) { + this._graphs = []; + this._format = format; + this._producer = model.producer_name && model.producer_name.length > 0 ? model.producer_name + (model.producer_version && model.producer_version.length > 0 ? ` ${model.producer_version}` : '') : null; + this._domain = model.domain; + const model_version = model.model_version === undefined || typeof model.model_version === 'number' ? model.model_version : model.model_version.toNumber(); + this._version = model_version ? model_version.toString() : ''; + this._description = model.doc_string; + this._metadata = new Map(); + this._imports = null; + const imports = new Map(); + if (model.opset_import && model.opset_import.length > 0) { + for (const opset_import of model.opset_import) { + const domain = opset_import.domain || 'ai.onnx'; + const version = opset_import.version ? typeof opset_import.version === 'number' ? opset_import.version: opset_import.version.toNumber() : 0; + if (!imports.has(domain) || imports.get(domain) > version) { + imports.set(domain, version); + } + } + this._imports = Array.from(imports).map(([name, version]) => `${name} v${version}`); + } + if (imports.size == 0) { + imports.set('ai.onnx', 1); + imports.set('ai.onnx.ml', 1); + } + let imageFormat = ''; + const metadata_props = model.metadata_props; + if (metadata_props) { + const metadata = new Map(metadata_props.map((entry) => [ entry.key, entry.value ])); + const converted_from = metadata.get('converted_from'); + if (converted_from) { + this._metadata.set('source', converted_from); + } + const author = metadata.get('author'); + if (author) { + this._metadata.set('author', author); + } + const company = metadata.get('company'); + if (company) { + this._metadata.set('company', company); + } + let license = metadata.get('license'); + const license_url = metadata.get('license_url'); + if (license_url) { + license = `${license ? 
license : license_url}`; + } + if (license) { + this._metadata.set('license', license); + } + metadata.delete('author'); + metadata.delete('company'); + metadata.delete('converted_from'); + metadata.delete('license'); + metadata.delete('license_url'); + const imageMetadata = {}; + for (const [ name, value ] of metadata) { + switch (name) { + case 'Image.BitmapPixelFormat': + case 'Image.ColorSpaceGamma': + case 'Image.NominalPixelRange': + imageMetadata[name] = value; + break; + default: + this._metadata.set(name, value); + break; + } + } + imageFormat = [ imageMetadata['Image.BitmapPixelFormat'], imageMetadata['Image.ColorSpaceGamma'], imageMetadata['Image.NominalPixelRange'] ].filter((item) => item); + } + metadata = new onnx.ModelMetadata(metadata, imports); + const context = new onnx.ModelContext(metadata, locations, imageFormat); + for (const func of model.functions || []) { + context.metadata.add(new onnx.Function(context, func)); + } + this._graphs = [ new onnx.Graph(context, graph) ]; + } + + get format() { + return this._format; + } + + get version() { + return this._version; + } + + get imports() { + return this._imports; + } + + get producer() { + return this._producer; + } + + get domain() { + return this._domain || null; + } + + get description() { + return this._description || null; + } + + get metadata() { + return this._metadata; + } + + get graphs() { + return this._graphs; + } +}; + +onnx.Graph = class { + + constructor(context, graph) { + this._node = ''; + this._description = ''; + this._nodes = []; + this._inputs = []; + this._outputs = []; + this._name = graph.name || null; + this._description = graph.doc_string || ''; + context = new onnx.GraphContext(context, graph); + if (Array.isArray(graph.quantization_annotation)) { + for (const tensor_annotation of graph.quantization_annotation) { + const tensor = context.tensor(tensor_annotation.tensor_name); + tensor.annotation = new Map(); + for (const entry of tensor_annotation.quant_parameter_tensor_names) { + tensor.annotation.set(entry.key, entry.value); + } + } + } + if (Array.isArray(graph.value_info)) { + for (const valueInfo of graph.value_info) { + const tensor = context.tensor(valueInfo.name); + tensor.type = context.createType(valueInfo.type); + tensor.description = valueInfo.doc_string; + } + } + graph.input = graph.input.map((valueInfo) => { + const tensor = context.tensor(valueInfo.name); + tensor.type = context.createType(valueInfo.type); + tensor.description = valueInfo.doc_string; + return tensor; + }); + graph.output = graph.output.map((valueInfo) => { + const tensor = context.tensor(valueInfo.name); + tensor.type = context.createType(valueInfo.type); + tensor.description = valueInfo.doc_string; + return tensor; + }); + new onnx.Inference(graph.node, graph.output); + context.push(graph.node, graph.input, graph.output); + this._nodes = context.pop(); + for (const input of graph.input) { + const value = context.value(input.name); + if (!value.initializer) { + this._inputs.push(new onnx.Argument(input.name, [ value ])); + } + } + for (const output of graph.output) { + const value = context.value(output.name); + if (!value.initializer) { + this._outputs.push(new onnx.Argument(output.name, [ value ])); + } + } + } + + get name() { + return this._name; + } + + get description() { + return this._description; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + toString() { + return `graph(${this.name})`; + } +}; + 
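+// Illustrative traversal of the object model above (a sketch; 'model' is a
+// hypothetical onnx.Model instance, not a variable defined in this file):
+// a Graph exposes Arguments, each Argument wraps one or more Values, and a
+// Value may carry an initializer Tensor.
+//
+//     const [graph] = model.graphs;
+//     for (const argument of graph.inputs) {
+//         for (const value of argument.value) {
+//             console.log(`${argument.name}: ${value.type}`); // e.g. 'data: float32[1,3,224,224]'
+//         }
+//     }
+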
+onnx.Argument = class { + + constructor(name, value) { + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +onnx.Value = class { + + constructor(name, type, initializer, annotation, description) { + if (typeof name !== 'string') { + throw new onnx.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + this._description = description || ''; + this._quantization = annotation ? { type: 'annotation', value: annotation } : null; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get description() { + return this._description; + } + + get quantization() { + return this._quantization; + } + + get initializer() { + return this._initializer; + } +}; + +onnx.Node = class { + + constructor(context, op_type, domain, name, description, attributes, inputs, outputs) { + attributes = attributes || []; + domain = domain || 'ai.onnx'; + this._type = context.metadata.type(op_type, domain) || { name: op_type, module: domain }; + if (this.type.module !== domain && !(this._type instanceof onnx.Function)) { + this._type = Object.assign({}, this.type); + this._type.name = op_type; + this._type.module = domain; + } + this._name = name || ''; + this._description = description || ''; + this._inputs = inputs; + this._outputs = outputs; + this._attributes = attributes.map((attribute) => new onnx.Attribute(context, op_type, domain, attribute)); + this._chain = []; + const identifier = domain ? `${domain}.${op_type}` : op_type; + if (identifier === 'com.microsoft.FusedConv') { + const activation = attributes.find((attribute) => attribute.name === 'activation'); + if (activation) { + const type = context.decodeText(activation.s); + this._chain.push(new onnx.Node(context, type, '', '', '', [], [], [])); + } + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get description() { + return this._description; + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get chain() { + return this._chain; + } +}; + +onnx.Attribute = class { + + constructor(context, op_type, domain, attribute) { + this._name = attribute.name; + this._description = attribute.doc_string || ''; + this._type = null; + this._value = null; + if (attribute.ref_attr_name) { + this._value = attribute.ref_attr_name; + this._type = 'reference'; + return; + } + switch (attribute.type) { + case onnx.AttributeType.UNDEFINED: + break; + case onnx.AttributeType.FLOAT: + this._value = attribute.f; + this._type = 'float32'; + break; + case onnx.AttributeType.INT: + this._value = attribute.i; + this._type = 'int64'; + break; + case onnx.AttributeType.STRING: + switch (op_type) { + case 'Int8GivenTensorFill': + this._value = Array.from(attribute.s); + break; + default: + this._value = context.decodeText(attribute.s); + break; + } + this._type = 'string'; + break; + case onnx.AttributeType.TENSOR: + this._value = new onnx.Tensor(context, attribute.t); + this._type = 'tensor'; + break; + case onnx.AttributeType.GRAPH: + this._value = context.graph(attribute.g); + this._type = 'graph'; + break; + case onnx.AttributeType.FLOATS: + this._value = ArrayBuffer.isView(attribute.floats) ? 
Array.from(attribute.floats) : attribute.floats; + this._type = 'float32[]'; + break; + case onnx.AttributeType.INTS: + this._value = ArrayBuffer.isView(attribute.ints) ? Array.from(attribute.ints) : attribute.ints; + this._type = 'int64[]'; + break; + case onnx.AttributeType.STRINGS: + this._value = attribute.strings.map((s) => context.decodeText(s)); + this._type = 'string[]'; + break; + case onnx.AttributeType.TENSORS: + this._value = attribute.tensors.map((tensor) => new onnx.Tensor(context, tensor)); + this._type = 'tensor[]'; + break; + case onnx.AttributeType.GRAPHS: + this._value = attribute.graphs.map((graph) => context.graph(graph)); + this._type = 'graph[]'; + break; + case onnx.AttributeType.SPARSE_TENSOR: + this._value = new onnx.Tensor(context, attribute.sparse_tensor); + this._type = 'tensor'; + break; + case onnx.AttributeType.SPARSE_TENSORS: + this._value = attribute.sparse_tensors.map((tensor) => new onnx.Tensor(context, tensor)); + this._type = 'tensor[]'; + break; + case onnx.AttributeType.TYPE_PROTO: + this._value = context.createType(attribute.tp); + this._type = 'type'; + break; + case onnx.AttributeType.TYPE_PROTOS: + this._value = attribute.type_protos.map((type) => context.createType(type)); + this._type = 'type[]'; + break; + default: + throw new onnx.Error(`Unsupported attribute type '${attribute.type}'.`); + } + const metadata = context.metadata.attribute(op_type, domain, attribute.name); + if (metadata) { + if (Object.prototype.hasOwnProperty.call(metadata, 'default') && this._value == metadata.default) { + this._visible = false; + } + if (metadata.type === 'DataType') { + this._type = metadata.type; + this._value = context.createDataType(this._value); + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get description() { + return this._description; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +onnx.Group = class { + + constructor(name, groups) { + this._type = { name: 'Scope' }; + this._name = name; + this._nodes = []; + for (const [key, value] of groups) { + if (key === '') { + for (const node of value) { + this._nodes.push(node); + } + } else { + this._nodes.push(new onnx.Group(name === '' ? 
key : `${name}/${key}`, value)); + } + } + const set = new Set(); + const inputs = []; + const outputs = []; + for (const node of this._nodes) { + if (node instanceof onnx.Group) { + node.freeze(); + } + for (const parameter of node.outputs) { + for (const value of parameter.value) { + if (!value.initializer) { + outputs.push(value); + set.add(value.name); + } + } + } + } + for (const node of this._nodes) { + for (const parameter of node.inputs) { + for (const value of parameter.value) { + if (!set.has(value.name) && !value.initializer) { + inputs.push(value); + } + } + } + } + this._inputs = [ new onnx.Argument('inputs', inputs) ]; + this._outputs = [ new onnx.Argument('outputs', outputs) ]; + this._attributes = []; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } + + get nodes() { + return this._nodes; + } +}; + +onnx.Tensor = class { + + constructor(context, tensor, category) { + this._category = category || null; + if (tensor.indices && tensor.values) { + this._name = tensor.values.name || ''; + this._type = context.createTensorType(tensor.values.data_type, tensor.dims.map((dim) => dim), 'sparse'); + this._location = context.createLocation(tensor.values.data_location); + this._values = new onnx.Tensor(context, tensor.values); + this._indices = new onnx.Tensor(context, tensor.indices); + } else { + this._name = tensor.name || ''; + this._type = context.createTensorType(tensor.data_type, tensor.dims.map((dim) => dim)); + this._location = context.createLocation(tensor.data_location); + switch (tensor.data_location) { + case onnx.DataLocation.DEFAULT: { + switch (tensor.data_type) { + case onnx.DataType.UNDEFINED: { + break; + } + case onnx.DataType.FLOAT: + this._data = new Float32Array(tensor.float_data); + this._encoding = '|'; + break; + case onnx.DataType.DOUBLE: + this._data = new Float64Array(tensor.double_data); + this._encoding = '|'; + break; + case onnx.DataType.BOOL: + if (tensor.int32_data && tensor.int32_data.length > 0) { + const array = tensor.int32_data; + this._data = new Array(array.length); + for (let i = 0; i < this._data.length; i++) { + this._data[i] = array[i] === 0 ? 
false : true; + } + this._encoding = '|'; + } + break; + case onnx.DataType.INT8: + this._data = new Int8Array(tensor.int32_data); + this._encoding = '|'; + break; + case onnx.DataType.UINT8: + this._data = new Uint8Array(tensor.int32_data); + this._encoding = '|'; + break; + case onnx.DataType.INT16: + this._data = new Int32Array(tensor.int32_data); + this._encoding = '|'; + break; + case onnx.DataType.UINT16: + this._data = new Int32Array(tensor.int32_data); + this._encoding = '|'; + break; + case onnx.DataType.INT32: + this._data = new Int32Array(tensor.int32_data); + this._encoding = '|'; + break; + case onnx.DataType.UINT32: + case onnx.DataType.UINT64: + this._data = tensor.uint64_data; + this._encoding = '|'; + break; + case onnx.DataType.INT64: + this._data = tensor.int64_data; + this._encoding = '|'; + break; + case onnx.DataType.STRING: + this._data = tensor.string_data; + this._encoding = '|'; + break; + case onnx.DataType.COMPLEX64: + case onnx.DataType.COMPLEX128: + break; + case onnx.DataType.FLOAT16: + case onnx.DataType.BFLOAT16: + if (tensor.int32_data && tensor.int32_data.length > 0) { + const array = tensor.int32_data; + const buffer = new Uint8Array(array.length << 1); + const view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + for (let i = 0; i < array.length; i++) { + view.setUint16(i << 1, array[i], true); + } + this._data = buffer; + this._encoding = '<'; + } + break; + case onnx.DataType.FLOAT8E4M3FN: + case onnx.DataType.FLOAT8E4M3FNUZ: + case onnx.DataType.FLOAT8E5M2: + case onnx.DataType.FLOAT8E5M2FNUZ: + if (tensor.int32_data && tensor.int32_data.length > 0) { + this._data = new Uint8Array(Array.from(tensor.int32_data)); + this._encoding = '<'; + } + break; + case onnx.DataType.UINT4: + case onnx.DataType.INT4: + if (tensor.int32_data && tensor.int32_data.length > 0) { + this._data = new Uint8Array(Array.from(tensor.int32_data)); + this._encoding = '<'; + } + break; + default: + throw new onnx.Error(`Unsupported tensor data type '${tensor.data_type}'.`); + } + if (this._data && (Array.isArray(this._data) || ArrayBuffer.isView(this._data)) && this._data.length === 0) { + this._data = undefined; + } + if (!this._data && tensor.raw_data && tensor.raw_data.length > 0) { + this._data = tensor.raw_data; + this._encoding = '<'; + } + break; + } + case onnx.DataLocation.EXTERNAL: { + if (Array.isArray(tensor.external_data)) { + const external_data = {}; + for (const entry of tensor.external_data) { + external_data[entry.key] = entry.value; + } + if (external_data.location && external_data.offset && external_data.length) { + const offset = parseInt(external_data.offset, 10); + const length = parseInt(external_data.length, 10); + if (Number.isInteger(offset) && Number.isInteger(length)) { + this._data = context.location(external_data.location, offset, length); + this._encoding = '<'; + } + } + } + break; + } + default: { + break; + } + } + } + } + + get name() { + return this._name; + } + + get category() { + return this._category; + } + + get encoding() { + return this._encoding; + } + + get type() { + return this._type; + } + + get indices() { + return this._indices; + } + + get values() { + switch (this.type.layout) { + case 'sparse': { + return this._values; + } + default: { + if (!this._data || this._data instanceof Uint8Array) { + return this._data; + } + if (Array.isArray(this._data) || ArrayBuffer.isView(this._data)) { + return this._data; + } + return this._data.peek(); + } + } + } +}; + +onnx.TensorType = class { + + 
constructor(dataType, shape, layout, denotation) {
+        this._dataType = dataType;
+        this._shape = shape;
+        this._layout = layout || null;
+        this._denotation = denotation || null;
+    }
+
+    get dataType() {
+        return this._dataType;
+    }
+
+    get shape() {
+        return this._shape;
+    }
+
+    get layout() {
+        return this._layout;
+    }
+
+    get denotation() {
+        return this._denotation;
+    }
+
+    toString() {
+        return this.dataType + this._shape.toString();
+    }
+};
+
+onnx.TensorShape = class {
+
+    constructor(dimensions) {
+        this._dimensions = dimensions;
+    }
+
+    get dimensions() {
+        return this._dimensions;
+    }
+
+    toString() {
+        if (!this._dimensions || this._dimensions.length == 0) {
+            return '';
+        }
+        return `[${this._dimensions.map((dim) => dim ? dim.toString() : '?').join(',')}]`;
+    }
+};
+
+onnx.SequenceType = class {
+
+    constructor(elementType, denotation) {
+        this._elementType = elementType;
+        this._denotation = denotation;
+    }
+
+    get elementType() {
+        return this._elementType;
+    }
+
+    get denotation() {
+        return this._denotation;
+    }
+
+    toString() {
+        const elementType = this._elementType ? this._elementType.toString() : '';
+        return `sequence<${elementType}>`;
+    }
+};
+
+onnx.MapType = class {
+
+    constructor(keyType, valueType, denotation) {
+        this._keyType = keyType;
+        this._valueType = valueType;
+        this._denotation = denotation;
+    }
+
+    get keyType() {
+        return this._keyType;
+    }
+
+    get valueType() {
+        return this._valueType;
+    }
+
+    get denotation() {
+        return this._denotation;
+    }
+
+    toString() {
+        return `map<${this._keyType},${this._valueType}>`;
+    }
+};
+
+onnx.OpaqueType = class {
+
+    constructor(domain, name) {
+        this._domain = domain;
+        this._name = name;
+    }
+
+    toString() {
+        const name = (this._domain ? (`${this._domain}.`) : '') + this._name;
+        return `opaque<${name}>`;
+    }
+};
+
+onnx.OptionalType = class {
+
+    constructor(type) {
+        this._type = type;
+    }
+
+    get type() {
+        return this._type;
+    }
+
+    toString() {
+        return `optional<${this._type}>`;
+    }
+};
+
+onnx.Function = class {
+
+    constructor(context, func) {
+        this._name = func.name;
+        this._domain = func.domain;
+        this._description = func.doc_string;
+        this._inputs = [];
+        this._outputs = [];
+        this._attributes = func.attribute.map((attribute) => {
+            return { name: attribute };
+        });
+        context = new onnx.GraphContext(context, func);
+        func.input = func.input.map((input) => context.tensor(input));
+        func.output = func.output.map((output) => context.tensor(output));
+        context.push(func.node, func.input, func.output);
+        this._nodes = context.pop();
+        for (const input of func.input) {
+            const value = context.value(input.name);
+            if (!value.initializer) {
+                this._inputs.push(new onnx.Argument(input.name, [ value ]));
+            }
+        }
+        for (const output of func.output) {
+            const value = context.value(output.name);
+            if (!value.initializer) {
+                this._outputs.push(new onnx.Argument(output.name, [ value ]));
+            }
+        }
+    }
+
+    get type() {
+        return 'function';
+    }
+
+    get name() {
+        return this._name;
+    }
+
+    get module() {
+        return this._domain;
+    }
+
+    get description() {
+        return this._description;
+    }
+
+    get inputs() {
+        return this._inputs;
+    }
+
+    get outputs() {
+        return this._outputs;
+    }
+
+    get attributes() {
+        return this._attributes;
+    }
+
+    get nodes() {
+        return this._nodes;
+    }
+};
+
+onnx.ModelMetadata = class {
+
+    constructor(metadata, imports) {
+        this._metadata = metadata;
+        this._imports = imports;
+        this._cache = new Map();
+        this._attributes = new Map();
+        this._functions = new Map();
+    }
+
+    add(func) {
+        if
(!this._functions.has(func.module)) { + this._functions.set(func.module, new Map()); + } + const map = this._functions.get(func.module); + if (map.has(func.name)) { + throw new onnx.Error(`Duplicate function identifier '${func.module}.${func.name}'.`); + } + map.set(func.name, func); + } + + type(name, domain) { + const key = `${domain}:${name}`; + if (!this._cache.has(key)) { + let value = this._metadata.type(name, domain, this._imports); + if (!value) { + if (this._functions.has(domain)) { + const map = this._functions.get(domain); + if (map.has(name)) { + value = map.get(name); + } + } + } + this._cache.set(key, value); + } + return this._cache.get(key); + } + + attribute(type, domain, name) { + const key = `${domain}:${type}:${name}`; + if (!this._attributes.has(key)) { + this._attributes.set(key, null); + const metadata = this.type(type, domain); + if (metadata && Array.isArray(metadata.attributes) && metadata.attributes.length > 0) { + for (const attribute of metadata.attributes) { + const key = `${domain}:${type}:${attribute.name}`; + this._attributes.set(key, attribute); + } + } + } + return this._attributes.get(key); + } +}; + +onnx.Metadata = class { + + static async open(context) { + if (onnx.Metadata._metadata) { + return onnx.Metadata._metadata; + } + try { + const data = await context.request('onnx-metadata.json'); + onnx.Metadata._metadata = new onnx.Metadata(data); + return onnx.Metadata._metadata; + } catch (error) { + onnx.Metadata._metadata = new onnx.Metadata(null); + return onnx.Metadata._metadata; + } + } + + constructor(data) { + this._types = new Map(); + if (data) { + const types = JSON.parse(data); + for (const type of types) { + if (!this._types.has(type.module)) { + this._types.set(type.module, new Map()); + } + const types = this._types.get(type.module); + if (!types.has(type.name)) { + types.set(type.name, []); + } + types.get(type.name).push(type); + } + } + } + + type(name, domain, imports) { + domain = domain || 'ai.onnx'; + let current = null; + if (this._types.has(domain)) { + const types = this._types.get(domain); + if (types.has(name)) { + for (const type of types.get(name)) { + const matchVersion = current ? 
current.version : -1; + const importVersion = imports.get(type.module) || 0; + if (importVersion >= type.version && matchVersion < type.version) { + current = type; + } + } + } + } + return current; + } +}; + +onnx.Inference = class { + + constructor(nodes, outputs) { + this._outputs = new Map(); + + for (const node of nodes) { + for (const output of node.output) { + this._outputs.set(output.name, node); + } + } + + for (const output of outputs) { + this._infer(output.name); + } + } + + _infer(output) { + if (this._outputs.has(output)) { + let hasInputShapes = true; + const node = this._outputs.get(output); + for (const input of node.input) { + if (!input.type) { + this._infer(input); + if (!input.type) { + hasInputShapes = false; + break; + } + } + } + if (hasInputShapes) { + // continue + } + } + } +}; + +onnx.DataLocation = { + DEFAULT: 0, + EXTERNAL: 1 +}; + +onnx.DataType = { + UNDEFINED: 0, + FLOAT: 1, + UINT8: 2, + INT8: 3, + UINT16: 4, + INT16: 5, + INT32: 6, + INT64: 7, + STRING: 8, + BOOL: 9, + FLOAT16: 10, + DOUBLE: 11, + UINT32: 12, + UINT64: 13, + COMPLEX64: 14, + COMPLEX128: 15, + BFLOAT16: 16, + FLOAT8E4M3FN: 17, + FLOAT8E4M3FNUZ: 18, + FLOAT8E5M2: 19, + FLOAT8E5M2FNUZ: 20, + UINT4: 21, + INT4: 22 +}; + +onnx.AttributeType = { + UNDEFINED: 0, + FLOAT: 1, + INT: 2, + STRING: 3, + TENSOR: 4, + GRAPH: 5, + FLOATS: 6, + INTS: 7, + STRINGS: 8, + TENSORS: 9, + GRAPHS: 10, + SPARSE_TENSOR: 11, + SPARSE_TENSORS: 12, + TYPE_PROTO: 13, + TYPE_PROTOS: 14 +}; + +onnx.ModelContext = class { + + constructor(metadata, locations, imageFormat) { + this._metadata = metadata; + this._locations = locations; + this._imageFormat = imageFormat; + } + + get metadata() { + return this._metadata; + } + + get imageFormat() { + return this._imageFormat; + } + + location(name, offset, length) { + if (this._locations.has(name)) { + const stream = this._locations.get(name); + if (offset >= 0 && (offset + length) <= stream.length) { + try { + const position = stream.position; + stream.seek(offset); + const value = stream.stream(length); + stream.seek(position); + return value; + } catch (error) { + // continue regardless of error + } + } + } + return null; + } + + initializer(/* name */) { + return null; + } +}; + +onnx.GraphContext = class { + + constructor(context, graph) { + this._context = context; + this._dataTypes = new Map(Object.entries(onnx.DataType).map(([name, value]) => [ value, name.toLowerCase() ])); + this._dataTypes.set(onnx.DataType.UNDEFINED, 'undefined'); + this._dataTypes.set(onnx.DataType.BOOL, 'boolean'); + this._dataTypes.set(onnx.DataType.FLOAT, 'float32'); + this._dataTypes.set(onnx.DataType.DOUBLE, 'float64'); + this._graphs = new Map(); + this._initializers = new Map(); + this._tensors = new Map(); + this._values = new Map(); + this._groups = new Map(); + this._nodes = []; + if (Array.isArray(graph.initializer)) { + for (const initializer of graph.initializer) { + const tensor = new onnx.Tensor(this, initializer, 'Initializer'); + this._initializers.set(initializer.name, tensor); + } + } + if (Array.isArray(graph.sparse_initializer)) { + for (const sparse_initializer of graph.sparse_initializer) { + const tensor = new onnx.Tensor(this, sparse_initializer, 'Initializer'); + this._initializers.set(sparse_initializer.values.name, tensor); + } + } + for (const node of graph.node) { + node.input = node.input.map((name) => this.tensor(name)); + node.output = node.output.map((name) => this.tensor(name)); + node.param = {}; + if (Array.isArray(node.attribute)) { + for (const attribute of 
node.attribute) { + if (attribute.type) { + continue; + } + if (Array.isArray(attribute.ints) && attribute.ints.length > 0) { + attribute.type = onnx.AttributeType.INTS; + } else if (Array.isArray(attribute.floats) && attribute.floats.length > 0) { + attribute.type = onnx.AttributeType.FLOATS; + } else if (Array.isArray(attribute.strings) && attribute.strings.length > 0) { + attribute.type = onnx.AttributeType.STRINGS; + } else if (Array.isArray(attribute.graphs) && attribute.graphs.length > 0) { + attribute.type = onnx.AttributeType.GRAPHS; + } else if (Array.isArray(attribute.s) && attribute.s.length > 0) { + attribute.type = onnx.AttributeType.STRING; + } else if (attribute.f !== undefined) { + attribute.type = onnx.AttributeType.FLOAT; + } else if (attribute.i !== undefined) { + attribute.type = onnx.AttributeType.INT; + } else if (attribute.t !== undefined) { + attribute.type = onnx.AttributeType.TENSOR; + } else if (attribute.g !== undefined) { + attribute.type = onnx.AttributeType.GRAPH; + } else if (attribute.sparse_tensor !== undefined) { + attribute.type = onnx.AttributeType.SPARSE_TENSOR; + } else { + attribute.type = onnx.AttributeType.UNDEFINED; + } + } + } + } + } + + get metadata() { + return this._context.metadata; + } + + graph(value) { + if (!this._graphs.has(value)) { + this._graphs.set(value, new onnx.Graph(this, value)); + } + return this._graphs.get(value); + } + + initializer(name) { + if (this._initializers.has(name)) { + return this._initializers.get(name); + } + return this._context.initializer(name); + } + + tensor(name) { + if (!this._tensors.has(name)) { + this._tensors.set(name, { name: name, initializer: this.initializer(name) }); + } + return this._tensors.get(name); + } + + location(name, offset, length) { + return this._context.location(name, offset, length); + } + + group(name) { + if (!this._groups.has(name)) { + const path = name.split('/'); + if (path.length > 1) { + path.pop(); + return this.group(path.join('/')); + } + this._groups.set(name, new Map([ [ '', [] ]])); + } + return this._groups.get(name); + } + + value(name) { + if (!this._values.has(name)) { + const tensor = this.tensor(name); + const type = tensor.initializer ? tensor.initializer.type : tensor.type || null; + this._values.set(name, new onnx.Value(name, type, tensor.initializer, tensor.annotation, tensor.description)); + } + return this._values.get(name); + } + + createType(type) { + if (!type) { + return null; + } + let denotation = ''; + switch (type.denotation) { + case undefined: + case null: + case '': + break; + case 'TENSOR': + denotation = 'Tensor'; + break; + case 'IMAGE': + denotation = `Image${this._context.imageFormat ? `(${this._context.imageFormat.join(',')})` : ''}`; + break; + case 'AUDIO': + denotation = 'Audio'; + break; + case 'TEXT': + denotation = 'Text'; + break; + default: + throw new onnx.Error(`Unsupported tensor type denotation '${type.denotation}'.`); + } + if (type.tensor_type) { + const tensor_type = type.tensor_type; + const shape = tensor_type.shape && tensor_type.shape.dim ? tensor_type.shape.dim.map((dim) => dim.dim_param ? dim.dim_param : dim.dim_value ? dim.dim_value : null) : []; + return this.createTensorType(tensor_type.elem_type, shape, null, denotation); + } else if (type.sparse_tensor_type) { + type = type.sparse_tensor_type; + const shape = type.shape && type.shape.dim ? type.shape.dim.map((dim) => dim.dim_param ? dim.dim_param : dim.dim_value ?
dim.dim_value : null) : []; + return this.createTensorType(type.elem_type, shape, 'sparse', denotation); + } else if (type.map_type) { + return this.createMapType(type.map_type.key_type, this.createType(type.map_type.value_type), denotation); + } else if (type.sequence_type) { + return new onnx.SequenceType(this.createType(type.sequence_type.elem_type), denotation); + } else if (type.opaque_type) { + return new onnx.OpaqueType(type.opaque_type.domain, type.opaque_type.name); + } else if (type.optional_type) { + return new onnx.OptionalType(this.createType(type.optional_type.elem_type), denotation); + } else if (Object.keys(type).length === 0) { + return null; + } + throw new onnx.Error(`Unsupported tensor type '${JSON.stringify(type)}'.`); + } + + createTensorType(dataType, shape, layout, denotation) { + dataType = this.createDataType(dataType); + return new onnx.TensorType(dataType, new onnx.TensorShape(shape), layout, denotation); + } + + createMapType(keyType, valueType, denotation) { + keyType = this.createDataType(keyType); + return new onnx.MapType(keyType, valueType, denotation); + } + + createDataType(value) { + if (!Number.isInteger(value)) { + if (value && value.toNumber) { + value = value.toNumber(); + } else if (value && typeof value === 'string' && onnx.DataType[value.toUpperCase()] !== undefined) { + value = onnx.DataType[value.toUpperCase()]; + } else { + throw new onnx.Error(`Unsupported data type '${JSON.stringify(value)}'.`); + } + } + if (this._dataTypes.has(value)) { + return this._dataTypes.get(value); + } + throw new onnx.Error(`Unsupported data type '${JSON.stringify(value)}'.`); + } + + createLocation(value) { + switch (value) { + case onnx.DataLocation.DEFAULT: return 'default'; + case onnx.DataLocation.EXTERNAL: return 'external'; + default: return 'UNDEFINED'; + } + } + + decodeText(value) { + if (typeof value === 'string') { + return value; + } + this._decoder = this._decoder || new TextDecoder('utf-8'); + return this._decoder.decode(value); + } + + push(nodes, inputs, outputs) { + const inputMap = new Map(); + const outputMap = new Map(); + for (const node of nodes) { + // count consumers and producers by value name; forEach is used because every() would stop at the first falsy return value + node.input.forEach((input) => inputMap.set(input.name, (inputMap.get(input.name) || 0) + 1)); + node.output.forEach((output) => outputMap.set(output.name, (outputMap.get(output.name) || 0) + 1)); + } + // Map.delete() returns false for missing keys, so every() would stop iterating early here as well + inputs.forEach((input) => inputMap.delete(input.name)); + outputs.forEach((output) => outputMap.delete(output.name)); + nodes = nodes.filter((node) => { + const constant = node && + node.op_type === 'Constant' && + node.attribute.length === 1 && node.attribute[0] && + node.input.length === 0 && + node.output.length === 1 && node.output[0] && inputMap.get(node.output[0].name) === 1 && outputMap.get(node.output[0].name) === 1; + const attribute = constant ?
node.attribute[0] : null; + if (attribute && attribute.name === 'value' && attribute.type === onnx.AttributeType.TENSOR && attribute.t) { + const tensor = this.tensor(node.output[0].name); + tensor.initializer = new onnx.Tensor(this, attribute.t, 'Constant'); + return false; + } else if (attribute && attribute.name === 'sparse_value' && attribute.type === onnx.AttributeType.SPARSE_TENSOR && attribute.sparse_tensor) { + const tensor = this.tensor(node.output[0].name); + tensor.initializer = new onnx.Tensor(this, attribute.sparse_tensor, 'Constant'); + return false; + } + return true; + }); + for (let node of nodes) { + const domain = node.domain || 'ai.onnx'; + const type = this._context.metadata.type(node.op_type, domain); + const inputs = []; + node.input = node.input || []; + for (let i = 0; i < node.input.length;) { + const input = type && type.inputs && i < type.inputs.length ? type.inputs[i] : { name: i.toString() }; + const count = input.list ? node.input.length - i : 1; + const list = node.input.slice(i, i + count).filter((value) => value.name !== '' || value.initializer); + const values = list.map((input) => this.value(input.name)); + const argument = new onnx.Argument(input.name, values); + inputs.push(argument); + i += count; + } + const outputs = []; + node.output = node.output || []; + for (let i = 0; i < node.output.length;) { + const output = type && type.outputs && i < type.outputs.length ? type.outputs[i] : { name: i.toString() }; + const count = output.list ? node.output.length - i : 1; + const list = node.output.slice(i, i + count).filter((value) => value.name !== '' || value.initializer); + const values = list.map((output) => this.value(output.name)); + const argument = new onnx.Argument(output.name, values); + outputs.push(argument); + i += count; + } + node = new onnx.Node(this, node.op_type, node.domain, node.name, node.doc_string, node.attribute, inputs, outputs); + this._nodes.push(node); + + // const path = (node.name || '').split('/'); + // path.pop(); + // this.group(path.join('/')).get('').push(node); + } + } + + pop() { + /* + const nodes = []; + for (const [name, value] of this._groups) { + if (name === '') { + for (const node of value.get('')) { + nodes.push(node); + } + continue; + } + nodes.push(new onnx.Group(name, value)); + } + return nodes; + */ + return this._nodes; + } +}; + +onnx.ProtoReader = class { + + static open(context) { + const binaryTags = context.tags('pb'); + if (binaryTags.size > 0) { + const tags = binaryTags; + if (tags.size === 1 && tags.get(1) === 2) { + const tags = context.tags('pb+'); + const match = (tags, schema) => { + for (const [key, inner] of schema) { + const value = tags[key]; + if (value === undefined) { + continue; + } + if (inner === false) { + return false; + } + if (Array.isArray(inner)) { + if (typeof value !== 'object' || !match(value, inner)) { + return false; + } + } else if (inner !== value) { + if (inner === 2 && !Array.isArray(value) && Object(value) === (value) && Object.keys(value).length === 0) { + return true; + } + return false; + } + } + return true; + }; + // mediapipe.BoxDetectorIndex + if (match(tags, [[1,[[1,[[1,[[1,5],[2,5],[3,5],[4,5],[6,0],[7,5],[8,5],[10,5],[11,0],[12,0]]],[2,5],[3,[]]]],[2,false],[3,false],[4,false],[5,false]]],[2,false],[3,false]])) { + return undefined; + } + // third_party.tensorflow.python.keras.protobuf.SavedMetadata + if (match(tags, [[1,[[1,[[1,0],[2,0]]],[2,0],[3,2],[4,2],[5,2]]]])) { + return undefined; + } + } + if (Array.from(tags.keys()).every((tag) => tag <= 100) && + 
Array.from(tags.values()).every((type) => type < 5)) { + // TensorProto + if (tags.get(1) === 0 && tags.get(2) === 0) { + const schema = [[1,0],[2,0],[4,2],[5,2],[7,2],[8,2],[9,2]]; + if (schema.every(([key, value]) => !tags.has(key) || tags.get(key) === value)) { + return new onnx.ProtoReader(context, 'binary', 'tensor'); + } + } + // GraphProto + if (tags.get(1) === 2) { + const schema = [[1,2],[2,2],[3,2],[4,2],[5,2],[6,0],[7,0],[8,2],[9,2],[10,2],[11,2],[12,2],[13,2],[14,2]]; + if (schema.every(([key, value]) => !tags.has(key) || tags.get(key) === value)) { + const decode = (buffer, value) => { + const reader = protobuf.BinaryReader.open(buffer); + const length = reader.length; + while (reader.position < length) { + const tag = reader.uint32(); + const number = tag >>> 3; + const type = tag & 7; + if (value === number) { + return type === 2 ? reader.bytes() : null; + } + reader.skipType(type); + } + return null; + }; + const stream = context.stream; + const buffer = stream.peek(); + const nodeBuffer = decode(buffer, 1); + if (nodeBuffer) { + const nameBuffer = decode(nodeBuffer, 4); + if (nameBuffer && nameBuffer.every((c) => c > 0x20 && c < 0x7f)) { + return new onnx.ProtoReader(context, 'binary', 'graph'); + } + } + } + } + // ModelProto + if (tags.get(7) === 2) { + const schema = [[1,0],[2,2],[3,2],[4,2],[5,0],[6,2],[7,2],[8,2],[14,2],[20,2]]; + if (schema.every(([key, value]) => !tags.has(key) || tags.get(key) === value)) { + return new onnx.ProtoReader(context, 'binary', 'model'); + } + } + } + } + const stream = context.stream; + if (stream && stream.length > 5) { + const buffer = stream.peek(Math.min(stream.length, 32)); + if (buffer[0] === 0x08 && buffer[1] < 0x0A && buffer[2] === 0x12) { + const producers = [ + 'backend-test', 'BrainwaveCompiler', + 'CNTK', 'customvision', + 'keras2onnx', 'Kneron', 'kneron_formatter', 'kneron_kl530_test_case', + 'darknet to ONNX example', + 'htshinichi', + 'MATLAB Deep Learning Toolbox Converter for ONNX Model Format', 'ML.NET', 'MVTec Software', + 'onnx-caffe2', 'onnx-example', 'onnx.quantize', 'onnx.utils.extract_model', 'OnnxMLTools', 'onnx_test', 'onnxruntime-tools', 'onnxruntime.transformers', + 'PaddlePaddle', 'pytorch', + 'sclblonnx', 'skl2onnx', + 'Tencent YouTu', 'tf2onnx', 'tflite2onnx', + 'WinMLTools' + ]; + if (producers.some((producer) => Array.from(producer).every((ch, index) => index + 4 < buffer.length && ch.charCodeAt(0) === buffer[index + 4]))) { + return new onnx.ProtoReader(context, 'binary', 'model'); + } + } + } + if (stream && stream.length > 8) { + const buffer = stream.peek(4); + const length = buffer[0] | (buffer[1] << 8) | (buffer[2] << 16) | (buffer[3] << 24); + if (length === stream.length - 4) { + stream.seek(4); + try { + const reader = protobuf.BinaryReader.open(stream); + const tags = reader.signature(); + if (tags.get(7) === 2) { + stream.seek(4); + return new onnx.ProtoReader(context, 'binary', 'model'); + } + } catch (error) { + // continue regardless of error + } + } + } + const textTags = context.tags('pbtxt'); + if (textTags.size > 0) { + const tags = textTags; + if (tags.has('ir_version')) { + return new onnx.ProtoReader(context, 'text', 'model'); + } + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (tags.has('graph') && extension !== 'model') { + return new onnx.ProtoReader(context, 'text', 'model'); + } + } + return undefined; + } + + constructor(context, encoding, type) { + this._context = context; + this._encoding = encoding; + this._type = 
type; + } + + async read() { + await this._context.require('./onnx-proto'); + onnx.proto = protobuf.get('onnx').onnx; + const stream = this._context.stream; + switch (this._encoding) { + case 'text': { + try { + const reader = protobuf.TextReader.open(stream); + this.model = onnx.proto.ModelProto.decodeText(reader); + this.format = `ONNX${this.model.ir_version ? ` v${this.model.ir_version}` : ''}`; + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new onnx.Error(`File text format is not onnx.ModelProto (${message.replace(/\.$/, '')}).`); + } + break; + } + case 'binary': { + switch (this._type) { + case 'tensor': { + // TensorProto + // input_0.pb, output_0.pb + try { + const reader = protobuf.BinaryReader.open(stream); + const tensor = onnx.proto.TensorProto.decode(reader); + tensor.name = tensor.name || this._context.identifier; + const attribute = new onnx.proto.AttributeProto(); + attribute.name = 'value'; + attribute.type = onnx.AttributeType.TENSOR; + attribute.t = tensor; + const node = new onnx.proto.NodeProto(); + node.op_type = 'Constant'; + node.attribute = [ attribute ]; + const graph = new onnx.proto.GraphProto(); + graph.node = [ node ]; + this.model = new onnx.proto.ModelProto(); + this.model.graph = graph; + this.format = 'ONNX Tensor'; + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new onnx.Error(`File format is not onnx.TensorProto (${message.replace(/\.$/, '')}).`); + } + break; + } + case 'graph': { + // GraphProto + try { + const reader = protobuf.BinaryReader.open(stream); + this.model = new onnx.proto.ModelProto(); + this.model.graph = onnx.proto.GraphProto.decode(reader); + this.format = 'ONNX'; + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new onnx.Error(`File format is not onnx.GraphProto (${message.replace(/\.$/, '')}).`); + } + break; + } + case 'model': { + // ModelProto + try { + const reader = protobuf.BinaryReader.open(stream); + this.model = onnx.proto.ModelProto.decode(reader); + this.format = `ONNX${this.model.ir_version ? ` v${this.model.ir_version}` : ''}`; + } catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new onnx.Error(`File format is not onnx.ModelProto (${message.replace(/\.$/, '')}).`); + } + break; + } + default: { + throw new onnx.Error('Unsupported ONNX format type.'); + } + } + break; + } + default: { + throw new onnx.Error('Unsupported ONNX format encoding.'); + } + } + } +}; + +onnx.OrtReader = class { + + static open(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + const stream = context.stream; + if (stream && stream.length >= 8) { + const buffer = stream.peek(Math.min(32, stream.length)); + const reader = flatbuffers.BinaryReader.open(buffer); + const identifier = reader.identifier; + if (identifier === 'ORTM') { + return new onnx.OrtReader(context); + } + if (extension === 'ort') { + const signature = [ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ]; + if (signature.length <= stream.length && stream.peek(signature.length).every((value, index) => value === signature[index])) { + return new onnx.OrtReader(context); + } + } + } + return null; + } + + constructor(context) { + this._context = context; + } + + async read() { + await this._context.require('./onnx-schema'); + onnx.schema = flatbuffers.get('ort').onnxruntime.fbs; + try { + const stream = this._context.stream; + this._graphs = new Set(); + const reader = flatbuffers.BinaryReader.open(stream); + const session = onnx.schema.InferenceSession.create(reader); + this.model = session.model; + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new onnx.Error(`File format is not ort.Model (${message.replace(/\.$/, '')}).`); + } + const tensor_shape = (value) => { + if (value && value.dim && Array.isArray(value.dim)) { + for (const dimension of value.dim) { + switch (dimension.value.dim_type) { + case 0: + return {}; + case 1: + dimension.dim_value = dimension.value.dim_value; + delete dimension.value; + break; + case 2: + dimension.dim_param = dimension.value.dim_param; + delete dimension.value; + break; + default: + throw new onnx.Error(`Unknown shape dimension '${JSON.stringify(dimension.value)}'.`); + } + } + } + return value; + }; + /* eslint-disable no-use-before-define */ + const node = (value) => { + value.input = value.inputs; + value.output = value.outputs; + value.attribute = value.attributes.map((attribute) => { + // local names must not shadow the graph() and type() helpers referenced below + const attributeType = attribute.type; + if (attributeType === onnx.AttributeType.GRAPH) { + graph(attribute.g); + } else if (attributeType === onnx.AttributeType.GRAPHS) { + for (const g of attribute.graphs) { + graph(g); + } + } else if (attributeType === onnx.AttributeType.TYPE_PROTO) { + attribute.tp = type(attribute.tp); + } else if (attributeType === onnx.AttributeType.TYPE_PROTOS) { + attribute.type_protos = attribute.type_protos.map((tp) => type(tp)); + } + return attribute; + }); + delete value.inputs; + delete value.outputs; + delete value.attributes; + return value; + }; + const tensor_type = (value) => { + value.shape = tensor_shape(value.shape); + return value; + }; + const sequence_type = (value) => { + value.elem_type = type(value.elem_type); + return value; + }; + const map_type = (value) => { + value.value_type = type(value.value_type); + return value; + }; + /* eslint-enable no-use-before-define */ + const type = (value) => { + if (value) { + const type = value.value; + if (type && type instanceof onnx.schema.TensorTypeAndShape) { + value.tensor_type = tensor_type(type); + return value; + } + if (type && type instanceof onnx.schema.SequenceType) {
value.sequence_type = sequence_type(type); + return value; + } + if (type && type instanceof onnx.schema.MapType) { + value.map_type = map_type(type); + return value; + } + throw new onnx.Error(`Unsupported type value '${JSON.stringify(value.value)}'.`); + } + return null; + }; + const graph = (value) => { + if (this._graphs.has(value)) { + return; + } + this._graphs.add(value); + value.name = this._graphs.size.toString(); + value.node = value.nodes.map((value) => node(value)); + delete value.nodes; + value.value_info = value.node_args.map((valueInfo) => { + return { + name: valueInfo.name, + doc_string: valueInfo.doc_string, + type: type(valueInfo.type) + }; + }); + delete value.node_args; + const value_info = new Map(value.value_info.map((entry) => [ entry.name, entry ])); + value.input = value.inputs.map((input) => { + return value_info.has(input) ? value_info.get(input) : { name: input }; + }); + delete value.inputs; + value.output = value.outputs.map((output) => { + return value_info.has(output) ? value_info.get(output) : { name: output }; + }); + delete value.outputs; + value.initializer = value.initializers.map((tensor) => { + tensor.data_location = onnx.DataLocation.DEFAULT; + return tensor; + }); + delete value.initializers; + value.sparse_initializer = value.sparse_initializers.map((tensor) => { + tensor.values.data_location = onnx.DataLocation.DEFAULT; + tensor.indices.data_location = onnx.DataLocation.DEFAULT; + return tensor; + }); + delete value.sparse_initializers; + }; + graph(this.model.graph); + this.model.graph.doc_string = this.model.graph_doc_string; + delete this.model.graph_doc_string; + this.format = `ONNX Runtime${this.model.ir_version ? ` v${this.model.ir_version}` : ''}`; + } +}; + +onnx.JsonReader = class { + + static open(context) { + const obj = context.peek('json'); + if (obj && (obj.irVersion !== undefined || (obj.graph && Array.isArray(obj.graph.node)))) { + return new onnx.JsonReader(obj); + } + return null; + } + + constructor(obj) { + this.model = obj; + this._attributeTypes = new Map(Object.entries(onnx.AttributeType)); + } + + async read() { + const tensor_shape = (value) => { + if (Array.isArray(value.dim)) { + for (const dimension of value.dim) { + if (dimension.dimValue !== undefined) { + dimension.dim_value = parseInt(dimension.dimValue, 10); + delete dimension.dimValue; + } else if (dimension.dimParam !== undefined) { + dimension.dim_param = dimension.dimParam; + delete dimension.dimParam; + } + } + } + return value; + }; + const tensor_type = (value) => { + value.elem_type = value.elemType; + delete value.elemType; + if (value.shape) { + value.shape = tensor_shape(value.shape); + } + return value; + }; + /* eslint-disable no-use-before-define */ + const optional_type = (value) => { + value.elem_type = type(value.elemType); + delete value.elemType; + return value; + }; + const sequence_type = (value) => { + value.elem_type = type(value.elemType); + delete value.elemType; + return value; + }; + const map_type = (value) => { + value.key_type = value.keyType; + delete value.keyType; + value.value_type = type(value.valueType); + delete value.valueType; + return value; + }; + const sparse_tensor_type = (value) => { + value.elem_type = value.elemType; + delete value.elemType; + if (value.shape) { + value.shape = tensor_shape(value.shape); + } + return value; + }; + const type = (value) => { + if (value.tensorType) { + value.tensor_type = tensor_type(value.tensorType); + delete value.tensorType; + } else if (value.sequenceType) { + value.sequence_type =
sequence_type(value.sequenceType); + delete value.sequenceType; + } else if (value.optionalType) { + value.optional_type = optional_type(value.optionalType); + delete value.optionalType; + } else if (value.mapType) { + value.map_type = map_type(value.mapType); + delete value.mapType; + } else if (value.sparseTensorType) { + value.sparse_tensor_type = sparse_tensor_type(value.sparseTensorType); + delete value.sparseTensorType; + } else { + throw new onnx.Error(`Unsupported ONNX JSON type '${JSON.stringify(Object.keys(value))}'.`); + } + return value; + }; + const tensor = (value) => { + value.data_type = value.dataType; + value.dims = Array.isArray(value.dims) ? value.dims.map((dim) => parseInt(dim, 10)) : []; + delete value.dataType; + if (value.rawData !== undefined) { + value.data_location = onnx.DataLocation.DEFAULT; + const data = atob(value.rawData); + const length = data.length; + const array = new Uint8Array(length); + for (let i = 0; i < length; i++) { + array[i] = data[i].charCodeAt(0); + } + value.raw_data = array; + delete value.rawData; + } else if (Array.isArray(value.floatData)) { + value.data_location = onnx.DataLocation.DEFAULT; + value.float_data = value.floatData; + delete value.floatData; + } else if (Array.isArray(value.int32Data)) { + value.data_location = onnx.DataLocation.DEFAULT; + value.int32_data = value.int32Data; + delete value.int32Data; + } else if (Array.isArray(value.int64Data)) { + value.data_location = onnx.DataLocation.DEFAULT; + value.int64_data = value.int64Data.map((value) => parseInt(value, 10)); + delete value.int64Data; + } else { + throw new onnx.Error(`Unsupported ONNX JSON tensor data '${JSON.stringify(value.data_type)}.`); + } + return value; + }; + const sparse_tensor = (value) => { + value.indices = tensor(value.indices); + value.values = tensor(value.values); + return value; + }; + const attribute = (value) => { + if (value.type && this._attributeTypes.has(value.type)) { + value.type = this._attributeTypes.get(value.type); + } + if (value.refAttrName) { + value.ref_attr_name = value.refAttrName; + delete value.refAttrName; + } else if (value.type === onnx.AttributeType.FLOATS || Array.isArray(value.floats)) { + value.floats = value.floats.map((value) => parseFloat(value)); + } else if (value.type === onnx.AttributeType.INTS || Array.isArray(value.ints)) { + value.ints = value.ints.map((value) => parseInt(value, 10)); + } else if (value.type === onnx.AttributeType.STRINGS || Array.isArray(value.strings)) { + value.strings = value.strings.map((value) => atob(value)); + } else if (value.type === onnx.AttributeType.TENSORS || Array.isArray(value.tensors)) { + value.tensors = value.tensors.map((value) => tensor(value)); + } else if (value.type === onnx.AttributeType.GRAPHS || Array.isArray(value.graphs)) { + value.graphs = value.graphs.map((value) => graph(value)); + } else if (value.type === onnx.AttributeType.SPARSE_TENSORS || Array.isArray(value.sparseTensors)) { + value.sparse_tensors = value.sparseTensors.map((value) => sparse_tensor(value)); + delete value.sparseTensors; + } else if (value.type === onnx.AttributeType.FLOAT || value.f !== undefined) { + value.f = parseFloat(value.f); + } else if (value.type === onnx.AttributeType.INT || value.i !== undefined) { + value.i = parseInt(value.i, 10); + } else if (value.type === onnx.AttributeType.STRING || value.s !== undefined) { + value.s = atob(value.s); + } else if (value.type === onnx.AttributeType.TENSOR || value.t !== undefined) { + value.t = tensor(value.t); + } else if (value.type === 
onnx.AttributeType.GRAPH || value.g !== undefined) { + value.g = graph(value.g); + } else if (value.type === onnx.AttributeType.SPARSE_TENSOR || value.sparseTensor !== undefined) { + value.sparse_tensor = sparse_tensor(value.sparseTensor); + delete value.sparseTensor; + } else { + throw new onnx.Error(`Unsupported ONNX JSON attribute type '${JSON.stringify(value.type)}'.`); + } + return value; + }; + const node = (value) => { + value.op_type = value.opType; + delete value.opType; + value.input = Array.isArray(value.input) ? value.input : []; + value.output = Array.isArray(value.output) ? value.output : []; + value.attribute = Array.isArray(value.attribute) ? value.attribute.map((value) => attribute(value)) : []; + return value; + }; + const value_info = (value) => { + value.type = type(value.type); + return value; + }; + const operator_set = (value) => { + value.version = parseInt(value.version, 10); + return value; + }; + const graph = (value) => { + value.node = value.node.map((value) => node(value)); + value.initializer = Array.isArray(value.initializer) ? value.initializer.map((value) => tensor(value)) : []; + value.sparse_initializer = Array.isArray(value.sparseInitializer) ? value.sparseInitializer.map((value) => sparse_tensor(value)) : []; + value.value_info = Array.isArray(value.valueInfo) ? value.valueInfo.map((value) => value_info(value)) : []; + value.input = Array.isArray(value.input) ? value.input.map((value) => value_info(value)) : []; + value.output = Array.isArray(value.output) ? value.output.map((value) => value_info(value)) : []; + return value; + }; + const func = (value) => { + value.node = value.node.map((value) => node(value)); + value.input = Array.isArray(value.input) ? value.input : []; + value.output = Array.isArray(value.output) ? value.output : []; + value.attribute = Array.isArray(value.attribute) ? value.attribute : []; + value.attribute_proto = Array.isArray(value.attributeProto) ? value.attributeProto.map((value) => attribute(value)) : []; + delete value.attributeProto; + if (value.docString) { + value.doc_string = value.docString; + delete value.docString; + } + return value; + }; + /* eslint-enable no-use-before-define */ + this.model.ir_version = parseInt(this.model.irVersion, 10); + delete this.model.irVersion; + if (this.model.version !== undefined) { + this.model.version = parseInt(this.model.version, 10); + } + if (this.model.producerName) { + this.model.producer_name = this.model.producerName; + delete this.model.producerName; + } + if (this.model.producerVersion) { + this.model.producer_version = this.model.producerVersion; + delete this.model.producerVersion; + } + if (this.model.modelVersion) { + this.model.model_version = parseInt(this.model.modelVersion, 10); + delete this.model.modelVersion; + } + if (this.model.docString) { + this.model.doc_string = this.model.docString; + delete this.model.docString; + } + this.model.graph = graph(this.model.graph); + if (Array.isArray(this.model.opsetImport)) { + this.model.opset_import = this.model.opsetImport.map((value) => operator_set(value)); + delete this.model.opsetImport; + } + if (Array.isArray(this.model.metadataProps)) { + this.model.metadata_props = this.model.metadataProps; + delete this.model.metadataProps; + } + if (Array.isArray(this.model.functions)) { + this.model.functions = this.model.functions.map((value) => func(value)); + } + this.format = `ONNX JSON${this.model.ir_version ? 
` v${this.model.ir_version}` : ''}`; + } +}; + +onnx.TextReader = class { + + static open(context) { + try { + const stream = context.stream; + if (stream && stream.length > 0 && (stream.peek(1)[0] < 0x80 || stream.peek(1)[0] >= 0xFE)) { + const reader = text.Reader.open(stream); + const lines = []; + for (let i = 0; i < 32; i++) { + const line = reader.read(); + if (line === undefined) { + break; + } + lines.push(line); + } + const content = lines.join('\n'); + if (/^\s*<\s*ir_version\s*:/m.exec(content) || + /^\s*[a-zA-Z][a-zA-Z0-9]*\s*\(.*\)\s=>\s\(/m.exec(content)) { + return new onnx.TextReader(context); + } + } + } catch (error) { + // continue regardless of error + } + return null; + } + + constructor(context) { + this._context = context; + this._dataTypes = new Map(Object.entries(onnx.DataType).map(([key, value]) => [ key.toLowerCase(), value ])); + this._attributeTypes = new Map(Object.entries(onnx.AttributeType).map(([key, value]) => [ key.toLowerCase(), value ])); + // used by _unexpected() to render control characters; without this table the error path throws a TypeError + this._escape = { b: '\b', f: '\f', n: '\n', r: '\r', t: '\t' }; + } + + async read() { + await this._context.require('./onnx-proto'); + onnx.proto = protobuf.get('onnx').onnx; + try { + const stream = this._context.stream; + this._decoder = text.Decoder.open(stream); + this._position = 0; + this._char = this._decoder.decode(); + this.model = this._parseModel(); + this.format = `ONNX Text${this.model.ir_version ? ` v${this.model.ir_version}` : ''}`; + delete this._decoder; + delete this._position; + delete this._char; + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new onnx.Error(`File format is not onnx.ModelProto (${message.replace(/\.$/, '')}).`); + } + } + + _seek(position) { + this._decoder.position = position; + this._char = ''; + this._next(); + } + + _parseModel() { + this._skipWhitespace(); + const model = new onnx.proto.ModelProto(); + if (this._match('<')) { + do { + const keyword = this._parseIdentifier(); + this._expect(':'); + switch (keyword) { + case 'ir_version': + case 'model_version': + model[keyword] = this._parseInteger(); + break; + case 'opset_import': + model[keyword] = this._parseOperatorSetId(); + break; + case 'producer_name': + case 'producer_version': + case 'domain': + case 'doc_string': + model[keyword] = this._parseString(); + break; + case 'metadata_props': + this._expect('['); + if (!this._match(']')) { + do { + const entry = new onnx.proto.StringStringEntryProto(); + entry.key = this._parseString(); + this._expect(':'); + entry.value = this._parseString(); + model.metadata_props.push(entry); + } while (this._match(',')); + this._expect(']'); + } + break; + default: + this._throw(`Unknown keyword '${keyword}'.`); + break; + } + } while (this._match(',')); + this._expect('>'); + } + model.graph = this._parseGraph(); + this._skipWhitespace(); + while (this._char !== undefined) { + const func = this._parseFunction(); + if (func) { + model.functions.push(func); + } + this._skipWhitespace(); + } + return model; + } + + _parseGraph() { + const graph = new onnx.proto.GraphProto(); + graph.name = this._parseIdentifier(); + if (this._match('(')) { + if (!this._match(')')) { + do { + const valueInfo = this._parseValueInfo(); + if (this._match('=')) { + const tensor = this._parseTensor(valueInfo.type); + tensor.name = valueInfo.name; + graph.initializer.push(tensor); + } + graph.input.push(valueInfo); + } + while (this._match(',')); + this._expect(')'); + } + } + this._expect('=>'); + graph.output = this._parseValueInfoList(); + if (this._match('<')) { + if (!this._match('>')) { + do { + const valueInfo =
this._parseValueInfo(); + if (this._match('=')) { + const tensor = this._parseTensor(valueInfo.type); + tensor.name = valueInfo.name; + graph.initializer.push(tensor); + } else { + graph.value_info.push(valueInfo); + } + } + while (this._match(',')); + this._expect('>'); + } + } + graph.node = this._parseNodeList(); + return graph; + } + + _parseNodeList() { + const list = []; + this._expect('{'); + while (!this._match('}')) { + list.push(this._parseNode()); + } + return list; + } + + _parseNode() { + const node = new onnx.proto.NodeProto(); + node.output = this._parseIdentifierList(); + this._expect('='); + let identifier = this._parseIdentifier(); + let domain = ''; + while (this._match('.')) { + if (domain) { + domain += '.'; + } + domain += identifier; + identifier = this._parseIdentifier(); + } + node.domain = domain; + node.op_type = identifier; + node.attribute = this._parseAttributeList(); + this._expect('('); + node.input = this._parseIdentifierList(); + this._expect(')'); + if (!node.attribute || node.attribute.length === 0) { + node.attribute = this._parseAttributeList(); + } + return node; + } + + _parseAttributeList() { + const list = []; + if (this._match('<')) { + do { + list.push(this._parseAttribute()); + } + while (this._match(',')); + this._expect('>'); + } + return list; + } + + _parseAttribute() { + const attribute = new onnx.proto.AttributeProto(); + attribute.name = this._parseIdentifier(); + if (this._match(':')) { + const type = this._parseIdentifier(); + if (!this._attributeTypes.has(type)) { + this._throw(`Unexpected attribute type '${type}'.`); + } + attribute.type = this._attributeTypes.get(type); + } + this._expect('='); + if (this._match('[')) { + if (!this._match(']')) { + do { + const value = new onnx.proto.AttributeProto(); + let type; + switch (attribute.type) { + case onnx.AttributeType.FLOATS: type = onnx.AttributeType.FLOAT; break; + case onnx.AttributeType.INTS: type = onnx.AttributeType.INT; break; + case onnx.AttributeType.STRINGS: type = onnx.AttributeType.STRING; break; + case onnx.AttributeType.TENSORS: type = onnx.AttributeType.TENSOR; break; + case onnx.AttributeType.GRAPHS: type = onnx.AttributeType.GRAPH; break; + case onnx.AttributeType.SPARSE_TENSORS: type = onnx.AttributeType.SPARSE_TENSOR; break; + case onnx.AttributeType.TYPE_PROTOS: type = onnx.AttributeType.TYPE_PROTO; break; + default: type = attribute.type; break; + } + this._parseAttributeValue(value, type); + switch (value.type) { + case onnx.AttributeType.INT: + attribute.type = onnx.AttributeType.INTS; + attribute.ints.push(value.i); + break; + case onnx.AttributeType.FLOAT: + attribute.type = onnx.AttributeType.FLOATS; + attribute.floats.push(value.f); + break; + case onnx.AttributeType.STRING: + attribute.type = onnx.AttributeType.STRINGS; + attribute.strings.push(value.s); + break; + default: + break; + } + } + while (this._match(',')); + } else { + if (attribute.type == onnx.AttributeType.UNDEFINED) { + this._throw('Empty list attribute value requires type annotation.'); + } + switch (attribute.type) { + case onnx.AttributeType.FLOAT: + case onnx.AttributeType.INT: + case onnx.AttributeType.STRING: + case onnx.AttributeType.TENSOR: + case onnx.AttributeType.GRAPH: + case onnx.AttributeType.SPARSE_TENSOR: + case onnx.AttributeType.TYPE_PROTO: + this._throw("Singleton attribute value cannot be specified as a list."); + break; + default: + break; + } + } + this._expect(']'); + } else { + this._parseAttributeValue(attribute, attribute.type); + } + return attribute; + } + + 
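+ // Editor's note: a minimal sketch of the attribute syntax _parseAttribute above accepts, assuming the upstream ONNX
+ // text-format grammar; the attribute names here are illustrative only, not taken from this file:
+ //   <axis: int = 1>                          a typed scalar
+ //   <scales = [1.0, 2.0]>                    an untyped list, promoted to FLOATS element by element
+ //   <perm: ints = []>                        an empty list, which requires the type annotation
+ //   <then_branch = subgraph () => () { ... }> a graph value
+ // The method reads `name[: type] =`, then either a bracketed list or a single value via _parseAttributeValue below.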
_parseAttributeValue(attribute, type) { + if (this._isAlpha(this._char) || this._char === '_') { + const identifier = this._peekIdentifier(); + if (this._isType(identifier)) { + const type = this._parseType(this._parseIdentifier()); + if (!type.tensor_type.elem_type) { + this._throw('Expected tensor data type.'); + } + if (!type.tensor_type.shape || !type.tensor_type.shape.dim) { + this._throw('Expected tensor shape.'); + } + this._skipWhitespace(); + if (this._char === '{' || this._char === '=' || this._peekIdentifier()) { + attribute.type = onnx.AttributeType.TENSOR; + const name = this._parseIdentifier(true); + this._match('='); + attribute.t = this._parseTensor(type); + if (name) { + attribute.t.name = name; + } + } else { + attribute.type = onnx.AttributeType.TYPE_PROTO; + attribute.tp = type; + } + } else { + const value = this._peekIdentifier(); + if (value === 'inf' || value === 'infinity' || value === 'nan') { + attribute.type = onnx.AttributeType.FLOAT; + // _parseLiteral() only understands quoted strings and digits, so consume the identifier and map it to the special float value + this._parseIdentifier(); + attribute.f = (value === 'nan') ? NaN : Infinity; + } else { + attribute.type = onnx.AttributeType.GRAPH; + attribute.g = this._parseGraph(); + } + } + } else if (this._match('@')) { + attribute.ref_attr_name = this._parseIdentifier(); + } else { + const value = this._parseLiteral(); + switch (typeof value) { + case 'number': + if (Number.isInteger(value)) { + attribute.type = onnx.AttributeType.INT; + attribute.i = value; + } else { + attribute.type = onnx.AttributeType.FLOAT; + attribute.f = value; + } + break; + case 'string': + attribute.type = onnx.AttributeType.STRING; + attribute.s = value; + break; + default: { + this._throw(`Unexpected value '${JSON.stringify(value)}'.`); + } + } + } + if (type !== onnx.AttributeType.UNDEFINED && type !== attribute.type) { + if (type === onnx.AttributeType.FLOAT && attribute.type === onnx.AttributeType.INT) { + attribute.type = onnx.AttributeType.FLOAT; + attribute.f = attribute.i; + delete attribute.i; + } else { + this._throw('Attribute type mismatch.'); + } + } + } + + _parseValueInfoList() { + const list = []; + this._expect('('); + if (!this._match(')')) { + do { + const value = this._parseValueInfo(); + list.push(value); + } while (this._match(',')); + this._expect(')'); + } + return list; + } + + _parseValueInfo() { + const valueInfo = new onnx.proto.ValueInfoProto(); + let identifier = this._parseIdentifier(); + if (this._isType(identifier)) { + valueInfo.type = this._parseType(identifier); + identifier = this._parseIdentifier(); + } + valueInfo.name = identifier; + return valueInfo; + } + + _parseType(elem_type) { + const type = new onnx.proto.TypeProto(); + type.tensor_type = new onnx.proto.TypeProto.Tensor(); + type.tensor_type.elem_type = this._dataTypes.get(elem_type); + if (this._match('[')) { + if (!this._match(']')) { + type.tensor_type.shape = this._parseTensorShape(); + this._expect(']'); + } + } else { + type.tensor_type.shape = new onnx.proto.TensorShapeProto(); + } + return type; + } + + _parseTensorShape() { + const shape = new onnx.proto.TensorShapeProto(); + do { + const dimension = new onnx.proto.TensorShapeProto.Dimension(); + if (!this._match('?')) { + const identifier = this._parseIdentifier(true); + if (identifier) { + dimension.dim_param = identifier; + } else { + dimension.dim_value = this._parseInteger(); + } + } + shape.dim.push(dimension); + } + while (this._match(',')); + return shape; + } + + _parseTensor(type) { + const tensor = new onnx.proto.TensorProto(); + if (!type.tensor_type || !type.tensor_type.elem_type) { + this._throw('Expected tensor type.'); + } + if
(!type.tensor_type.shape || !type.tensor_type.shape.dim || !type.tensor_type.shape.dim.every((dim) => dim.dim_value)) { + this._throw('Expected numeric tensor shape.'); + } + const elem_type = type.tensor_type.elem_type; + tensor.data_type = elem_type; + tensor.dims = type.tensor_type.shape.dim.map((dim) => dim.dim_value); + this._match('='); + this._expect('{'); + if (!this._match('}')) { + do { + switch (elem_type) { + case onnx.DataType.INT8: + case onnx.DataType.INT16: + case onnx.DataType.INT32: + case onnx.DataType.UINT8: + case onnx.DataType.UINT16: + case onnx.DataType.BOOL: + tensor.int32_data.push(this._parseInteger()); + break; + case onnx.DataType.INT64: + tensor.int64_data.push(this._parseInteger()); + break; + case onnx.DataType.UINT32: + case onnx.DataType.UINT64: + tensor.uint64_data.push(this._parseInteger()); + break; + case onnx.DataType.FLOAT: + tensor.float_data.push(this._parseFloat()); + break; + case onnx.DataType.DOUBLE: + tensor.double_data.push(this._parseFloat()); + break; + case onnx.DataType.STRING: + tensor.string_data.push(this._parseString()); + break; + default: + return this._throw(`Unsupported tensor element type '${elem_type}'.`); + } + } while (this._match(',')); + this._expect('}'); + } + return tensor; + } + + _parseFunction() { + const func = new onnx.proto.FunctionProto(); + if (this._match('<')) { + do { + const keyword = this._parseIdentifier(); + this._expect(':'); + switch (keyword) { + case 'opset_import': + func[keyword] = this._parseOperatorSetId(); + break; + case 'domain': + case 'doc_string': + func[keyword] = this._parseString(); + break; + default: + this._throw(`Unknown keyword '${keyword}'.`); + break; + } + } + while (this._match(',')); + this._expect('>'); + } + func.name = this._parseIdentifier(); + if (this._match('<')) { + func.attribute = this._parseIdentifierList(); + this._expect('>'); + } + if (this._match('(')) { + func.input = this._parseIdentifierList(); + this._expect(')'); + } + this._expect('=>'); + if (this._match('(')) { + func.output = this._parseIdentifierList(); + this._expect(')'); + } + func.node = this._parseNodeList(); + return func; + } + + _parseIdentifierList() { + const list = []; + const identifier = this._parseIdentifier(true); + if (identifier) { + list.push(identifier); + while (this._match(',')) { + list.push(this._parseIdentifier()); + } + } + return list; + } + + _peekIdentifier() { + const index = this._decoder.position; + const position = this._position; + const char = this._char; + const value = this._parseIdentifier(true); + this._char = char; + this._position = position; + this._decoder.position = index; + return value; + } + + _parseIdentifier(optional) { + this._skipWhitespace(); + const value = []; + if (this._isAlpha(this._char) || this._char === '_') { + value.push(this._char); + this._next(); + while (this._isAlpha(this._char) || (this._char >= '0' && this._char <= '9') || this._char === '_') { + value.push(this._char); + this._next(); + } + } + if (!optional && value.length === 0) { + this._throw('Identifier expected.'); + } + return value.join(''); + } + + _parseLiteral() { + this._skipWhitespace(); + let decimal_point = false; + if (this._char === '"') { + const value = []; + this._next(); + while (this._char !== undefined && this._char !== '"') { + value.push(this._char); + this._next(); + } + if (this._char !== undefined) { + this._next(); + } + return value.join(''); + } else if ((this._char >= '0' && this._char <= '9') || this._char === '-') { + const value = [ this._char ];
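+ // Editor's note: the scan below accepts digits with at most one decimal point, then an optional exponent part;
+ // a '.' or an exponent switches the result from Number.parseInt to Number.parseFloat.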
this._next(); + while ((this._char >= '0' && this._char <= '9') || this._char === '.') { + if (this._char === '.') { + if (decimal_point) { + this._throw(); + } + decimal_point = true; + } + value.push(this._char); + this._next(); + } + if (value.length === 0) { + this._throw('Value expected.'); + } + if (this._char === 'e' || this._char === 'E') { + decimal_point = true; + value.push(this._char); + this._next(); + if (this._char === '+' || this._char === '-') { + value.push(this._char); + this._next(); + } + while ((this._char >= '0' && this._char <= '9')) { + value.push(this._char); + this._next(); + } + } + return decimal_point ? Number.parseFloat(value.join('')) : Number.parseInt(value.join(''), 10); + } + return undefined; + } + + _parseInteger() { + const value = this._parseLiteral(); + if (!Number.isInteger(value)) { + this._throw('Integer value expected.'); + } + return value; + } + + _parseFloat() { + const value = this._parseLiteral(); + if (typeof value !== 'number') { + this._throw('Float value expected.'); + } + return value; + } + + _parseString() { + const value = this._parseLiteral(); + if (typeof value !== 'string') { + this._throw('String value expected.'); + } + return value; + } + + _parseOperatorSetId() { + const list = []; + this._expect('['); + if (!this._match(']')) { + do { + const value = new onnx.proto.OperatorSetIdProto(); + value.domain = this._parseString(); + this._expect(':'); + value.version = this._parseInteger(); + list.push(value); + } + while (this._match(',')); + this._expect(']'); + } + return list; + } + + _isAlpha(value) { + return (value >= 'a' && value <= 'z') || (value >= 'A' && value <= 'Z'); + } + + _isType(identifier) { + return this._dataTypes.has(identifier) || + identifier === 'seq' || + identifier === 'map' || + identifier === 'optional' || + identifier === 'sparse_tensor'; + } + + _match(value) { + this._skipWhitespace(); + if (this._char !== value[0]) { + return false; + } + if (value.length === 1) { + this._next(); + return true; + } + const position = this._position; + for (let i = 0; i < value.length; i++) { + if (this._char !== value[i]) { + this._seek(position); + return false; + } + this._next(); + } + return true; + } + + _expect(value) { + if (!this._match(value)) { + this._unexpected(); + } + return true; + } + + _skipWhitespace() { + for (;;) { + while (this._char === ' ' || this._char === '\n' || this._char === '\r' || this._char === '\t') { + this._next(); + } + if (this._char === undefined || this._char !== '#') { + break; + } + while (this._char !== undefined && this._char !== '\n') { + this._next(); + } + } + } + + _next() { + if (this._char === undefined) { + this._unexpected(); + } + this._position = this._decoder.position; + this._char = this._decoder.decode(); + } + + _unexpected() { + let c = this._char; + if (c === undefined) { + throw new onnx.Error('Unexpected end of input.'); + } else if (c === '"') { + c = 'string'; + } else if ((c >= '0' && c <= '9') || c === '-') { + c = 'number'; + } else { + if (c < ' ' || c > '\x7F') { + const name = Object.keys(this._escape).filter((key) => this._escape[key] === c); + c = (name.length === 1) ? 
`\\${name}` : `\\u${(`000${c.charCodeAt(0).toString(16)}`).slice(-4)}`; + } + c = `token '${c}'`; + } + this._throw(`Unexpected ${c}`); + } + + _throw(message) { + message = message.replace(/\.$/, ''); + throw new onnx.Error(`${message} ${this._location()}`); + } + + _location() { + let line = 1; + let column = 1; + this._decoder.position = 0; + let c; + do { + if (this._decoder.position === this._position) { + return `at ${line}:${column}.`; + } + c = this._decoder.decode(); + if (c === '\n') { + line++; + column = 1; + } else { + column++; + } + } + while (c !== undefined); + return `at ${line}:${column}.`; + } +}; + +onnx.PickleReader = class { + + static open(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + const stream = context.stream; + if (extension === 'onnx' && stream && stream.length > 3) { + const signature = stream.peek(2); + if (signature[0] === 0x80 && signature[1] < 7) { + return new onnx.PickleReader(); + } + } + return undefined; + } + + async read() { + throw new onnx.Error('Unsupported Pickle content.'); + } +}; + +onnx.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading ONNX model.'; + } +}; + +export const ModelFactory = onnx.ModelFactory; diff --git a/openvino-metadata.json b/openvino-metadata.json new file mode 100644 index 00000000000..f79f41b2e5b --- /dev/null +++ b/openvino-metadata.json @@ -0,0 +1,1639 @@ +[ + { + "name": "Activation", + "category": "Activation", + "description": "**Short description**: *Activation* layer represents an activation function of each neuron in a layer, which is used to add non-linearity to the computational flow.\n**Detailed description**: [Reference](https://medium.com/the-theory-of-everything/understanding-activation-functions-in-neural-networks-9491262884e0)\n**Parameters**: *Activation layer* parameters should be specified in the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n* Sigmoid function:\n \\f[\n f( x ) = \\frac{1}{1+e^{-x}}\n \\f]\n* Tanh function:\n \\f[\n f ( x ) = \\frac{2}{1+e^{-2x}} - 1 = 2sigmoid(2x) - 1\n \\f]\n*\tElu function:\n\t\\f[\n f(x) = \\left\\{\\begin{array}{ll}\n\t\te^{x} - 1 \\quad \\mbox{if } x < 0 \\\\\n\t\tx \\quad \\mbox{if } x \\geq 0\n\t\\end{array}\\right.\n\t\\f]\n*\tRelu6 function:\n\t\\f[\n f(x) = min(max(0, x), 6)\n\t\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "description": " *type* represents particular activation function. For example, *type* equal *sigmoid* means that neurons of this layer have a sigmoid activation function.", + "name": "type", + "required": true + }, + { + "default": 1, + "name": "alpha", + "type": "float32" + } + ], + "support_level": "default" + }, + { + "name": "Add", + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "ArgMax", + "description": "**Short description**: *ArgMax* layer computes the index of the *K* maximum values for each datum across all dimensions *CxHxW*.\n**Detailed description**: Intended for use after a classification layer to produce a prediction. If parameter *out_max_val* is set to \"true\", output is a vector of pairs *(max_ind, max_val)* for each image.
The *axis* parameter specifies an axis along which to maximize.\n**Parameters**: *ArgMax* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*ArgMax* generally does the following with the input blobs:\n\\f[\no_{i} = \\left\\{\nx| x \\in S \\wedge \\forall y \\in S : f(y) \\leq f(x)\n\\right\\}\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 0, + "description": " if *out_max_val* equals 1, output is a vector of pairs *(max_ind, max_val)*, unless axis is set. Then output is *max_val* along the specified axis.", + "name": "out_max_val", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *top_k* is the number of maximum values to compute for each datum. For example, *top_k* equal 1 means that only the single maximum value is reported.", + "name": "top_k", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " if set, maximizes along the specified axis, else maximizes the flattened trailing dimensions for each index of the first / num dimension.", + "name": "axis", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "BatchNormalization", + "category": "Normalization", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/batchnorm.html)\n**Detailed description**: [Reference](https://kratzert.github.io/2016/02/12/understanding-the-gradient-flow-through-the-batch-normalization-layer.html)\n**Parameters**: *BatchNormalization* layer parameters should be specified as the `batch_norm_data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*BatchNormalization* is the normalization of the output in each hidden layer.\n* **Input**: Values of \\f$x\\f$ over a mini-batch:\n \\f[\n \\beta = \\{ x_{1...m} \\}\n \\f]\n* **Parameters to learn**: \\f$ \\gamma, \\beta\\f$\n* **Output**:\n \\f[\n \\{ o_{i} = BN_{\\gamma, \\beta} ( b_{i} ) \\}\n \\f]\n* **Mini-batch mean**:\n \\f[\n \\mu_{\\beta} \\leftarrow \\frac{1}{m}\\sum_{i=1}^{m}b_{i}\n \\f]\n* **Mini-batch variance**:\n \\f[\n \\sigma_{\\beta }^{2}\\leftarrow \\frac{1}{m}\\sum_{i=1}^{m} ( b_{i} - \\mu_{\\beta} )^{2}\n \\f]\n* **Normalize**:\n \\f[\n \\hat{b_{i}} \\leftarrow \\frac{b_{i} - \\mu_{\\beta}}{\\sqrt{\\sigma_{\\beta }^{2} + \\epsilon }}\n \\f]\n* **Scale and shift**:\n \\f[\n o_{i} \\leftarrow \\gamma\\hat{b_{i}} + \\beta = BN_{\\gamma ,\\beta } ( b_{i} )\n \\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *epsilon* is the number to be added to the variance to avoid division by zero when normalizing the value.
For example, *epsilon* equal 0.001 means that 0.001 is added to the variance.", + "name": "epsilon", + "required": true, + "type": "float32" + } + ], + "support_level": "default" + }, + { + "name": "BinaryConvolution", + "category": "Layer", + "inputs": [ + { "name": "inputs" }, + { "name": "weights" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Broadcast", + "inputs": [ + { "name": "data" }, + { "name": "target_shape" }, + { "name": "axes_mapping" } + ] + }, + { + "name": "Clamp", + "description": "**Short description**: *Clamp* layer represents clipping activation operation.\n**Detailed description**: [Reference](https://www.tensorflow.org/versions/r1.2/api_docs/MO_DG/prepare_model/python/tf/clip_by_value)\n**Parameters**: *Clamp* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Clamp* generally does the following with the input blobs:\n\\f[\nout_i=\\left\\{\\begin{array}{ll}\n\tmax\\_value \\quad \\mbox{if } \\quad input_i>max\\_value \\\\\n\tmin\\_value \\quad \\mbox{if } \\quad input_i<min\\_value \\\\\n\tinput_i \\quad \\mbox{otherwise}\n\\end{array}\\right.\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 0, + "description": " *min* is the lower bound of values in the output shape. Any value in the input shape that is smaller than the bound, is replaced by the *min* value. For example, *min* equal 10 means that any value in the input shape that is smaller than the bound, is replaced by 10.", + "name": "min", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *max* is the upper bound of values in the output shape. Any value in the input shape that is greater than the bound, is replaced by the *max* value. For example, *max* equals 50 means that any value in the input shape that is greater than the bound, is replaced by 50.", + "name": "max", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "Concat", + "category": "Tensor", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/concat.html)\n**Parameters**: *Concat* layer parameters should be specified in the `concat_data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Axis* parameter specifies a blob dimension to concat values. For example, for two input blobs *B1xC1xH1xW1* and *B2xC2xH2xW2*, if *axis* equals 1, the output blob is *B1x(C1+C2)xH1xW1*. This is only possible if *B1=B2*, *H1=H2*, *W1=W2*.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "description": " *axis* is the number of axis over which input blobs are concatenated. For example, *axis* equal 1 means that input blobs are concatenated over the first axis.", + "name": "axis", + "required": true, + "type": "int32" + } + ], + "inputs": [ + { + "name": "inputs", + "type": "Tensor[]" + } + ], + "support_level": "default" + }, + { + "name": "Concatenation", + "category": "Tensor" + }, + { + "name": "Convolution", + "category": "Layer", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/convolution.html)
**Detailed description**: [Reference](http://cs231n.github.io/convolutional-networks/#conv)\n**Parameters**: *Convolution* layer parameters should be specified in the `convolution_data` node, which is a child of the layer node.\n**Weights Layout** Weights layout is GOIYX, which means that *X* is changing the fastest, then *Y*, then *Input*, *Output*, then *Group*.\n**Mathematical Formulation**\n* For the convolutional layer, the number of output features in each dimension is calculated using the formula:\n\\f[\nn_{out} = \\left ( \\frac{n_{in} + 2p - k}{s} \\right ) + 1\n\\f]\n* The receptive field in each layer is calculated using the formulas:\n * Jump in the output feature map:\n \\f[\n j_{out} = j_{in} * s\n \\f]\n * Size of the receptive field of output feature:\n \\f[\n r_{out} = r_{in} + ( k - 1 ) * j_{in}\n \\f]\n * Center position of the receptive field of the first output feature:\n \\f[\n start_{out} = start_{in} + ( \\frac{k - 1}{2} - p ) * j_{in}\n \\f]\n * Output is calculated using the following formula:\n \\f[\n out = \\sum_{i = 0}^{n}w_{i}x_{i} + b\n \\f]\n**Example**\n\n```html\n\n \n ... \n ... \n \n \n \n```", + "attributes": [ + { + "default": [ + 1, + null + ], + "description": " *stride* is a distance (in pixels) to slide the filter on the feature map over the (x, y) axis. For example, *stride* equal \"1,1\" means sliding the filter 1 pixel at a time over the (x, y) axis.", + "name": "stride", + "required": true, + "type": "int32[]" + }, + { + "default": 1, + "description": " *stride-x* is a distance (in pixels) to slide the filter on the feature map over the x axis. For example, *stride-x* equal 1 means sliding the filter 1 pixel at a time over the x axis.", + "name": "stride-x", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *stride-y* is a distance (in pixels) to slide the filter on the feature map over the y axis. For example, *stride-y* equal 1 means sliding the filter 1 pixel at a time over the y axis.", + "name": "stride-y", + "required": true, + "type": "int32" + }, + { + "default": [ + 1, + null + ], + "name": "strides", + "type": "int32[]" + }, + { + "default": 0, + "description": " *pad* is a number of pixels to add to the left and top of the input. For example, *pad* equal 1 means adding 1 pixel to the left of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad", + "required": true, + "type": "int32" + }, + { + "default": 0, + "description": " *pad-x* is a number of pixels to add to the left of the input. For example, *pad-x* equal 1 means adding 1 pixel to the left of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-x", + "required": true, + "type": "int32" + }, + { + "default": 0, + "description": " *pad-y* is a number of pixels to add to the top of the input. For example, *pad-y* equal 1 means adding 1 pixel to the top of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-y", + "required": true, + "type": "int32" + }, + { + "default": 0, + "name": "pad-r", + "type": "int32" + }, + { + "default": 0, + "name": "pad-b", + "type": "int32" + }, + { + "default": [ + 1, + 1 + ], + "description": " *kernel* is a width and height of each filter. 
For example, *kernel* equal 3 (3, 3) means that each filter has width and height equal to 3.", + "name": "kernel", + "required": true, + "type": "int32[]" + }, + { + "default": 1, + "description": " *kernel-x* is a width of each filter. For example, *kernel* equal 3 means that each filter has width equal to 3.", + "name": "kernel-x", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *kernel-y* is a height of each filter. For example, *kernel-y* equal 3 means that each filter has height equal to 3.", + "name": "kernel-y", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *output* is a number of output feature maps per whole output (when *group* > 1, *output* still matches the number of output features regardless of *group* value). For example, *output* equals 1 means that there is 1 output feature map in a layer.", + "name": "output", + "required": true, + "type": "int32", + "visible": false + }, + { + "default": 1, + "description": " *group* denotes the number of groups to which *output* and *input* should be split. For example, *group* equal 1 means that all the filters are applied to full input (usual convolution), *group* equals 2 means that both *input* and *output* channels are separated into 2 groups and *i-th output* group is connected to *i-th input* group channels. *group* equals number of output feature maps denotes depth-wise separable convolution ([Reference](https://medium.com/towards-data-science/types-of-convolutions-in-deep-learning-717013397f4d#6f51)).", + "name": "group", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *dilation* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal \"1,1\" means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal \"2,2\" means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.", + "name": "dilation", + "required": true, + "type": "int32" + }, + { + "default": 1, + "name": "dilation-x", + "type": "int32" + }, + { + "default": [ + 1, + null + ], + "name": "dilations", + "type": "int32[]" + }, + { + "default": "same_upper", + "name": "auto_pad" + }, + { + "default": [ + 0, + null + ], + "name": "pads_begin", + "type": "int32[]" + }, + { + "default": [ + 0, + null + ], + "name": "pads_end", + "type": "int32[]" + }, + { + "default": 1, + "description": " *dilation-y* denotes the distance in height between elements (weights) in the filter. For example, *dilation-y* equal 1 means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation-y* equal 2 means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.", + "name": "dilation-y", + "required": true, + "type": "int32" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "weights" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Crop", + "category": "Data", + "description": "**Short description**: *Crop* layer changes selected dimensions of the input blob according to the specified parameters.\n**Parameters**: *Crop* layer parameters should be specified in `data` section, which is placed as a child of the layer node. 
Due to the various representations of *Crop* attributes in existing frameworks, this layer can be described in three independent ways; only **Type 1** is covered here. The *Crop* **Type 1** layer takes two input blobs, and the shape of the second blob specifies the *Crop* size. The layer has two attributes: *axis* and *offset*. The *Crop* layer of this type supports shape inference.\n**Inputs**\n* **1**: Multidimensional input blob *(for example, NCHW, NCH, or NC)*\n* **2**: Shape of this input will be used for crop\n**Example**\n\n```html\n\n \n \n \n 1\n 21\n 44\n 44\n \n \n 1\n 21\n 34\n 34\n \n \n \n \n 1\n 21\n 34\n 34\n \n \n\n```", + "attributes": [ + { + "default": 1, + "description": " *axis* is a number of a dimension to be used for cropping. For example, *axis* equal to 1 means that cropping is performed over the first dimension.", + "name": "axis", + "required": true, + "type": "int32[]" + }, + { + "default": 1, + "description": " *offset* denotes the starting point for crop in the input blob. For example, *offset* equal to 2 means that crop is starting from the second value in the given axis.", + "name": "offset", + "required": true, + "type": "int32[]" + } + ], + "support_level": "default" + }, + { + "name": "CTCGreadyDecoder", + "category": "Layer", + "description": "**Short description**: *CTCGreadyDecoder* performs greedy decoding on the logits given in input (best path).\n**Detailed description**: [Reference](https://www.tensorflow.org/api_docs/python/tf/nn/ctc_greedy_decoder)\n**Parameters**: *CTCGreadyDecoder* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\nGiven an input sequence \\f$X\\f$ of length \\f$T\\f$, *CTCGreadyDecoder* assumes the probability of a length \\f$T\\f$ character sequence \\f$C\\f$ is given by\n\\f[\np(C|X) = \\prod_{t=1}^{T} p(c_{t}|X)\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *ctc_merge_repeated* is a flag for collapsing the repeated labels during the CTC calculation.", + "name": "ctc_merge_repeated", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "Deconvolution", + "category": "Layer", + "description": "**Short description**: *Deconvolution* layer is applied to upsample the output to a higher image resolution.\n**Detailed description**: [Reference](https://distill.pub/2016/deconv-checkerboard/)\n**Parameters**: *Deconvolution* layer parameters should be specified in the `deconvolution_data` node, which is a child of the layer node.\n**Weights Layout**: GOIYX, which means that *X* changes the fastest, then *Y*, then *Input*, then *Output*, then *Group*.\n**Mathematical Formulation**\n*Deconvolution* is also called transposed convolution and performs the operation reverse to convolution.\nThe number of output features for each dimension is calculated as:\n\\f[S_{o}=stride(S_{i} - 1 ) + S_{f} - 2pad \\f]\nwhere \\f$S_{o}\\f$, \\f$S_{i}\\f$, and \\f$S_{f}\\f$ are the sizes of the output, the input, and the filter, respectively.\nOutput is calculated in the same way as for the convolution layer:\n\\f[out = \\sum_{i = 0}^{n}w_{i}x_{i} + b\\f]\n**Example**\n\n```html\n\n \n ... \n ... 
\n\n```", + "attributes": [ + { + "default": 1, + "description": " *stride* is a distance (in pixels) to slide the filter on the feature map over the (x, y) axis. For example, *stride* equal \"1,1\" means sliding the filter 1 pixel at a time over the (x, y) axis.", + "name": "stride", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *stride-x* is a distance (in pixels) to slide the filter on the feature map over the x axis. For example, *stride-x* equal 1 means sliding the filter 1 pixel at a time over the x axis.", + "name": "stride-x", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *stride-y* is a distance (in pixels) to slide the filter on the feature map over the y axis. For example, *stride-y* equal 1 means sliding the filter 1 pixel at a time over the y axis.", + "name": "stride-y", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *pad* is a number of pixels to add to the left and top of the input. For example, *pad* equal 1 means adding 1 pixel to the left of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *pad-x* is a number of pixels to add to the left of the input. For example, *pad-x* equal 1 means adding 1 pixel to the left of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-x", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *pad-y* is a number of pixels to add to the top of the input. For example, *pad-y* equal 1 means adding 1 pixel to the top of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-y", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *kernel* is a width and height of each filter. For example, *kernel* equal 3 (3, 3) means that each filter has width and height equal to 3.", + "name": "kernel", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *kernel-x* is a width of each filter. For example, *kernel* equal 3 means that each filter has width equal to 3.", + "name": "kernel-x", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *kernel-y* is a height of each filter. For example, *kernel-y* equal 3 means that each filter has height equal to 3.", + "name": "kernel-y", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *output* is a number of output feature maps per whole output (when *group* > 1, *output* still matches the number of output features regardless of *group* value). For example, *output* equals 1 means that there is 1 output feature map in a layer.", + "name": "output", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *group* denotes the number of groups to which *output* and *input* should be split. For example, *group* equal 1 means that all the filters are applied to full input (usual convolution), *group* equals 2 means that both *input* and *output* channels are separated into 2 groups and *i-th output* group is connected to *i-th input* group channels. 
*group* equals number of output feature maps denotes depth-wise separable convolution ([Reference](https://medium.com/towards-data-science/types-of-convolutions-in-deep-learning-717013397f4d#6f51)).", + "name": "group", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *dilation* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal \"1,1\" means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal \"2,2\" means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.", + "name": "dilation", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *dilation-y* denotes the distance in height between elements (weights) in the filter. For example, *dilation-y* equal 1 means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation-y* equal 2 means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.", + "name": "dilation-y", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "DetectionOutput", + "description": "**Short description**: *DetectionOutput* layer performs non-maximum suppression to generate the detection output using information on location and confidence predictions.\n**Detailed description**: [Reference](https://arxiv.org/pdf/1512.02325.pdf)\n**Parameters**: *DetectionOutput* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\nAt each feature map cell, *DetectionOutput* predicts the offsets relative to the default box shapes in the cell, as well as the per-class scores that indicate the presence of a class instance in each of those boxes. Specifically, for each box out of k at a given location, *DetectionOutput* computes class scores and the four offsets relative to the original default box shape. This results in a total of \\f$(c + 4)k\\f$ filters that are applied around each location in the feature map, yielding \\f$(c + 4)kmn\\f$ outputs for an m × n feature map.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " number of classes to be predicted", + "name": "num_classes", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " background label id. If there is no background class, set it to -1.", + "name": "background_label_id", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " maximum number of results to be kept at the NMS stage", + "name": "top_k", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " if \"true\", variance is encoded in target. Otherwise, we need to adjust the predicted offset accordingly.", + "name": "variance_encoded_in_target", + "required": true + }, + { + "default": 1, + "description": " number of total bboxes to be kept per image after the NMS step. -1 means keeping all bboxes after the NMS step.", + "name": "keep_top_k", + "required": true, + "type": "int32" + }, + { + "default": 1, + "name": "num_orient_classes", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " type of coding method for bounding boxes. 
caffe.PriorBoxParameter.CENTER_SIZE and others.", + "name": "code_type", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " bounding boxes are shared among different classes.", + "name": "share_location", + "required": true + }, + { + "default": 1, + "name": "interpolate_orientation", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " threshold to be used in the NMS stage", + "name": "nms_threshold", + "required": true, + "type": "float32" + }, + { + "default": 1, + "description": " only consider detections whose confidences are larger than a threshold. If not provided, consider all boxes.", + "name": "confidence_threshold", + "required": true, + "type": "float32" + } + ], + "support_level": "default" + }, + { + "name": "Eltwise", + "description": "**Short description**: *Eltwise* layer performs an element-wise operation, which is specified in parameters, over the given inputs.\n**Parameters**: *Eltwise* layer parameters should be specified in the `elementwise_data` node, which is placed as a child of the layer node.\n**Mathematical Formulation** *Eltwise* accepts 2 inputs of any number of dimensions - from 1 to 4; however, both of them must have exactly the same dimensions. The produced blob also has the same dimensions as each of its parents.\n*Eltwise* does the following with the input blobs:\n\\f[\no_{i} = f(b_{i}^{1}, b_{i}^{2})\n\\f]\nwhere \\f$b_{i}^{1}\\f$ is the first blob's \\f$i\\f$-th element, \\f$b_{i}^{2}\\f$ is the second blob's \\f$i\\f$-th element, \\f$o_{i}\\f$ is the output blob's \\f$i\\f$-th element, and \\f$f(a, b)\\f$ is a function that performs an operation over its two arguments \\f$a, b\\f$.\n* For *sum* operation, \\f$f(a, b)\\f$ is defined as\n \\f[\n f(a,b) = a + b\n \\f]\n* For *mul* operation, \\f$f(a, b)\\f$ is defined as\n \\f[\n f(a,b) = a * b\n \\f]\n* For *max* operation, \\f$f(a, b)\\f$ is defined as\n \\f[\n f(a,b) = \\left\\{\\begin{array}{ll}\n\t\ta \\quad \\mbox{if } a \\geq b \\\\\n\t\tb \\quad \\mbox{if } b > a\n\t\\end{array}\\right. \\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": "sum", + "description": " *operation* is the simple mathematical operation to be performed over inputs. 
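The three \f$f(a, b)\f$ variants above map directly onto a small JavaScript sketch (illustrative only, not Inference Engine code):

```js
// Element-wise combination of two equally shaped (flattened) blobs.
// Real implementations also validate shapes before combining.
function eltwise(a, b, operation) {
    const f = {
        sum: (x, y) => x + y,
        mul: (x, y) => x * y,
        max: (x, y) => Math.max(x, y)
    }[operation];
    return a.map((value, i) => f(value, b[i]));
}

console.log(eltwise([1, 2, 3], [4, 1, 3], 'max')); // [4, 2, 3]
```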
For example, *operation* equal *mul* means that input blobs are multiplied.", + "name": "operation", + "required": true, + "type": "string" + } + ], + "inputs": [ + { + "name": "inputs", + "type": "Tensor[]" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "support_level": "default" + }, + { + "name": "Flatten", + "category": "Shape", + "attributes": [ + { + "name": "axis", + "type": "int32" + }, + { + "name": "end_axis", + "type": "int32", + "default": -1 + } + ] + }, + { + "name": "FakeQuantize", + "inputs": [ + { "name": "X" }, + { "name": "input_low" }, + { "name": "input_high" }, + { "name": "output_low" }, + { "name": "output_high" } + ] + }, + { + "name": "FullyConnected", + "category": "Layer", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/innerproduct.html)\n**Detailed description**: [Reference](http://cs231n.github.io/convolutional-networks/#fc)\n**Parameters**: Specify *FullyConnected* layer parameters in the `fc_data` node, which is a child of the layer node.\n**Weights Layout**: OI, which means that *Input* changes the fastest, then *Output*.\n**Mathematical Formulation**\n* If the previous layer is *FullyConnected*:\n \\f[\n y_{i} = f( z_{i} ) \\quad with \\quad z_{i} = \\sum_{j=1}^{m_{1}^{( l-1 )}}w_{i,j}^{( l )}y_{j}^{ ( l -1 )}\n \\f]\n* Otherwise:\n \\f[\n y_{i} = f( z_{i} ) \\quad with \\quad z_{i}^{ ( l )} = \\sum_{j=1}^{m_{1}^{( l-1 )}}\\sum_{r=1}^{m_{2}^{ ( l-1 )}}\\sum_{s=1}^{m_{3}^{ ( l-1 )}}w_{i,j,r,s}^{ ( l )} ( Y_{j}^{ (l-1) })_{r,s}\n \\f]\n**Example**\n\n```html\n\n \n ... \n ... \n \n```", + "attributes": [ + { + "default": 1, + "description": " *out-size* is the length of the output vector. For example, *out-size* equal 4096 means that the output vector length is 4096.", + "name": "out-size", + "required": true, + "type": "int32" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "weights" + }, + { + "name": "bias" + } + ], + "support_level": "default" + }, + { + "name": "Gather", + "category": "Transform", + "inputs": [ + { "name": "data" }, + { "name": "indices" }, + { "name": "axis" } + ] + }, + { + "name": "GRN", + "category": "Normalization", + "description": "**Short description**: *GRN* is Global Response Normalization with L2 norm (across channels only).\n**Parameters**: GRN layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*GRN* computes the L2 norm by channels for the input blob. *GRN* generally does the following with the input blob:\n\\f[\noutput_{i} = \\frac{input_{i}}{\\sqrt{\\sum_{i}^{C} input_{i}^{2}}}\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *bias* is added to the variance.", + "name": "bias", + "required": true, + "type": "float32" + } + ], + "support_level": "default" + }, + { + "name": "GroupConvolution", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" } + ] + }, + { + "name": "GRUCell", + "category": "Layer", + "description": "GRUCell represents a single GRU Cell that computes the output using the formula described in the [paper](https://arxiv.org/abs/1406.1078).", + "attributes": [ + { "name": "hidden_size", "type": "int64", "description": "Specifies hidden state size." 
}, + { "name": "linear_before_reset", "type": "boolean", "optional": true, "default": false, "description": "denotes if the layer behaves according to the modification of GRUCell described in the formula in the [ONNX documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#GRU)." } + ], + "inputs": [ + { "name": "X", "description": "2D tensor of type T `[batch_size, input_size]`, input data. Required." }, + { "name": "initial_hidden_state", "description": "2D tensor of type T `[batch_size, hidden_size]`. Required." }, + { "name": "W", "description": "2D tensor of type T `[3 * hidden_size, input_size]`, the weights for matrix multiplication, gate order: zrh. Required." }, + { "name": "R", "description": "2D tensor of type T `[3 * hidden_size, hidden_size]`, the recurrence weights for matrix multiplication, gate order: zrh. Required." }, + { "name": "B", "description": "1D tensor of type T. If linear_before_reset is set to 1, then the shape is `[4 * hidden_size]` - the sum of biases for z and r gates (weights and recurrence weights), the biases for h gate are placed separately. Otherwise the shape is `[3 * hidden_size]`, the sum of biases (weights and recurrence weights). Optional." } + ] + }, + { + "name": "Interpolate", + "inputs": [ + { "name": "data" }, + { "name": "sizes" }, + { "name": "scales" }, + { "name": "axes" } + ] + }, + { + "name": "LSTMCell", + "category": "Layer", + "inputs": [ + { "name": "X" }, + { "name": "initial_hidden_state" }, + { "name": "initial_cell_state" }, + { "name": "W" }, + { "name": "R" }, + { "name": "B" } + ] + }, + { + "name": "MaxPool", + "category": "Pool" + }, + { + "name": "MatMul", + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "Memory", + "description": "**Short description**: *Memory* layer represents delay layer in terms of LSTM terminology. To read more about LSTM topologies please refer this [link](http://colah.github.io/posts/2015-08-Understanding-LSTMs).\n**Detailed description**: *Memory* layer saves state between two infer requests. In the topology, it is the single layer, however, in the Intermediate Representation, it is always represented as a pair of **Memory** layers. One of these layers does not have outputs and another does not have inputs (in terms of the Intermediate Representation).\n**Parameters**: *Memory* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Memory* save data from the input blob.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *id* is the id of the pair of *Memory* layers. For example, *id* equals r_27-28 means that layers with id 27 and 28 are in one pair.", + "name": "id", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *index* represents if the given layer is input or output. For example, *index* equal 0 means this layer is output one.", + "name": "index", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *size* represents the size of the group. 
For example, *size* equals 2 means this group is a pair.", + "name": "size", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "Multiply", + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "MVN", + "category": "Normalization", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/mvn.html)\n**Parameters**: *MVN* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*MVN* subtracts the mean from the input blob:\n\\f[\no_{i} = i_{i} - \\frac{\\sum{i_{k}}}{C * H * W}\n\\f]\nIf *normalize_variance* is set to 1, the output blob is divided by the standard deviation:\n\\f[\no_{i}=\\frac{o_{i}}{\\sqrt{\\sum o_{k}^{2}}+\\epsilon}\n\\f]\n**Example**\n\n```html\n\n \n \n ...\n \n \n ...\n \n\n```", + "attributes": [ + { + "default": 1, + "description": " *across_channels* is a flag that denotes if mean values are shared across channels. For example, *across_channels* equal 0 means that mean values are not shared across channels.", + "name": "across_channels", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *normalize_variance* is a flag that denotes whether to perform variance normalization.", + "name": "normalize_variance", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *eps* is the number to be added to the variance to avoid division by zero when normalizing the value. For example, *eps* equal 0.001 means that 0.001 is added to the variance.", + "name": "eps", + "required": true, + "type": "float32" + } + ], + "support_level": "default" + }, + { + "name": "Norm", + "category": "Normalization", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/lrn.html)\n**Detailed description**: [Reference](http://yeephycho.github.io/2016/08/03/Normalizations-in-neural-networks/#Local-Response-Normalization-LRN)\n**Parameters**: *Norm* layer parameters should be specified in the `norm_data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[o_{i} = \\frac{x_{i}}{\\left( 1 + \\left( \\frac{\\alpha}{n} \\right)\\sum_{i}x_{i}^{2} \\right)^{\\beta}}\\f]\nwhere \\f$n\\f$ is the size of each local region.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *alpha* represents the scaling parameter for the normalizing sum. For example, *alpha* equal 0.0001 means that the normalizing sum is multiplied by 0.0001.", + "name": "alpha", + "required": true, + "type": "float32" + }, + { + "default": 1, + "description": " *beta* represents the exponent for the normalizing sum. For example, *beta* equal 0.75 means that the normalizing sum is raised to the power of 0.75.", + "name": "beta", + "required": true, + "type": "float32" + }, + { + "default": 1, + "description": " *region* represents strategy of local regions extension. For example, *region* equal *across* means that the normalizing sum is performed over adjacent channels.", + "name": "region", + "required": true, + "type": "" + }, + { + "default": 1, + "description": " *local-size* represents the side length of the region to be used for the normalization sum or number of channels depending on the strategy specified in the *region* parameter. 
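For the across-channels strategy, the conventional LRN reading of these parameters looks like the following JavaScript sketch (illustrative only; `channels` holds the values at one spatial position):

```js
// Across-channels local response normalization: each value is divided by
// the normalizing sum built from its local-size neighborhood of channels.
function lrnAcross(channels, alpha, beta, localSize) {
    const half = Math.floor(localSize / 2);
    return channels.map((x, c) => {
        const lo = Math.max(0, c - half);
        const hi = Math.min(channels.length - 1, c + half);
        let sum = 0;
        for (let i = lo; i <= hi; i++) {
            sum += channels[i] * channels[i];
        }
        return x / Math.pow(1 + (alpha / localSize) * sum, beta);
    });
}

console.log(lrnAcross([1, 2, 4, 2, 1], 0.0001, 0.75, 5));
```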
For example, *local-size* equal 5 for the across strategy means application of the sum across 5 adjacent channels.", + "name": "local-size", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "Normalize", + "category": "Normalization", + "description": "**Short description**: *Normalize* layer performs l-p normalization over the input blob.\n**Parameters**: *Normalize* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[\no_{i} = \\frac{x_{i} * scale}{\\sqrt{\\sum_{i=0}^{C*H*W}x_{i}^{2}}}\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *across_spatial* is a flag that denotes if normalization is performed over CHW or HW. For example, *across_spatial* equals 0 means that normalization is not shared across channels.", + "name": "across_spatial", + "required": true + }, + { + "default": 1, + "description": " *channel_shared* is a flag that denotes if scale parameters are shared across channels. For example, *channel_shared* equal 0 means that scale parameters are not shared across channels.", + "name": "channel_shared", + "required": true + }, + { + "default": 1, + "description": " *eps* is the epsilon used to avoid division by zero when normalizing the value. For example, *eps* equals 0.001 means that 0.001 is used if all the values in normalization are equal to zero.", + "name": "eps", + "required": true, + "type": "float32" + } + ], + "support_level": "default" + }, + { + "name": "NormalizeL2", + "category": "Normalization", + "inputs": [ + { "name": "data" }, + { "name": "axes" } + ] + }, + { + "name": "Pad", + "category": "Tensor", + "attributes": [ + { + "name": "pad_value", + "type": "float32" + }, + { + "name": "pads_begin", + "type": "int32[]" + }, + { + "name": "pads_end", + "type": "int32[]" + }, + { + "name": "pad_mode" + } + ] + }, + { + "name": "Permute", + "category": "Shape", + "description": "**Short description**: *Permute* layer performs reordering of input blob dimensions.\n**Detailed description**: [Reference](http://caffe.help/manual/layers/tile.html)\n**Parameters**: *Permute* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Permute* layer performs reordering of the input blob. Source indexes and destination indexes are bound by the formula:\n\\f[\nsrc\\_ind_{offset} = n * ordered[1] * ordered[2] * ordered[3] + (h * ordered[3] + w)\n\\f]\n\\f[\nn \\in ( 0, order[0] )\n\\f]\n\\f[\nh \\in ( 0, order[2] )\n\\f]\n\\f[\nw \\in ( 0, order[3] )\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "description": " *order* is the set of dimensions indexes for output blob. 
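In shape terms, the *order* attribute is easy to sanity-check with a tiny JavaScript sketch (illustrative, not Inference Engine code):

```js
// Output dimension i takes input dimension order[i].
function permuteShape(shape, order) {
    return order.map((axis) => shape[axis]);
}

// NCHW -> NHWC, i.e. order 0,2,3,1:
console.log(permuteShape([1, 3, 224, 224], [0, 2, 3, 1])); // [1, 224, 224, 3]
```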
For example, *order* equal 0,2,3,1 means that the output blob has following dimensions: first dimension from the input blob, third dimension from the input blob, fourth dimension from the input blob, second dimension from the input blob.", + "name": "order", + "required": true, + "type": "int32[]" + } + ], + "support_level": "default" + }, + { + "name": "Pooling", + "category": "Pool", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/pooling.html)\n**Detailed description**: [Reference](http://cs231n.github.io/convolutional-networks/#pool)\n**Parameters**: Specify pooling layer parameters in the `pooling_data` node, which is a child of the layer node.\n**Mathematical Formulation**\n* For *max pool-method*:\n \\f[\n output_{j} = MAX\\{ x_{0}, ... x_{i}\\}\n \\f]\n* For *avg pool-method*:\n \\f[\n output_{j} = \\frac{\\sum_{i = 0}^{n}x_{i}}{n}\n \\f]\n**Example**\n\n```html\n\n \n ... \n ... \n \n```", + "attributes": [ + { + "default": [ + 1, + null + ], + "description": " *stride* is a distance (in pixels) to slide the filter on the feature map over the (x, y) axis. For example, *stride* equal \"1,1\" means sliding the filter 1 pixel at a time over the (x, y) axis.", + "name": "stride", + "required": true, + "type": "int32[]" + }, + { + "default": 1, + "description": " *stride-x* is a distance (in pixels) to slide the filter on the feature map over the x axis. For example, *stride-x* equal 1 means sliding the filter 1 pixel at a time over the x axis.", + "name": "stride-x", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *stride-y* is a distance (in pixels) to slide the filter on the feature map over the y axis. For example, *stride-y* equal 1 means sliding the filter 1 pixel at a time over the y axis.", + "name": "stride-y", + "required": true, + "type": "int32" + }, + { + "default": [ + 1, + null + ], + "name": "strides", + "type": "int32[]" + }, + { + "default": 1, + "description": " *pad* is a number of pixels to add to the left and top of the input. For example, *pad* equal 1 means adding 1 pixel to the left of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad", + "required": true, + "type": "int32" + }, + { + "default": 0, + "description": " *pad-x* is a number of pixels to add to the left of the input. For example, *pad-x* equal 1 means adding 1 pixel to the left of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-x", + "required": true, + "type": "int32" + }, + { + "default": 0, + "description": " *pad-y* is a number of pixels to add to the top of the input. For example, *pad-y* equal 1 means adding 1 pixel to the top of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-y", + "required": true, + "type": "int32" + }, + { + "default": 0, + "name": "pad-r", + "type": "int32" + }, + { + "default": 0, + "name": "pad-b", + "type": "int32" + }, + { + "default": [ + 0, + null + ], + "name": "pads_begin", + "type": "int32[]" + }, + { + "default": [ + 0, + null + ], + "name": "pads_end", + "type": "int32[]" + }, + { + "description": " *kernel* is a width and height of each filter. For example, *kernel* equal 3 (3, 3) means that each filter has width and height equal to 3.", + "name": "kernel", + "required": true, + "type": "int32[]" + }, + { + "default": 1, + "description": " *kernel-x* is a width of each filter. 
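As a quick illustration of the two *pool-method* formulas above, here is a minimal JavaScript sketch of one pooling window (illustrative only):

```js
// Reduce one pooling window to a single output value.
function poolWindow(values, poolMethod) {
    if (poolMethod === 'max') {
        return Math.max(...values); // output_j = MAX{x_0, ... x_i}
    }
    // 'avg': arithmetic mean of the window
    return values.reduce((sum, x) => sum + x, 0) / values.length;
}

console.log(poolWindow([1, 5, 2, 4], 'max')); // 5
console.log(poolWindow([1, 5, 2, 4], 'avg')); // 3
```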
For example, *kernel-x* equal 3 means that each filter has width equal to 3.", + "name": "kernel-x", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *kernel-y* is a height of each filter. For example, *kernel-y* equal 3 means that each filter has height equal to 3.", + "name": "kernel-y", + "required": true, + "type": "int32" + }, + { + "default": "max", + "description": " *pool-method* is a type of pooling strategy for values.", + "name": "pool-method", + "required": true, + "type": "" + }, + { + "default": false, + "description": " *exclude-pad* is a type of pooling strategy for values in the padding area. For example, if *exclude-pad* is \"true\", zero-values in the padding are not used.", + "name": "exclude-pad", + "required": true, + "type": "boolean" + }, + { + "default": "ceil", + "description": " *rounding_type* is a type of rounding to be applied.", + "name": "rounding-type", + "required": true + } + ], + "support_level": "default" + }, + { + "name": "Power", + "description": "**Short description**: *Power* layer computes the output as (shift + scale * x) ^ power for each input element x.\n**Parameters**: Power layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[\np = (shift + scale * x)^{power}\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [], + "support_level": "default", + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "PReLU", + "category": "Activation", + "description": "**Short description**: *PReLU* is the Parametric Rectified Linear Unit. The difference from *ReLU* is that the negative slopes can vary across channels.\n**Parameters**: *PReLU* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*PReLU* accepts one input with four dimensions. The produced blob has the same dimensions as the input.\n*PReLU* does the following with the input blob:\n\\f[\no_{i} = max(0, x_{i}) + w_{i} * min(0,x_{i})\n\\f]\nwhere \\f$w_{i}\\f$ is from the weights blob.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *channel_shared* shows whether the negative slope is shared across channels.", + "name": "channel_shared", + "required": true, + "type": "int32" + }, + { + "description": " *filler_type* defines the initialization type for the negative slope.", + "name": "filler_type", + "required": true, + "type": "string" + }, + { + "default": 1, + "description": " *filler_value* defines the value in the constant filler.", + "name": "filler_value", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *min(max)* defines the minimal (maximal) value in the uniform filler.", + "name": "min(max)", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *mean* defines the mean value in the Gaussian filler.", + "name": "mean", + "required": true, + "type": "int32" + } + ], + "support_level": "default", + "inputs": [ + { "name": "data" }, + { "name": "slope" } + ] + }, + { + "name": "PriorBox", + "description": "**Short description**: *PriorBox* layer generates prior boxes of specified sizes and aspect ratios across all dimensions.\n**Parameters**: *PriorBox* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**:\n*PriorBox* computes coordinates of prior boxes as follows:\n1. 
First calculates *center_x* and *center_y* of the prior box:\n \\f[\n W \\equiv Width \\quad Of \\quad Image\n \\f]\n \\f[\n H \\equiv Height \\quad Of \\quad Image\n \\f]\n * If step equals 0:\n \\f[\n center_x=(w+0.5)\n \\f]\n \\f[\n center_y=(h+0.5)\n \\f]\n * else:\n \\f[\n center_x=(w+offset)*step\n \\f]\n \\f[\n center_y=(h+offset)*step\n \\f]\n \\f[\n w \\subset \\left( 0, W \\right )\n \\f]\n \\f[\n h \\subset \\left( 0, H \\right )\n \\f]\n2. Then, for each \\f$ s \\subset \\left( 0, min_sizes \\right ) \\f$ calculates coordinates of priorboxes:\n \\f[\n xmin = \\frac{center_x - \\frac{s}{2}}{W}\n \\f]\n \\f[\n ymin = \\frac{center_y - \\frac{s}{2}}{H}\n \\f]\n \\f[\n xmax = \\frac{center_x + \\frac{s}{2}}{W}\n \\f]\n \\f[\n ymax = \\frac{center_y + \\frac{s}{2}}{H}\n \\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "name": "min_size", + "required": true, + "type": "float32" + }, + { + "name": "max_size", + "required": true, + "type": "float32" + }, + { + "default": 1, + "description": " *aspect_ratio* is a list of aspect ratios. Duplicate values are ignored. For example, *aspect_ratio* equal 2.000000,3.000000 means that for the first box the aspect_ratio is equal to 2 and for the second box - 3.", + "name": "aspect_ratio", + "required": true, + "type": "float32" + }, + { + "default": false, + "description": " *flip* is a flag that denotes that each *aspect_ratio* is duplicated and flipped. For example, *flip* equals 1 and *aspect_ratio* equals 3 mean that the aspect_ratio 1/3 is also used.", + "name": "flip", + "required": true, + "type": "boolean" + }, + { + "default": false, + "description": " *clip* is a flag that denotes if each value in the output blob is within [0,1]. For example, *clip* equal 1 means that each value in the output blob is within [0,1].", + "name": "clip", + "required": true, + "type": "boolean" + }, + { + "description": " *step* is a distance between box centers. For example, *step* equal 85 means that the distance between neighborhood prior boxes centers is 85.", + "name": "step", + "required": true, + "type": "float32" + }, + { + "default": 0.5, + "description": " *offset* is a shift of the box relative to the top left corner. For example, *offset* equal 85 means that the shift of neighborhood prior boxes centers is 85.", + "name": "offset", + "required": true, + "type": "float32" + }, + { + "description": " *variance* denotes a variance of adjusting bounding boxes. For example, *variance* equal 85 means that the shift of neighborhood prior boxes centers is 85.", + "name": "variance", + "required": true, + "type": "float32[]" + }, + { + "default": 1, + "description": " *scale_all_sizes* is a flag that denotes the type of inference. For example, *scale_all_sizes* equals 0 means that the priorbox layer is inferred in an MXNet-like manner. In particular, the *max_size* parameter is ignored.", + "name": "scale_all_sizes", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "PriorBoxClustered", + "description": "**Short description**: *PriorBoxClustered* layer generates prior boxes of specified sizes.\n**Parameters**: *PriorBoxClustered* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*PriorBoxClustered* computes coordinates of prior boxes as follows:\n1. 
Calculates the *center_x* and *center_y* of the prior box:\n \\f[\n W \\equiv Width \\quad Of \\quad Image\n \\f]\n \\f[\n H \\equiv Height \\quad Of \\quad Image\n \\f]\n \\f[\n center_x=(w+offset)*step\n \\f]\n \\f[\n center_y=(h+offset)*step\n \\f]\n \\f[\n w \\subset \\left( 0, W \\right )\n \\f]\n \\f[\n h \\subset \\left( 0, H \\right )\n \\f]\n2. For each \\f$s \\subset \\left( 0, W \\right )\\f$ calculates the prior boxes coordinates:\n \\f[\n xmin = \\frac{center_x - \\frac{width_s}{2}}{W}\n \\f]\n\t\\f[\n\tymin = \\frac{center_y - \\frac{height_s}{2}}{H}\n\t\\f]\n\t\\f[\n\txmax = \\frac{center_x + \\frac{width_s}{2}}{W}\n\t\\f]\n\t\\f[\n\tymax = \\frac{center_y + \\frac{height_s}{2}}{H}\n\t\\f]\nIf *clip* is defined, the coordinates of prior boxes are recalculated with the formula:\n\\f$coordinate = \\min(\\max(coordinate,0), 1)\\f$\n**Example**\n\n```html\n\n \n \n ...\n \n \n ...\n \n\n```", + "attributes": [ + { + "description": " *width* is a parameter that specifies desired boxes widths in pixels.", + "name": "width", + "required": true, + "type": "float32[]" + }, + { + "name": "height", + "required": true, + "type": "float32[]" + }, + { + "default": false, + "description": " *clip* is a flag that denotes if each value in the output blob is within [0,1]. For example, *clip* equal 1 means that each value in the output blob is within [0,1].", + "name": "clip", + "required": true, + "type": "boolean" + }, + { + "default": false, + "description": " *flip* is a flag that denotes whether the list of boxes is augmented with the flipped ones.", + "name": "flip", + "required": true, + "type": "boolean" + }, + { + "description": " *step* is a distance between box centers. For example, *step* equal 85 means that the distance between neighborhood prior boxes centers is 85.", + "name": "step", + "required": true, + "type": "float32" + }, + { + "name": "step_w", + "required": true, + "type": "float32" + }, + { + "name": "step_h", + "required": true, + "type": "float32" + }, + { + "default": 1, + "description": " *offset* is a shift of the box relative to the top left corner. For example, *offset* equal 85 means that the shift of neighborhood prior boxes centers is 85.", + "name": "offset", + "required": true, + "type": "float32" + }, + { + "description": " *variance* denotes a variance of adjusting bounding boxes. For example, *variance* equal 85 means that the shift of neighborhood prior boxes centers is 85.", + "name": "variance", + "required": true, + "type": "float32[]" + }, + { + "description": " *img_h* specifies the height of the input image. These parameters are calculated unless provided explicitly.", + "name": "img_h", + "required": true, + "type": "float32" + }, + { + "name": "img_w", + "required": true, + "type": "float32" + } + ], + "support_level": "default" + }, + { + "name": "Proposal", + "category": "Layer", + "description": "**Short description**: *Proposal* layer filters bounding boxes and outputs only those with the highest confidence of prediction.\n**Parameters**: Proposal layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Proposal* layer accepts three inputs with four dimensions. The produced blob has two dimensions: the first one equals *batch_size * post_nms_topn*.\n*Proposal* does the following with the input blob:\n1. Generates initial anchor boxes. The left top corner of all boxes is at (0, 0). Width and height of boxes are calculated from *base_size* with the scale and ratio parameters.\n2. 
For each point in the first input blob:\n * pins anchor boxes to the image according to the second input blob that contains four deltas for each box: for *x* and *y* of center, for *width* and for *height*\n * finds out score in the first input blob\n3. Filters out boxes with size less than *min_size*\n4. Sorts all proposals (*box*, *score*) by score from highest to lowest\n5. Takes top *pre_nms_topn* proposals\n6. Calculates intersections for boxes and filters out all with \\f$intersection/union > nms\\_thresh\\f$\n7. Takes top *post_nms_topn* proposals\n8. Returns top proposals\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *pre_nms_topn (post_nms_topn)* is the quantity of bounding boxes before (after) applying the NMS operation. For example, *pre_nms_topn (post_nms_topn)* equal 15 means that at most 15 bounding boxes are considered before (kept after) the NMS step.", + "name": "pre_nms_topn (post_nms_topn)", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *nms_thresh* is the minimum value of the proposal to be taken into consideration. For example, *nms_thresh* equal 0.5 means that all boxes with prediction probability less than 0.5 are filtered out.", + "name": "nms_thresh", + "required": true, + "type": "float32" + }, + { + "default": 1, + "description": " *feat_stride* is the step size to slide over boxes (in pixels). For example, *feat_stride* equal 16 means that all boxes are analyzed with the slide 16.", + "name": "feat_stride", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *min_size* is the minimum size of box to be taken into consideration. For example, *min_size* equal 35 means that all boxes with box size less than 35 are filtered out.", + "name": "min_size", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *ratio* is the ratios for anchor generation.", + "name": "ratio", + "required": true, + "type": "float32[]" + }, + { + "default": 1, + "description": " *scale* is the scales for anchor generation.", + "name": "scale", + "required": true, + "type": "float32[]" + } + ], + "support_level": "default" + }, + { + "name": "PriorBox", + "inputs": [ + { "name": "output_size" }, + { "name": "image_size" } + ] + }, + { + "name": "PSROIPooling", + "category": "Pool", + "description": "**Short description**: *PSROIPooling* layer computes position-sensitive max pooling on regions of interest specified by its input. It takes as input N position-sensitive score maps and a list of R regions of interest.\n**Detailed description**: [Reference](https://arxiv.org/pdf/1703.06211.pdf)\n**Parameters**: *PSRoiPooling* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\nThe output value for the \\f$(i, j)\\f$-th bin is obtained by summation from one score map \\f$x_{i,j}\\f$ corresponding to that bin. In short, the difference from *RoIPooling* is that a general feature map \\f$x\\f$ is replaced by a specific position-sensitive score map \\f$x_{i,j}\\f$.\n**Example**\n\n```html\n\n \n ... \n ... 
\n\n```", + "attributes": [ + { + "default": 1, + "description": " pooled output channel number", + "name": "output_dim", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " number of groups to encode position-sensitive score maps", + "name": "group_size", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " multiplicative spatial scale factor to translate ROI coordinates from their input scale to the scale used when pooling", + "name": "spatial_scale", + "required": true, + "type": "float32" + } + ], + "support_level": "default" + }, + { + "name": "Range", + "inputs": [ + { "name": "start" }, + { "name": "stop" }, + { "name": "step" } + ] + }, + { + "name": "RegionYolo", + "category": "Layer", + "description": "**Short description**: *RegionYolo* computes coordinates of regions with probability for each class.\n**Detailed description**: [Reference][p_yolo]\n**Parameters**: *RegionYolo* layer parameters should be specified as the `data` node, which is a child of the `layer` node.\n**Example**\n\n```html\n\n \n ... \n ... \n \n\n```", + "attributes": [ + { + "default": 1, + "description": " *coords* is num coordinates for each region", + "name": "coords", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *classes* is num classes for each region", + "name": "classes", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *num* is number of regions", + "name": "num", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *do_softmax* is a flag which specifies the method of infer", + "name": "do_softmax", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *anchors* coordinates regions", + "name": "anchors", + "required": true, + "type": "float32[]" + }, + { + "default": 1, + "description": " *mask* specifies which anchors to use", + "name": "mask", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *mask* specifies which anchors to use", + "name": "mask", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *axis* is the number of the dimension from which flattening is performed. For example, *axis* equals 1 means that flattening is started from the 1st dimension.", + "name": "axis", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *end_axis* is the number of the dimension on which flattening is ended. For example, *end_axis* equals -1 means that flattening is performed till the last dimension.", + "name": "end_axis", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "ReLU", + "category": "Activation", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/relu.html)\n**Detailed description**: [Reference](https://github.com/Kulbear/deep-learning-nano-foundation/wiki/ReLU-and-Softmax-Activation-Functions#rectified-linear-units)\n**Parameters**: *ReLU* layer parameters can be (not mandatory) specified in the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[\nY_{i}^{( l )} = max(0, Y_{i}^{( l - 1 )})\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 0, + "description": " *negative_slope* is a multiplier, which is used if the unit is not active (that is negative). 
For example, *negative_slope* equal 0.1 means that an inactive unit value would be multiplied by 0.1 and this is the [Leaky ReLU](https://keras.io/layers/advanced-activations/#leakyrelu). If *negative_slope* is equal to 0, this is the usual *ReLU*.", + "name": "negative_slope", + "required": true, + "type": "float64" + } + ], + "support_level": "default" + }, + { + "name": "ReorgYolo", + "category": "Layer", + "description": "**Short description**: *ReorgYolo* reorganizes the input blob taking the stride into account.\n**Detailed description**: [Reference][p_yolo]\n**Parameters**: *ReorgYolo* layer parameters should be specified as the `data` node, which is a child of the `layer` node.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *stride* is the spatial block size used for the reorganization: each *stride* x *stride* spatial block of the input is moved into the channel dimension.", + "name": "stride", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "Resample", + "category": "Layer", + "description": "Layer scales the input blob by the specified parameters.\n**Parameters**: Resample layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Example**\n\n```html\n\n \n \n ...\n \n \n ...\n \n\n```", + "attributes": [ + { + "default": 1, + "description": " *type* specifies the type of blob interpolation.", + "name": "type", + "required": true + }, + { + "default": 1, + "description": " *antialias* is a flag that denotes whether to perform anti-aliasing.", + "name": "antialias", + "required": true + } + ], + "support_level": "default" + }, + { + "name": "ReduceMean", + "inputs": [ + { "name": "data" }, + { "name": "axes" } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "description": "**Short description**: *Reshape* layer changes dimensions of the input blob according to the specified order. Input blob volume is equal to output blob volume, where volume is the product of dimensions.\n**Detailed description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/reshape.html)\n**Parameters**: *Reshape* layer parameters should be specified in the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\nIf you want to reshape input blob *BxCxHxW* into *Bx1x(C*H)xW*, the *dim* parameters of your layer should be:\n```html\n layer {\n name: \"reshape\"\n type: \"Reshape\"\n bottom: \"input\"\n top: \"output\"\n reshape_param {\n shape {\n dim: 0 # copy the dimension from below\n dim: 1\n dim: -1 # infer it from the other dimensions\n dim: 0\n }\n }\n }\n```\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *axis* is the number of the starting axis for reshape. For example, *axis* equal 1 means that *Reshape* replaces dimensions starting from the next after the first dimension.", + "name": "axis", + "required": true, + "type": "int32" + }, + { + "description": " *dim* is a comma-separated set of numbers which denotes the dimensions of the output blob. For example, *dim* equal 88,1,71 means that the output blob gets the following dimensions: first dimension equals 88, second dimension equals 1, third dimension equals 71. For more information, refer to the **Description** block. 
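The *dim* conventions (0 copies an input dimension, -1 is inferred from the remaining ones) can be expressed as a small JavaScript sketch (illustrative, not Inference Engine code):

```js
// Resolve a Reshape dim specification against an input shape while
// preserving the total volume.
function resolveReshape(inputShape, dim) {
    const volume = inputShape.reduce((a, b) => a * b, 1);
    const out = dim.map((d, i) => (d === 0 ? inputShape[i] : d));
    const inferred = out.indexOf(-1);
    if (inferred !== -1) {
        const known = out.reduce((a, b) => (b === -1 ? a : a * b), 1);
        out[inferred] = volume / known;
    }
    return out;
}

// BxCxHxW -> Bx1x(C*H)xW, matching the Caffe snippet above:
console.log(resolveReshape([2, 3, 4, 5], [0, 1, -1, 0])); // [2, 1, 12, 5]
```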
If *dim* is equal to two numbers, it performs [flattening](http://caffe.berkeleyvision.org/tutorial/layers/flatten.html).", + "name": "dim", + "required": true, + "type": "int32[]" + }, + { + "default": 1, + "description": " *num_axes* is the number of dimensions to be replaced with a reshaped blob starting from the dimension number specified in *axis* property. For example, *num_axes* equal 2 means that 2 dimensions are replaced with reshaped blob.", + "name": "num_axes", + "required": true, + "type": "int32" + }, + { + "name": "special_zero", + "type": "boolean" + } + ], + "inputs": [ + { "name": "data" }, + { "name": "shape" } + ], + "support_level": "default" + }, + { + "name": "ROIPooling", + "category": "Layer", + "description": "**Short description**: It is a *pooling layer* with *max* pooling strategy (see *max* option in the *Pooling layer* parameters description). It is used over feature maps of non-uniform sizes and outputs another feature map of a fixed size.\n**Detailed description**: [deepsense.io reference](https://blog.deepsense.ai/region-of-interest-pooling-explained/)\n**Parameters**: Specify *ROIPooling* layer parameters in the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[\noutput_{j} = MAX\\{ x_{0}, ... x_{i}\\}\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n \n```", + "attributes": [ + { + "default": 1, + "description": " *pooled_h* is a height of the ROI output feature map. For example, *pooled_h* equal 6 means that the height of the output of *ROIpooling* is 6.", + "name": "pooled_h", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *pooled_w* is a width of the ROI output feature map. For example, *pooled_w* equal 6 means that the width of the output of *ROIpooling* is 6.", + "name": "pooled_w", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *spatial_scale* is a ratio of the input feature map over the input image size.", + "name": "spatial_scale", + "required": true, + "type": "float32" + } + ], + "support_level": "default" + }, + { + "name": "ScaleShift", + "category": "Layer", + "description": "**Short description**: *ScaleShift* layer performs linear transformation of the input blobs. Weights denote scaling parameter, biases - a shift.\n**Parameters**: *ScaleShift* layer does not have additional parameters.\n**Mathematical Formulation**\n\\f[\no_{i} =\\gamma b_{i} + \\beta\n\\f]\n**Example**\n\n```\n\n ... \n ... \n\n```", + "attributes": [], + "support_level": "default" + }, + { + "name": "Sigmoid", + "category": "Activation" + }, + { + "name": "SimplerNMS", + "category": "Layer", + "description": "**Short description**: *SimplerNMS* layer performs filtering of bounding boxes and outputs only those with the highest confidence of prediction.\n**Parameters**: *SimplerNMS* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*SimplerNMS* accepts three inputs with four dimensions. Produced blob has two dimensions, the first one equals *post_nms_topn*.\n*SimplerNMS* does the following with the input blob:\n1. Generates initial anchor boxes. Left top corner of all boxes is (0, 0). Width and height of boxes are calculated based on scaled (according to the scale parameter) default widths and heights\n2. 
For each point in the first input blob:\n * pins anchor boxes to the picture according to the second input blob, which contains four deltas for each box: for x and y of center, for width, and for height\n * finds out score in the first input blob\n3. Filters out boxes with size less than *min_bbox_size*.\n4. Sorts all proposals (*box, score*) by score from highest to lowest\n5. Takes top *pre_nms_topn* proposals\n6. Calculates intersections for boxes and filters out all with \\f$intersection/union > iou\\_threshold\\f$\n7. Takes top *post_nms_topn* proposals\n8. Returns top proposals\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *pre_nms_topn (post_nms_topn)* is the quantity of bounding boxes before (after) applying the NMS operation. For example, *pre_nms_topn (post_nms_topn)* equals 15 means that at most 15 bounding boxes are considered before (kept after) the NMS step.", + "name": "pre_nms_topn (post_nms_topn)", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *cls_threshold* is the minimum value of the proposal to be taken into consideration. For example, *cls_threshold* equal 0.5 means that all boxes with prediction probability less than 0.5 are filtered out.", + "name": "cls_threshold", + "required": true, + "type": "float32" + }, + { + "default": 1, + "description": " *iou_threshold* is the minimum ratio of boxes overlapping to be taken into consideration. For example, *iou_threshold* equal 0.7 means that all boxes with overlapping ratio less than 0.7 are filtered out.", + "name": "iou_threshold", + "required": true, + "type": "float32" + }, + { + "default": 1, + "description": " *feat_stride* is the step size to slide over boxes (in pixels). For example, *feat_stride* equal 16 means that all boxes are analyzed with the slide 16.", + "name": "feat_stride", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *min_bbox_size* is the minimum size of box to be taken into consideration. For example, *min_bbox_size* equal 35 means that all boxes with box size less than 35 are filtered out.", + "name": "min_bbox_size", + "required": true, + "type": "int32" + }, + { + "default": 1, + "description": " *scale* is the array of scales for anchor boxes generating.", + "name": "scale", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "Subtract", + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "SoftMax", + "category": "Activation", + "description": "**Short description**: [Reference](https://github.com/Kulbear/deep-learning-nano-foundation/wiki/ReLU-and-Softmax-Activation-Functions#softmax)\n**Detailed description**: [Reference](http://cs231n.github.io/linear-classify/#softmax)\n**Parameters**: *SoftMax* layer parameters can be (not mandatory) specified in the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[\ny_{c} = \\frac{e^{Z_{c}}}{\\sum_{d=1}^{C}e^{Z_{d}}}\n\\f]\nwhere \\f$C\\f$ is the number of classes\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "description": " *axis* represents the axis along which the *SoftMax* is calculated. *axis* equal 1 is the default value.", + "name": "axis", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "Split", + "category": "Tensor", + "description": "**Short description**: *Split* layer splits the input into several output groups. 
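In shape terms, splitting along the channel axis can be sketched in a few lines of JavaScript (illustrative only):

```js
// Split one axis of a shape among output ports: a Bx(C1+C2)xHxW blob with
// two children yields BxC1xHxW and BxC2xHxW.
function splitShape(shape, axis, sizes) {
    return sizes.map((size) => {
        const out = shape.slice();
        out[axis] = size;
        return out;
    });
}

console.log(splitShape([1, 6, 7, 7], 1, [3, 3]));
// [ [ 1, 3, 7, 7 ], [ 1, 3, 7, 7 ] ]
```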
Group sizes are denoted by the number and the size of output ports.\n**Detailed description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/split.html)\n**Parameters**: *None*\n**Mathematical Formulation**\nSplits the input blob among children. For example, if the blob is *Bx(C+C)xHxW* and there are two children, then each output blob is *BxCxHxW*.\n**Example**\n\n```html\n\n ... \n ... \n\n```", + "attributes": [ + { + "name": "axis", + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "Squeeze", + "category": "Transform", + "inputs": [ + { "name": "input" }, + { "name": "axes" } + ] + }, + { + "name": "StridedSlice", + "category": "Tensor", + "inputs": [ + { "name": "data" }, + { "name": "begin" }, + { "name": "end" }, + { "name": "stride" } + ] + }, + { + "name": "Swish", + "category": "Activation" + }, + { + "name": "TensorIterator", + "inputs": [ + { "name": "inputs", "type": "Tensor[]" } + ], + "outputs": [ + { "name": "outputs", "type": "Tensor[]" } + ] + }, + { + "name": "Tile", + "description": "**Short description**: *Tile* layer extends the input blob with copies of data along a specific axis.\n**Detailed description**: [Reference](http://caffe.help/manual/layers/tile.html)\n**Parameters**: *Tile* layer parameters should be specified as the `tile_data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Tile* extends the input blob and fills the output blob following the rules:\n\\f[\nout_i=input_i[inner\\_dim*t]\n\\f]\n\\f[\nt \\in \\left ( 0, \\quad tiles \\right )\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "attributes": [ + { + "default": 1, + "description": " *axis* is the index of the axis to tile. For example, *axis* equals 3 means that the fourth axis is used for tiling.", + "name": "axis", + "required": true, + "type": "int32" + }, + { + "description": " *tiles* is the size of the specified axis in the output blob. 
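Reduced to one dimension, the tiling rule above amounts to repeating the data *tiles* times, as in this JavaScript sketch (illustrative only):

```js
// Tile a flattened slice: out[i] = input[i % input.length], repeated
// *tiles* times along the chosen axis.
function tile1d(input, tiles) {
    const out = [];
    for (let t = 0; t < tiles; t++) {
        out.push(...input);
    }
    return out;
}

console.log(tile1d([1, 2, 3], 2)); // [1, 2, 3, 1, 2, 3]
```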
For example, *tiles* equal to 88 means that the output blob gets 88 copies of the data along the specified axis.", + "name": "tiles", + "required": true, + "type": "int32" + } + ], + "support_level": "default" + }, + { + "name": "Transpose", + "category": "Transform", + "inputs": [ + { "name": "arg" }, + { "name": "input_order" } + ] + }, + { + "name": "Unsqueeze", + "category": "Transform", + "inputs": [ + { "name": "input" }, + { "name": "axes" } + ] + } +] \ No newline at end of file diff --git a/openvino.js b/openvino.js new file mode 100644 index 00000000000..f158bab5a90 --- /dev/null +++ b/openvino.js @@ -0,0 +1,795 @@ + +import * as xml from './xml.js'; + +const openvino = {}; + +openvino.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'bin') { + const stream = context.stream; + const signature = [ 0x21, 0xA8, 0xEF, 0xBE, 0xAD, 0xDE ]; + if (signature.length <= stream.length && stream.peek(signature.length).every((value, index) => value === signature[index])) { + return undefined; + } + if (stream.length > 4) { + const buffer = stream.peek(Math.min(256, stream.length)); + const signature = (buffer[0] | buffer[1] << 8 | buffer[2] << 16 | buffer[3] << 24) >>> 0; + if (signature === 0x00000000 || signature === 0x00000001 || + signature === 0x01306B47 || signature === 0x000D4B38 || signature === 0x0002C056) { + return undefined; + } + for (let i = 0; i < buffer.length - 4; i++) { + const signature = (buffer[i] | buffer[i + 1] << 8 | buffer[i + 2] << 16 | buffer[i + 3] << 24) >>> 0; + if (signature === 0xdeadbeef) { + return undefined; + } + } + } + if (/^.*pytorch_model.*\.bin$/.test(identifier) || + /^.*group.+-shard.+of.+\.bin$/.test(identifier)) { + return undefined; + } + const identifiers = new Set([ 'config.bin', 'model.bin', '__model__.bin', 'weights.bin', 'programs.bin', 'best.bin', 'ncnn.bin' ]); + if (identifiers.has(identifier)) { + return undefined; + } + return 'openvino.bin'; + } + const tags = context.tags('xml'); + if (tags.has('net')) { + return 'openvino.xml'; + } + return undefined; + } + + async open(context, target) { + const identifier = context.identifier; + const base = identifier.substring(0, identifier.length - 4); + let stream = null; + let bin = null; + switch (target) { + case 'openvino.xml': { + stream = context.stream; + try { + const file = `${base}.bin`; + const content = await context.fetch(file); + bin = content.stream.peek(); + } catch (error) { + // continue regardless of error + } + break; + } + case 'openvino.bin': { + const file = `${base}.xml`; + const content = await context.fetch(file, null); + stream = content.stream; + bin = context.stream.peek(); + break; + } + default: { + throw new openvino.Error(`Unsupported OpenVINO format '${target}'.`); + } + } + const metadata = await context.metadata('openvino-metadata.json'); + let document = null; + try { + const reader = xml.TextReader.open(stream); + document = reader.read(); + } catch (error) { + const message = error && error.message ? 
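+                // The XML reader can throw values that are not Error objects, so a
+                // printable message is derived defensively before it is wrapped in
+                // openvino.Error below.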
error.message : error.toString(); + throw new openvino.Error(`File format is not OpenVINO XML (${message.replace(/\.$/, '')}).`); + } + if (!document.documentElement || document.documentElement.localName != 'net') { + throw new openvino.Error('File format is not OpenVINO IR.'); + } + const element = document.documentElement; + const object = (element) => { + const obj = {}; + for (const attribute of element.attributes) { + obj[attribute.localName] = attribute.value; + } + return obj; + }; + const child = (parent, name) => { + const elements = parent.getElementsByTagName(name); + if (elements.length > 1) { + throw new openvino.Error(`Element '${parent.localName}' has multiple '${name}' elements.`); + } + return elements.length > 0 ? elements[0] : null; + }; + const children = (parent, name, element) => { + const list = child(parent, name); + return list ? list.getElementsByTagName(element) : []; + }; + const edges = (parent, name) => { + const map = {}; + for (const element of children(parent, name || 'edges', 'edge')) { + const fromLayer = element.getAttribute('from-layer'); + const fromPort = element.getAttribute('from-port'); + const toLayer = element.getAttribute('to-layer'); + const toPort = element.getAttribute('to-port'); + map[`${toLayer}:${toPort}`] = `${fromLayer}:${fromPort}`; + } + return map; + }; + const layers = (parent) => { + const ports = (parent, name) => { + return children(parent, name, 'port').map((element) => { + const port = object(element); + port.dims = element.getElementsByTagName('dim').map((dim) => parseInt(dim.textContent.trim(), 10)); + return port; + }); + }; + return children(parent, 'layers', 'layer').map((element) => { + const layer = object(element); + layer.input = ports(element, 'input'); + layer.output = ports(element, 'output'); + const data = child(element, 'data'); + const blobs = child(element, 'blobs'); + layer.data = !data ? {} : object(data); + layer.blobs = !blobs ? [] : blobs.getElementsByTagName('*').map((blob) => { + const obj = object(blob); + obj.name = blob.localName; + obj.offset = parseInt(obj.offset, 10); + obj.size = parseInt(obj.size, 10); + return obj; + }); + if (layer.type === 'TensorIterator') { + layer.back_edges = edges(element, 'back_edges'); + const body = child(element, 'body'); + if (body) { + layer.body = { + layers: layers(body), + edges: edges(body) + }; + } + const port_map = child(element, 'port_map'); + if (port_map) { + layer.port_map = { input: [], output: [] }; + for (const port of port_map.getElementsByTagName('*')) { + const item = object(port); + switch (port.localName) { + case 'input': layer.port_map.input.push(item); break; + case 'output': layer.port_map.output.push(item); break; + default: throw new openvino.Error(`Unsupported port local name '${port.localName}'.`); + } + } + } + } + return layer; + }); + }; + const net = object(element); + net.body = { + layers: layers(element), + edges: edges(element) + }; + return new openvino.Model(metadata, net, bin); + } +}; + +openvino.Model = class { + + constructor(metadata, net, bin) { + this.name = net.name || ''; + this.graphs = [ new openvino.Graph(metadata, net, bin) ]; + this.format = 'OpenVINO IR'; + } +}; + +openvino.Graph = class { + + constructor(metadata, net, bin) { + this.name = net.name || ''; + this.nodes = []; + this.inputs = []; + this.outputs = []; + const tensors = new Map(); + const values = new Map(); + values.map = (layer, precision, port, map) => { + const id = `${layer}:${port.id}`; + const name = map && map[id] ? 
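+            // The edge map translates a consumer-side key 'toLayer:toPort' into the
+            // producing 'fromLayer:fromPort', so every reader of a given output
+            // resolves to the same shared openvino.Value instance.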
map[id] : id; + if (name === '') { + throw new openvino.Error('Empty value name.'); + } + const shape = port.dims.length == 0 ? null : new openvino.TensorShape(port.dims); + if (!precision && values.has(name)) { + const value = values.get(name); + if (value.type && value.type.shape && value.type.shape.equals(shape)) { + return value; + } + } + const type = new openvino.TensorType(precision, shape); + let tensor = null; + if (tensors.has(id)) { + const blob = tensors.get(id); + const offset = blob.offset; + const size = blob.size; + const shape = new openvino.TensorShape(blob.shape); + const type = new openvino.TensorType(blob.precision || precision, shape); + const data = (bin && (offset + size) <= bin.length) ? bin.slice(offset, offset + size) : null; + tensor = new openvino.Tensor(type, data, 'Const'); + } + if (!values.has(name)) { + values.set(name, new openvino.Value(name, type, tensor)); + } else if (type && !type.equals(values.get(name).type)) { + throw new openvino.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + const nodes = new Map(); + const constant = (layers, edges, back_edges) => { + back_edges = back_edges || {}; + for (const layer of layers) { + if (layer.type === 'Const' && + layer.input.length === 0 && layer.output.length === 1 && layer.blobs.length === 0 && + layer.data && layer.data.element_type !== undefined && layer.data.offset !== undefined && layer.data.size !== undefined) { + let precision = null; + switch (layer.data.element_type) { + case 'f16': precision = 'FP16'; break; + case 'f32': precision = 'FP32'; break; + case 'f64': precision = 'FP64'; break; + default: precision = layer.data.element_type.toUpperCase(); + } + const shape = layer.data.shape; + layer.blobs.push({ + name: 'value', + precision: precision, + offset: parseInt(layer.data.offset, 10), + size: parseInt(layer.data.size, 10), + shape: shape ? 
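+                    // IR v10 'Const' layers describe their payload through the
+                    // element_type, offset, size and shape attributes; they are
+                    // normalized here into a synthetic 'value' blob so the shared
+                    // blob-to-tensor path can handle old- and new-style constants alike.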
shape.split(',').map((dim) => parseInt(dim.trim(), 10)) : null + }); + layer.data = {}; + } + if (layer.type === 'Const' && layer.blobs.length === 1 && !layer.blobs[0].shape && + layer.input.length === 0 && layer.output.length === 1 && layer.output[0].dims) { + layer.blobs[0].shape = layer.output[0].dims; + } + } + const constants = new Map(); + for (const layer of layers) { + if (layer.type === 'Const' && layer.input.length === 0 && layer.output.length === 1) { + const from = `${layer.id}:${layer.output[0].id}`; + constants.set(from, { layer: layer, counter: 0 }); + } + } + for (const from of Object.values(edges)) { + if (constants.has(from)) { + constants.get(from).counter++; + } + } + if (back_edges) { + for (const from of Object.values(back_edges)) { + if (constants.has(from)) { + constants.get(from).counter++; + } + } + } + for (const [name, value] of constants) { + if (value.counter !== 1) { + constants.delete(name); + } + } + for (const layer of layers) { + if (layer.blobs.length === 0) { + for (let i = layer.input.length - 1; i > 0; i--) { + const input = layer.input[i]; + const to = `${layer.id}:${input.id}`; + const from = edges[to] || back_edges[to]; + if (!constants.has(from)) { + break; + } + const constLayer = constants.get(from).layer; + if (constLayer && Array.isArray(constLayer.blobs) && constLayer.blobs.length > 0) { + const [blob] = constLayer.blobs; + if (blob) { + blob.id = constLayer.name || constLayer.id; + layer.input[i].blob = blob; + constants.get(from).layer = null; + constants.get(from).delete = true; + } + } + } + } + } + return layers.filter((layer) => { + if (layer.type === 'Const' && layer.input.length === 0 && layer.output.length === 1) { + const from = `${layer.id}:${layer.output[0].id}`; + if (constants.has(from) && constants.get(from).delete) { + return false; + } + } + return true; + }); + }; + const body = net.body; + const layers = new Map(body.layers.map((entry) => [ entry.id, entry ])); + const ports = new Map(); + if (Array.isArray(net.input)) { + for (const input of net.input) { + const value = values.map('', input.precision, input); + const argument = new openvino.Argument(input.id, [ value ]); + this.inputs.push(argument); + ports.set(input.id, value); + } + } + if (Array.isArray(net.output)) { + for (const output of net.output) { + const value = values.map('', output.precision, output); + const argument = new openvino.Argument(output.id, [ value ]); + this.outputs.push(argument); + ports.set(output.id, value); + } + } + for (const layer of body.layers) { + for (const output of layer.output) { + if (!output.precision) { + output.precision = layer.precision; + } + } + } + if (net.port_map) { + for (const input of net.port_map.input) { + const external_port = net.input.find((v) => v.id === input.external_port_id); + const layer = layers.get(input.internal_layer_id); + if (input.internal_port_id === undefined) { + input.internal_port_id = ''; + layer.input.push({ + id: input.internal_port_id, + precision: layer.data.element_type, + dims: layer.data.shape.split(',') + }); + } + const internal_port = layer.input.find((v) => v.id === input.internal_port_id); + internal_port.precision = external_port.precision; + } + for (const output of net.port_map.output) { + const external_port = net.output.find((v) => v.id === output.external_port_id); + const layer = layers.get(output.internal_layer_id); + if (output.internal_port_id === undefined) { + output.internal_port_id = ''; + layer.output.push({ + id: output.internal_port_id, + precision: 
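+                        // A TensorIterator port map may reference an internal port that
+                        // was never declared; a placeholder is synthesized here and
+                        // typed from the corresponding external port.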
external_port.precision, + dims: external_port.dims + }); + } + } + } + const layer_list = constant(body.layers, body.edges); + for (const layer of layer_list) { + for (const input of layer.input) { + if (input.blob) { + tensors.set(`${layer.id}:${input.id}`, input.blob); + } + } + } + for (const layer of layer_list) { + for (const output of layer.output) { + values.map(layer.id, output.precision, output, null); + } + } + for (const layer of layer_list) { + const inputs = layer.input.map((input) => { + const to = `${layer.id}:${input.id}`; + if (body.edges[to]) { + const output = body.edges[to] ? body.edges[to].split(':') : []; + const [outputLayerId, outputId] = output; + const outputLayer = layers.get(outputLayerId); + if (outputLayer && outputId) { + const output = outputLayer.output.find((output) => output.id === outputId); + if (input && output) { + input.precision = output.precision; + } + } + } + return values.map(layer.id, input.precision || layer.precision, input, body.edges); + }); + const outputs = layer.output.map((output) => { + const precision = output && output.precision ? output.precision : layer && layer.precision ? layer.precision : null; + return values.map(layer.id, precision, output, null); + }); + const subgraph = Array.isArray(net.input) || Array.isArray(net.output); + if (!subgraph && (layer.type === 'Input' || layer.type === 'Parameter')) { + const name = layer.name || ''; + // precision is a part of OpenVINO IR layers of IR v6 and earlier + // in IR v7 and newer the port is no longer an attribute of the layer but of each output port + // IR input is not just a placeholder, it is conceptually the legitimate layer + // in order not to break compatibility with the overall approach + // with openvino.Argument for inputs and openvino.Node for outputs + // input openvino.Node would be stored as an optional attribute of openvino.Parameter + this.inputs.push(new openvino.Argument(name, outputs)); + } else { + const node = new openvino.Node(metadata, layer, inputs, outputs, bin); + nodes.set(layer.id, node); + } + } + this.nodes = Array.from(nodes.values()); + if (net.port_map) { + const createMapLayer = (obj) => { + const data = {}; + for (const [name, value] of Object.entries(obj)) { + if (name === 'external_port_id' || name === 'internal_layer_id' || name === 'internal_port_id') { + continue; + } + data[name] = value; + } + const layer = {}; + layer.type = '-'; + layer.data = data; + return layer; + }; + for (const input of net.port_map.input) { + const internal_port = layers.get(input.internal_layer_id).input.find((v) => v.id === input.internal_port_id); + const inputs = [ ports.get(input.external_port_id) ]; + const outputs = [ values.map(input.internal_layer_id, internal_port.precision, internal_port) ]; + const layer = createMapLayer(input); + this.nodes.push(new openvino.Node(metadata, layer, inputs, outputs)); + } + for (const output of net.port_map.output) { + const internal_port = layers.get(output.internal_layer_id).output.find((v) => v.id === output.internal_port_id); + const inputs = [ values.map(output.internal_layer_id, internal_port.precision, internal_port) ]; + const outputs = [ ports.get(output.external_port_id) ]; + const layer = createMapLayer(output); + this.nodes.push(new openvino.Node(metadata, layer, inputs, outputs)); + } + } + } +}; + +openvino.Node = class { + + constructor(metadata, layer, inputs, outputs, bin) { + this.name = layer.name || ''; + this.inputs = []; + this.outputs = []; + this.attributes = []; + const type = layer.type; + 
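+        // Resolve the layer type against 'openvino-metadata.json'; a known type
+        // (e.g. 'Convolution') yields named inputs/outputs used for the argument
+        // grouping below, while an unknown type degrades to a bare { name: type }.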
this.type = metadata.type(type) || { name: type }; + for (let i = 0; i < inputs.length;) { + const input = this.type && Array.isArray(this.type.inputs) && i < this.type.inputs.length ? this.type.inputs[i] : inputs.length === 1 ? { name: 'input' } : { name: i.toString() }; + const count = input.type === 'Tensor[]' ? inputs.length - i : 1; + const values = inputs.slice(i, i + count); + const argument = new openvino.Argument(input.name, values); + this.inputs.push(argument); + i += count; + } + for (let i = 0; i < outputs.length;) { + const output = this.type && Array.isArray(this.type.outputs) && i < this.type.outputs.length ? this.type.outputs[i] : outputs.length === 1 ? { name: 'output' } : { name: i.toString() }; + const count = output.type === 'Tensor[]' ? outputs.length - i : 1; + const values = outputs.slice(i, i + count); + const argument = new openvino.Argument(output.name, values); + this.outputs.push(argument); + i += count; + } + for (const [name, value] of Object.entries(layer.data)) { + const attribute = new openvino.Attribute(metadata.attribute(type, name), name, value); + this.attributes.push(attribute); + } + if (layer.type === 'TensorIterator') { + const graph = new openvino.Graph(metadata, layer, null); + const attribute = new openvino.Attribute({ type: 'graph' }, 'body', graph); + this.attributes.push(attribute); + } + for (const blob of layer.blobs || []) { + const name = blob.name; + const offset = blob.offset; + let data = (bin && (offset + blob.size) <= bin.length) ? bin.slice(offset, offset + blob.size) : null; + let dimensions = blob.shape || null; + const category = blob.kind || 'Blob'; + const id = blob.id || ''; + const precision = blob.precision || layer.precision; + let itemSize = undefined; + switch (precision) { + case 'BOOL': case 'BOOLEAN': itemSize = 1; break; + case 'I1': case 'U1': itemSize = 0.125; break; + case 'I4': case 'U4': itemSize = 0.5; break; + case 'I8': case 'U8': itemSize = 1; break; + case 'I16': case 'U16': case 'FP16': itemSize = 2; break; + case 'I32': case 'U32': case 'FP32': itemSize = 4; break; + case 'I64': case 'U64': case 'FP64': itemSize = 8; break; + default: throw new openvino.Error(`Unsupported data type size '${precision}'.`); + } + const weight = (name, precision, dimensions, data) => { + const shape = dimensions ? new openvino.TensorShape(dimensions) : null; + const type = new openvino.TensorType(precision, shape); + const tensor = new openvino.Tensor(type, data, category); + const value = new openvino.Value(id, null, tensor); + this.inputs.push(new openvino.Argument(name, [ value ])); + const size = Math.ceil(dimensions.reduce((a, b) => a * b, 1) * itemSize); + if (data && data.length !== size) { + return data.slice(size, data.length); + } + return null; + }; + if (itemSize) { + switch (`${type}:${name}`) { + case 'FullyConnected:weights': { + const outSize = parseInt(layer.data['out-size'], 10); + dimensions = [ layer.input[0].dims[1], outSize ]; + break; + } + case 'FullyConnected:biases': { + dimensions = [ parseInt(layer.data['out-size'], 10) ]; + break; + } + case 'Convolution:weights': + case 'Deconvolution:weights': { + /* eslint-disable prefer-destructuring */ + const c = this.inputs[0].value[0].type.shape.dimensions[1]; + /* eslint-enable prefer-destructuring */ + const group = parseInt(layer.data.group || '1', 10); + const kernel = layer.data['kernel-x'] !== undefined && layer.data['kernel-y'] !== undefined ? 
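+                        // Older IR versions spell the kernel as separate 'kernel-x' /
+                        // 'kernel-y' attributes; newer ones use a comma-separated
+                        // 'kernel' list. Either way, the weight shape reconstructed
+                        // below is [ C / group, N, kH, kW ].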
+ [ parseInt(layer.data['kernel-x'], 10), parseInt(layer.data['kernel-y'], 10) ] : + layer.data.kernel.split(',').map((v) => parseInt(v.trim(), 10)); + const n = parseInt(layer.data.output, 10); + dimensions = [ Math.floor(c / group), n ].concat(kernel); + break; + } + case 'LSTMCell:weights': { + /* eslint-disable prefer-destructuring */ + const input_size = inputs[0].type.shape.dimensions[1]; + /* eslint-enable prefer-destructuring */ + const hidden_size = parseInt(layer.data.hidden_size, 10); + data = weight('W', precision, [ 4 * hidden_size, input_size ], data); + data = weight('R', precision, [ 4 * hidden_size, hidden_size ], data); + break; + } + case 'LSTMCell:biases': { + const hidden_size = parseInt(layer.data.hidden_size, 10); + data = weight('B', precision, [ 4 * hidden_size ], data); + break; + } + case 'GRUCell:weights': { + /* eslint-disable prefer-destructuring */ + const input_size = inputs[0].type.shape.dimensions[1]; + /* eslint-enable prefer-destructuring */ + const hidden_size = parseInt(layer.data.hidden_size, 10); + data = weight('W', precision, [ 3 * hidden_size, input_size ], data); + data = weight('R', precision, [ 3 * hidden_size, hidden_size ], data); + break; + } + case 'GRUCell:biases': { + const linear_before_reset = parseInt(layer.data.linear_before_reset, 10); + const hidden_size = parseInt(layer.data.hidden_size, 10); + dimensions = linear_before_reset ? [ 4 * hidden_size ] : [ 3 * hidden_size ]; + data = weight('B', precision, dimensions, data); + break; + } + case 'Convolution:biases': { + dimensions = [ parseInt(layer.data.output, 10) ]; + break; + } + case 'ScaleShift:weights': + case 'ScaleShift:biases': + case 'Normalize:weights': { + dimensions = [ layer.input[0].dims[1] ]; + break; + } + case 'PReLU:weights': { + dimensions = layer.data.channel_shared === '1' ? [ 1 ] : [ layer.input[0].dims[1] ]; + break; + } + case 'Const:custom': { + if (this.outputs.length > 0 && + this.outputs[0].value.length > 0 && + this.outputs[0].value[0].type && + this.outputs[0].value[0].type.shape && + this.outputs[0].value[0].type.shape.dimensions) { + dimensions = this.outputs[0].value[0].type.shape.dimensions; + } + break; + } + default: { + break; + } + } + } + if (data) { + weight(name, precision, dimensions, data); + } + } + } +}; + +openvino.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +openvino.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new openvino.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = initializer ? initializer.type : type; + this.initializer = initializer || null; + } +}; + +openvino.Attribute = class { + + constructor(metadata, name, value) { + this.name = name; + this.value = value; + if (metadata && metadata.type !== undefined) { + this.type = metadata.type; + switch (metadata.type) { + case '': + case 'graph': + case 'string': + break; + case 'boolean': + if (value === '1' || value === 'true' || value === 'True') { + this.value = true; + } else if (value === '0' || value === 'false' || value === 'False') { + this.value = false; + } else { + throw new openvino.Error(`Unsupported attribute boolean value '${value}'.`); + } + break; + case 'int32': + case 'int64': { + const intValue = Number.parseInt(this.value, 10); + this.value = Number.isNaN(this.value - intValue) ? 
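+                    // 'value - intValue' is NaN when the string is not purely numeric
+                    // (e.g. 'auto'), in which case the attribute keeps its raw string.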
value : intValue; + break; + } + case 'float32': + case 'float64': { + const floatValue = Number.parseFloat(this.value); + this.value = Number.isNaN(this.value - floatValue) ? value : floatValue; + break; + } + case 'int32[]': + if (this.value.length > 2) { + let ints = []; + for (const entry of this.value.split(',')) { + const item = entry.trim(); + const intValue = Number.parseInt(item, 10); + if (Number.isNaN(item - intValue)) { + ints = null; + } else if (ints != null) { + ints.push(intValue); + } + } + if (ints != null) { + this.value = ints; + } + } + break; + case 'float32[]': + if (this.value.length > 2) { + let floats = []; + for (const entry of this.value.split(',')) { + const item = entry.trim(); + const floatValue = Number.parseFloat(item); + if (Number.isNaN(item - floatValue)) { + floats = null; + } else if (floats != null) { + floats.push(floatValue); + } + } + if (floats != null) { + this.value = floats; + } + } + break; + default: + throw new openvino.Error(`Unsupported attribute type '${metadata.type}'.`); + } + } + if (metadata && metadata.visible == false) { + this.visible = false; + } else if (metadata && metadata.default !== undefined) { + let defaultValue = metadata.default; + if (this.value == defaultValue) { + this.visible = false; + } else if (Array.isArray(this.value) && Array.isArray(defaultValue)) { + defaultValue = defaultValue.slice(0, defaultValue.length); + if (defaultValue.length > 1 && defaultValue[defaultValue.length - 1] == null) { + defaultValue.pop(); + while (defaultValue.length < this.value.length) { + defaultValue.push(defaultValue[defaultValue.length - 1]); + } + } + if (this.value.every((item, index) => item == defaultValue[index])) { + this.visible = false; + } + } + } + } +}; + +openvino.Tensor = class { + + constructor(type, data, category) { + this.type = type; + this.values = data; + this.category = category; + } +}; + +openvino.TensorType = class { + + constructor(precision, shape) { + precision = precision ? 
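+        // Precisions arrive in mixed case: 'FP32'-style names from IR v6 layer
+        // attributes and 'f32'-style element types from IR v10; lower-casing first
+        // lets a single switch cover both spellings.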
precision.toLowerCase() : precision; + switch (precision) { + case 'f16': this.dataType = 'float16'; break; + case 'f32': this.dataType = 'float32'; break; + case 'f64': this.dataType = 'float64'; break; + case 'fp16': this.dataType = 'float16'; break; + case 'fp32': this.dataType = 'float32'; break; + case 'fp64': this.dataType = 'float64'; break; + case 'bf16': this.dataType = 'bfloat16'; break; + case 'i4': this.dataType = 'int4'; break; + case 'i8': this.dataType = 'int8'; break; + case 'i16': this.dataType = 'int16'; break; + case 'i32': this.dataType = 'int32'; break; + case 'i64': this.dataType = 'int64'; break; + case 'u1': this.dataType = 'boolean'; break; + case 'u4': this.dataType = 'uint4'; break; + case 'u8': this.dataType = 'uint8'; break; + case 'u16': this.dataType = 'uint16'; break; + case 'u32': this.dataType = 'uint32'; break; + case 'u64': this.dataType = 'uint64'; break; + case 'bool': this.dataType = 'boolean'; break; + case 'boolean': this.dataType = 'boolean'; break; + case 'bin': this.dataType = 'bit'; break; + case '': this.dataType = '?'; break; + case null: this.dataType = '?'; break; + default: throw new openvino.Error(`Unsupported precision '${JSON.stringify(precision)}'.`); + } + this.shape = shape; + } + + equals(obj) { + return obj && this.dataType === obj.dataType && + ((this.shape === null && obj.shape === null) || this.shape && this.shape.equals(obj.shape)); + } + + toString() { + if (this.shape == null) { + return `${this.dataType}[?]`; + } + return this.dataType + this.shape.toString(); + } +}; + +openvino.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + equals(obj) { + return obj && Array.isArray(obj.dimensions) && + Array.isArray(this.dimensions) && this.dimensions.length === obj.dimensions.length + && obj.dimensions.every((value, index) => this.dimensions[index] === value); + } + + toString() { + if (!this.dimensions || this.dimensions.length == 0) { + return ''; + } + return `[${this.dimensions.join(',')}]`; + } +}; + +openvino.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading OpenVINO model.'; + } +}; + +export const ModelFactory = openvino.ModelFactory; + diff --git a/paddle-metadata.json b/paddle-metadata.json new file mode 100644 index 00000000000..65b297fdd0f --- /dev/null +++ b/paddle-metadata.json @@ -0,0 +1,124 @@ +[ + { + "name": "batch_norm", + "category": "Normalization", + "attributes": [ + { "name": "momentum", "default": 0.8999999761581421 }, + { "name": "epsilon", "default": 0.000009999999747378752 }, + { "name": "fuse_with_relu", "default": false }, + { "name": "data_layout", "default": "NCHW" } + ] + }, + { + "name": "concat", + "category": "Tensor" + }, + { + "name": "conv2d", + "category": "Layer", + "attributes": [ + { "name": "workspace_size_MB", "default": 4096 }, + { "name": "fuse_residual_connection", "default": false }, + { "name": "fuse_eltwise", "default": false }, + { "name": "fuse_relu", "default": false }, + { "name": "data_format", "default": "AnyLayout" }, + { "name": "groups", "default": 1 }, + { "name": "paddings", "default": [ 0, 0 ] }, + { "name": "dilations", "default": [ 1, 1 ] }, + { "name": "strides", "default": [ 1, 1 ] } + ] + }, + { + "name": "conv2d_transpose", + "category": "Layer" + }, + { + "name": "depthwise_conv2d", + "category": "Layer", + "attributes": [ + { "name": "workspace_size_MB", "default": 4096 }, + { "name": "fuse_residual_connection", "default": false }, + { "name": "data_format", "default": 
"AnyLayout" }, + { "name": "groups", "default": 1 }, + { "name": "fuse_relu", "default": false } + ] + }, + { + "name": "elementwise_add", + "attributes": [ + { "name": "axis", "default": -1 } + ] + }, + { + "name": "lrn", + "category": "Normalization", + "attributes": [ + { "name": "alpha", "default": 0.00009999999747378752 }, + { "name": "beta", "default": 0.75 }, + { "name": "k", "default": 1 } + ] + }, + { + "name": "pad2d", + "category": "Tensor" + }, + { + "name": "pool2d", + "category": "Pool", + "attributes": [ + { "name": "data_format", "default": "AnyLayout" }, + { "name": "ceil_mode", "default": false }, + { "name": "global_pooling", "default": false }, + { "name": "exclusive", "default": true }, + { "name": "pooling_type", "default": "max" }, + { "name": "paddings", "default": [ 0, 0 ] } + ] + }, + { + "name": "hard_swish", + "category": "Activation" + }, + { + "name": "hard_sigmoid", + "category": "Activation" + }, + { + "name": "relu", + "category": "Activation" + }, + { + "name": "sigmoid", + "category": "Activation" + }, + { + "name": "reshape", + "category": "Shape" + }, + { + "name": "reshape2", + "category": "Shape" + }, + { + "name": "scale", + "category": "Layer" + }, + { + "name": "rnn", + "category": "Layer" + }, + { + "name": "transpose", + "category": "Transform" + }, + { + "name": "transpose2", + "category": "Transform" + }, + { + "name": "softmax", + "category": "Activation", + "attributes": [ + { "name": "data_format", "default": "AnyLayout" } + ] + } +] \ No newline at end of file diff --git a/paddle-proto.js b/paddle-proto.js new file mode 100644 index 00000000000..e298cc15fbd --- /dev/null +++ b/paddle-proto.js @@ -0,0 +1,1680 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('paddle'); + +$root.paddle = {}; + +$root.paddle.framework = {}; + +$root.paddle.framework.proto = {}; + +$root.paddle.framework.proto.Version = class Version { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.Version(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.Version(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "version": + message.version = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.paddle.framework.proto.Version.prototype.version = protobuf.Int64.create(0); + +$root.paddle.framework.proto.AttrType = { + "INT": 0, + "FLOAT": 1, + "STRING": 2, + "INTS": 3, + "FLOATS": 4, + "STRINGS": 5, + "BOOLEAN": 6, + "BOOLEANS": 7, + "BLOCK": 8, + "LONG": 9, + "BLOCKS": 10, + "LONGS": 11, + "FLOAT64S": 12, + "VAR": 13, + "VARS": 14, + "FLOAT64": 15, + "SCALAR": 16, + "SCALARS": 17 +}; + +$root.paddle.framework.proto.Complex = class Complex { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.Complex(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.r = reader.double(); + break; + case 2: + message.i = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'r')) { + throw new Error("Excepted 'r'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'i')) { + throw new Error("Excepted 'i'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.Complex(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "r": + message.r = reader.double(); + break; + case "i": + message.i = reader.double(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "r")) { + throw new Error("Excepted 'r'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "i")) { + throw new Error("Excepted 'i'."); + } + return message; + } +}; + +$root.paddle.framework.proto.Complex.prototype.r = 0; +$root.paddle.framework.proto.Complex.prototype.i = 0; + +$root.paddle.framework.proto.Scalar = class Scalar { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.Scalar(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32(); + break; + case 2: + message.b = reader.bool(); + break; + case 3: + message.i = reader.int64(); + break; + case 4: + message.r = reader.double(); + break; + case 5: + message.c = $root.paddle.framework.proto.Complex.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'type')) { + throw new Error("Excepted 'type'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.Scalar(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.enum($root.paddle.framework.proto.Scalar.Type); + break; + case "b": + message.b = reader.bool(); + break; + case "i": + message.i = reader.int64(); + break; + case "r": + message.r = reader.double(); + break; + case "c": + message.c = $root.paddle.framework.proto.Complex.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "type")) { + throw new Error("Excepted 'type'."); + } + return message; + } +}; + +$root.paddle.framework.proto.Scalar.prototype.type = 1; +$root.paddle.framework.proto.Scalar.prototype.b = false; +$root.paddle.framework.proto.Scalar.prototype.i = protobuf.Int64.create(0); +$root.paddle.framework.proto.Scalar.prototype.r = 0; +$root.paddle.framework.proto.Scalar.prototype.c = null; + +$root.paddle.framework.proto.Scalar.Type = { + "BOOLEAN": 1, + "LONG": 2, + "FLOAT64": 3, + "COMPLEX128": 4 +}; + +$root.paddle.framework.proto.OpDesc = class OpDesc { + + constructor() { + this.inputs = []; + this.outputs = []; + this.attrs = []; + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.OpDesc(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 3: + message.type = reader.string(); + break; + case 1: + message.inputs.push($root.paddle.framework.proto.OpDesc.Var.decode(reader, reader.uint32())); + break; + case 2: + message.outputs.push($root.paddle.framework.proto.OpDesc.Var.decode(reader, reader.uint32())); + break; + case 4: + message.attrs.push($root.paddle.framework.proto.OpDesc.Attr.decode(reader, reader.uint32())); + break; + case 5: + message.is_target = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'type')) { + throw new Error("Excepted 'type'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.OpDesc(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.string(); + break; + case "inputs": + message.inputs.push($root.paddle.framework.proto.OpDesc.Var.decodeText(reader)); + break; + case "outputs": + message.outputs.push($root.paddle.framework.proto.OpDesc.Var.decodeText(reader)); + break; + case "attrs": + message.attrs.push($root.paddle.framework.proto.OpDesc.Attr.decodeText(reader)); + break; + case "is_target": + message.is_target = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "type")) { + throw new Error("Excepted 'type'."); + } + return message; + } +}; + +$root.paddle.framework.proto.OpDesc.prototype.type = ""; +$root.paddle.framework.proto.OpDesc.prototype.is_target = false; + +$root.paddle.framework.proto.OpDesc.Attr = class Attr { + + constructor() { + this.ints = []; + this.floats = []; + this.strings = []; + this.bools = []; + this.blocks_idx = []; + this.longs = []; + this.float64s = []; + this.vars_name = []; + this.scalars = []; + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.OpDesc.Attr(); + const end = length !== undefined ? 
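+        // Each wire tag packs (field_number << 3) | wire_type; for example, the
+        // 'name' string (field 1, wire type 2) arrives as tag 0x0A. The switch
+        // below dispatches on the field number and skipType(tag & 7) drops unknown
+        // fields, keeping the decoder forward-compatible with newer schemas.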
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.int32(); + break; + case 3: + message.i = reader.int32(); + break; + case 4: + message.f = reader.float(); + break; + case 5: + message.s = reader.string(); + break; + case 6: + message.ints = reader.array(message.ints, () => reader.int32(), tag); + break; + case 7: + message.floats = reader.floats(message.floats, tag); + break; + case 8: + message.strings.push(reader.string()); + break; + case 10: + message.b = reader.bool(); + break; + case 11: + message.bools = reader.array(message.bools, () => reader.bool(), tag); + break; + case 12: + message.block_idx = reader.int32(); + break; + case 13: + message.l = reader.int64(); + break; + case 14: + message.blocks_idx = reader.array(message.blocks_idx, () => reader.int32(), tag); + break; + case 15: + message.longs = reader.array(message.longs, () => reader.int64(), tag); + break; + case 16: + message.float64s = reader.doubles(message.float64s, tag); + break; + case 17: + message.var_name = reader.string(); + break; + case 18: + message.vars_name.push(reader.string()); + break; + case 19: + message.float64 = reader.double(); + break; + case 20: + message.scalar = $root.paddle.framework.proto.Scalar.decode(reader, reader.uint32()); + break; + case 21: + message.scalars.push($root.paddle.framework.proto.Scalar.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'name')) { + throw new Error("Excepted 'name'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'type')) { + throw new Error("Excepted 'type'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.OpDesc.Attr(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.enum($root.paddle.framework.proto.AttrType); + break; + case "i": + message.i = reader.int32(); + break; + case "f": + message.f = reader.float(); + break; + case "s": + message.s = reader.string(); + break; + case "ints": + reader.array(message.ints, () => reader.int32()); + break; + case "floats": + reader.array(message.floats, () => reader.float()); + break; + case "strings": + reader.array(message.strings, () => reader.string()); + break; + case "b": + message.b = reader.bool(); + break; + case "bools": + reader.array(message.bools, () => reader.bool()); + break; + case "block_idx": + message.block_idx = reader.int32(); + break; + case "l": + message.l = reader.int64(); + break; + case "blocks_idx": + reader.array(message.blocks_idx, () => reader.int32()); + break; + case "longs": + reader.array(message.longs, () => reader.int64()); + break; + case "float64s": + reader.array(message.float64s, () => reader.double()); + break; + case "var_name": + message.var_name = reader.string(); + break; + case "vars_name": + reader.array(message.vars_name, () => reader.string()); + break; + case "float64": + message.float64 = reader.double(); + break; + case "scalar": + message.scalar = $root.paddle.framework.proto.Scalar.decodeText(reader); + break; + case "scalars": + message.scalars.push($root.paddle.framework.proto.Scalar.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + if 
(!Object.prototype.hasOwnProperty.call(message, "name")) { + throw new Error("Excepted 'name'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "type")) { + throw new Error("Excepted 'type'."); + } + return message; + } +}; + +$root.paddle.framework.proto.OpDesc.Attr.prototype.name = ""; +$root.paddle.framework.proto.OpDesc.Attr.prototype.type = 0; +$root.paddle.framework.proto.OpDesc.Attr.prototype.i = 0; +$root.paddle.framework.proto.OpDesc.Attr.prototype.f = 0; +$root.paddle.framework.proto.OpDesc.Attr.prototype.s = ""; +$root.paddle.framework.proto.OpDesc.Attr.prototype.b = false; +$root.paddle.framework.proto.OpDesc.Attr.prototype.block_idx = 0; +$root.paddle.framework.proto.OpDesc.Attr.prototype.l = protobuf.Int64.create(0); +$root.paddle.framework.proto.OpDesc.Attr.prototype.var_name = ""; +$root.paddle.framework.proto.OpDesc.Attr.prototype.float64 = 0; +$root.paddle.framework.proto.OpDesc.Attr.prototype.scalar = null; + +$root.paddle.framework.proto.OpDesc.Var = class Var { + + constructor() { + this["arguments"] = []; + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.OpDesc.Var(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.parameter = reader.string(); + break; + case 2: + message["arguments"].push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'parameter')) { + throw new Error("Excepted 'parameter'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.OpDesc.Var(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "parameter": + message.parameter = reader.string(); + break; + case "arguments": + reader.array(message["arguments"], () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "parameter")) { + throw new Error("Excepted 'parameter'."); + } + return message; + } +}; + +$root.paddle.framework.proto.OpDesc.Var.prototype.parameter = ""; + +$root.paddle.framework.proto.OpProto = class OpProto { + + constructor() { + this.inputs = []; + this.outputs = []; + this.attrs = []; + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.OpProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.string(); + break; + case 2: + message.inputs.push($root.paddle.framework.proto.OpProto.Var.decode(reader, reader.uint32())); + break; + case 3: + message.outputs.push($root.paddle.framework.proto.OpProto.Var.decode(reader, reader.uint32())); + break; + case 4: + message.attrs.push($root.paddle.framework.proto.OpProto.Attr.decode(reader, reader.uint32())); + break; + case 5: + message.comment = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'type')) { + throw new Error("Excepted 'type'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'comment')) { + throw new Error("Excepted 'comment'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.OpProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.string(); + break; + case "inputs": + message.inputs.push($root.paddle.framework.proto.OpProto.Var.decodeText(reader)); + break; + case "outputs": + message.outputs.push($root.paddle.framework.proto.OpProto.Var.decodeText(reader)); + break; + case "attrs": + message.attrs.push($root.paddle.framework.proto.OpProto.Attr.decodeText(reader)); + break; + case "comment": + message.comment = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "type")) { + throw new Error("Excepted 'type'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "comment")) { + throw new Error("Excepted 'comment'."); + } + return message; + } +}; + +$root.paddle.framework.proto.OpProto.prototype.type = ""; +$root.paddle.framework.proto.OpProto.prototype.comment = ""; + +$root.paddle.framework.proto.OpProto.Var = class Var { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.OpProto.Var(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.comment = reader.string(); + break; + case 3: + message.duplicable = reader.bool(); + break; + case 4: + message.intermediate = reader.bool(); + break; + case 5: + message.dispensable = reader.bool(); + break; + case 6: + message.extra = reader.bool(); + break; + case 7: + message.quant = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'name')) { + throw new Error("Excepted 'name'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'comment')) { + throw new Error("Excepted 'comment'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.OpProto.Var(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "comment": + message.comment = reader.string(); + break; + case "duplicable": + message.duplicable = reader.bool(); + break; + case "intermediate": + message.intermediate = reader.bool(); + break; + case "dispensable": + message.dispensable = reader.bool(); + break; + case "extra": + message.extra = reader.bool(); + break; + case "quant": + message.quant = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "name")) { + throw new Error("Excepted 'name'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "comment")) { + throw new Error("Excepted 'comment'."); + } + return message; + } +}; + +$root.paddle.framework.proto.OpProto.Var.prototype.name = ""; +$root.paddle.framework.proto.OpProto.Var.prototype.comment = ""; +$root.paddle.framework.proto.OpProto.Var.prototype.duplicable = false; +$root.paddle.framework.proto.OpProto.Var.prototype.intermediate = false; +$root.paddle.framework.proto.OpProto.Var.prototype.dispensable = false; +$root.paddle.framework.proto.OpProto.Var.prototype.extra = false; +$root.paddle.framework.proto.OpProto.Var.prototype.quant = false; + +$root.paddle.framework.proto.OpProto.Attr = class Attr { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.OpProto.Attr(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.int32(); + break; + case 3: + message.comment = reader.string(); + break; + case 4: + message.generated = reader.bool(); + break; + case 5: + message.extra = reader.bool(); + break; + case 6: + message.quant = reader.bool(); + break; + case 7: + message.support_tensor = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'name')) { + throw new Error("Excepted 'name'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'type')) { + throw new Error("Excepted 'type'."); + } + if (!Object.prototype.hasOwnProperty.call(message, 'comment')) { + throw new Error("Excepted 'comment'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.OpProto.Attr(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.enum($root.paddle.framework.proto.AttrType); + break; + case "comment": + message.comment = reader.string(); + break; + case "generated": + message.generated = reader.bool(); + break; + case "extra": + message.extra = reader.bool(); + break; + case "quant": + message.quant = reader.bool(); + break; + case "support_tensor": + message.support_tensor = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "name")) { + throw new Error("Excepted 'name'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "type")) { + throw new Error("Excepted 'type'."); + } + if (!Object.prototype.hasOwnProperty.call(message, "comment")) { + throw new Error("Excepted 'comment'."); + } + return message; + } +}; + +$root.paddle.framework.proto.OpProto.Attr.prototype.name = ""; +$root.paddle.framework.proto.OpProto.Attr.prototype.type = 0; +$root.paddle.framework.proto.OpProto.Attr.prototype.comment = ""; +$root.paddle.framework.proto.OpProto.Attr.prototype.generated = false; +$root.paddle.framework.proto.OpProto.Attr.prototype.extra = false; +$root.paddle.framework.proto.OpProto.Attr.prototype.quant = false; +$root.paddle.framework.proto.OpProto.Attr.prototype.support_tensor = false; + +$root.paddle.framework.proto.VarType = class VarType { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.VarType(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32(); + break; + case 2: + message.selected_rows = $root.paddle.framework.proto.VarType.TensorDesc.decode(reader, reader.uint32()); + break; + case 3: + message.lod_tensor = $root.paddle.framework.proto.VarType.LoDTensorDesc.decode(reader, reader.uint32()); + break; + case 4: + message.tensor_array = $root.paddle.framework.proto.VarType.LoDTensorArrayDesc.decode(reader, reader.uint32()); + break; + case 5: + message.reader = $root.paddle.framework.proto.VarType.ReaderDesc.decode(reader, reader.uint32()); + break; + case 7: + message.tuple = $root.paddle.framework.proto.VarType.Tuple.decode(reader, reader.uint32()); + break; + case 8: + message.string = $root.paddle.framework.proto.VarType.TensorDesc.decode(reader, reader.uint32()); + break; + case 9: + message.strings = $root.paddle.framework.proto.VarType.TensorDesc.decode(reader, reader.uint32()); + break; + case 10: + message.vocab = $root.paddle.framework.proto.VarType.TensorDesc.decode(reader, reader.uint32()); + break; + case 11: + message.sparse_coo = $root.paddle.framework.proto.VarType.TensorDesc.decode(reader, reader.uint32()); + break; + case 12: + message.sparse_csr = $root.paddle.framework.proto.VarType.TensorDesc.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'type')) { + throw new Error("Excepted 'type'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.VarType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.enum($root.paddle.framework.proto.VarType.Type); + break; + case "selected_rows": + message.selected_rows = $root.paddle.framework.proto.VarType.TensorDesc.decodeText(reader); + break; + case "lod_tensor": + message.lod_tensor = $root.paddle.framework.proto.VarType.LoDTensorDesc.decodeText(reader); + break; + case "tensor_array": + message.tensor_array = $root.paddle.framework.proto.VarType.LoDTensorArrayDesc.decodeText(reader); + break; + case "reader": + message.reader = $root.paddle.framework.proto.VarType.ReaderDesc.decodeText(reader); + break; + case "tuple": + message.tuple = $root.paddle.framework.proto.VarType.Tuple.decodeText(reader); + break; + case "string": + message.string = $root.paddle.framework.proto.VarType.TensorDesc.decodeText(reader); + break; + case "strings": + message.strings = $root.paddle.framework.proto.VarType.TensorDesc.decodeText(reader); + break; + case "vocab": + message.vocab = $root.paddle.framework.proto.VarType.TensorDesc.decodeText(reader); + break; + case "sparse_coo": + message.sparse_coo = $root.paddle.framework.proto.VarType.TensorDesc.decodeText(reader); + break; + case "sparse_csr": + message.sparse_csr = $root.paddle.framework.proto.VarType.TensorDesc.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "type")) { + throw new Error("Excepted 'type'."); + } + return message; + } +}; + +$root.paddle.framework.proto.VarType.prototype.type = 0; +$root.paddle.framework.proto.VarType.prototype.selected_rows = null; +$root.paddle.framework.proto.VarType.prototype.lod_tensor = null; +$root.paddle.framework.proto.VarType.prototype.tensor_array = null; 
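+// Singular message fields default to null on the prototype; decode() only
+// assigns the fields actually present on the wire.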
+$root.paddle.framework.proto.VarType.prototype.reader = null; +$root.paddle.framework.proto.VarType.prototype.tuple = null; +$root.paddle.framework.proto.VarType.prototype.string = null; +$root.paddle.framework.proto.VarType.prototype.strings = null; +$root.paddle.framework.proto.VarType.prototype.vocab = null; +$root.paddle.framework.proto.VarType.prototype.sparse_coo = null; +$root.paddle.framework.proto.VarType.prototype.sparse_csr = null; + +$root.paddle.framework.proto.VarType.Type = { + "BOOL": 0, + "INT16": 1, + "INT32": 2, + "INT64": 3, + "FP16": 4, + "FP32": 5, + "FP64": 6, + "SIZE_T": 19, + "UINT8": 20, + "INT8": 21, + "BF16": 22, + "COMPLEX64": 23, + "COMPLEX128": 24, + "LOD_TENSOR": 7, + "SELECTED_ROWS": 8, + "FEED_MINIBATCH": 9, + "FETCH_LIST": 10, + "STEP_SCOPES": 11, + "LOD_RANK_TABLE": 12, + "LOD_TENSOR_ARRAY": 13, + "PLACE_LIST": 14, + "READER": 15, + "RAW": 17, + "TUPLE": 18, + "STRING": 25, + "STRINGS": 26, + "VOCAB": 27, + "FEED_LIST": 28, + "PSTRING": 29, + "SPARSE_COO": 30, + "SPARSE_CSR": 31 +}; + +$root.paddle.framework.proto.VarType.TensorDesc = class TensorDesc { + + constructor() { + this.dims = []; + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.VarType.TensorDesc(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.data_type = reader.int32(); + break; + case 2: + message.dims = reader.array(message.dims, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'data_type')) { + throw new Error("Excepted 'data_type'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.VarType.TensorDesc(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "data_type": + message.data_type = reader.enum($root.paddle.framework.proto.VarType.Type); + break; + case "dims": + reader.array(message.dims, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "data_type")) { + throw new Error("Excepted 'data_type'."); + } + return message; + } +}; + +$root.paddle.framework.proto.VarType.TensorDesc.prototype.data_type = 0; + +$root.paddle.framework.proto.VarType.LoDTensorDesc = class LoDTensorDesc { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.VarType.LoDTensorDesc(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor = $root.paddle.framework.proto.VarType.TensorDesc.decode(reader, reader.uint32()); + break; + case 2: + message.lod_level = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'tensor')) { + throw new Error("Excepted 'tensor'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.VarType.LoDTensorDesc(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "tensor": + message.tensor = $root.paddle.framework.proto.VarType.TensorDesc.decodeText(reader); + break; + case "lod_level": + message.lod_level = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "tensor")) { + throw new Error("Excepted 'tensor'."); + } + return message; + } +}; + +$root.paddle.framework.proto.VarType.LoDTensorDesc.prototype.tensor = null; +$root.paddle.framework.proto.VarType.LoDTensorDesc.prototype.lod_level = 0; + +$root.paddle.framework.proto.VarType.LoDTensorArrayDesc = class LoDTensorArrayDesc { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.VarType.LoDTensorArrayDesc(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor = $root.paddle.framework.proto.VarType.TensorDesc.decode(reader, reader.uint32()); + break; + case 2: + message.lod_level = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, 'tensor')) { + throw new Error("Excepted 'tensor'."); + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.VarType.LoDTensorArrayDesc(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "tensor": + message.tensor = $root.paddle.framework.proto.VarType.TensorDesc.decodeText(reader); + break; + case "lod_level": + message.lod_level = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!Object.prototype.hasOwnProperty.call(message, "tensor")) { + throw new Error("Excepted 'tensor'."); + } + return message; + } +}; + +$root.paddle.framework.proto.VarType.LoDTensorArrayDesc.prototype.tensor = null; +$root.paddle.framework.proto.VarType.LoDTensorArrayDesc.prototype.lod_level = 0; + +$root.paddle.framework.proto.VarType.ReaderDesc = class ReaderDesc { + + constructor() { + this.lod_tensor = []; + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.VarType.ReaderDesc(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lod_tensor.push($root.paddle.framework.proto.VarType.LoDTensorDesc.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.VarType.ReaderDesc(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "lod_tensor": + message.lod_tensor.push($root.paddle.framework.proto.VarType.LoDTensorDesc.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.paddle.framework.proto.VarType.Tuple = class Tuple { + + constructor() { + this.element_type = []; + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.VarType.Tuple(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.element_type = reader.array(message.element_type, () => reader.int32(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.VarType.Tuple(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "element_type": + reader.array(message.element_type, () => reader.enum($root.paddle.framework.proto.VarType.Type)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.paddle.framework.proto.VarDesc = class VarDesc { + + constructor() { + this.attrs = []; + } + + static decode(reader, length) { + const message = new $root.paddle.framework.proto.VarDesc(); + const end = length !== undefined ? 
reader.position + length : reader.length;
+        while (reader.position < end) {
+            const tag = reader.uint32();
+            switch (tag >>> 3) {
+                case 1:
+                    message.name = reader.string();
+                    break;
+                case 2:
+                    message.type = $root.paddle.framework.proto.VarType.decode(reader, reader.uint32());
+                    break;
+                case 3:
+                    message.persistable = reader.bool();
+                    break;
+                case 4:
+                    message.need_check_feed = reader.bool();
+                    break;
+                case 5:
+                    message.is_parameter = reader.bool();
+                    break;
+                case 6:
+                    message.stop_gradient = reader.bool();
+                    break;
+                case 7:
+                    message.attrs.push($root.paddle.framework.proto.VarDesc.Attr.decode(reader, reader.uint32()));
+                    break;
+                default:
+                    reader.skipType(tag & 7);
+                    break;
+            }
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, 'name')) {
+            throw new Error("Expected 'name'.");
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, 'type')) {
+            throw new Error("Expected 'type'.");
+        }
+        return message;
+    }
+
+    static decodeText(reader) {
+        const message = new $root.paddle.framework.proto.VarDesc();
+        reader.start();
+        while (!reader.end()) {
+            const tag = reader.tag();
+            switch (tag) {
+                case "name":
+                    message.name = reader.string();
+                    break;
+                case "type":
+                    message.type = $root.paddle.framework.proto.VarType.decodeText(reader);
+                    break;
+                case "persistable":
+                    message.persistable = reader.bool();
+                    break;
+                case "need_check_feed":
+                    message.need_check_feed = reader.bool();
+                    break;
+                case "is_parameter":
+                    message.is_parameter = reader.bool();
+                    break;
+                case "stop_gradient":
+                    message.stop_gradient = reader.bool();
+                    break;
+                case "attrs":
+                    message.attrs.push($root.paddle.framework.proto.VarDesc.Attr.decodeText(reader));
+                    break;
+                default:
+                    reader.field(tag, message);
+                    break;
+            }
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, "name")) {
+            throw new Error("Expected 'name'.");
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, "type")) {
+            throw new Error("Expected 'type'.");
+        }
+        return message;
+    }
+};
+
+$root.paddle.framework.proto.VarDesc.prototype.name = "";
+$root.paddle.framework.proto.VarDesc.prototype.type = null;
+$root.paddle.framework.proto.VarDesc.prototype.persistable = false;
+$root.paddle.framework.proto.VarDesc.prototype.need_check_feed = false;
+$root.paddle.framework.proto.VarDesc.prototype.is_parameter = false;
+$root.paddle.framework.proto.VarDesc.prototype.stop_gradient = false;
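+
+// Field defaults live on the prototype rather than on each instance, so a
+// freshly decoded VarDesc only has own properties for fields that were
+// actually present on the wire; the Object.prototype.hasOwnProperty checks
+// above rely on this to enforce required fields.
+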
+$root.paddle.framework.proto.VarDesc.Attr = class Attr {
+
+    constructor() {
+        this.ints = [];
+    }
+
+    static decode(reader, length) {
+        const message = new $root.paddle.framework.proto.VarDesc.Attr();
+        const end = length !== undefined ? reader.position + length : reader.length;
+        while (reader.position < end) {
+            const tag = reader.uint32();
+            switch (tag >>> 3) {
+                case 1:
+                    message.name = reader.string();
+                    break;
+                case 2:
+                    message.type = reader.int32();
+                    break;
+                case 3:
+                    message.i = reader.int32();
+                    break;
+                case 4:
+                    message.s = reader.string();
+                    break;
+                case 5:
+                    message.ints = reader.array(message.ints, () => reader.int32(), tag);
+                    break;
+                default:
+                    reader.skipType(tag & 7);
+                    break;
+            }
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, 'name')) {
+            throw new Error("Expected 'name'.");
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, 'type')) {
+            throw new Error("Expected 'type'.");
+        }
+        return message;
+    }
+
+    static decodeText(reader) {
+        const message = new $root.paddle.framework.proto.VarDesc.Attr();
+        reader.start();
+        while (!reader.end()) {
+            const tag = reader.tag();
+            switch (tag) {
+                case "name":
+                    message.name = reader.string();
+                    break;
+                case "type":
+                    message.type = reader.enum($root.paddle.framework.proto.AttrType);
+                    break;
+                case "i":
+                    message.i = reader.int32();
+                    break;
+                case "s":
+                    message.s = reader.string();
+                    break;
+                case "ints":
+                    reader.array(message.ints, () => reader.int32());
+                    break;
+                default:
+                    reader.field(tag, message);
+                    break;
+            }
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, "name")) {
+            throw new Error("Expected 'name'.");
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, "type")) {
+            throw new Error("Expected 'type'.");
+        }
+        return message;
+    }
+};
+
+$root.paddle.framework.proto.VarDesc.Attr.prototype.name = "";
+$root.paddle.framework.proto.VarDesc.Attr.prototype.type = 0;
+$root.paddle.framework.proto.VarDesc.Attr.prototype.i = 0;
+$root.paddle.framework.proto.VarDesc.Attr.prototype.s = "";
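+
+// Minimal decode sketch (comment only): 'buffer' stands in for a Uint8Array
+// holding a serialized VarDesc, and 'protobuf' for the runtime this file is
+// generated against.
+//
+//     const reader = protobuf.BinaryReader.open(buffer);
+//     const desc = $root.paddle.framework.proto.VarDesc.decode(reader);
+//     // desc.name and desc.type are guaranteed present; other fields fall
+//     // back to the prototype defaults declared above.
+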
+$root.paddle.framework.proto.BlockDesc = class BlockDesc {
+
+    constructor() {
+        this.vars = [];
+        this.ops = [];
+    }
+
+    static decode(reader, length) {
+        const message = new $root.paddle.framework.proto.BlockDesc();
+        const end = length !== undefined ? reader.position + length : reader.length;
+        while (reader.position < end) {
+            const tag = reader.uint32();
+            switch (tag >>> 3) {
+                case 1:
+                    message.idx = reader.int32();
+                    break;
+                case 2:
+                    message.parent_idx = reader.int32();
+                    break;
+                case 3:
+                    message.vars.push($root.paddle.framework.proto.VarDesc.decode(reader, reader.uint32()));
+                    break;
+                case 4:
+                    message.ops.push($root.paddle.framework.proto.OpDesc.decode(reader, reader.uint32()));
+                    break;
+                case 5:
+                    message.forward_block_idx = reader.int32();
+                    break;
+                default:
+                    reader.skipType(tag & 7);
+                    break;
+            }
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, 'idx')) {
+            throw new Error("Expected 'idx'.");
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, 'parent_idx')) {
+            throw new Error("Expected 'parent_idx'.");
+        }
+        return message;
+    }
+
+    static decodeText(reader) {
+        const message = new $root.paddle.framework.proto.BlockDesc();
+        reader.start();
+        while (!reader.end()) {
+            const tag = reader.tag();
+            switch (tag) {
+                case "idx":
+                    message.idx = reader.int32();
+                    break;
+                case "parent_idx":
+                    message.parent_idx = reader.int32();
+                    break;
+                case "vars":
+                    message.vars.push($root.paddle.framework.proto.VarDesc.decodeText(reader));
+                    break;
+                case "ops":
+                    message.ops.push($root.paddle.framework.proto.OpDesc.decodeText(reader));
+                    break;
+                case "forward_block_idx":
+                    message.forward_block_idx = reader.int32();
+                    break;
+                default:
+                    reader.field(tag, message);
+                    break;
+            }
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, "idx")) {
+            throw new Error("Expected 'idx'.");
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, "parent_idx")) {
+            throw new Error("Expected 'parent_idx'.");
+        }
+        return message;
+    }
+};
+
+$root.paddle.framework.proto.BlockDesc.prototype.idx = 0;
+$root.paddle.framework.proto.BlockDesc.prototype.parent_idx = 0;
+$root.paddle.framework.proto.BlockDesc.prototype.forward_block_idx = -1;
+
+$root.paddle.framework.proto.OpVersion = class OpVersion {
+
+    constructor() {
+    }
+
+    static decode(reader, length) {
+        const message = new $root.paddle.framework.proto.OpVersion();
+        const end = length !== undefined ? reader.position + length : reader.length;
+        while (reader.position < end) {
+            const tag = reader.uint32();
+            switch (tag >>> 3) {
+                case 1:
+                    message.version = reader.int32();
+                    break;
+                default:
+                    reader.skipType(tag & 7);
+                    break;
+            }
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, 'version')) {
+            throw new Error("Expected 'version'.");
+        }
+        return message;
+    }
+
+    static decodeText(reader) {
+        const message = new $root.paddle.framework.proto.OpVersion();
+        reader.start();
+        while (!reader.end()) {
+            const tag = reader.tag();
+            switch (tag) {
+                case "version":
+                    message.version = reader.int32();
+                    break;
+                default:
+                    reader.field(tag, message);
+                    break;
+            }
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, "version")) {
+            throw new Error("Expected 'version'.");
+        }
+        return message;
+    }
+};
+
+$root.paddle.framework.proto.OpVersion.prototype.version = 0;
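+
+// OpVersionMap below models a proto map<string, OpVersion> as a repeated
+// OpVersionPair message (op_name, op_version), so decoding accumulates
+// entries into a 'pair' array rather than a keyed object.
+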
+$root.paddle.framework.proto.OpVersionMap = class OpVersionMap {
+
+    constructor() {
+        this.pair = [];
+    }
+
+    static decode(reader, length) {
+        const message = new $root.paddle.framework.proto.OpVersionMap();
+        const end = length !== undefined ? reader.position + length : reader.length;
+        while (reader.position < end) {
+            const tag = reader.uint32();
+            switch (tag >>> 3) {
+                case 1:
+                    message.pair.push($root.paddle.framework.proto.OpVersionMap.OpVersionPair.decode(reader, reader.uint32()));
+                    break;
+                default:
+                    reader.skipType(tag & 7);
+                    break;
+            }
+        }
+        return message;
+    }
+
+    static decodeText(reader) {
+        const message = new $root.paddle.framework.proto.OpVersionMap();
+        reader.start();
+        while (!reader.end()) {
+            const tag = reader.tag();
+            switch (tag) {
+                case "pair":
+                    message.pair.push($root.paddle.framework.proto.OpVersionMap.OpVersionPair.decodeText(reader));
+                    break;
+                default:
+                    reader.field(tag, message);
+                    break;
+            }
+        }
+        return message;
+    }
+};
+
+$root.paddle.framework.proto.OpVersionMap.OpVersionPair = class OpVersionPair {
+
+    constructor() {
+    }
+
+    static decode(reader, length) {
+        const message = new $root.paddle.framework.proto.OpVersionMap.OpVersionPair();
+        const end = length !== undefined ? reader.position + length : reader.length;
+        while (reader.position < end) {
+            const tag = reader.uint32();
+            switch (tag >>> 3) {
+                case 1:
+                    message.op_name = reader.string();
+                    break;
+                case 2:
+                    message.op_version = $root.paddle.framework.proto.OpVersion.decode(reader, reader.uint32());
+                    break;
+                default:
+                    reader.skipType(tag & 7);
+                    break;
+            }
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, 'op_name')) {
+            throw new Error("Expected 'op_name'.");
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, 'op_version')) {
+            throw new Error("Expected 'op_version'.");
+        }
+        return message;
+    }
+
+    static decodeText(reader) {
+        const message = new $root.paddle.framework.proto.OpVersionMap.OpVersionPair();
+        reader.start();
+        while (!reader.end()) {
+            const tag = reader.tag();
+            switch (tag) {
+                case "op_name":
+                    message.op_name = reader.string();
+                    break;
+                case "op_version":
+                    message.op_version = $root.paddle.framework.proto.OpVersion.decodeText(reader);
+                    break;
+                default:
+                    reader.field(tag, message);
+                    break;
+            }
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, "op_name")) {
+            throw new Error("Expected 'op_name'.");
+        }
+        if (!Object.prototype.hasOwnProperty.call(message, "op_version")) {
+            throw new Error("Expected 'op_version'.");
+        }
+        return message;
+    }
+};
+
+$root.paddle.framework.proto.OpVersionMap.OpVersionPair.prototype.op_name = "";
+$root.paddle.framework.proto.OpVersionMap.OpVersionPair.prototype.op_version = null;
+
+$root.paddle.framework.proto.ProgramDesc = class ProgramDesc {
+
+    constructor() {
+        this.blocks = [];
+    }
+
+    static decode(reader, length) {
+        const message = new $root.paddle.framework.proto.ProgramDesc();
+        const end = length !== undefined ?
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.blocks.push($root.paddle.framework.proto.BlockDesc.decode(reader, reader.uint32())); + break; + case 4: + message.version = $root.paddle.framework.proto.Version.decode(reader, reader.uint32()); + break; + case 5: + message.op_version_map = $root.paddle.framework.proto.OpVersionMap.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.paddle.framework.proto.ProgramDesc(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "blocks": + message.blocks.push($root.paddle.framework.proto.BlockDesc.decodeText(reader)); + break; + case "version": + message.version = $root.paddle.framework.proto.Version.decodeText(reader); + break; + case "op_version_map": + message.op_version_map = $root.paddle.framework.proto.OpVersionMap.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.paddle.framework.proto.ProgramDesc.prototype.version = null; +$root.paddle.framework.proto.ProgramDesc.prototype.op_version_map = null; diff --git a/paddle-schema.js b/paddle-schema.js new file mode 100644 index 00000000000..3d3d52267ac --- /dev/null +++ b/paddle-schema.js @@ -0,0 +1,536 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const $root = flatbuffers.get('paddlelite'); + +$root.paddle = $root.paddle || {}; + +$root.paddle.lite = $root.paddle.lite || {}; + +$root.paddle.lite.fbs = $root.paddle.lite.fbs || {}; + +$root.paddle.lite.fbs.proto = $root.paddle.lite.fbs.proto || {}; + +$root.paddle.lite.fbs.proto.AttrType = { + INT: 0, + FLOAT: 1, + STRING: 2, + INTS: 3, + FLOATS: 4, + STRINGS: 5, + BOOLEAN: 6, + BOOLEANS: 7, + BLOCK: 8, + LONG: 9, + BLOCKS: 10, + LONGS: 11, + FLOAT64S: 12, + VAR: 13, + VARS: 14, + FLOAT64: 15 +}; + +$root.paddle.lite.fbs.proto.Version = class Version { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.Version(); + $.version = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.Version(); + $.version = reader.value(json.version, 0); + return $; + } +}; + +$root.paddle.lite.fbs.proto.OpDesc = class OpDesc { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.OpDesc(); + $.type = reader.string_(position, 4, null); + $.inputs = reader.tableArray(position, 6, $root.paddle.lite.fbs.proto.OpDesc_.Var.decode); + $.outputs = reader.tableArray(position, 8, $root.paddle.lite.fbs.proto.OpDesc_.Var.decode); + $.attrs = reader.tableArray(position, 10, $root.paddle.lite.fbs.proto.OpDesc_.Attr.decode); + $.is_target = reader.bool_(position, 12, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.OpDesc(); + $.type = reader.value(json.type, null); + $.inputs = reader.objectArray(json.inputs, $root.paddle.lite.fbs.proto.OpDesc_.Var.decodeText); + $.outputs = reader.objectArray(json.outputs, $root.paddle.lite.fbs.proto.OpDesc_.Var.decodeText); + $.attrs = reader.objectArray(json.attrs, $root.paddle.lite.fbs.proto.OpDesc_.Attr.decodeText); + $.is_target = reader.value(json.is_target, false); + return $; + } +}; + +$root.paddle.lite.fbs.proto.VarType = class VarType { + + static decode(reader, position) { + const $ = new 
$root.paddle.lite.fbs.proto.VarType(); + $.type = reader.int32_(position, 4, 0); + $.selected_rows = reader.table(position, 6, $root.paddle.lite.fbs.proto.VarType_.TensorDesc.decode); + $.lod_tensor = reader.table(position, 8, $root.paddle.lite.fbs.proto.VarType_.LoDTensorDesc.decode); + $.tensor_array = reader.table(position, 10, $root.paddle.lite.fbs.proto.VarType_.LoDTensorArrayDesc.decode); + $.reader = reader.table(position, 12, $root.paddle.lite.fbs.proto.VarType_.ReaderDesc.decode); + $.tuple = reader.table(position, 14, $root.paddle.lite.fbs.proto.VarType_.Tuple.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.VarType(); + $.type = $root.paddle.lite.fbs.proto.VarType_.Type[json.type]; + $.selected_rows = reader.object(json.selected_rows, $root.paddle.lite.fbs.proto.VarType_.TensorDesc.decodeText); + $.lod_tensor = reader.object(json.lod_tensor, $root.paddle.lite.fbs.proto.VarType_.LoDTensorDesc.decodeText); + $.tensor_array = reader.object(json.tensor_array, $root.paddle.lite.fbs.proto.VarType_.LoDTensorArrayDesc.decodeText); + $.reader = reader.object(json.reader, $root.paddle.lite.fbs.proto.VarType_.ReaderDesc.decodeText); + $.tuple = reader.object(json.tuple, $root.paddle.lite.fbs.proto.VarType_.Tuple.decodeText); + return $; + } +}; + +$root.paddle.lite.fbs.proto.VarDesc = class VarDesc { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.VarDesc(); + $.name = reader.string_(position, 4, null); + $.type = reader.table(position, 6, $root.paddle.lite.fbs.proto.VarType.decode); + $.persistable = reader.bool_(position, 8, false); + $.need_check_feed = reader.bool_(position, 10, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.VarDesc(); + $.name = reader.value(json.name, null); + $.type = reader.object(json.type, $root.paddle.lite.fbs.proto.VarType.decodeText); + $.persistable = reader.value(json.persistable, false); + $.need_check_feed = reader.value(json.need_check_feed, false); + return $; + } +}; + +$root.paddle.lite.fbs.proto.BlockDesc = class BlockDesc { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.BlockDesc(); + $.idx = reader.int32_(position, 4, 0); + $.parent_idx = reader.int32_(position, 6, 0); + $.vars = reader.tableArray(position, 8, $root.paddle.lite.fbs.proto.VarDesc.decode); + $.ops = reader.tableArray(position, 10, $root.paddle.lite.fbs.proto.OpDesc.decode); + $.forward_block_idx = reader.int32_(position, 12, -1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.BlockDesc(); + $.idx = reader.value(json.idx, 0); + $.parent_idx = reader.value(json.parent_idx, 0); + $.vars = reader.objectArray(json.vars, $root.paddle.lite.fbs.proto.VarDesc.decodeText); + $.ops = reader.objectArray(json.ops, $root.paddle.lite.fbs.proto.OpDesc.decodeText); + $.forward_block_idx = reader.value(json.forward_block_idx, -1); + return $; + } +}; + +$root.paddle.lite.fbs.proto.OpVersion = class OpVersion { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.OpVersion(); + $.version = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.OpVersion(); + $.version = reader.value(json.version, 0); + return $; + } +}; + +$root.paddle.lite.fbs.proto.OpVersionMap = class OpVersionMap { + + static decode(reader, position) { + const $ = new 
$root.paddle.lite.fbs.proto.OpVersionMap(); + $.pair = reader.tableArray(position, 4, $root.paddle.lite.fbs.proto.OpVersionMap_.OpVersionPair.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.OpVersionMap(); + $.pair = reader.objectArray(json.pair, $root.paddle.lite.fbs.proto.OpVersionMap_.OpVersionPair.decodeText); + return $; + } +}; + +$root.paddle.lite.fbs.proto.ProgramDesc = class ProgramDesc { + + static create(reader) { + return $root.paddle.lite.fbs.proto.ProgramDesc.decode(reader, reader.root); + } + + static createText(reader) { + return $root.paddle.lite.fbs.proto.ProgramDesc.decodeText(reader, reader.root); + } + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.ProgramDesc(); + $.blocks = reader.tableArray(position, 4, $root.paddle.lite.fbs.proto.BlockDesc.decode); + $.version = reader.table(position, 6, $root.paddle.lite.fbs.proto.Version.decode); + $.op_version_map = reader.table(position, 8, $root.paddle.lite.fbs.proto.OpVersionMap.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.ProgramDesc(); + $.blocks = reader.objectArray(json.blocks, $root.paddle.lite.fbs.proto.BlockDesc.decodeText); + $.version = reader.object(json.version, $root.paddle.lite.fbs.proto.Version.decodeText); + $.op_version_map = reader.object(json.op_version_map, $root.paddle.lite.fbs.proto.OpVersionMap.decodeText); + return $; + } +}; + +$root.paddle.lite.fbs.proto.CombinedParamsDesc = class CombinedParamsDesc { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.CombinedParamsDesc(); + $.params = reader.tableArray(position, 4, $root.paddle.lite.fbs.proto.ParamDesc.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.CombinedParamsDesc(); + $.params = reader.objectArray(json.params, $root.paddle.lite.fbs.proto.ParamDesc.decodeText); + return $; + } +}; + +$root.paddle.lite.fbs.proto.ParamDesc = class ParamDesc { + + static create(reader) { + return $root.paddle.lite.fbs.proto.ParamDesc.decode(reader, reader.root); + } + + static createText(reader) { + return $root.paddle.lite.fbs.proto.ParamDesc.decodeText(reader, reader.root); + } + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.ParamDesc(); + $.version = reader.table(position, 4, $root.paddle.lite.fbs.proto.ParamDesc_.VersionDesc.decode); + $.name = reader.string_(position, 6, null); + $.variable = reader.union(position, 8, $root.paddle.lite.fbs.proto.ParamDesc_.VariableDesc.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.ParamDesc(); + $.version = reader.object(json.version, $root.paddle.lite.fbs.proto.ParamDesc_.VersionDesc.decodeText); + $.name = reader.value(json.name, null); + $.variable = $root.paddle.lite.fbs.proto.ParamDesc_.VariableDesc.decodeText(reader, json.variable, json.variable_type); + return $; + } +}; + +$root.paddle = $root.paddle || {}; + +$root.paddle.lite = $root.paddle.lite || {}; + +$root.paddle.lite.fbs = $root.paddle.lite.fbs || {}; + +$root.paddle.lite.fbs.proto = $root.paddle.lite.fbs.proto || {}; + +$root.paddle.lite.fbs.proto.VarType_ = $root.paddle.lite.fbs.proto.VarType_ || {}; + +$root.paddle.lite.fbs.proto.VarType_.Type = { + BOOL: 0, + INT16: 1, + INT32: 2, + INT64: 3, + FP16: 4, + FP32: 5, + FP64: 6, + LOD_TENSOR: 7, + SELECTED_ROWS: 8, + FEED_MINIBATCH: 9, + FETCH_LIST: 10, + STEP_SCOPES: 11, + 
LOD_RANK_TABLE: 12, + LOD_TENSOR_ARRAY: 13, + PLACE_LIST: 14, + READER: 15, + RAW: 17, + TUPLE: 18, + SIZE_T: 19, + UINT8: 20, + INT8: 21 +}; + +$root.paddle.lite.fbs.proto.VarType_.TensorDesc = class TensorDesc { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.VarType_.TensorDesc(); + $.data_type = reader.int32_(position, 4, 0); + $.dims = reader.int64s_(position, 6); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.VarType_.TensorDesc(); + $.data_type = $root.paddle.lite.fbs.proto.VarType_.Type[json.data_type]; + $.dims = reader.array(json.dims); + return $; + } +}; + +$root.paddle.lite.fbs.proto.VarType_.LoDTensorDesc = class LoDTensorDesc { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.VarType_.LoDTensorDesc(); + $.tensor = reader.table(position, 4, $root.paddle.lite.fbs.proto.VarType_.TensorDesc.decode); + $.lod_level = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.VarType_.LoDTensorDesc(); + $.tensor = reader.object(json.tensor, $root.paddle.lite.fbs.proto.VarType_.TensorDesc.decodeText); + $.lod_level = reader.value(json.lod_level, 0); + return $; + } +}; + +$root.paddle.lite.fbs.proto.VarType_.LoDTensorArrayDesc = class LoDTensorArrayDesc { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.VarType_.LoDTensorArrayDesc(); + $.tensor = reader.table(position, 4, $root.paddle.lite.fbs.proto.VarType_.TensorDesc.decode); + $.lod_level = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.VarType_.LoDTensorArrayDesc(); + $.tensor = reader.object(json.tensor, $root.paddle.lite.fbs.proto.VarType_.TensorDesc.decodeText); + $.lod_level = reader.value(json.lod_level, 0); + return $; + } +}; + +$root.paddle.lite.fbs.proto.VarType_.ReaderDesc = class ReaderDesc { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.VarType_.ReaderDesc(); + $.lod_tensor = reader.tableArray(position, 4, $root.paddle.lite.fbs.proto.VarType_.LoDTensorDesc.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.VarType_.ReaderDesc(); + $.lod_tensor = reader.objectArray(json.lod_tensor, $root.paddle.lite.fbs.proto.VarType_.LoDTensorDesc.decodeText); + return $; + } +}; + +$root.paddle.lite.fbs.proto.VarType_.Tuple = class Tuple { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.VarType_.Tuple(); + $.element_type = reader.typedArray(position, 4, Int32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.VarType_.Tuple(); + $.element_type = reader.objectArray(json.element_type, $root.paddle.lite.fbs.proto.VarType_.Type.decodeText); + return $; + } +}; + +$root.paddle = $root.paddle || {}; + +$root.paddle.lite = $root.paddle.lite || {}; + +$root.paddle.lite.fbs = $root.paddle.lite.fbs || {}; + +$root.paddle.lite.fbs.proto = $root.paddle.lite.fbs.proto || {}; + +$root.paddle.lite.fbs.proto.CompatibleInfo_ = $root.paddle.lite.fbs.proto.CompatibleInfo_ || {}; + +$root.paddle.lite.fbs.proto.CompatibleInfo_.Type = { + COMPATIBLE: 0, + DEFINITELY_NOT: 1, + POSSIBLE: 2, + BUG_FIX: 3, + PRECISION_CHANGE: 4 +}; + +$root.paddle = $root.paddle || {}; + +$root.paddle.lite = $root.paddle.lite || {}; + +$root.paddle.lite.fbs = $root.paddle.lite.fbs || {}; + 
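+// In the flatbuffers decoders of this file, the second argument to
+// reader.int32_ / string_ / table / tableArray is the field's vtable offset
+// (4, 6, 8, ... for the first, second, third field) and any trailing scalar
+// argument is the default returned when the field is absent. A decode
+// sketch, assuming 'buffer' holds a serialized Paddle Lite ProgramDesc:
+//
+//     const reader = flatbuffers.BinaryReader.open(buffer);
+//     const program = $root.paddle.lite.fbs.proto.ProgramDesc.create(reader);
+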
+$root.paddle.lite.fbs.proto = $root.paddle.lite.fbs.proto || {}; + +$root.paddle.lite.fbs.proto.OpDesc_ = $root.paddle.lite.fbs.proto.OpDesc_ || {}; + +$root.paddle.lite.fbs.proto.OpDesc_.Attr = class Attr { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.OpDesc_.Attr(); + $.name = reader.string_(position, 4, null); + $.type = reader.int32_(position, 6, 0); + $.i = reader.int32_(position, 8, 0); + $.f = reader.float32_(position, 10, 0); + $.s = reader.string_(position, 12, null); + $.ints = reader.typedArray(position, 14, Int32Array); + $.floats = reader.typedArray(position, 16, Float32Array); + $.strings = reader.strings_(position, 18); + $.b = reader.bool_(position, 20, false); + $.bools = reader.bools_(position, 22); + $.block_idx = reader.int32_(position, 24, 0); + $.l = reader.int64_(position, 26, 0); + $.blocks_idx = reader.typedArray(position, 28, Int32Array); + $.longs = reader.int64s_(position, 30); + $.float64 = reader.float64_(position, 32, 0); + $.float64s = reader.typedArray(position, 34, Float64Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.OpDesc_.Attr(); + $.name = reader.value(json.name, null); + $.type = $root.paddle.lite.fbs.proto.AttrType[json.type]; + $.i = reader.value(json.i, 0); + $.f = reader.value(json.f, 0); + $.s = reader.value(json.s, null); + $.ints = reader.typedArray(json.ints, Int32Array); + $.floats = reader.typedArray(json.floats, Float32Array); + $.strings = reader.array(json.strings); + $.b = reader.value(json.b, false); + $.bools = reader.array(json.bools); + $.block_idx = reader.value(json.block_idx, 0); + $.l = reader.value(json.l, 0); + $.blocks_idx = reader.typedArray(json.blocks_idx, Int32Array); + $.longs = reader.array(json.longs); + $.float64 = reader.value(json.float64, 0); + $.float64s = reader.typedArray(json.float64s, Float64Array); + return $; + } +}; + +$root.paddle.lite.fbs.proto.OpDesc_.Var = class Var { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.OpDesc_.Var(); + $.parameter = reader.string_(position, 4, null); + $.arguments = reader.strings_(position, 6); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.OpDesc_.Var(); + $.parameter = reader.value(json.parameter, null); + $.arguments = reader.array(json.arguments); + return $; + } +}; + +$root.paddle = $root.paddle || {}; + +$root.paddle.lite = $root.paddle.lite || {}; + +$root.paddle.lite.fbs = $root.paddle.lite.fbs || {}; + +$root.paddle.lite.fbs.proto = $root.paddle.lite.fbs.proto || {}; + +$root.paddle.lite.fbs.proto.OpVersionMap_ = $root.paddle.lite.fbs.proto.OpVersionMap_ || {}; + +$root.paddle.lite.fbs.proto.OpVersionMap_.OpVersionPair = class OpVersionPair { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.OpVersionMap_.OpVersionPair(); + $.op_name = reader.string_(position, 4, null); + $.op_version = reader.table(position, 6, $root.paddle.lite.fbs.proto.OpVersion.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.OpVersionMap_.OpVersionPair(); + $.op_name = reader.value(json.op_name, null); + $.op_version = reader.object(json.op_version, $root.paddle.lite.fbs.proto.OpVersion.decodeText); + return $; + } +}; + +$root.paddle = $root.paddle || {}; + +$root.paddle.lite = $root.paddle.lite || {}; + +$root.paddle.lite.fbs = $root.paddle.lite.fbs || {}; + +$root.paddle.lite.fbs.proto = $root.paddle.lite.fbs.proto || 
{}; + +$root.paddle.lite.fbs.proto.ParamDesc_ = $root.paddle.lite.fbs.proto.ParamDesc_ || {}; + +$root.paddle.lite.fbs.proto.ParamDesc_.LoDTensorDesc = class LoDTensorDesc { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.ParamDesc_.LoDTensorDesc(); + $.lod_level = reader.int32_(position, 4, 0); + $.lod = reader.int64s_(position, 6); + $.dim = reader.int64s_(position, 8); + $.data_type = reader.int32_(position, 10, 0); + $.data = reader.typedArray(position, 12, Int8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.ParamDesc_.LoDTensorDesc(); + $.lod_level = reader.value(json.lod_level, 0); + $.lod = reader.array(json.lod); + $.dim = reader.array(json.dim); + $.data_type = $root.paddle.lite.fbs.proto.VarType_.Type[json.data_type]; + $.data = reader.typedArray(json.data, Int8Array); + return $; + } +}; + +$root.paddle.lite.fbs.proto.ParamDesc_.VersionDesc = class VersionDesc { + + static decode(reader, position) { + const $ = new $root.paddle.lite.fbs.proto.ParamDesc_.VersionDesc(); + $.version = reader.int32_(position, 4, 0); + $.model_version = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.paddle.lite.fbs.proto.ParamDesc_.VersionDesc(); + $.version = reader.value(json.version, 0); + $.model_version = reader.value(json.model_version, 0); + return $; + } +}; + +$root.paddle.lite.fbs.proto.ParamDesc_.VariableDesc = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.paddle.lite.fbs.proto.ParamDesc_.LoDTensorDesc.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'LoDTensorDesc': return $root.paddle.lite.fbs.proto.ParamDesc_.LoDTensorDesc.decodeText(reader, json); + default: return undefined; + } + } +}; diff --git a/paddle.js b/paddle.js new file mode 100644 index 00000000000..18688f6a87a --- /dev/null +++ b/paddle.js @@ -0,0 +1,852 @@ + +import * as base from './base.js'; +import * as flatbuffers from './flatbuffers.js'; +import * as protobuf from './protobuf.js'; +import * as python from './python.js'; + +const paddle = {}; + +paddle.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (identifier === '__model__' || extension === '__model__' || extension === 'paddle' || extension === 'pdmodel') { + const tags = context.tags('pb'); + if (tags.get(1) === 2) { + return 'paddle.pb'; + } + } + if (extension === 'pbtxt' || extension === 'txt') { + const tags = context.tags('pbtxt'); + if (tags.has('blocks')) { + return 'paddle.pbtxt'; + } + } + const stream = context.stream; + if (stream && stream.length > 16 && stream.peek(16).every((value) => value === 0x00)) { + return 'paddle.params'; + } + if (paddle.Pickle.open(context)) { + return 'paddle.pickle'; + } + if (paddle.Entries.open(context)) { + return 'paddle.entries'; + } + if (paddle.NaiveBuffer.open(context)) { + return 'paddle.naive'; + } + return undefined; + } + + async open(context, target) { + const metadata = await context.metadata('paddle-metadata.json'); + switch (target) { + case 'paddle.naive': { + await context.require('./paddle-schema'); + paddle.schema = flatbuffers.get('paddlelite').paddle.lite.fbs.proto; + const file = paddle.NaiveBuffer.open(context); + return new paddle.Model(metadata, file.format, file.model, file.weights); + } + default: { + await 
context.require('./paddle-proto'); + paddle.proto = protobuf.get('paddle').paddle.framework.proto; + const identifier = context.identifier; + const parts = identifier.split('.'); + const extension = parts.pop().toLowerCase(); + const base = parts.join('.'); + const openProgram = (stream, target) => { + const program = {}; + switch (target) { + case 'paddle.pbtxt': { + try { + const reader = protobuf.TextReader.open(stream); + program.desc = paddle.proto.ProgramDesc.decodeText(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new paddle.Error(`File text format is not paddle.ProgramDesc (${message.replace(/\.$/, '')}).`); + } + break; + } + case 'paddle.pb': { + try { + const reader = protobuf.BinaryReader.open(stream); + program.desc = paddle.proto.ProgramDesc.decode(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new paddle.Error(`File format is not paddle.ProgramDesc (${message.replace(/\.$/, '')}).`); + } + break; + } + default: { + throw new paddle.Error(`Unsupported Paddle format '${target}'.`); + } + } + const formatVersion = (version) => { + if (version && version.version && version.version.toNumber) { + const number = version.version.toNumber(); + if (number > 0) { + const list = [ Math.floor(number / 1000000) % 1000, Math.floor(number / 1000) % 1000, number % 1000 ]; + if (list.slice(-1).pop() === 0) { + list.pop(); + if (list.slice(-1).pop() === 0) { + list.pop(); + } + } + return ` v${list.map((item) => item.toString()).join('.')}`; + } + } + return ''; + }; + program.format = `PaddlePaddle${formatVersion(program.desc.version)}`; + const variables = new Set(); + for (const block of program.desc.blocks) { + const blockVars = new Set(); + for (const variable of block.vars) { + if (variable.persistable && variable.type && + variable.type.type != paddle.DataType.FETCH_LIST && + variable.type.type != paddle.DataType.FEED_MINIBATCH) { + blockVars.add(variable.name); + } + } + for (const op of block.ops) { + for (const input of op.inputs) { + for (const argument of input.arguments) { + if (blockVars.has(argument)) { + variables.add(argument); + } + } + } + } + } + program.vars = Array.from(variables).sort(); + return program; + }; + const loadParams = (stream) => { + const params = []; + while (stream.position < stream.length) { + const tensor = paddle.Utility.openTensorDesc(stream); + params.push(tensor); + } + return params; + }; + const mapParams = (params, program) => { + const weights = new Map(); + const vars = program.vars.slice(); + for (const param of params) { + weights.set(vars.shift(), param); + } + return weights; + }; + switch (target) { + case 'paddle.pickle': { + const container = paddle.Pickle.open(context); + return new paddle.Model(metadata, container.format, null, container.weights); + } + case 'paddle.entries': { + const container = paddle.Entries.open(context); + return new paddle.Model(metadata, container.format, null, container.weights); + } + case 'paddle.params': { + const file = identifier !== 'params' ? 
`${base}.pdmodel` : 'model'; + const params = loadParams(context.stream); + try { + const content = await context.fetch(file); + const program = openProgram(content.stream, 'paddle.pb'); + const weights = mapParams(params, program); + return new paddle.Model(metadata, program.format, program.desc, weights); + } catch (error) { + const weights = new Map(params.map((param, index) => [ index.toString(), param ])); + return new paddle.Model(metadata, 'PaddlePaddle Inference Weights', null, weights); + } + } + case 'paddle.pb': + case 'paddle.pbtxt': { + const loadEntries = async (context, program) => { + const promises = program.vars.map((name) => context.fetch(name).then((context) => context.stream).catch(() => null)); + const streams = await Promise.all(promises); + const params = streams.map((stream) => stream ? paddle.Utility.openTensorDesc(stream) : null); + const weights = mapParams(params, program); + return new paddle.Model(metadata, program.format, program.desc, weights); + }; + const openNumPyArrayPickle = (stream) => { + const execution = new python.Execution(); + const unpickler = execution.invoke('pickle.Unpickler', [ stream ]); + const obj = unpickler.load(); + const container = new paddle.Pickle(obj); + return container.weights || new Map(); + }; + const program = openProgram(context.stream, target); + if (extension === 'pdmodel') { + try { + const name = `${base}.pdiparams`; + const content = await context.fetch(name); + const params = loadParams(content.stream); + const weights = mapParams(params, program); + return new paddle.Model(metadata, program.format, program.desc, weights); + } catch (error) { + try { + const name = `${base}.pdparams`; + const content = await context.fetch(name); + const weights = openNumPyArrayPickle(content.stream); + try { + const name = `${base}.pdopt`; + const content = await context.fetch(name); + for (const [name, value] of openNumPyArrayPickle(content.stream)) { + if (!weights.has(name)) { + weights.set(name, value); + } + } + return new paddle.Model(metadata, program.format, program.desc, weights); + } catch (error) { + return new paddle.Model(metadata, program.format, program.desc, weights); + } + } catch (error) { + try { + const name = `${base}.pdopt`; + const content = await context.fetch(name); + const weights = openNumPyArrayPickle(content.stream); + return new paddle.Model(metadata, program.format, program.desc, weights); + } catch (error) { + return loadEntries(context, program); + } + } + } + } + if (identifier === 'model') { + try { + const content = await context.fetch('params'); + const params = loadParams(content.stream); + const weights = mapParams(params, program); + return new paddle.Model(metadata, program.format, program.desc, weights); + } catch (error) { + return loadEntries(context, program); + } + } + return loadEntries(context, program); + } + default: { + throw new paddle.Error(`Unsupported PaddlePaddle format '${target}'.`); + } + } + } + } + } +}; + +paddle.Model = class { + + constructor(metadata, format, desc, tensors) { + desc = desc && Array.isArray(desc.blocks) ? 
desc : { blocks: [ null ] }; + this.format = format; + this.graphs = desc.blocks.map((block) => new paddle.Graph(metadata, block, tensors)); + } +}; + +paddle.Graph = class { + + constructor(metadata, block, tensors) { + this.nodes = []; + this.inputs = []; + this.outputs = []; + if (block) { + this.name = block.idx.toString(); + const values = new Map(); + for (const variable of block.vars) { + const type = variable.type && variable.type.type && variable.type.lod_tensor && variable.type.lod_tensor.tensor ? paddle.Utility.createTensorType(variable.type.lod_tensor.tensor.data_type, variable.type.lod_tensor.tensor.dims) : null; + const tensor = variable.persistable && variable.type && variable.type.type != paddle.DataType.FETCH_LIST && variable.type.type != paddle.DataType.FEED_MINIBATCH ? (tensors.get(variable.name) || new paddle.Tensor(type)) : null; + values.set(variable.name, new paddle.Value(variable.name, type, tensor)); + } + const scope = {}; + for (let i = 0; i < block.ops.length; i++) { + for (const input of block.ops[i].inputs) { + input.arguments = input.arguments.map((argument) => scope[argument] ? scope[argument] : argument); + } + for (const output of block.ops[i].outputs) { + output.arguments = output.arguments.map((argument) => { + if (scope[argument]) { + const next = `${argument}\n${i}`; // custom argument id + scope[argument] = next; + return next; + } + scope[argument] = argument; + return argument; + }); + } + } + for (const op of block.ops) { + for (const input of op.inputs) { + for (const name of input.arguments) { + if (!values.has(name)) { + values.set(name, new paddle.Value(name, null, null)); + } + } + } + for (const output of op.outputs) { + for (const name of output.arguments) { + if (!values.has(name)) { + values.set(name, new paddle.Value(name, null, null)); + } + } + } + } + let lastNode = null; + let lastOutput = null; + for (const op of block.ops) { + if (op.type == 'feed') { + const name = op.attrs.filter((attr) => attr.name == 'col')[0].i.toString(); + const argument = new paddle.Argument(name, op.outputs[0].arguments.map((id) => values.get(id))); + this.inputs.push(argument); + } else if (op.type == 'fetch') { + const name = op.attrs.filter((attr) => attr.name == 'col')[0].i.toString(); + const argument = new paddle.Argument(name, op.inputs[0].arguments.map((id) => values.get(id))); + this.outputs.push(argument); + } else { + const node = new paddle.Node(metadata, op, values); + if (op.inputs.length == 1 && op.inputs[0].arguments.length == 1 && + op.outputs.length >= 1 && op.outputs[0].arguments.length == 1 && + op.inputs[0].arguments[0].split('\n').shift() == op.outputs[0].arguments[0].split('\n').shift() && + lastNode && + lastOutput == op.inputs[0].arguments[0].split('\n').shift()) { + lastNode.chain.push(node); + } else { + this.nodes.push(node); + lastNode = null; + lastOutput = null; + if (op.outputs.length == 1 && op.outputs[0].arguments.length == 1) { + lastNode = node; + lastOutput = op.outputs[0].arguments[0].split('\n').shift(); + } + } + } + } + } else { + const values = new Map(); + const ops = new Map(); + for (const [name, tensor] of tensors) { + values.set(name, new paddle.Value(name, tensor.type, tensor)); + const separator = name.indexOf('.') !== -1 ? '.' : '_'; + const regex = /(.*)_((w_attr|scale|weights|offset|b|w|b_attr)_(moment|beta|velocity|mean_square|mean_grad).*)/; + const parts = separator === '.' ? name.split(separator) : (regex.test(name) ? 
regex.exec(name).slice(1, 3) : [ '', name ]); + const parameter_name = parts.pop(); + const op_name = parts.join(separator); + if (!ops.has(op_name)) { + ops.set(op_name, { name: op_name, type: 'Weights', inputs: [] }); + } + const op = ops.get(op_name); + op.inputs.push({ parameter: parameter_name, arguments: [ name ] }); + } + for (const op of Array.from(ops.values())) { + this.nodes.push(new paddle.Node(metadata, op, values)); + } + } + } +}; + +paddle.Argument = class { + + constructor(name, value, type, visible) { + this.name = name; + this.value = value; + if (type) { + this.type = type; + } + if (visible === false) { + this.visible = visible; + } + } +}; + +paddle.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new paddle.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type ? type : initializer && initializer.type ? initializer.type : null; + this.initializer = initializer || null; + } +}; + +paddle.Node = class { + + constructor(metadata, op, values) { + const type = op.type; + this.type = metadata.type(type) || { name: type }; + this.name = op.name || ''; + this.attributes = []; + this.inputs = []; + this.outputs = []; + this.chain = []; + if (op.attrs) { + this.attributes = op.attrs.map((attr) => { + const name = attr.name; + const meta = metadata.attribute(this.type.name, name); + let value = '?'; + let visible = true; + let type = null; + switch (attr.type) { + case paddle.AttributeType.STRING: + type = 'string'; + value = attr.s; + break; + case paddle.AttributeType.STRINGS: + type = 'string[]'; + value = Array.from(attr.strings); + break; + case paddle.AttributeType.BOOLEAN: + type = 'boolean'; + value = attr.b; + break; + case paddle.AttributeType.BOOLEANS: + type = 'boolean[]'; + value = attr.bools ? Array.from(attr.bools) : attr.bools; + break; + case paddle.AttributeType.FLOAT: + type = 'float32'; + value = attr.f; + break; + case paddle.AttributeType.FLOATS: + type = 'float32[]'; + value = attr.floats ? Array.from(attr.floats) : attr.floats; + break; + case paddle.AttributeType.FLOAT64: + type = 'float64'; + value = attr.float64; + break; + case paddle.AttributeType.FLOAT64S: + type = 'float64[]'; + value = attr.float64s ? Array.from(attr.float64s) : attr.float64s; + break; + case paddle.AttributeType.INT: + type = 'int32'; + value = attr.i; + break; + case paddle.AttributeType.INTS: + type = 'int32[]'; + value = attr.ints ? 
Array.from(attr.ints) : attr.ints; + break; + case paddle.AttributeType.LONG: + type = 'int64'; + break; + case paddle.AttributeType.LONGS: + type = 'int64[]'; + break; + default: + break; + } + switch (name) { + case 'use_mkldnn': + case 'use_cudnn': + case 'op_callstack': + case 'op_role': + case 'op_role_var': + case 'op_namescope': + case 'is_test': + visible = false; + break; + default: + break; + } + if (meta) { + if (meta.default !== undefined) { + const defaultValue = meta.default; + if (defaultValue == value) { + visible = false; + } else if (Array.isArray(value) && Array.isArray(defaultValue) && value.length == defaultValue.length) { + if (value.every((item, index) => item == defaultValue[index])) { + visible = false; + } + } + } + } + return new paddle.Argument(name, value, type, visible); + }); + } + if (op.inputs) { + for (const input of op.inputs) { + if (input.arguments.length > 0) { + this.inputs.push(new paddle.Argument(input.parameter, input.arguments.map((name) => values.get(name)))); + } + } + } + if (op.outputs) { + for (const output of op.outputs) { + if (output.arguments.length > 0) { + this.outputs.push(new paddle.Argument(output.parameter, output.arguments.map((name) => values.get(name)))); + } + } + } + const updates = [ + [ this.inputs, 'X' ], + [ this.inputs, 'Input' ], + [ this.outputs, 'Y' ], + [ this.outputs, 'Out' ] + ]; + for (const [list, name] of updates) { + let item = null; + for (let i = 0; i < list.length; i++) { + if (list[i].name == name) { + item = list[i]; + list.splice(i, 1); + break; + } + } + if (item) { + list.splice(0, 0, item); + } + } + } +}; + +paddle.Tensor = class { + + constructor(type, data, category) { + this.type = type; + this.values = data; + this.category = category || ''; + } +}; + +paddle.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType; + this.shape = shape; + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +paddle.TensorShape = class { + + constructor(dimensions) { + dimensions = dimensions.map((dimension) => Number.isInteger(dimension) ? dimension : dimension.toNumber()); + this.dimensions = dimensions.map((dimension) => { + return dimension != -1 ? dimension : '?'; + }); + } + + toString() { + return (this.dimensions && this.dimensions.length) ? (`[${this.dimensions.join(',')}]`) : ''; + } +}; + +paddle.Entries = class { + + static open(context) { + let entries = context.peek('zip'); + entries = entries instanceof Map ? entries : context.peek('tar'); + if (entries instanceof Map) { + entries = Array.from(entries); + entries = new Map(entries.filter(([name]) => !name.endsWith('/') && !name.split('/').pop().startsWith('.')).slice()); + if (entries.size > 2 && Array.from(entries).every(([name, value]) => name.split('_').length > 0 && value.peek(16).every((value) => value === 0x00))) { + return new paddle.Entries(entries); + } + } + return null; + } + + constructor(data) { + this._data = data; + } + + get format() { + return 'PaddlePaddle Weights'; + } + + get weights() { + this._read(); + return this._weights; + } + + _read() { + if (!this._weights) { + let rootFolder = null; + for (const [name] of this._data) { + if (!name.startsWith('.') || name.startsWith('./')) { + const parts = name.split('/'); + const folder = ((parts.length > 2 && parts[0] === '.') ? (`./${parts[1]}/`) : (parts.length > 1 ? `${parts[0]}/` : '')); + rootFolder = (rootFolder === null) ? folder : (rootFolder !== '' && folder !== rootFolder) ? 
'' : folder;
+                }
+            }
+            this._weights = new Map();
+            for (const [name, stream] of this._data) {
+                if (name.startsWith(rootFolder)) {
+                    const key = name.substring(rootFolder.length);
+                    const tensor = paddle.Utility.openTensorDesc(stream);
+                    this._weights.set(key, tensor);
+                }
+            }
+        }
+    }
+};
+
+paddle.Pickle = class {
+
+    static open(context) {
+        const obj = context.peek('pkl');
+        const container = new paddle.Pickle(obj);
+        return container.weights !== null ? container : null;
+    }
+
+    constructor(obj) {
+        this._weights = null;
+        if (obj && !Array.isArray(obj) && (obj instanceof Map || Object(obj) === obj)) {
+            const entries = (obj) => {
+                return obj instanceof Map ? Array.from(obj) : Object(obj) === obj ? Object.entries(obj) : [];
+            };
+            const filter = (obj) => {
+                const list = [];
+                if (obj && !Array.isArray(obj)) {
+                    for (const [name, value] of entries(obj)) {
+                        if (name !== 'StructuredToParameterName@@') {
+                            const obj = value && Array.isArray(value) && value.length === 2 && value[0] === name ? value[1] : value;
+                            if (obj && !Array.isArray(obj) && obj.__class__ && obj.__class__.__module__ === 'numpy' && obj.__class__.__name__ === 'ndarray') {
+                                list.push([ name, obj ]);
+                            }
+                        }
+                    }
+                }
+                return list;
+            };
+            const weights = filter(obj);
+            if (weights.length > 0) {
+                this._weights = weights;
+            } else {
+                const list = entries(obj);
+                if (list.filter(([name]) => name !== 'StructuredToParameterName@@').length === 1) {
+                    const weights = filter(list[0][1]);
+                    if (weights.length > 0) {
+                        this._weights = weights;
+                    }
+                }
+                if (this._weights === null && list.filter(([name]) => name === 'StructuredToParameterName@@').length > 0) {
+                    this._weights = [];
+                }
+            }
+        }
+    }
+
+    get format() {
+        return 'PaddlePaddle Pickle';
+    }
+
+    get weights() {
+        if (this._weights && Array.isArray(this._weights)) {
+            const weights = new Map();
+            for (const [name, value] of this._weights) {
+                const type = new paddle.TensorType(value.dtype.__name__, new paddle.TensorShape(value.shape));
+                const data = value.data;
+                const tensor = new paddle.Tensor(type, data, 'NumPy Array');
+                weights.set(name, tensor);
+            }
+            this._weights = weights;
+        }
+        return this._weights;
+    }
+};
+
+paddle.NaiveBuffer = class {
+
+    static open(context) {
+        const stream = context.stream;
+        if (stream && stream.length > 4) {
+            const buffer = stream.peek(4);
+            if (context.identifier === '__model__.nb' || context.identifier === 'param.nb') {
+                if (buffer[0] > 2 || buffer[1] !== 0x00 || buffer[2] !== 0x76 || buffer[3] !== 0x32) {
+                    return new paddle.NaiveBuffer(stream, -1);
+                }
+            }
+            if (buffer[1] === 0x00 && buffer[0] <= 2) {
+                return new paddle.NaiveBuffer(stream, buffer[0]);
+            }
+        }
+        return null;
+    }
+
+    constructor(stream, meta_version) {
+        this.stream = stream;
+        this.meta_version = meta_version;
+    }
+
+    get format() {
+        this._read();
+        return this._format;
+    }
+
+    get model() {
+        this._read();
+        return this._model;
+    }
+
+    get weights() {
+        this._read();
+        return this._weights;
+    }
+
+    _read() {
+        if (this.stream) {
+            const reader = new base.BinaryReader(this.stream);
+            if (this.meta_version >= 2) {
+                reader.skip(2);
+            }
+            delete this.stream;
+            const decoder = new TextDecoder();
+            const opt_version = reader.read(16);
+            const version = decoder.decode(opt_version.slice(0, opt_version.indexOf(0x00)));
+            this._format = `Paddle Lite${version && version.match(/^v\d+\.\d+\.\d+$/) ? ` ${version}` : ''}`;
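+            // Remaining meta_version 2 layout, as consumed below: [uint64 topo_size]
+            // [flatbuffers ProgramDesc, topo_size bytes][uint16 version]
+            // [uint16 meta_size][uint16 header_size][uint16 params_size]
+            // [uint32 max_tensor_size][header_size - 6 bytes skipped], then per
+            // parameter: [uint32 total_size][uint32 offset] and a flatbuffers
+            // ParamDesc of total_size - offset bytes.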
+            const topo_size = reader.uint64();
+            const openProgramDesc = (buffer) => {
+                const reader = flatbuffers.BinaryReader.open(buffer);
+                return paddle.schema.ProgramDesc.create(reader);
+            };
+            const openParamDesc = (buffer) => {
+                const reader = flatbuffers.BinaryReader.open(buffer);
+                return paddle.schema.ParamDesc.create(reader);
+            };
+            switch (this.meta_version) {
+                case -1: {
+                    throw new paddle.Error('Paddle Lite naive buffer format is deprecated.');
+                }
+                case 0:
+                case 1: {
+                    throw new paddle.Error(`Paddle Lite meta format '${this.meta_version}' is deprecated.`);
+                }
+                case 2: {
+                    const topo_data = new Uint8Array(topo_size);
+                    topo_data.set(reader.read(topo_size), 0);
+                    this._model = openProgramDesc(topo_data);
+                    reader.uint16(); // version
+                    reader.uint16(); // meta_size
+                    const header_size = reader.uint16();
+                    const params_size = reader.uint16();
+                    reader.uint32(); // max_tensor_size
+                    reader.skip(header_size - 6);
+                    this._weights = new Map();
+                    for (let i = 0; i < params_size; i++) {
+                        const total_size = reader.uint32();
+                        const offset = reader.uint32();
+                        const param_bytes = total_size - offset;
+                        const param_data = reader.read(param_bytes);
+                        const desc = openParamDesc(param_data);
+                        const data = desc.variable.data;
+                        const data_type = desc.variable.data_type;
+                        const dim = desc.variable.dim;
+                        const type = paddle.Utility.createTensorType(data_type, dim);
+                        const tensor = new paddle.Tensor(type, data);
+                        this._weights.set(desc.name, tensor);
+                    }
+                    break;
+                }
+                default: {
+                    throw new paddle.Error(`Unsupported Paddle Lite naive buffer meta format '${this.meta_version}'.`);
+                }
+            }
+        }
+    }
+};
+
+
+paddle.Utility = class {
+
+    static createTensorType(data_type, shape) {
+        if (!paddle.Utility._dataTypes) {
+            const length = Math.max.apply(null, Object.entries(paddle.DataType).map(([, value]) => value));
+            paddle.Utility._dataTypes = new Array(length);
+            const map = new Map([ [ 'bool', 'boolean' ], [ 'bf16', 'bfloat16' ], [ 'fp16', 'float16' ], [ 'fp32', 'float32' ], [ 'fp64', 'float64' ] ]);
+            for (const [name, index] of Object.entries(paddle.DataType)) {
+                const key = name.toLowerCase();
+                paddle.Utility._dataTypes[index] = map.has(key) ? map.get(key) : key;
+            }
+        }
+        const dataType = data_type < paddle.Utility._dataTypes.length ?
paddle.Utility._dataTypes[data_type] : '?'; + return new paddle.TensorType(dataType, new paddle.TensorShape(shape)); + } + + static openTensorDesc(stream) { + const signature = stream.read(16); + if (!signature.every((value) => value === 0x00)) { + throw new paddle.Error('Invalid paddle.TensorDesc signature.'); + } + const length = new base.BinaryReader(stream.read(4)).uint32(); + const buffer = stream.read(length); + const reader = protobuf.BinaryReader.open(buffer); + const tensorDesc = paddle.proto.VarType.TensorDesc.decode(reader); + const size = tensorDesc.dims.reduce((a, b) => a * b.toNumber(), 1); + let itemsize = 0; + switch (tensorDesc.data_type) { + case paddle.DataType.FP16: itemsize = 2; break; + case paddle.DataType.FP32: itemsize = 4; break; + case paddle.DataType.FP64: itemsize = 8; break; + case paddle.DataType.INT8: itemsize = 1; break; + case paddle.DataType.INT16: itemsize = 2; break; + case paddle.DataType.INT32: itemsize = 4; break; + case paddle.DataType.INT64: itemsize = 8; break; + case paddle.DataType.UINT8: itemsize = 1; break; + default: throw new paddle.Error(`Invalid inference params data type '${tensorDesc.data_type}'.`); + } + const type = paddle.Utility.createTensorType(tensorDesc.data_type, tensorDesc.dims); + const data = stream.read(itemsize * size); + return new paddle.Tensor(type, data); + } +}; + +paddle.DataType = { + BOOL: 0, + INT16: 1, + INT32: 2, + INT64: 3, + FP16: 4, + FP32: 5, + FP64: 6, + LOD_TENSOR: 7, + SELECTED_ROWS: 8, + FEED_MINIBATCH: 9, + FETCH_LIST: 10, + STEP_SCOPES: 11, + LOD_RANK_TABLE: 12, + LOD_TENSOR_ARRAY: 13, + PLACE_LIST: 14, + READER: 15, + RAW: 17, + TUPLE: 18, + SIZE_T: 19, + UINT8: 20, + INT8: 21, + BF16: 22, + COMPLEX64: 23, + COMPLEX128: 24, +}; + +paddle.AttributeType = { + INT: 0, + FLOAT: 1, + STRING: 2, + INTS: 3, + FLOATS: 4, + STRINGS: 5, + BOOLEAN: 6, + BOOLEANS: 7, + BLOCK: 8, + LONG: 9, + BLOCKS: 10, + LONGS: 11, + FLOAT64S: 12, + VAR: 13, + VARS: 14, + FLOAT64: 15 +}; + +paddle.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading PaddlePaddle model.'; + } +}; + +export const ModelFactory = paddle.ModelFactory; diff --git a/pickle.js b/pickle.js new file mode 100644 index 00000000000..b4f00c39667 --- /dev/null +++ b/pickle.js @@ -0,0 +1,214 @@ + +// Experimental + +const pickle = {}; + +pickle.ModelFactory = class { + + match(context) { + const stream = context.stream; + const signature = [ 0x80, undefined, 0x8a, 0x0a, 0x6c, 0xfc, 0x9c, 0x46, 0xf9, 0x20, 0x6a, 0xa8, 0x50, 0x19 ]; + if (stream && signature.length <= stream.length && stream.peek(signature.length).every((value, index) => signature[index] === undefined || signature[index] === value)) { + // Reject PyTorch models with .pkl file extension. + return null; + } + const obj = context.peek('pkl'); + if (obj !== undefined) { + const name = obj && obj.__class__ && obj.__class__.__module__ && obj.__class__.__name__ ? 
`${obj.__class__.__module__}.${obj.__class__.__name__}` : ''; + if (!name.startsWith('__torch__.')) { + return obj; + } + } + return null; + } + + async open(context, target) { + let format = 'Pickle'; + const obj = target; + if (obj === null || obj === undefined) { + context.exception(new pickle.Error("Unsupported Pickle null object.")); + } else if (obj instanceof Error) { + throw obj; + } else if (Array.isArray(obj)) { + if (obj.length > 0 && obj[0] && obj.every((item) => item && item.__class__ && obj[0].__class__ && item.__class__.__module__ === obj[0].__class__.__module__ && item.__class__.__name__ === obj[0].__class__.__name__)) { + const type = `${obj[0].__class__.__module__}.${obj[0].__class__.__name__}`; + context.exception(new pickle.Error(`Unsupported Pickle '${type}' array object.`)); + } else if (obj.length > 0) { + context.exception(new pickle.Error("Unsupported Pickle array object.")); + } + } else if (obj && obj.__class__) { + const formats = new Map([ + [ 'cuml.ensemble.randomforestclassifier.RandomForestClassifier', 'cuML' ] + ]); + const type = `${obj.__class__.__module__}.${obj.__class__.__name__}`; + if (formats.has(type)) { + format = formats.get(type); + } else { + context.exception(new pickle.Error(`Unsupported Pickle type '${type}'.`)); + } + } else { + context.exception(new pickle.Error('Unsupported Pickle object.')); + } + return new pickle.Model(obj, format); + } +}; + +pickle.Model = class { + + constructor(value, format) { + this.format = format; + this.graphs = [ new pickle.Graph(value) ]; + } +}; + +pickle.Graph = class { + + constructor(obj) { + this.inputs = []; + this.outputs = []; + this.nodes = []; + if (Array.isArray(obj) && (obj.every((item) => item.__class__) || (obj.every((item) => Array.isArray(item))))) { + for (const item of obj) { + this.nodes.push(new pickle.Node(item)); + } + } else if (obj && obj instanceof Map && !Array.from(obj.values()).some((value) => typeof value === 'string' || typeof value === 'number')) { + for (const [name, value] of obj) { + const node = new pickle.Node(value, name); + this.nodes.push(node); + } + } else if (obj && obj.__class__) { + this.nodes.push(new pickle.Node(obj)); + } else if (obj && Object(obj) === obj) { + this.nodes.push(new pickle.Node(obj)); + } + } +}; + +pickle.Node = class { + + constructor(obj, name, stack) { + const type = obj.__class__ ? `${obj.__class__.__module__}.${obj.__class__.__name__}` : 'builtins.object'; + this.type = { name: type }; + this.name = name || ''; + this.inputs = []; + this.outputs = []; + this.attributes = []; + const isArray = (obj) => { + return obj && obj.__class__ && obj.__class__.__module__ === 'numpy' && obj.__class__.__name__ === 'ndarray'; + }; + const isObject = (obj) => { + if (obj && typeof obj === 'object') { + const proto = Object.getPrototypeOf(obj); + return proto === Object.prototype || proto === null; + } + return false; + }; + const entries = obj instanceof Map ? 
Array.from(obj) : Object.entries(obj); + for (const [name, value] of entries) { + if (name === '__class__') { + continue; + } else if (value && isArray(value)) { + const tensor = new pickle.Tensor(value); + const attribute = new pickle.Argument(name, tensor, 'tensor'); + this.attributes.push(attribute); + } else if (Array.isArray(value) && value.length > 0 && value.every((obj) => isArray(obj))) { + const tensors = value.map((obj) => new pickle.Tensor(obj)); + const attribute = new pickle.Argument(name, tensors, 'tensor[]'); + this.attributes.push(attribute); + } else if (value && value.__class__ && value.__class__.__module__ === 'builtins' && (value.__class__.__name__ === 'function' || value.__class__.__name__ === 'type')) { + const obj = {}; + obj.__class__ = value; + const node = new pickle.Node(obj, '', stack); + const attribute = new pickle.Argument(name, node, 'object'); + this.attributes.push(attribute); + } else { + stack = stack || new Set(); + if (value && Array.isArray(value) && value.every((obj) => typeof obj === 'string')) { + const attribute = new pickle.Argument(name, value, 'string[]'); + this.attributes.push(attribute); + } else if (value && Array.isArray(value) && value.every((obj) => typeof obj === 'number')) { + const attribute = new pickle.Argument(name, value); + this.attributes.push(attribute); + } else if (value && Array.isArray(value) && value.length > 0 && value.every((obj) => obj && (obj.__class__ || obj === Object(obj)))) { + const values = value.filter((value) => !stack.has(value)); + const nodes = values.map((value) => { + stack.add(value); + const node = new pickle.Node(value, '', stack); + stack.delete(value); + return node; + }); + const attribute = new pickle.Argument(name, nodes, 'object[]'); + this.attributes.push(attribute); + } else if (value && (value.__class__ || isObject(value))) { + if (!stack.has(value)) { + stack.add(value); + const node = new pickle.Node(value, '', stack); + const attribute = new pickle.Argument(name, node, 'object'); + this.attributes.push(attribute); + stack.delete(value); + } + } else { + const attribute = new pickle.Argument(name, value); + this.attributes.push(attribute); + } + } + } + } +}; + +pickle.Argument = class { + + constructor(name, value, type, visible) { + this.name = name.toString(); + this.value = value; + if (type) { + this.type = type; + } + if (visible === false) { + this.visible = visible; + } + } +}; + +pickle.Tensor = class { + + constructor(array) { + this.type = new pickle.TensorType(array.dtype.__name__, new pickle.TensorShape(array.shape)); + this.stride = array.strides.map((stride) => stride / array.itemsize); + this.layout = this.type.dataType == 'string' || this.type.dataType == 'object' ? '|' : array.dtype.byteorder; + this.values = this.type.dataType == 'string' || this.type.dataType == 'object' ? array.tolist() : array.tobytes(); + } +}; + +pickle.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType; + this.shape = shape; + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +pickle.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + return this.dimensions ? 
(`[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`) : ''; + } +}; + +pickle.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Pickle model.'; + } +}; + +export const ModelFactory = pickle.ModelFactory; diff --git a/protobuf.js b/protobuf.js new file mode 100644 index 00000000000..7ed66668268 --- /dev/null +++ b/protobuf.js @@ -0,0 +1,1332 @@ + +import * as base from './base.js'; +import * as text from './text.js'; + +const protobuf = {}; + +protobuf.get = (name) => { + protobuf._roots = protobuf._roots || new Map(); + const roots = protobuf._roots; + if (!roots.has(name)) { + roots.set(name, {}); + } + return roots.get(name); +}; + +protobuf.BinaryReader = class { + + static open(data) { + return data ? new protobuf.BinaryReader(data) : null; + } + + constructor(data) { + const buffer = data instanceof Uint8Array ? data : data.peek(); + this._buffer = buffer; + this._length = buffer.length; + this._position = 0; + this._view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._utf8Decoder = new TextDecoder('utf-8'); + } + + signature() { + const tags = new Map(); + this._position = 0; + try { + if (this._length > 0) { + const type = this._buffer[0] & 7; + if (type !== 4 && type !== 6 && type !== 7) { + const length = this.length; + while (this._position < length) { + const tag = this.uint32(); + const field = tag >>> 3; + const type = tag & 7; + if (type > 5 || field === 0) { + tags.clear(); + break; + } + tags.set(field, type); + if (!this._skipType(type)) { + tags.clear(); + break; + } + } + } + } + } catch (err) { + tags.clear(); + } + this._position = 0; + return tags; + } + + decode() { + let tags = {}; + this._position = 0; + try { + const decodeMessage = (max) => { + const length = this._uint32(); + if (length === undefined) { + return undefined; + } + if (length === 0) { + // return 2; + } + const end = this.position + length; + if (end > max) { + return undefined; + } + try { + const tags = {}; + while (this.position < end) { + const tag = this._uint32(); + if (tag === undefined) { + this.seek(end); + return 2; + } + const field = tag >>> 3; + const type = tag & 7; + if (type > 5 || field === 0) { + this.seek(end); + return 2; + } + if (type === 2) { + const type = tags[field]; + if (type !== 2) { + const inner = decodeMessage(end); + if (this.position > end) { + this.seek(end); + return 2; + } + if (inner === undefined) { + this.seek(end); + return 2; + } + if (inner === 2) { + tags[field] = inner; + } else if (!type) { + tags[field] = inner; + } else { + for (const [key, value] of Object.entries(inner)) { + if (type[key] === 2 && value !== 2) { + continue; + } + type[key] = value; + } + } + continue; + } + } + tags[field] = type; + if (!this._skipType(type)) { + this.seek(end); + return 2; + } + } + if (this.position === end) { + return tags; + } + } catch (err) { + // continue regardless of error + } + this.seek(end); + return 2; + }; + if (this._length > 0) { + const type = this._buffer[0] & 7; + if (type !== 4 && type !== 6 && type !== 7) { + const length = this.length; + while (this.position < length) { + const tag = this.uint32(); + const field = tag >>> 3; + const type = tag & 7; + if (type > 5 || field === 0) { + tags = {}; + break; + } + if (type === 2) { + const type = tags[field]; + if (type !== 2) { + const inner = decodeMessage(length); + if (inner === undefined) { + tags = {}; + break; + } + if (inner === 2) { + tags[field] = inner; + } else if (!type) { + 
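+ // Field not seen before as length-delimited: adopt the nested tag map as-is;
+ // repeat sightings are merged field-by-field in the else branch below.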
tags[field] = inner; + } else { + for (const [name, value] of Object.entries(inner)) { + if (type[name] === 2 && value !== 2) { + continue; + } + type[name] = value; + } + } + continue; + } + } + tags[field] = type; + if (!this._skipType(type)) { + tags = {}; + break; + } + } + } + } + } catch (err) { + tags = {}; + } + this._position = 0; + return tags; + } + + get length() { + return this._length; + } + + get position() { + return this._position; + } + + seek(position) { + this._position = position >= 0 ? position : this._length + position; + } + + string() { + return this._utf8Decoder.decode(this.bytes()); + } + + bool() { + return this.uint32() !== 0; + } + + byte() { + if (this._position < this._length) { + return this._buffer[this._position++]; + } + throw new RangeError('Unexpected end of file.'); + } + + bytes() { + const length = this.uint32(); + const position = this._position; + this.skip(length); + return this._buffer.slice(position, this._position); + } + + uint32() { + let c; + c = this.byte(); + let value = (c & 127) >>> 0; + if (c < 128) { + return value; + } + c = this.byte(); + value = (value | (c & 127) << 7) >>> 0; + if (c < 128) { + return value; + } + c = this.byte(); + value = (value | (c & 127) << 14) >>> 0; + if (c < 128) { + return value; + } + c = this.byte(); + value = (value | (c & 127) << 21) >>> 0; + if (c < 128) { + return value; + } + c = this.byte(); + value = (value | (c & 15) << 28) >>> 0; + if (c < 128) { + return value; + } + if (this.byte() !== 255 || this.byte() !== 255 || this.byte() !== 255 || this.byte() !== 255 || this.byte() !== 1) { + throw new protobuf.Error('Varint is not 32-bit.'); + } + return value; + } + + int32() { + return this.uint32() | 0; + } + + sint32() { + const value = this.uint32(); + return value >>> 1 ^ -(value & 1) | 0; + } + + int64() { + return this._varint().toInt64(); + } + + uint64() { + return this._varint().toUint64(); + } + + sint64() { + return this._varint().zzDecode().toInt64(); + } + + fixed64() { + const position = this._position; + this.skip(8); + return this._view.getUint64(position, true); + } + + sfixed64() { + const position = this._position; + this.skip(8); + return this._view.getInt64(position, true); + } + + fixed32() { + const position = this._position; + this.skip(4); + return this._view.getUint32(position, true); + } + + sfixed32() { + const position = this._position; + this.skip(4); + return this._view.getInt32(position, true); + } + + float() { + const position = this._position; + this.skip(4); + return this._view.getFloat32(position, true); + } + + double() { + const position = this._position; + this.skip(8); + return this._view.getFloat64(position, true); + } + + array(obj, item, tag) { + if ((tag & 7) === 2) { + const end = this.uint32() + this._position; + while (this._position < end) { + obj.push(item()); + } + } else { + obj.push(item()); + } + return obj; + } + + floats(obj, tag) { + if ((tag & 7) === 2) { + if (obj && obj.length > 0) { + throw new protobuf.Error('Invalid packed float array.'); + } + const size = this.uint32(); + const end = this._position + size; + if (end > this._length) { + this._unexpected(); + } + const length = size >>> 2; + obj = size > 1048576 ?
new Float32Array(length) : new Array(length); + let position = this._position; + for (let i = 0; i < length; i++) { + obj[i] = this._view.getFloat32(position, true); + position += 4; + } + this._position = end; + } else if (obj !== undefined && obj.length < 1000000) { + obj.push(this.float()); + } else { + obj = undefined; + this.float(); + } + return obj; + } + + doubles(obj, tag) { + if ((tag & 7) === 2) { + if (obj && obj.length > 0) { + throw new protobuf.Error('Invalid packed double array.'); + } + const size = this.uint32(); + const end = this._position + size; + if (end > this._length) { + this._unexpected(); + } + const length = size >>> 3; + obj = size > 1048576 ? new Float64Array(length) : new Array(length); + let position = this._position; + for (let i = 0; i < length; i++) { + obj[i] = this._view.getFloat64(position, true); + position += 8; + } + this._position = end; + } else if (obj !== undefined && obj.length < 1000000) { + obj.push(this.double()); + } else { + obj = undefined; + this.double(); + } + return obj; + } + + skip(offset) { + this._position += offset; + if (this._position > this._length) { + this._unexpected(); + } + } + + skipVarint() { + do { + if (this._position >= this._length) { + this._unexpected(); + } + } + while (this._buffer[this._position++] & 128); + } + + _uint32() { + if (this._position < this._length) { + let c = this._buffer[this._position++]; + let value = (c & 127) >>> 0; + if (c < 128) { + return value; + } + if (this._position < this._length) { + c = this._buffer[this._position++]; + value = (value | (c & 127) << 7) >>> 0; + if (c < 128) { + return value; + } + if (this._position < this._length) { + c = this._buffer[this._position++]; + value = (value | (c & 127) << 14) >>> 0; + if (c < 128) { + return value; + } + if (this._position < this._length) { + c = this._buffer[this._position++]; + value = (value | (c & 127) << 21) >>> 0; + if (c < 128) { + return value; + } + if (this._position < this._length) { + c = this._buffer[this._position++]; + value = (value | (c & 15) << 28) >>> 0; + if (c < 128) { + return value; + } + if (this.byte() !== 255 || this.byte() !== 255 || this.byte() !== 255 || this.byte() !== 255 || this.byte() !== 1) { + return undefined; + } + return value; + } + } + } + } + } + return undefined; + } + + _skipType(wireType) { + switch (wireType) { + case 0: { + // const max = this._position + 9; + do { + if (this._position >= this._length /* || this._position > max */) { + return false; + } + } + while (this._buffer[this._position++] & 128); + break; + } + case 1: { + if (this._position + 8 >= this._length) { + return false; + } + this._position += 8; + break; + } + case 2: { + const length = this._uint32(); + if (length === undefined) { + return false; + } + if (this._position + length > this._length) { + return false; + } + this._position += length; + break; + } + case 3: { + for (;;) { + const tag = this._uint32(); + if (tag === undefined) { + return false; + } + const wireType = tag & 7; + if (wireType === 4) { + break; + } + if (!this._skipType(wireType)) { + return false; + } + } + break; + } + case 5: { + this._position += 4; + if (this._position > this._length) { + return false; + } + break; + } + default: { + return false; + } + } + return true; + } + + skipType(wireType) { + switch (wireType) { + case 0: + this.skipVarint(); + break; + case 1: + this.skip(8); + break; + case 2: + this.skip(this.uint32()); + break; + case 3: + while ((wireType = this.uint32() & 7) !== 4) { + this.skipType(wireType); + } + break; + case
5: + this.skip(4); + break; + default: + throw new protobuf.Error(`Invalid type ${wireType} at offset ${this._position}.`); + } + } + + entry(obj, key, value) { + this.skipVarint(); + this._position++; + let k = key(); + if (!Number.isInteger(k) && typeof k !== 'string') { + k = k.toNumber(); + } + this._position++; + const v = value(); + obj[k] = v; + } + + _varint() { + const bits = new protobuf.LongBits(0, 0); + let i = 0; + if (this._length - this._position > 4) { // fast route (lo) + for (; i < 4; ++i) { + // 1st..4th + bits.lo = (bits.lo | (this._buffer[this._position] & 127) << i * 7) >>> 0; + if (this._buffer[this._position++] < 128) { + return bits; + } + } + // 5th + bits.lo = (bits.lo | (this._buffer[this._position] & 127) << 28) >>> 0; + bits.hi = (bits.hi | (this._buffer[this._position] & 127) >> 4) >>> 0; + if (this._buffer[this._position++] < 128) { + return bits; + } + i = 0; + } else { + for (; i < 3; i++) { + if (this._position >= this._length) { + this._unexpected(); + } + bits.lo = (bits.lo | (this._buffer[this._position] & 127) << i * 7) >>> 0; + if (this._buffer[this._position++] < 128) { + return bits; + } + } + bits.lo = (bits.lo | (this._buffer[this._position++] & 127) << i * 7) >>> 0; + return bits; + } + if (this._length - this._position > 4) { + for (; i < 5; ++i) { + bits.hi = (bits.hi | (this._buffer[this._position] & 127) << i * 7 + 3) >>> 0; + if (this._buffer[this._position++] < 128) { + return bits; + } + } + } else { + for (; i < 5; ++i) { + if (this._position >= this._length) { + this._unexpected(); + } + bits.hi = (bits.hi | (this._buffer[this._position] & 127) << i * 7 + 3) >>> 0; + if (this._buffer[this._position++] < 128) { + return bits; + } + } + } + throw new protobuf.Error('Invalid varint encoding.'); + } + + _unexpected() { + throw new RangeError('Unexpected end of file.'); + } +}; + +protobuf.TextReader = class { + + static open(data) { + if (data) { + const buffer = data instanceof Uint8Array ? 
data : data.peek(); + const decoder = text.Decoder.open(buffer); + let first = true; + for (let i = 0; i < 0x100; i++) { + const c = decoder.decode(); + if (c === undefined) { + if (i === 0) { + return null; + } + break; + } + if (c === '\0') { + return null; + } + const whitespace = c === ' ' || c === '\n' || c === '\r' || c === '\t'; + if (c < ' ' && !whitespace) { + return null; + } + if (first && !whitespace) { + first = false; + if (c === '#') { + let c; + do { + c = decoder.decode(); + } + while (c !== undefined && c !== '\n'); + if (c === undefined) { + break; + } + continue; + } + if (c === '[') { + continue; + } + if (c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z') { + continue; + } + return null; + } + } + return new protobuf.TextReader(buffer); + } + return null; + } + + constructor(buffer) { + this._decoder = text.Decoder.open(buffer); + this.reset(); + } + + signature() { + const tags = new Map(); + this.reset(); + try { + this.start(false); + while (!this.end()) { + const tag = this.tag(); + if (this.token() === '{') { + this.start(); + tags.set(tag, true); + while (!this.end()) { + const subtag = this.tag(); + tags.set(`${tag}.${subtag}`, true); + this.skip(); + this.match(','); + } + } else { + this.skip(); + tags.set(tag, true); + } + } + } catch (err) { + if (tags.has('[')) { + tags.clear(); + } + } + this.reset(); + return tags; + } + + reset() { + this._decoder.position = 0; + this._position = 0; + this._token = undefined; + this._depth = 0; + this._arrayDepth = 0; + this._token = ''; + this.next(); + } + + start() { + if (this._depth > 0) { + this.expect('{'); + } + this._depth++; + } + + end() { + if (this._depth <= 0) { + throw new protobuf.Error(`Invalid depth ${this.location()}`); + } + if (this._token === '}') { + this.expect('}'); + this.match(';'); + this._depth--; + return true; + } + if (this._token === undefined) { + if (this._depth !== 1) { + throw new protobuf.Error(`Unexpected end of input ${this.location()}`); + } + this._depth--; + return true; + } + return false; + } + + tag() { + const name = this._token; + this.next(); + if (this._token !== '[' && this._token !== '{') { + this.expect(':'); + } + return name; + } + + integer() { + const token = this._token; + const value = Number.parseInt(token, 10); + if (Number.isNaN(token - value)) { + throw new protobuf.Error(`Couldn't parse integer '${token}' ${this.location()}`); + } + this.next(); + this.semicolon(); + return value; + } + + double() { + let value; + let token = this._token; + switch (token) { + case 'nan': value = NaN; break; + case 'inf': value = Infinity; break; + case '-inf': value = -Infinity; break; + default: + if (token.endsWith('f')) { + token = token.substring(0, token.length - 1); + } + value = Number.parseFloat(token); + if (Number.isNaN(token - value)) { + throw new protobuf.Error(`Couldn't parse float '${token}' ${this.location()}`); + } + break; + } + this.next(); + this.semicolon(); + return value; + } + + float() { + return this.double(); + } + + uint32() { + return this.integer(); + } + + int32() { + return this.integer(); + } + + sint32() { + return this.integer(); + } + + int64() { + return base.Int64.create(this.integer()); + } + + uint64() { + return base.Uint64.create(this.integer()); + } + + sint64() { + return base.Int64.create(this.integer()); + } + + fixed64() { + return base.Uint64.create(this.integer()); + } + + sfixed64() { + return base.Int64.create(this.integer()); + } + + fixed32() { + return this.integer(); + } + + sfixed32() { + return this.integer(); + } + + 
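+ // Illustrative usage sketch (not part of this file's API surface): the
+ // primitives above combine into the usual tag/skip loop that generated
+ // readers emit. The sample message and field names below are hypothetical.
+ //
+ //   const data = new TextEncoder().encode('dim: 3 name: "x"');
+ //   const reader = protobuf.TextReader.open(data);
+ //   reader.start();                 // enter the implicit top-level message
+ //   while (!reader.end()) {
+ //       switch (reader.tag()) {     // reads the field name and the ':'
+ //           case 'dim': console.log(reader.int32()); break;   // 3
+ //           case 'name': console.log(reader.string()); break; // "x"
+ //           default: reader.skip(); break;                    // unknown fields
+ //       }
+ //   }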
string() { + const token = this._token; + if (token.length < 2) { + throw new protobuf.Error(`String is too short ${this.location()}`); + } + const quote = token.substring(0, 1); + if (quote !== "'" && quote !== '"') { + throw new protobuf.Error(`String is not in quotes ${this.location()}`); + } + if (quote !== token.substring(token.length - 1)) { + throw new protobuf.Error(`String quotes do not match ${this.location()}`); + } + const value = token.substring(1, token.length - 1); + this.next(); + this.semicolon(); + return value; + } + + bool() { + const token = this._token; + switch (token) { + case 'true': + case 'True': + case '1': + this.next(); + this.semicolon(); + return true; + case 'false': + case 'False': + case '0': + this.next(); + this.semicolon(); + return false; + default: + throw new protobuf.Error(`Couldn't parse boolean '${token}' ${this.location()}`); + } + } + + bytes() { + const token = this.string(); + const length = token.length; + const array = new Uint8Array(length); + for (let i = 0; i < length; i++) { + array[i] = token.charCodeAt(i); + } + return array; + } + + enum(type) { + const token = this._token; + let value; + if (Object.prototype.hasOwnProperty.call(type, token)) { + value = type[token]; + } else { + value = Number.parseInt(token, 10); + if (Number.isNaN(token - value)) { + throw new protobuf.Error(`Couldn't parse enum '${token === undefined ? '' : token}' ${this.location()}`); + } + } + this.next(); + this.semicolon(); + return value; + } + + any(type) { + this.start(); + const message = type(); + if (this._token.startsWith('[') && this._token.endsWith(']')) { + message.type_url = this._token.substring(1, this._token.length - 1).trim(); + this.next(); + this.match(':'); + message.value = this.read(); + this.match(';'); + if (!this.end()) { + this.expect('}'); + } + } else { + while (!this.end()) { + const tag = this.tag(); + switch (tag) { + case "type_url": + message.type_url = this.string(); + break; + case "value": + message.value = this.bytes(); + break; + default: + this.field(tag, message); + break; + } + } + } + return message; + } + + anyarray(obj, type) { + this.start(); + if (this._token.startsWith('[') && this._token.endsWith(']')) { + while (!this.end()) { + if (this._token.startsWith('[') && this._token.endsWith(']')) { + const message = type(); + message.type_url = this._token.substring(1, this._token.length - 1).trim(); + this.next(); + this.match(':'); + message.value = this.read(); + this.match(';'); + obj.push(message); + continue; + } + this.expect('['); + } + } else { + const message = type(); + while (!this.end()) { + const tag = this.tag(); + switch (tag) { + case "type_url": + message.type_url = this.string(); + break; + case "value": + message.value = this.bytes(); + break; + default: + this.field(tag, message); + break; + } + } + obj.push(message); + } + } + + entry(obj, key, value) { + this.start(); + let k; + let v; + while (!this.end()) { + const tag = this.tag(); + switch (tag) { + case 'key': + k = key(); + break; + case 'value': + v = value(); + break; + default: + throw new protobuf.Error(`Unsupported entry tag '${tag}'.`); + } + } + obj[k] = v; + } + + array(obj, item) { + if (this.first()) { + while (!this.last()) { + obj.push(item()); + switch (this._token) { + case ',': + this.next(); + break; + case ']': + break; + default: + this.handle(this._token); + break; + } + } + } else { + obj.push(item()); + } + } + + first() { + if (this.match('[')) { + this._arrayDepth++; + return true; + } + return false; + } + + last() { 
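+ // Counterpart to first(): consumes a closing ']' and leaves the array scope.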
+ if (this.match(']')) { + this._arrayDepth--; + return true; + } + return false; + } + + read() { + const start = this._position; + this.skip(); + const end = this._position; + const position = this._decoder.position; + this._decoder.position = start; + let content = ''; + while (this._decoder.position < end) { + content += this._decoder.decode(); + } + this._decoder.position = position; + return content; + } + + skip() { + switch (this._token) { + case '{': { + const depth = this._depth; + this.start(); + while (!this.end() || depth < this._depth) { + if (this._token === '{') { + this.start(); + } else if (this._token !== '}') { + this.next(); + this.match(';'); + } + } + break; + } + case '[': { + const depth = this._arrayDepth; + this.first(); + while (!this.last() || depth < this._arrayDepth) { + this.next(); + if (this._token === '[') { + this.first(); + } else if (this._token === undefined) { + this.handle(this._token); + } + } + break; + } + default: { + this.next(); + this.semicolon(); + break; + } + } + } + + handle(token) { + throw new protobuf.Error(`Unexpected token '${token}' ${this.location()}`); + } + + field(token /*, module */) { + throw new protobuf.Error(`Unsupported field '${token}' ${this.location()}`); + } + + token() { + return this._token; + } + + next() { + if (this._token === undefined) { + throw new protobuf.Error(`Unexpected end of input ${this.location()}`); + } + this._position = this._decoder.position; + let c = this._decoder.decode(); + for (;;) { + switch (c) { + case ' ': + case '\n': + case '\r': + case '\t': + this._position = this._decoder.position; + c = this._decoder.decode(); + continue; + case '#': + do { + c = this._decoder.decode(); + if (c === undefined) { + this._token = undefined; + return; + } + } + while (c !== '\n'); + this._position = this._decoder.position; + c = this._decoder.decode(); + continue; + default: + break; + } + break; + } + if (c === undefined) { + this._token = undefined; + return; + } + if (c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c === '_' || c === '$') { + let token = c; + let position = this._decoder.position; + for (;;) { + c = this._decoder.decode(); + if (c === undefined || c === '\n') { + break; + } + if (c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || c === '_' || c === '+' || c === '-') { + token += c; + position = this._decoder.position; + continue; + } + break; + } + this._decoder.position = position; + this._token = token; + return; + } + switch (c) { + case '{': + case '}': + case ':': + case ',': + case ']': + case ';': + this._token = c; + return; + case '[': { + let token = c; + let position = this._decoder.position; + let x = this._decoder.decode(); + if ((x !== undefined) && x >= 'a' && x <= 'z' || x >= 'A' && x <= 'Z') { + token += x; + for (;;) { + x = this._decoder.decode(); + if (x === undefined || x === '\n') { + break; + } + if (x >= 'a' && x <= 'z' || x >= 'A' && x <= 'Z' || x >= '0' && x <= '9' || x === '.' 
|| x === '/') { + token += x; + position = this._decoder.position; + continue; + } + if (x === ']') { + this._token = token + x; + return; + } + } + } + this._decoder.position = position; + this._token = '['; + return; + } + case '"': + case "'": { + const quote = c; + let content = c; + for (;;) { + c = this._decoder.decode(); + if (c === undefined || c === '\n') { + throw new protobuf.Error(`Unexpected end of string ${this.location()}`); + } + if (c == '\\') { + c = this._decoder.decode(); + if (c === undefined || c === '\n') { + throw new protobuf.Error(`Unexpected end of string ${this.location()}`); + } + switch (c) { + case '\\': c = '\\'; break; + case "'": c = "'"; break; + case '"': c = '"'; break; + case 'r': c = '\r'; break; + case 'n': c = '\n'; break; + case 't': c = '\t'; break; + case 'b': c = '\b'; break; + case 'x': + case 'X': { + let value = 0; + for (let xi = 0; xi < 2; xi++) { + let xd = this._decoder.decode(); + if (xd === undefined) { + throw new protobuf.Error(`Unexpected end of string ${this.location()}`); + } + xd = xd.charCodeAt(0); + xd = xd >= 65 && xd <= 70 ? xd - 55 : xd >= 97 && xd <= 102 ? xd - 87 : xd >= 48 && xd <= 57 ? xd - 48 : -1; + if (xd === -1) { + throw new protobuf.Error(`Unexpected hex digit '${xd}' in bytes string ${this.location()}`); + } + value = value << 4 | xd; + } + c = String.fromCharCode(value); + break; + } + default: { + if (c < '0' || c > '9') { + throw new protobuf.Error(`Unexpected character '${c}' in string ${this.location()}`); + } + let value = 0; + let od = c; + if (od < '0' || od > '9') { + throw new protobuf.Error(`Unexpected octal digit '${od}' in bytes string ${this.location()}`); + } + od = od.charCodeAt(0); + value = value << 3 | od - 48; + od = this._decoder.decode(); + if (od === undefined) { + throw new protobuf.Error(`Unexpected end of string ${this.location()}`); + } + if (od < '0' || od > '9') { + throw new protobuf.Error(`Unexpected octal digit '${od}' in bytes string ${this.location()}`); + } + od = od.charCodeAt(0); + value = value << 3 | od - 48; + od = this._decoder.decode(); + if (od === undefined) { + throw new protobuf.Error(`Unexpected end of string ${this.location()}`); + } + if (od < '0' || od > '9') { + throw new protobuf.Error(`Unexpected octal digit '${od}' in bytes string ${this.location()}`); + } + od = od.charCodeAt(0); + value = value << 3 | od - 48; + c = String.fromCharCode(value); + break; + } + } + content += c; + continue; + } else { + content += c; + if (c === quote) { + break; + } + } + } + this._token = content; + return; + } + case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': + case '-': case '+': case '.': { + let token = c; + let position = this._decoder.position; + for (;;) { + c = this._decoder.decode(); + if (c === undefined || c === '\n') { + break; + } + if ((c >= '0' && c <= '9') || c === '_' || c === '+' || c === '-' || c === '.' 
|| c === 'e' || c === 'E') { + token += c; + position = this._decoder.position; + continue; + } + break; + } + if (token === '-' && c === 'i' && this._decoder.decode() === 'n' && this._decoder.decode() === 'f') { + token = '-inf'; + position = this._decoder.position; + } + if (token === '-' || token === '+' || token === '.') { + throw new protobuf.Error(`Unexpected token '${token}' ${this.location()}`); + } + this._decoder.position = position; + this._token = token; + return; + } + default: { + throw new protobuf.Error(`Unexpected token '${c}' ${this.location()}`); + } + } + } + + expect(value) { + if (this._token !== value) { + throw new protobuf.Error(`Unexpected '${this._token}' instead of '${value}' ${this.location()}`); + } + this.next(); + } + + match(value) { + if (value == this._token) { + this.next(); + return true; + } + return false; + } + + location() { + let line = 1; + let column = 1; + this._decoder.position = 0; + let c; + do { + if (this._decoder.position === this._position) { + return `at ${line}:${column}.`; + } + c = this._decoder.decode(); + if (c === '\n') { + line++; + column = 1; + } else { + column++; + } + } + while (c !== undefined); + return `at ${line}:${column}.`; + } + + semicolon() { + if (this._arrayDepth === 0) { + this.match(';'); + } + } +}; + +protobuf.Int64 = base.Int64; +protobuf.Uint64 = base.Uint64; + +protobuf.LongBits = class { + + constructor(lo, hi) { + this.lo = lo >>> 0; + this.hi = hi >>> 0; + } + + zzDecode() { + const mask = -(this.lo & 1); + this.lo = ((this.lo >>> 1 | this.hi << 31) ^ mask) >>> 0; + this.hi = (this.hi >>> 1 ^ mask) >>> 0; + return this; + } + + toUint64() { + return new base.Uint64(this.lo, this.hi); + } + + toInt64() { + return new base.Int64(this.lo, this.hi); + } +}; + +protobuf.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Protocol Buffer Error'; + this.message = message; + } +}; + +export const BinaryReader = protobuf.BinaryReader; +export const TextReader = protobuf.TextReader; +export const Int64 = protobuf.Int64; +export const Uint64 = protobuf.Uint64; +export const get = protobuf.get; diff --git a/python.js b/python.js new file mode 100644 index 00000000000..95355f5bb16 --- /dev/null +++ b/python.js @@ -0,0 +1,7615 @@ + +// Experimental Python Execution + +const python = {}; + +python.Parser = class { + + constructor(text, file, debug) { + this._tokenizer = new python.Tokenizer(text, file); + this._debug = debug; + python.Parser._precedence = python.Parser._precedence || { + 'or': 2, 'and': 3, 'not' : 4, + 'in': 5, 'instanceof': 5, 'is': 5, '<': 5, '>': 5, '<=': 5, '>=': 5, '<>': 5, '==': 5, '!=': 5, + '|': 6, '^' : 7, '&' : 8, + '<<': 9, '>>': 9, '+': 10, '-': 10, '*': 11, '@': 11, '/': 11, '//': 11, '%': 11, + // '+': 12, '-': 12, + '~': 13, '**': 14 + }; + } + + parse() { + const node = this._node('program'); + node.body = []; + while (!this._tokenizer.match('eof')) { + const statement = this._statement(); + if (statement) { + node.body.push(statement); + continue; + } + if (this._tokenizer.eat('\n') || this._tokenizer.eat(';') || this._tokenizer.peek().type == 'eof') { + continue; + } + if (this._tokenizer.eat('indent') && this._tokenizer.peek().type == 'eof') { + continue; + } + throw new python.Error(`Unsupported statement ${this._tokenizer.location()}`); + } + return node; + } + + _suite() { + const node = this._node('block'); + node.statements = []; + let statement = null; + if (this._tokenizer.eat('\n')) { + if (this._tokenizer.eat('indent')) { + while 
(!this._tokenizer.eat('eof') && !this._tokenizer.eat('dedent')) { + if (this._tokenizer.eat(';')) { + continue; + } + statement = this._statement(); + if (statement) { + node.statements.push(statement); + continue; + } + if (this._tokenizer.eat('\n')) { + continue; + } + if (this._tokenizer.match('dedent') || this._tokenizer.match('eof')) { + continue; + } + throw new python.Error(`Empty statement ${this._tokenizer.location()}`); + } + } + } else if (!this._tokenizer.eat('eof')) { + while (!this._tokenizer.match('\n') && !this._tokenizer.match('eof') && !this._tokenizer.match('dedent')) { + if (this._tokenizer.eat(';')) { + continue; + } + statement = this._statement(); + if (statement) { + node.statements.push(statement); + continue; + } + throw new python.Error(`Empty statement ${this._tokenizer.location()}`); + } + this._tokenizer.eat('\n'); + } + + return node; + } + + _statement() { + let node = this._eat('id', 'break'); + if (node) { + return node; + } + node = this._eat('id', 'continue'); + if (node) { + return node; + } + node = this._eat('id', 'return'); + if (node) { + node.expression = this._expression(-1, [], true); + return node; + } + node = this._eat('id', 'raise'); + if (node) { + node.exception = this._expression(-1, [ 'from' ]); + if (this._tokenizer.eat('id', 'from')) { + node.from = this._expression(); + } else if (this._tokenizer.eat(',')) { + node.exception = [ node.exception ]; + node.exception.push(this._expression()); + if (this._tokenizer.eat(',')) { + node.exception.push(this._expression()); + } + } + return node; + } + node = this._eat('id', 'assert'); + if (node) { + node.condition = this._expression(-1, [ ',' ]); + if (this._tokenizer.eat(',')) { + node.message = this._expression(); + } + return node; + } + node = this._eat('id', 'exec'); + if (node) { + node.variable = this._expression(-1, [ 'in' ]); + if (this._tokenizer.eat('in')) { + do { + node.target = node.target || []; + node.target.push(this._expression(-1, [ 'in' ], false)); + } + while (this._tokenizer.eat(',')); + } + return node; + } + + node = this._eat('id', 'global'); + if (node) { + node.names = []; + do { + node.names.push(this._name(true).value); + } + while (this._tokenizer.eat(',')); + return node; + } + node = this._eat('id', 'nonlocal'); + if (node) { + node.names = []; + do { + node.names.push(this._name(true).value); + } + while (this._tokenizer.eat(',')); + return node; + } + node = this._eat('id', 'import'); + if (node) { + node.names = []; + do { + const alias = this._node('alias'); + alias.name = this._dottedName(); + if (this._tokenizer.eat('id', 'as')) { + alias.asname = this._name(true).value; + } + node.names.push(alias); + } + while (this._tokenizer.eat(',')); + return node; + } + node = this._eat('id', 'from'); + if (node) { + node.type = 'import_from'; + node.level = 0; + const dots = this._tokenizer.peek(); + if (dots && Array.from(dots.type).every((c) => c == '.')) { + this._eat(dots.type); + node.level = Array.from(dots.type).length; + } + node.module = this._dottedName(); + this._tokenizer.expect('id', 'import'); + node.names = []; + const close = this._tokenizer.eat('('); + do { + const alias = this._node('alias'); + alias.name = this._name(true).value; + if (this._tokenizer.eat('id', 'as')) { + alias.asname = this._name(true).value; + } + node.names.push(alias); + } + while (this._tokenizer.eat(',')); + if (close) { + this._tokenizer.expect(')'); + } + return node; + } + + let decorator_list = this._decorator(); + + node = this._eat('id', 'class'); + if (node) { + 
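+ // Class definition. For a hypothetical "class Foo(Bar): pass" this branch
+ // yields roughly { type: 'class', name: 'Foo', bases: [ <id 'Bar'> ], body: <suite> }
+ // (shape inferred from the code below, not from upstream documentation).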
node.name = this._name(true).value; + if (decorator_list) { + node.decorator_list = Array.from(decorator_list); + decorator_list = null; + } + node.bases = this._tokenizer.peek().type === '(' ? this._arguments() : []; + this._tokenizer.expect(':'); + node.body = this._suite(); + return node; + } + + const async = this._eat('id', 'async'); + if (async && + !this._tokenizer.match('id', 'def') && + !this._tokenizer.match('id', 'with') && + !this._tokenizer.match('id', 'for')) { + throw new python.Error(`Expected 'def', 'with' or 'for' ${this._tokenizer.location()}`); + } + + node = this._eat('id', 'def'); + if (node) { + if (async) { + node.async = async; + } + node.name = this._name(true).value; + if (decorator_list) { + node.decorator_list = Array.from(decorator_list); + decorator_list = null; + } + this._tokenizer.expect('('); + node.parameters = this._parameters(')'); + if (this._tokenizer.eat('->')) { + node.returnType = this._type(); + } + this._tokenizer.expect(':'); + node.body = this._suite(); + return node; + } + + if (decorator_list && decorator_list.length > 0) { + throw new python.Error('Unexpected decorator.'); + } + + node = this._eat('id', 'del'); + if (node) { + node.expression = this._expression(-1, [], true); + return node; + } + node = this._eat('id', 'print'); + if (node) { + node.expression = this._expression(-1, [], true); + return node; + } + node = this._eat('id', 'if'); + if (node) { + node.condition = this._expression(); + this._tokenizer.expect(':'); + node.then = this._suite(); + let current = node; + this._tokenizer.eat('\n'); + while (this._tokenizer.eat('id', 'elif')) { + current.else = this._node('if'); + current = current.else; + current.condition = this._expression(); + this._tokenizer.expect(':'); + current.then = this._suite(); + this._tokenizer.eat('\n'); + } + if (this._tokenizer.eat('id', 'else')) { + this._tokenizer.expect(':'); + current.else = this._suite(); + } + return node; + } + node = this._eat('id', 'while'); + if (node) { + node.condition = this._expression(); + this._tokenizer.expect(':'); + node.body = this._suite(); + if (this._tokenizer.eat('id', 'else')) { + this._tokenizer.expect(':'); + node.else = this._suite(); + } + return node; + } + node = this._eat('id', 'pass'); + if (node) { + return node; + } + node = this._eat('id', 'for'); + if (node) { + node.variable = []; + node.variable.push(this._expression(-1, [ 'in' ])); + while (this._tokenizer.eat(',')) { + if (this._tokenizer.match('id', 'in')) { + node.variable.push({}); + break; + } + node.variable.push(this._expression(-1, [ 'in' ])); + } + this._tokenizer.expect('id', 'in'); + node.target = []; + node.target.push(this._expression()); + while (this._tokenizer.eat(',')) { + if (this._tokenizer.match(':')) { + node.target.push({}); + break; + } + node.target.push(this._expression(-1, [ 'in' ])); + } + this._tokenizer.expect(':'); + node.body = this._suite(); + if (this._tokenizer.eat('id', 'else')) { + this._tokenizer.expect(':'); + node.else = this._suite(); + } + return node; + } + node = this._eat('id', 'with'); + if (node) { + if (async) { + node.async = async; + } + node.item = []; + do { + const item = this._node(); + item.type = 'with_item'; + item.expression = this._expression(); + if (this._tokenizer.eat('id', 'as')) { + item.variable = this._expression(); + } + node.item.push(item); + } + while (this._tokenizer.eat(',')); + this._tokenizer.expect(':'); + node.body = this._suite(); + return node; + } + node = this._eat('id', 'try'); + if (node) { + 
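+ // try statement: parse the body, then any except clauses (each with an
+ // optional 'as' variable), then the optional else and finally suites.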
this._tokenizer.expect(':'); + node.body = this._suite(); + node.except = []; + while (this._tokenizer.match('id', 'except')) { + const except = this._node('except'); + this._tokenizer.expect('id', 'except'); + except.clause = []; + except.clause.push(this._expression()); + while (this._tokenizer.eat(',')) { + if (this._tokenizer.match(':') || this._tokenizer.match('as')) { + except.clause.push({}); + break; + } + except.clause.push(this._expression()); + } + if (this._tokenizer.eat('id', 'as')) { + except.variable = this._expression(); + } + this._tokenizer.expect(':'); + except.body = this._suite(); + node.except.push(except); + } + if (this._tokenizer.match('id', 'else')) { + node.else = this._node('else'); + this._tokenizer.expect('id', 'else'); + this._tokenizer.expect(':'); + node.else.body = this._suite(); + } + if (this._tokenizer.match('id', 'finally')) { + node.finally = this._node('finally'); + this._tokenizer.expect('id', 'finally'); + this._tokenizer.expect(':'); + node.finally.body = this._suite(); + } + return node; + } + + const expression = this._expression(-1, [], true); + if (expression) { + if (expression.type == 'id' && this._tokenizer.eat(':')) { + node = this._node('var'); + node.name = expression.value; + node.location = expression.location; + node.variableType = this._expression(-1, [ '=' ]); + if (this._tokenizer.eat('=')) { + node.initializer = this._expression(); + } + return node; + } + switch (expression.type) { + case '=': + case ':=': + case '==': + case '!=': + case '+=': + case '-=': + case '*=': + case '@=': + case '/=': + case '//=': + case '**=': + case '&=': + case '|=': + case '%=': + case '>>=': + case '<<=': + case '>>': + case '<<': + case '>=': + case '<=': + case '<': + case '>': + case '%': + case '^=': + case '...': + case 'call': + case 'assert': + case 'raise': + case 'string': + case 'list': + case 'var': + case '.': + case '[]': + case 'yield': + case '+': + case '-': + case '*': + case '**': + case '@': + case '/': + case '//': + case '~': + case '&': + case '^': + case '|': + case 'not': + case 'id': + case 'number': + case 'in': + case 'and': + case 'or': + case 'if': + case 'for': + case 'tuple': + case 'lambda': + case 'await': + return expression; + default: + throw new python.Error(`Unhandled expression ${this._tokenizer.location()}`); + } + } + + return null; + } + + _expression(minPrecedence, terminal, tuple) { + minPrecedence = minPrecedence || -1; + const terminalSet = new Set(terminal); + const stack = []; + for (;;) { + let node = this._node(); + const token = this._tokenizer.peek(); + if (stack.length == 1 && terminalSet.has(token.value)) { + break; + } + const precedence = python.Parser._precedence[token.value]; + if (precedence) { + if (precedence >= minPrecedence) { + this._tokenizer.read(); + node.type = token.value; + if (token.type == 'id' && (token.value === 'in' || token.value === 'not')) { + if (token.value === 'in') { + node.type = 'in'; + } else if (this._tokenizer.eat('id', 'in')) { + node.type = 'not in'; + } else { + node.type = 'not'; + node.expression = this._expression(precedence, terminal, tuple === false ? false : true); + stack.push(node); + continue; + } + } else if (token.value == '~') { + node.type = '~'; + node.expression = this._expression(precedence, terminal, tuple === false ? 
false : true); + stack.push(node); + continue; + } else if (token.type == 'id' && token.value == 'is') { + if (this._tokenizer.eat('id', 'not')) { + node.type = 'is not'; + } + } + if (stack.length > 0) { + node.op = node.type; + node.type = 'binary'; + node.left = stack.pop(); + node.right = this._expression(precedence, terminal, tuple === true ? true : false); + } else { + node.op = node.type; + node.type = 'unary'; + node.operand = this._expression(precedence, terminal, tuple === true ? true : false); + } + stack.push(node); + continue; + } + } + if (this._tokenizer.eat(':=')) { + node.type = ':='; + node.target = stack.pop(); + node.expression = this._expression(-1, terminal, tuple === false ? false : true); + stack.push(node); + continue; + } + if (this._tokenizer.eat('=')) { + node.type = '='; + node.target = stack.pop(); + node.expression = this._expression(-1, terminal, tuple === false ? false : true); + stack.push(node); + continue; + } + switch (token.type) { + case '-=': + case '**=': + case '*=': + case '//=': + case '/=': + case '&=': + case '%=': + case '^=': + case '+=': + case '<<=': + case '>>=': + case '|=': + case '@=': + node = this._node(token.type); + this._tokenizer.expect(token.type); + node.target = stack.pop(); + node.expression = this._expression(-1, terminal, true); + stack.push(node); + continue; + default: + break; + } + node = this._eat('id', 'if'); + if (node) { + node.then = stack.pop(); + node.condition = this._expression(); + this._tokenizer.expect('id', 'else'); + node.else = this._expression(); + stack.push(node); + continue; + } + while (this._tokenizer.match('id', 'for') || this._tokenizer.match('id', 'async')) { + const async = this._eat('id', 'async'); + if (async && !this._tokenizer.match('id', 'for')) { + throw new python.Error(`Expected 'for' ${this._tokenizer.location()}`); + } + node = this._eat('id', 'for'); + if (node) { + if (async) { + node.async = async; + } + node.expression = stack.pop(); + node.variable = this._expression(-1, [ 'in' ], true); + this._tokenizer.expect('id', 'in'); + node.target = this._expression(-1, [ 'for', 'if' ], true); + while (this._tokenizer.eat('id', 'if')) { + node.condition = node.condition || []; + node.condition.push(this._expression(-1, [ 'for', 'if' ])); + } + stack.push(node); + } + } + node = this._eat('id', 'lambda'); + if (node) { + node.parameters = this._parameters(':'); + node.body = this._expression(-1, terminal, false); + stack.push(node); + continue; + } + node = this._eat('id', 'yield'); + if (node) { + if (this._tokenizer.eat('id', 'from')) { + node.from = this._expression(-1, [], true); + } else { + node.expression = []; + do { + node.expression.push(this._expression(-1, [], false)); + } + while (this._tokenizer.eat(',')); + } + stack.push(node); + continue; + } + node = this._eat('id', 'await'); + if (node) { + node.expression = this._expression(minPrecedence, terminal, tuple); + stack.push(node); + continue; + } + node = this._eat('.'); + if (node) { + this._tokenizer.eat('\n'); + node.target = stack.pop(); + node.member = this._name(); + stack.push(node); + continue; + } + if (this._tokenizer.peek().type === '(') { + if (stack.length == 0) { + node = this._node('tuple'); + const args = this._arguments(); + if (args.length == 1) { + stack.push(args[0]); + } else { + node.value = args; + stack.push(node); + } + } else { + node = this._node('call'); + node.target = stack.pop(); + node.args = this._arguments(); + stack.push(node); + } + continue; + } + if (this._tokenizer.peek().type === '[') { 
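+ // '[' opens a list literal when the stack is empty; otherwise it is a
+ // subscript or slice applied to the expression parsed so far.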
+ if (stack.length == 0) { + stack.push(this._expressions()); + } else { + node = this._node('[]'); + node.target = stack.pop(); + node.arguments = this._slice(); + stack.push(node); + } + continue; + } + if (this._tokenizer.peek().type == '{') { + stack.push(this._dictOrSetMaker()); + continue; + } + node = this._node(); + const literal = this._literal(); + if (literal) { + if (stack.length > 0 && literal.type == 'number' && + (literal.value.startsWith('-') || literal.value.startsWith('+'))) { + node.type = literal.value.substring(0, 1); + literal.value = literal.value.substring(1); + node.left = stack.pop(); + node.right = literal; + stack.push(node); + } else if (stack.length == 1 && literal.type == 'string' && stack[0].type == 'string') { + stack[0].value += literal.value; + } else { + if (literal.type === 'number') { + switch (literal.value) { + case 'inf': literal.value = Infinity; break; + case '-inf': literal.value = -Infinity; break; + default: break; + } + } + stack.push(literal); + } + continue; + } + if (this._tokenizer.peek().keyword) { + break; + } + node = this._eat('...'); + if (node) { + stack.push(node); + continue; + } + const identifier = this._name(); + if (identifier) { + stack.push(identifier); + continue; + } + + if (tuple === true && stack.length == 1 && this._tokenizer.eat(',')) { + if (stack[0].type === 'tuple') { + [node] = stack; + } else { + node = this._node('tuple'); + node.value = [ stack.pop() ]; + stack.push(node); + } + // foo, bar, = + if (this._tokenizer.peek().type === '=') { + continue; + } + if (!this._tokenizer.match('=') && !terminalSet.has(this._tokenizer.peek().value)) { + const nextTerminal = terminal.slice(0).concat([ ',', '=' ]); + const expression = this._expression(minPrecedence, nextTerminal, tuple); + if (expression) { + node.value.push(expression); + continue; + } + } + break; + } + break; + } + + if (stack.length == 1) { + return stack.pop(); + } + if (stack.length != 0) { + throw new python.Error(`Unexpected expression ${this._tokenizer.location()}`); + } + return null; + } + + _decorator() { + let list = null; + while (this._tokenizer.eat('@')) { + const node = this._node('decorator'); + node.value = this._expression(); + if (!node.value || (node.value.type !== 'call' && node.value.type !== 'id' && node.value.type !== '.')) { + throw new python.Error(`Invalid decorator ${this._tokenizer.location()}`); + } + this._tokenizer.eat('\n'); + list = list !== null ?
list : []; + list.push(node); + } + return list; + } + + _dictOrSetMaker() { + const list = []; + this._tokenizer.expect('{'); + let dict = true; + while (!this._tokenizer.eat('}')) { + const item = this._expression(-1, [], false); + if (item == null) { + throw new python.Error(`Expected expression ${this._tokenizer.location()}`); + } + if (!this._tokenizer.eat(':')) { + dict = false; + } + if (dict) { + const value = this._expression(-1, [], false); + if (value == null) { + throw new python.Error(`Expected expression ${this._tokenizer.location()}`); + } + list.push({ type: 'pair', key: item, value: value }); + } else { + list.push(item); + } + this._tokenizer.eat(','); + this._tokenizer.eat('\n'); + if (this._tokenizer.eat('}')) { + break; + } + } + if (dict) { + return { type: 'dict', value: list }; + } + return { type: 'set', value: list }; + } + + _expressions() { + const list = []; + this._tokenizer.expect('['); + while (!this._tokenizer.eat(']')) { + const expression = this._expression(); + if (expression == null) { + throw new python.Error(`Expected expression ${this._tokenizer.location()}`); + } + list.push(expression); + this._tokenizer.eat(','); + while (this._tokenizer.eat('\n')) { + // continue + } + if (this._tokenizer.eat(']')) { + break; + } + } + return { type: 'list', value: list }; + } + + _slice() { + let node = { type: '::' }; + let list = []; + const group = [ 'start', 'stop', 'step' ]; + this._tokenizer.expect('['); + while (!this._tokenizer.eat(']')) { + if (this._tokenizer.eat(':')) { + node[group.shift()] = { type: 'list', value: list }; + list = []; + continue; + } + if (this._tokenizer.eat(',')) { + // list.push({}); + continue; + } + if (this._tokenizer.peek().type != ']') { + const expression = this._expression(); + if (expression == null) { + throw new python.Error(`Expected expression ${this._tokenizer.location()}`); + } + list.push(expression); + } + } + if (list.length > 0) { + node[group.shift()] = { type: 'list', value: list }; + } + if (node.start && !node.stop && !node.step) { + node = node.start; + } + return node; + } + + _name(required) { + const token = this._tokenizer.peek(); + if (token.type == 'id' && !token.keyword) { + this._tokenizer.read(); + return token; + } + if (required) { + throw new python.Error(`Invalid syntax ${this._tokenizer.location()}`); + } + return null; + } + + _dottedName() { + const list = []; + do { + list.push(this._name(true).value); + } + while (this._tokenizer.eat('.')); + return list.join('.'); + } + + _literal() { + const token = this._tokenizer.peek(); + if (token.type == 'string' || token.type == 'number' || token.type == 'boolean') { + this._tokenizer.read(); + return token; + } + return null; + } + + _typeArguments() { + const list = []; + this._tokenizer.expect('['); + while (!this._tokenizer.eat(']')) { + const type = this._type(); + if (type == null) { + throw new python.Error(`Expected type ${this._tokenizer.location()}`); + } + list.push(type); + if (!this._tokenizer.eat(',')) { + this._tokenizer.expect(']'); + break; + } + } + return list; + } + + _type() { + const type = this._node(); + type.type = 'type'; + type.name = this._expression(-1, [ '[', '=' ]); + if (type.name) { + if (this._tokenizer.peek().value === '[') { + type.arguments = this._typeArguments(); + } + return type; + } + return null; + } + + _parameter(terminal) { + const node = this._node('parameter'); + if (this._tokenizer.eat('/')) { + node.name = '/'; + return node; + } + if (this._tokenizer.eat('**')) { + node.parameterType = '**'; + } + 
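+ // '*' marks a positional varargs parameter ('**' above marks keyword
+ // varargs); a bare '/' earlier ends the positional-only section.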
if (this._tokenizer.eat('*')) { + node.parameterType = '*'; + } + const identifier = this._name(); + if (identifier !== null) { + node.name = identifier.value; + if (terminal !== ':' && this._tokenizer.eat(':')) { + node.parameterType = this._type(); + } + if (this._tokenizer.eat('=')) { + node.initializer = this._expression(); + } + return node; + } + return null; + } + + _parameters(terminal) { + const list = []; + while (!this._tokenizer.eat(terminal)) { + this._tokenizer.eat('\n'); + if (this._tokenizer.eat('(')) { + list.push(this._parameters(')')); + } else { + list.push(this._parameter(terminal)); + } + this._tokenizer.eat('\n'); + if (!this._tokenizer.eat(',')) { + this._tokenizer.expect(terminal); + break; + } + } + return list; + } + + _arguments() { + const list = []; + this._tokenizer.expect('('); + while (!this._tokenizer.eat(')')) { + if (this._tokenizer.eat('\n')) { + continue; + } + const expression = this._expression(-1, [], false); + if (expression == null) { + throw new python.Error(`Expected expression ${this._tokenizer.location()}`); + } + list.push(expression); + if (!this._tokenizer.eat(',')) { + this._tokenizer.eat('\n'); + this._tokenizer.expect(')'); + break; + } + } + return list; + } + + _node(type) { + const node = {}; + node.location = this._tokenizer.location(); + if (type) { + node.type = type; + } + return node; + } + + _eat(type, value) { + if (this._tokenizer.match(type, value)) { + const node = this._node(type === 'id' ? value : type); + this._tokenizer.expect(type, value); + return node; + } + return null; + } +}; + +python.Tokenizer = class { + + constructor(text, file) { + this._text = text; + this._file = file; + this._position = 0; + this._lineStart = 0; + this._line = 0; + this._token = { type: '', value: '' }; + this._brackets = 0; + this._indentation = []; + this._outdent = 0; + if (!python.Tokenizer._whitespace) { + python.Tokenizer._whitespace = new RegExp('[\u1680\u180e\u2000-\u200a\u202f\u205f\u3000\ufeff]'); + const identifierStartChars = 
'\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\
ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc'; + const identifierChars = '\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u0620-\u0649\u0672-\u06d3\u06e7-\u06e8\u06fb-\u06fc\u0730-\u074a\u0800-\u0814\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0840-\u0857\u08e4-\u08fe\u0900-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09d7\u09df-\u09e0\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5f-\u0b60\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62-\u0c63\u0c66-\u0c6f\u0c82\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2-\u0ce3\u0ce6-\u0cef\u0d02\u0d03\u0d46-\u0d48\u0d57\u0d62-\u0d63\u0d66-\u0d6f\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e34-\u0e3a\u0e40-\u0e45\u0e50-\u0e59\u0eb4-\u0eb9\u0ec8-\u0ecd\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f41-\u0f47\u0f71-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1029\u1040-\u1049\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f-\u109d\u135d-\u135f\u170e-\u1710\u1720-\u1730\u1740-\u1750\u1772\u1773\u1780-\u17b2\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1920-\u192b\u1930-\u193b\u1951-\u196d\u19b0-\u19c0\u19c8-\u19c9\u19d0-\u19d9\u1a00-\u1a15\u1a20-\u1a53\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1b46-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1bb0-\u1bb9\u1be6-\u1bf3\u1c00-\u1c22\u1c40-\u1c49\u1c5b-\u1c7d\u1cd0-\u1cd2\u1d00-\u1dbe\u1e01-\u1f15\u200c\u200d\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2d81-\u2d96\u2de0-\u2dff\u3021-\u3028\u3099\u309a\ua640-\ua66d\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua7f8-\ua800\ua806\ua80b\ua823-\ua827\ua880-\ua881\ua8b4-\ua8c4\ua8d0-\ua8d9\ua8f3-\ua8f7\ua900-\ua909\ua926-\ua92d\ua930-\ua945\ua980-\ua983\ua9b3-\ua9c0\uaa00-\uaa27\uaa40-\uaa41\uaa4c-\uaa4d\uaa50-\uaa59\uaa7b\uaae0-\uaae9\uaaf2-\uaaf3\uabc0-\uabe1\uabec\uabed\uabf0-\uabf9\ufb20-\ufb28\ufe00-\ufe0f\ufe20-\ufe26\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f'; + python.Tokenizer._identifierStart = new RegExp(`[${identifierStartChars}]`); + /* eslint-disable no-misleading-character-class */ + python.Tokenizer._identifierChar = new RegExp(`[${identifierStartChars}${identifierChars}]`); + /* eslint-enable no-misleading-character-class */ + } + } + + peek() { + if (!this._cache) { + this._tokenize(); + this._cache = true; + } + return this._token; + } + + read() { + if (!this._cache) { + this._tokenize(); + } + const next = this._position + this._token.value.length; + while (this._position < next) { + if (python.Tokenizer._isNewline(this._get(this._position))) { + this._position = this._newLine(this._position); + this._lineStart = this._position; 
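+                    // _newLine() has already stepped past a two-character '\r\n'
+                    // or '\n\r' pair, so a single increment counts one logical line.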
+ this._line++; + } else { + this._position++; + } + } + this._cache = false; + return this._token; + } + + match(type, value) { + const token = this.peek(); + if (token.type === type && (!value || token.value === value)) { + return true; + } + return false; + } + + eat(type, value) { + const token = this.peek(); + if (token.type === type && (!value || token.value === value)) { + this.read(); + return true; + } + return false; + } + + expect(type, value) { + const token = this.peek(); + if (token.type !== type) { + throw new python.Error(`Unexpected '${token.value}' instead of '${type}' ${this.location()}`); + } + if (value && token.value !== value) { + throw new python.Error(`Unexpected '${token.value}' instead of '${value}' ${this.location()}`); + } + this.read(); + } + + location() { + const line = this._line + 1; + const column = this._position - this._lineStart + 1; + return `at ${this._file}:${line}:${column}.`; + } + + static _isSpace(c) { + switch (c) { + case ' ': + case '\t': + case '\v': // 11 + case '\f': // 12 + case '\xA0': // 160 + return true; + default: + if (c.charCodeAt(0) >= 0x1680) { + return python.Tokenizer._whitespace.test(c); + } + return false; + } + } + + static _isNewline(c) { + switch (c) { + case '\n': + case '\r': + case '\u2028': // 8232 + case '\u2029': // 8233 + return true; + default: + return false; + } + } + + static _isIdentifierStartChar(c) { + if (c < 'A') { + return c === '$'; + } + if (c <= 'Z') { + return true; + } + if (c < 'a') { + return c === '_'; + } + if (c <= 'z') { + return true; + } + const code = c.charCodeAt(0); + if (code >= 0xAA) { + return python.Tokenizer._identifierStart.test(c); + } + return false; + } + + static _isIdentifierChar(c) { + if (c < '0') { + return c === '$'; + } + if (c <= '9') { + return true; + } + if (c < 'A') { + return false; + } + if (c <= 'Z') { + return true; + } + if (c < 'a') { + return c === '_'; + } + if (c <= 'z') { + return true; + } + const code = c.charCodeAt(0); + if (code >= 0xAA) { + return python.Tokenizer._identifierChar.test(c); + } + return false; + } + + _get(position) { + return position >= this._text.length ? 
'\0' : this._text[position]; + } + + _skipLine() { + while (this._position < this._text.length) { + if (python.Tokenizer._isNewline(this._get(this._position))) { + break; + } + this._position++; + } + } + + _skipWhitespace() { + while (this._position < this._text.length) { + const c = this._text[this._position]; + if (c == '#') { + this._skipLine(); + } else if (python.Tokenizer._isSpace(c)) { + this._position++; + } else if (c == '\\') { + // Explicit Line Continuation + this._position++; + if (python.Tokenizer._isNewline(this._get(this._position))) { + this._position = this._newLine(this._position); + this._lineStart = this._position; + this._line++; + } else { + throw new python.Error(`Unexpected '${this._text[this._position]}' after line continuation ${this.location()}`); + } + } else if (this._brackets > 0 && python.Tokenizer._isNewline(c)) { + // Implicit Line Continuation + this._position = this._newLine(this._position); + this._lineStart = this._position; + this._line++; + } else { + break; + } + } + } + + _newLine(position) { + if ((this._get(position) === '\n' && this._get(position + 1) === '\r') || + (this._get(position) === '\r' && this._get(position + 1) === '\n')) { + return position + 2; + } + return position + 1; + } + + _tokenize() { + if (this._token.type !== '\n') { + this._skipWhitespace(); + } + if (this._token.type === 'dedent') { + this._indentation.pop(); + this._outdent--; + if (this._outdent > 0) { + this._token = { type: 'dedent', value: '' }; + return; + } + } + if (this._token.type == '\n') { + let indent = ''; + let i = this._position; + while (i < this._text.length) { + const c = this._text[i]; + if (python.Tokenizer._isSpace(c)) { + indent += c; + i++; + } else if (python.Tokenizer._isNewline(c)) { + indent = ''; + i = this._newLine(i); + this._position = i; + this._lineStart = i; + this._line++; + } else if (c == '#') { + indent = ''; + while (i < this._text.length && !python.Tokenizer._isNewline(this._text[i])) { + i++; + } + continue; + } else { + break; + } + } + let type = null; + if (indent.length > 0) { + const current = this._indentation.length > 0 ? 
this._indentation[this._indentation.length - 1] : ''; + if (indent.length > current.length) { + type = 'indent'; + this._indentation.push(indent); + } else if (indent.length > 0 && indent.length < current.length) { + type = 'dedent'; + this._outdent = 0; + for (let j = this._indentation.length - 1; j >= 0 && indent.length < this._indentation[j].length; j--) { + this._outdent++; + } + } else { + this._position += indent.length; + } + } else if (i >= this._text.length) { + this._token = { type: 'eof', value: '' }; + return; + } else if (this._indentation.length > 0) { + type = 'dedent'; + this._outdent = this._indentation.length; + } + if (type === 'indent' || type === 'dedent') { + this._token = { type: type, value: indent }; + return; + } + } + if (this._position >= this._text.length) { + this._token = { type: 'eof', value: '' }; + return; + } + const c = this._get(this._position); + const string = this._string(); + if (string) { + this._token = string; + return; + } + switch (c) { + case '(': + case '[': + case '{': + this._brackets++; + this._token = { type: c, value: c }; + return; + case ')': + case ']': + case '}': + if (this._brackets === 0) { + throw new python.Error(`Unexpected '${c}' ${this.location()}`); + } + this._brackets--; + this._token = { type: c, value: c }; + return; + case ',': + case ';': + case '?': + this._token = { type: c, value: c }; + return; + default: { + const number = this._number(); + if (number) { + this._token = number; + return; + } + if (c === '.') { + let end = this._position + 1; + while (this._get(end) === '.') { + end++; + } + const text = this._text.substring(this._position, end); + this._token = { type: text, value: text }; + return; + } + const identifier = this._identifier(); + if (identifier) { + this._token = identifier; + return; + } + const operator = this._operator(); + if (operator) { + this._token = operator; + return; + } + break; + } + } + if (c === '.') { + this._token = { type: c, value: c }; + return; + } + if (c === '\\') { + this._token = { type: '\\', value: c }; + return; + } + if (python.Tokenizer._isNewline(c)) { + this._token = { type: '\n', value: this._text.substring(this._position, this._newLine(this._position)) }; + return; + } + throw new python.Error(`Unexpected token '${c}' ${this.location()}`); + } + + _number() { + const octal = (c) => c >= '0' && c <= '7' || c === '_'; + const binary = (c) => c === '0' || c === '1' || c === '_'; + const decimal = (c) => c >= '0' && c <= '9' || c === '_'; + const hex = (c) => decimal(c) || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') || c === '_'; + let c = this._get(this._position); + const sign = (c === '-' || c === '+') ? 
1 : 0; + let i = this._position + sign; + c = this._get(i); + if (c === '0') { + let radix = 0; + const n = this._get(i + 1); + if ((n === 'x' || n === 'X') && hex(this._get(i + 2))) { + i += 2; + while (hex(this._get(i))) { + i += 1; + } + if (this._get(i) === 'l' || this._get(i) === 'L') { + i += 1; + } + radix = 16; + } else if ((n === 'b' || n === 'B') && binary(this._get(i + 2))) { + i += 2; + while (binary(this._get(i))) { + i++; + } + radix = 2; + } else if ((n === 'o' || n === 'O') && octal(this._get(i + 2))) { + i += 2; + while (octal(this._get(i))) { + i++; + } + radix = 8; + } else if (n >= '0' && n <= '7') { + i++; + while (octal(this._get(i))) { + i += 1; + } + if (this._get(i) === 'l' || this._get(i) === 'L') { + i += 1; + } + radix = 8; + } + if (radix > 0 && this._get(i) !== '.') { + const radixText = this._text.substring(this._position, i); + const radixParseText = radixText.indexOf('_') !== -1 ? radixText.split('_').join('') : radixText; + if (!isNaN(parseInt(radixParseText, radix))) { + return { type: 'number', value: radixText }; + } + } + } + i = this._position + sign; + let isDecimal = false; + if (this._get(i) >= '1' && this._get(i) <= '9') { + while (decimal(this._get(i))) { + i++; + } + c = this._get(i).toLowerCase(); + isDecimal = c !== '.' && c !== 'e'; + } + if (this._get(i) === '0') { + i++; + c = this._get(i).toLowerCase(); + isDecimal = !decimal(c) && c !== '.' && c !== 'e' && c !== 'j'; + } + if (isDecimal) { + if (this._get(i) === 'j' || this._get(i) === 'J' || this._get(i) === 'l' || this._get(i) === 'L') { + return { 'type': 'number', value: this._text.substring(this._position, i + 1) }; + } + const intText = this._text.substring(this._position, i); + if (!isNaN(parseInt(intText, 10))) { + return { type: 'number', value: intText }; + } + } + i = this._position + sign; + if ((this._get(i) >= '0' && this._get(i) <= '9') || + (this._get(i) === '.' && this._get(i + 1) >= '0' && this._get(i + 1) <= '9')) { + while (decimal(this._get(i))) { + i++; + } + if (this._get(i) === '.') { + i++; + } + while (decimal(this._get(i))) { + i++; + } + if (i > (this._position + sign)) { + if (this._get(i) === 'e' || this._get(i) === 'E') { + i++; + if (this._get(i) == '-' || this._get(i) == '+') { + i++; + } + if (!decimal(this._get(i))) { + i = this._position; + } else { + while (decimal(this._get(i))) { + i++; + } + } + } else { + while (decimal(this._get(i))) { + i++; + } + } + } + if (i > (this._position + sign)) { + if (this._get(i) === 'j' || this._get(i) === 'J') { + return { type: 'number', value: this._text.substring(this._position, i + 1) }; + } + const floatText = this._text.substring(this._position, i); + const floatParseText = floatText.indexOf('_') != -1 ? 
floatText.split('_').join('') : floatText; + if (!isNaN(parseFloat(floatParseText))) { + return { type: 'number', value: floatText }; + } + } + } + return null; + } + + _identifier() { + let i = this._position; + if (python.Tokenizer._isIdentifierStartChar(this._get(i))) { + i++; + while (python.Tokenizer._isIdentifierChar(this._get(i))) { + i++; + } + } + if (i > this._position) { + const text = this._text.substring(this._position, i); + let keyword = false; + switch (text) { + case 'and': + case 'as': + case 'else': + case 'for': + case 'if': + case 'import': + case 'in': + case 'is': + case 'not': + case 'or': + keyword = true; + break; + default: + keyword = false; + break; + } + return { type: 'id', value: text, keyword: keyword }; + } + return null; + } + + _operator() { + let length = 0; + const c0 = this._get(this._position); + const c1 = this._get(this._position + 1); + const c2 = this._get(this._position + 2); + switch (c0) { + case '+': + case '&': + case '|': + case '^': + case '=': + case '!': + case '%': + case '~': + length = c1 === '=' ? 2 : 1; + break; + case '-': + length = c1 === '=' || c1 === '>' ? 2 : 1; + break; + case '*': + length = c1 === '*' ? (c2 === '=' ? 3 : 2) : (c1 === '=' ? 2 : 1); + break; + case '/': + length = c1 === '/' ? (c2 === '=' ? 3 : 2) : (c1 === '=' ? 2 : 1); + break; + case '<': + length = c1 === '>' ? 2 : (c1 === '<' ? (c2 === '=' ? 3 : 2) : (c1 === '=' ? 2 : 1)); + break; + case '>': + length = c1 === '>' ? (c2 === '=' ? 3 : 2) : (c1 === '=' ? 2 : 1); + break; + case '@': + length = c1 === '=' ? 2 : 1; + break; + case ':': + length = c1 === '=' ? 2 : 1; + break; + default: + return null; + } + const text = this._text.substring(this._position, this._position + length); + return { type: text, value: text }; + } + + _string() { + let i = this._position; + let prefix = -1; + if (this._get(i) === "'" || this._get(i) === '"') { + prefix = ''; + } else if (this._get(i + 1) === "'" || this._get(i + 1) === '"') { + const c = this._get(i); + switch (c.toLowerCase()) { + case 'b': + case 'f': + case 'r': + case 'u': + prefix = c; + break; + default: + break; + } + } else if (this._get(i + 2) === "'" || this._get(i + 2) === '"') { + const cc = this._text.substr(this._position, 2); + switch (cc.toLowerCase()) { + case 'br': + case 'fr': + case 'rb': + case 'rf': + case 'ur': + prefix = cc; + break; + default: + break; + } + } + if (prefix.length >= 0) { + i += prefix.length; + let quote = ''; + let count = 0; + const q0 = this._get(i); + const q1 = this._get(i + 1); + const q2 = this._get(i + 2); + switch (q0) { + case "'": + quote = q0; + count = (q1 === "'" && q2 === "'") ? 3 : 1; + break; + case '"': + quote = q0; + count = (q1 === '"' && q2 === '"') ? 
3 : 1; + break; + default: + throw new python.Error(`Unsupported string quote '${q0}'.`); + } + i += count; + if (count == 1) { + while (i < this._text.length) { + if (this._text[i] === quote) { + return { type: 'string', value: this._text.substring(this._position, i + 1) }; + } else if (this._text[i] === '\\' && + (this._get(i + 1) == quote || this._get(i + 1) == '\n' || this._get(i + 1) == '\\')) { + i += 2; + } else if (this._text[i] === '\r' || this._text[i] === '\n') { + break; + } else { + i++; + } + } + } else if (count == 3) { + while (i < this._text.length) { + if (this._get(i) === quote && this._get(i + 1) === quote && this._get(i + 2) === quote) { + return { type: 'string', value: this._text.substring(this._position, i + 3) }; + } else if (this._get(i) === '\\' && this._get(i + 1) === quote) { + i += 2; + continue; + } + i++; + } + } + } + i = this._position; + if (this._get(i) === '`') { + i++; + while (i < this._text.length) { + if (this._text[i] === '`') { + return { type: 'string', value: this._text.substring(this._position, i + 1) }; + } + i++; + } + } + return null; + } +}; + +python.Execution = class { + + constructor(sources) { + const self = this; + const execution = self; + this._sources = sources || new Map(); + this._events = new Map(); + this._utf8Decoder = new TextDecoder('utf-8'); + this._unresolved = new Map(); + const dict = class extends Map { + constructor(items) { + super(); + if (items) { + for (const [name, value] of items) { + this.__setitem__(name, value); + } + } + } + __contains__(key) { + return this.has(key); + } + __setitem__(key, value) { + this.set(key, value); + } + __getitem__(key) { + return this.get(key); + } + }; + this._modules = new dict(); + this._registry = new Map(); + const module = class { + constructor(name) { + this.__name__ = name; + } + }; + const builtins = this.register('builtins', new module('builtins')); + this._builtins = builtins; + this._registry.set('__builtin__', builtins); + this.registerType('builtins.type', class {}).__class__ = builtins.type; + this.registerType('builtins.module', module); + this.registerType('builtins.method', class {}); + this.registerType('builtins.function', class {}); + this.registerType('builtins.code', class {}); + this.import('builtins'); + this.registerType('builtins.builtin_function_or_method', class {}); + const typing = this.register('typing'); + this._typing = typing; + const operator = this.register('operator'); + this.register('_codecs'); + this.register('argparse'); + this.register('collections'); + this.register('copy_reg'); + this.register('cuml'); + this.register('gensim'); + this.register('io'); + this.register('joblib'); + const functools = this.register('functools'); + this.registerType('functools.partial', class {}); + const keras = this.register('keras'); + this.register('lightgbm'); + this.register('nolearn'); + const fastcore = this.register('fastcore'); + const fastai = this.register('fastai'); + const math = this.register('math'); + math.inf = Infinity; + const numpy = this.register('numpy'); + const pickle = this.register('pickle'); + const sklearn = this.register('sklearn'); + const torch = this.register('torch'); + const torchvision = this.register('torchvision'); + this.register('torch.storage'); + this.register('torch.nn.parameter'); + this.register('torch.ops'); + this.register('torch._ops'); + this.register('torch.ops.torchvision'); + this.register('torch.ops.torchaudio'); + this.register('torch.ops._caffe2'); + this.register('torchvision'); + 
this.register('__torch__'); + this.register('sys').modules = this._modules; + this.register('xgboost'); + this.registerType('builtins.dict', dict); + this.registerType('builtins.ellipsis', class {}); + this.registerType('builtins.cell', class {}); + this.registerType('builtins.list', class extends Array {}); + this.registerType('builtins.number', class {}); + this.registerFunction('builtins.__import__', function(name, globals, locals, fromlist, level) { + return execution.__import__(name, globals, locals, fromlist, level); + }); + this.registerFunction('builtins.bool', function(value) { + if (value) { + if (value.__bool__) { + return value.__bool__(); + } + if (value.__len__) { + return value.__len__() > 0; + } + } + return false; + }); + this.registerFunction('builtins.int', function(value) { + if (value) { + if (value.__int__) { + return value.__int__(); + } + if (Number.isInteger(value)) { + return value; + } + } + return NaN; + }); + this.registerFunction('builtins.float', function(value) { + if (value) { + if (value.__float__) { + return value.__float__(); + } + if (Number(value) === value) { + return value; + } + } + return NaN; + }); + this.registerFunction('builtins.str', function(value) { + if (value) { + if (value.__str__) { + return value.__str__(); + } + } + return JSON.stringify(value); + }); + this.registerType('builtins.NoneType', class {}); + this.registerType('builtins.object', class {}); + this.registerType('builtins.tuple', class extends Array { + constructor(items) { + super(items ? items.length : 0); + if (items) { + for (let i = 0; i < items.length; i++) { + this[i] = items[i]; + } + } + } + }); + this.registerType('builtins.staticmethod', class {}); + this.registerFunction('builtins.long', this.builtins.int); + this.registerFunction('builtins.print', function() {}); + this.registerFunction('builtins.unicode', function(/* value */) { + throw new python.Error("'builtins.unicode' not implemented."); + }); + this.registerType('builtins.Warning', class {}); + this.registerType('builtins.FutureWarning', class extends builtins.Warning {}); + this.registerType('builtins.BaseException', class {}); + this.registerType('builtins.Exception', class extends builtins.BaseException {}); + this.registerType('builtins.SyntaxError', class extends builtins.Exception {}); + this.registerType('typing._Final', class {}); + this.registerType('typing._SpecialForm', class extends typing._Final {}); + this.registerType('typing._BaseGenericAlias', class extends typing._Final {}); + this.registerType('typing._GenericAlias', class extends typing._BaseGenericAlias {}); + this.registerType('typing._SpecialGenericAlias', class extends typing._BaseGenericAlias {}); + this.registerType('typing._TupleType', class extends typing._SpecialGenericAlias {}); + this.registerType('typing._CallableType', class {}); + this.registerFunction('typing.cast', function() { + throw new python.Error("'typing.cast' not implemented."); + }); + typing.Any = Reflect.construct(typing._SpecialForm, []); + typing.Callable = Reflect.construct(typing._CallableType, []); + typing.Dict = Reflect.construct(typing._SpecialGenericAlias, []); + typing.List = Reflect.construct(typing._SpecialGenericAlias, []); + typing.Optional = Reflect.construct(typing._SpecialForm, []); + typing.OrderedDict = Reflect.construct(typing._SpecialGenericAlias, []); + typing.Sequence = Reflect.construct(typing._SpecialGenericAlias, []); + typing.Tuple = Reflect.construct(typing._TupleType, []); + typing.Union = Reflect.construct(typing._SpecialForm, []); 
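+        // The operator.* stubs below only need to resolve by name while executing
+        // pickled or scripted code; invoking them is unsupported, so each throws.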
+ this.registerFunction('operator.add', function() { + throw new python.Error("'operator.add' not implemented."); + }); + this.registerFunction('operator.eq', function() { + throw new python.Error("'operator.eq' not implemented."); + }); + this.registerFunction('operator.ge', function() { + throw new python.Error("'operator.ge' not implemented."); + }); + this.registerFunction('operator.gt', function() { + throw new python.Error("'operator.gt' not implemented."); + }); + this.registerFunction('operator.mul', function() { + throw new python.Error("'operator.mul' not implemented."); + }); + this.registerFunction('operator.mod', function() { + throw new python.Error("'operator.mod' not implemented."); + }); + this.registerFunction('operator.le', function() { + throw new python.Error("'operator.le' not implemented."); + }); + this.registerFunction('operator.lt', function() { + throw new python.Error("'operator.lt' not implemented."); + }); + this.registerFunction('operator.ne', function() { + throw new python.Error("'operator.ne' not implemented."); + }); + this.registerFunction('operator.floordiv', function() { + throw new python.Error("'operator.floordiv' not implemented."); + }); + this.registerFunction('operator.sub', function() { + throw new python.Error("'operator.sub' not implemented."); + }); + this.registerType('argparse.Namespace', class { + constructor(args) { + this.args = args; + } + }); + this.registerType('collections.deque', class extends Array { + constructor(iterable) { + super(); + if (Array.isArray(iterable)) { + for (const value of iterable) { + this.push(value); + } + } + } + }); + this.registerType('collections.OrderedDict', class extends dict {}); + this.registerType('cuml.common.array_descriptor.CumlArrayDescriptorMeta', class {}); + this.registerType('cuml.ensemble.randomforestclassifier.RandomForestClassifier', class {}); + this.registerType('cuml.raft.common.handle.Handle', class { + __setstate__(state) { + this._handle = state; + } + }); + this.registerType('dnnlib.tflib.network.Network', class {}); + this.registerType('dnnlib.util.EasyDict', class extends dict {}); + this.registerType('haiku._src.data_structures.FlatMapping', class { + constructor(dict) { + Object.assign(this, dict); + } + }); + this.registerType('haiku._src.data_structures.frozendict', class { + constructor(obj) { + Object.assign(this, obj); + } + }); + this.registerType('hmmlearn.hmm.MultinomialHMM', class { + __setstate__(state) { + Object.assign(this, state); + } + }); + this.registerType('hmmlearn.base.ConvergenceMonitor', class { + __setstate__(state) { + Object.assign(this, state); + } + }); + this.registerType('io.BytesIO', class { + constructor(buf, mode) { + this.mode = mode || 'r'; + this._buf = this.mode === 'w' ? null : buf; + this._point = 0; + } + seek(offset) { + this._point = offset; + } + read(size) { + const start = this._point; + this._point = size !== undefined ? 
start + size : this._buf.length; + return this._buf.subarray(start, this._point); + } + write(data) { + const src = this._buf || new Uint8Array(); + this._point = src.length + data.length; + this._buf = new Uint8Array(this._point); + this._buf.set(src, 0); + this._buf.set(data, src.length); + } + }); + this.registerType('numpy.dtype', class { + constructor(obj, align, copy) { + if (typeof obj === 'string' && (obj.startsWith('<') || obj.startsWith('>'))) { + this.byteorder = obj.substring(0, 1); + obj = obj.substring(1); + } else { + this.byteorder = '='; + } + switch (obj) { + case 'b1': case 'bool': this.itemsize = 1; this.kind = 'b'; break; + case 'i1': case 'int8': this.itemsize = 1; this.kind = 'i'; break; + case 'i2': case 'int16': this.itemsize = 2; this.kind = 'i'; break; + case 'i4': case 'int32': this.itemsize = 4; this.kind = 'i'; break; + case 'i8': case 'int64': case 'int': this.itemsize = 8; this.kind = 'i'; break; + case 'u1': case 'uint8': this.itemsize = 1; this.kind = 'u'; break; + case 'u2': case 'uint16': this.itemsize = 2; this.kind = 'u'; break; + case 'u4': case 'uint32': this.itemsize = 4; this.kind = 'u'; break; + case 'u8': case 'uint64': case 'uint': this.itemsize = 8; this.kind = 'u'; break; + case 'f2': case 'float16': this.itemsize = 2; this.kind = 'f'; break; + case 'f4': case 'float32': this.itemsize = 4; this.kind = 'f'; break; + case 'f8': case 'float64': case 'float': this.itemsize = 8; this.kind = 'f'; break; + case 'c8': case 'complex64': this.itemsize = 8; this.kind = 'c'; break; + case 'c16': case 'complex128': case 'complex': this.itemsize = 16; this.kind = 'c'; break; + case 'M8': case 'M': this.itemsize = 8; this.kind = 'M'; break; + default: + if (obj.startsWith('V')) { + this.itemsize = parseInt(obj.substring(1), 10); + this.kind = 'V'; + } else if (obj.startsWith('O')) { + this.itemsize = obj === 'O' ? 8 : parseInt(obj.substring(1), 10); + this.kind = 'O'; + } else if (obj.startsWith('S')) { + this.itemsize = parseInt(obj.substring(1), 10); + this.kind = 'S'; + } else if (obj.startsWith('U')) { // Unicode string + this.kind = 'U'; + this.itemsize = 4 * parseInt(obj.substring(1), 10); + } else { + throw new python.Error(`Unsupported dtype '${obj}'.`); + } + break; + } + if (align) { + this.align = align; + } + if (copy) { + this.copy = copy; + } + } + get str() { + return (this.byteorder === '=' ? '<' : this.byteorder) + this.kind + this.itemsize.toString(); + } + get name() { + switch (this.kind) { + case 'V': return `void${this.itemsize === 0 ? '' : (this.itemsize * 8)}`; + case 'S': return `bytes${this.itemsize === 0 ? '' : (this.itemsize * 8)}`; + case 'U': return `str${this.itemsize === 0 ? 
'' : (this.itemsize * 8)}`; + case 'M': return 'datetime64'; + case 'b': return 'bool'; + default: return this.__name__; + } + } + __setstate__(state) { + switch (state.length) { + case 8: + [ + this.version, this.byteorder, this.subarray, this.names, + this.fields, this.elsize, this.alignment, this.int_dtypeflags + ] = state; + break; + case 9: + [ + this.version, this.byteorder, this.subarray, this.names, + this.fields, this.elsize, this.alignment, this.int_dtypeflags, + this.metadata + ] = state; + break; + default: + throw new python.Error(`Unsupported numpy.dtype setstate length '${state.length}'.`); + } + } + get __name__() { + switch (this.kind) { + case 'b': + switch (this.itemsize) { + case 1: return 'boolean'; + default: throw new python.Error(`Unsupported boolean itemsize '${this.itemsize}'.`); + } + case 'i': + switch (this.itemsize) { + case 1: return 'int8'; + case 2: return 'int16'; + case 4: return 'int32'; + case 8: return 'int64'; + default: throw new python.Error(`Unsupported int itemsize '${this.itemsize}'.`); + } + case 'u': + switch (this.itemsize) { + case 1: return 'uint8'; + case 2: return 'uint16'; + case 4: return 'uint32'; + case 8: return 'uint64'; + default: throw new python.Error(`Unsupported uint itemsize '${this.itemsize}'.`); + } + case 'f': + switch (this.itemsize) { + case 2: return 'float16'; + case 4: return 'float32'; + case 8: return 'float64'; + default: throw new python.Error(`Unsupported float itemsize '${this.itemsize}'.`); + } + case 'c': + switch (this.itemsize) { + case 8: return 'complex64'; + case 16: return 'complex128'; + default: throw new python.Error(`Unsupported complex itemsize '${this.itemsize}'.`); + } + case 'S': + case 'U': + return 'string'; + case 'M': + return 'datetime'; + case 'O': + return 'object'; + case 'V': + return 'void'; + default: + throw new python.Error(`Unsupported dtype kind '${this.kind}'.`); + } + } + }); + this.registerType('numpy.generic', class {}); + this.registerType('numpy.inexact', class {}); + this.registerType('numpy.bool_', class extends numpy.generic {}); + this.registerType('numpy.number', class extends numpy.generic {}); + this.registerType('numpy.integer', class extends numpy.number {}); + this.registerType('numpy.floating', class extends numpy.inexact {}); + this.registerType('numpy.float16', class extends numpy.floating {}); + this.registerType('numpy.float32', class extends numpy.floating {}); + this.registerType('numpy.float64', class extends numpy.floating {}); + this.registerType('numpy.signedinteger', class extends numpy.integer {}); + this.registerType('numpy.int8', class extends numpy.signedinteger {}); + this.registerType('numpy.int16', class extends numpy.signedinteger {}); + this.registerType('numpy.int32', class extends numpy.signedinteger {}); + this.registerType('numpy.int64', class extends numpy.signedinteger {}); + this.registerType('numpy.unsignedinteger', class extends numpy.integer {}); + this.registerType('numpy.uint8', class extends numpy.unsignedinteger {}); + this.registerType('numpy.uint16', class extends numpy.unsignedinteger {}); + this.registerType('numpy.uint32', class extends numpy.unsignedinteger {}); + this.registerType('numpy.uint64', class extends numpy.unsignedinteger {}); + this.registerType('numpy.datetime64', class extends numpy.generic {}); + this.registerType('gensim.models.doc2vec.Doctag', class {}); + this.registerType('gensim.models.doc2vec.Doc2Vec', class {}); + this.registerType('gensim.models.doc2vec.Doc2VecTrainables', class {}); + 
this.registerType('gensim.models.doc2vec.Doc2VecVocab', class {}); + this.registerType('gensim.models.fasttext.FastText', class {}); + this.registerType('gensim.models.fasttext.FastTextTrainables', class {}); + this.registerType('gensim.models.fasttext.FastTextVocab', class {}); + this.registerType('gensim.models.fasttext.FastTextKeyedVectors', class {}); + this.registerType('gensim.models.keyedvectors.Doc2VecKeyedVectors', class {}); + this.registerType('gensim.models.keyedvectors.FastTextKeyedVectors', class {}); + this.registerType('gensim.models.keyedvectors.KeyedVectors', class {}); + this.registerType('gensim.models.keyedvectors.Vocab', class {}); + this.registerType('gensim.models.keyedvectors.Word2VecKeyedVectors', class {}); + this.registerType('gensim.models.phrases.Phrases', class {}); + this.registerType('gensim.models.tfidfmodel.TfidfModel', class {}); + this.registerType('gensim.models.word2vec.Vocab', class {}); + this.registerType('gensim.models.word2vec.Word2Vec', class {}); + this.registerType('gensim.models.word2vec.Word2VecTrainables', class {}); + this.registerType('gensim.models.word2vec.Word2VecVocab', class {}); + this.registerFunction('gensim.utils.call_on_class_only', function() { + throw new python.Error('This method should be called on a class object.'); + }); + this.registerType('google3.learning.deepmind.research.nbr.pbl_jax.clean_jaxline.utils.optimizers.ScaleByLarsState', class { + constructor(obj) { + Object.assign(this, obj); + } + }); + this.registerType('joblib.numpy_pickle.NumpyArrayWrapper', class { + constructor(/* subtype, shape, dtype */) { + } + __setstate__(state) { + this.subclass = state.subclass; + this.dtype = state.dtype; + this.shape = state.shape; + this.order = state.order; + this.allow_mmap = state.allow_mmap; + } + __read__(unpickler) { + if (this.dtype.__name__ == 'object') { + return unpickler.load(); + } + const size = this.dtype.itemsize * this.shape.reduce((a, b) => a * b, 1); + this.data = unpickler.read(size); + return execution.invoke(this.subclass, [ this.shape, this.dtype, this.data ]); + } + }); + this.registerType('keras.engine.sequential.Sequential', class {}); + this.registerType('lasagne.layers.conv.Conv2DLayer', class {}); + this.registerType('lasagne.layers.dense.DenseLayer', class {}); + this.registerType('lasagne.layers.input.InputLayer', class {}); + this.registerType('lasagne.layers.pool.MaxPool2DLayer', class {}); + this.registerType('lightgbm.sklearn.LGBMRegressor', class {}); + this.registerType('lightgbm.sklearn.LGBMClassifier', class {}); + this.registerType('lightgbm.basic.Booster', class { + constructor() { + this.average_output = false; + this.models = []; + this.loaded_parameter = ''; + } + __setstate__(state) { + if (typeof state.handle === 'string') { + this.LoadModelFromString(state.handle); + return; + } + Object.assign(this, state); + } + LoadModelFromString(model_str) { + const lines = model_str.split('\n'); + const signature = lines.shift() || '?'; + if (signature.trim() !== 'tree') { + throw new python.Error(`Invalid signature '${signature.trim()}'.`); + } + // GBDT::LoadModelFromString() in https://github.com/microsoft/LightGBM/blob/master/src/boosting/gbdt_model_text.cpp + const key_vals = new Map(); + while (lines.length > 0 && !lines[0].startsWith('Tree=')) { + const cur_line = lines.shift().trim(); + if (cur_line.length > 0) { + const strs = cur_line.split('='); + if (strs.length === 1) { + key_vals.set(strs[0], ''); + } else if (strs.length === 2) { + key_vals.set(strs[0], strs[1]); + } else if 
(strs.length > 2) { + if (strs[0] === "feature_names") { + key_vals.set(strs[0], cur_line.substring("feature_names=".length)); + } else if (strs[0] == 'monotone_constraints') { + key_vals.set(strs[0], cur_line.substring('monotone_constraints='.length)); + } else { + throw new python.Error(`Wrong line: ${cur_line.substring(0, Math.min(128, cur_line.length))}`); + } + } + } + } + const atoi = (key, value) => { + if (key_vals.has(key)) { + return parseInt(key_vals.get(key), 10); + } + if (value !== undefined) { + return value; + } + throw new python.Error(`Model file does not specify ${key}.`); + }; + const list = (key, size) => { + if (key_vals.has(key)) { + const value = key_vals.get(key).split(' '); + if (value.length !== size) { + throw new python.Error(`Wrong size of ${key}.`); + } + return value; + } + throw new python.Error(`Model file does not contain ${key}.`); + }; + this.version = key_vals.get('version') || ''; + this.num_class = atoi('num_class'); + this.num_tree_per_iteration = atoi('num_tree_per_iteration', this.num_class); + this.label_index = atoi('label_index'); + this.max_feature_idx = atoi('max_feature_idx'); + if (key_vals.has('average_output')) { + this.average_output = true; + } + this.feature_names = list('feature_names', this.max_feature_idx + 1); + this.feature_infos = list('feature_infos', this.max_feature_idx + 1); + if (key_vals.has('monotone_constraints')) { + this.monotone_constraints = list('monotone_constraints', this.max_feature_idx + 1); + } + if (key_vals.has('objective')) { + this.objective = key_vals.get('objective'); + } + let tree = null; + while (lines.length > 0) { + const text = lines.shift(); + const line = text.trim(); + if (line.length === 0) { + continue; + } + if (line.startsWith('Tree=')) { + tree = { index: parseInt(line.split('=').pop(), 10) }; + this.models.push(tree); + continue; + } + if (line === 'end of trees') { + break; + } + const param = line.split('='); + if (param.length !== 2) { + throw new python.Error(`Invalid property '${line}'.`); + } + const name = param[0].trim(); + const value = param[1].trim(); + tree[name] = value; + } + const ss = []; + let is_inparameter = false; + while (lines.length > 0) { + const text = lines.shift(); + const line = text.trim(); + if (line === 'parameters:') { + is_inparameter = true; + continue; + } else if (line === 'end of parameters') { + break; + } else if (is_inparameter) { + ss.push(line); + } + } + if (ss.length > 0) { + this.loaded_parameter = ss.join('\n'); + } + } + }); + this.registerFunction('megengine.functional.elemwise.clip', function() {}); + this.registerFunction('megengine.functional.elemwise.sqrt', function() {}); + this.registerFunction('megengine.functional.nn.conv2d', function() {}); + this.registerFunction('megengine.functional.nn.relu', function() {}); + this.registerFunction('megengine.functional.nn.sigmoid', function() {}); + this.registerFunction('megengine.functional.tensor.arange', function() {}); + this.registerFunction('megengine.functional.tensor.broadcast_to', function() {}); + this.registerFunction('megengine.functional.tensor.concat', function() {}); + this.registerFunction('megengine.functional.tensor.expand_dims', function() {}); + this.registerFunction('megengine.functional.tensor.flatten', function() {}); + this.registerFunction('megengine.functional.tensor.full', function() {}); + this.registerFunction('megengine.functional.tensor.reshape', function() {}); + this.registerFunction('megengine.functional.tensor.split', function() {}); + 
this.registerFunction('megengine.functional.tensor.stack', function() {}); + this.registerFunction('megengine.functional.tensor.transpose', function() {}); + this.registerFunction('megengine.functional.vision.interpolate', function() {}); + this.registerFunction('megengine.module.qat.module.QATModule._apply_fakequant_with_observer', function() {}); + this.registerType('megengine.core._imperative_rt.common.CompNode', class {}); + this.registerType('megengine.core._imperative_rt.ops.ElemwiseMultiType', class {}); + this.registerType('megengine.core._imperative_rt.ops.FakeQuant', class {}); + this.registerType('megengine.core._imperative_rt.ops.GetVarShape', class {}); + this.registerType('megengine.core._imperative_rt.ops.Resize', class {}); + this.registerType('megengine.core.ops._internal.param_defs.ConvolutionV0.Mode', class {}); + this.registerType('megengine.core.ops._internal.param_defs.Convolution.ComputeMode', class {}); + this.registerType('megengine.distributed.group.Group', class {}); + this.registerType('megengine.module.activation.ReLU', class {}); + this.registerType('megengine.module.activation.Softmax', class {}); + this.registerType('megengine.module.adaptive_pooling.AdaptiveAvgPool2d', class {}); + this.registerType('megengine.module.batchnorm.BatchNorm1d', class {}); + this.registerType('megengine.module.batchnorm.BatchNorm2d', class {}); + this.registerType('megengine.module.conv.Conv2d', class {}); + this.registerType('megengine.module.conv.ConvTranspose2d', class {}); + this.registerType('megengine.module.conv_bn.ConvBn2d', class {}); + this.registerType('megengine.module.dropout.Dropout', class {}); + this.registerType('megengine.module.identity.Identity', class {}); + this.registerType('megengine.module.linear.Linear', class {}); + this.registerType('megengine.module.module.Module', class {}); + this.registerType('megengine.module.normalization.InstanceNorm', class {}); + this.registerType('megengine.module.normalization.GroupNorm', class {}); + this.registerType('megengine.module.pooling.AvgPool2d', class {}); + this.registerType('megengine.module.pooling.MaxPool2d', class {}); + this.registerType('megengine.module.qat.concat.Concat', class {}); + this.registerType('megengine.module.qat.elemwise.Elemwise', class {}); + this.registerType('megengine.module.sequential.Sequential', class {}); + this.registerType('megengine.quantization.fake_quant.FakeQuantize', class {}); + this.registerType('megengine.quantization.utils.QParams', class {}); + this.registerType('megengine.quantization.utils.QuantMode', class {}); + this.registerType('megengine.quantization.observer.ExponentialMovingAverageObserver', class {}); + this.registerType('megengine.quantization.observer.HistogramObserver', class {}); + this.registerType('megengine.quantization.observer.MinMaxObserver', class {}); + this.registerType('megengine.quantization.observer.PassiveObserver', class {}); + this.registerType('megengine.quantization.observer.SyncExponentialMovingAverageObserver', class {}); + this.registerType('megengine.quantization.observer.SyncMinMaxObserver', class {}); + this.registerType('megengine.traced_module.expr.Apply', class {}); + this.registerType('megengine.traced_module.expr.CallFunction', class {}); + this.registerType('megengine.traced_module.expr.CallMethod', class {}); + this.registerType('megengine.traced_module.expr.Constant', class {}); + this.registerType('megengine.traced_module.expr.GetAttr', class {}); + this.registerType('megengine.traced_module.expr.Input', class {}); + 
this.registerType('megengine.traced_module.fake_quant.FakeQuantize', class {}); + this.registerType('megengine.traced_module.node.ModuleNode', class {}); + this.registerType('megengine.traced_module.node.NodeMixin', class {}); + this.registerType('megengine.traced_module.node.TensorNode', class {}); + this.registerType('megengine.traced_module.pytree.ArgsIndex', class {}); + this.registerType('megengine.traced_module.serialization._ModuleState', class {}); + this.registerType('megengine.traced_module.traced_module.InternalGraph', class {}); + this.registerType('megengine.traced_module.traced_module.NameSpace', class {}); + this.registerType('megengine.traced_module.traced_module.TracedModule', class {}); + this.registerType('megengine.tensor.Parameter', class { + constructor(data, dtype, device) { + this.data = data; + this.dtype = dtype; + this.device = device; + } + }); + this.registerType('megengine.traced_module.pytree.TreeDef', class { + toString() { + let content = ''; + for (const child of this.children_defs) { + content += `${child},`; + } + if (typeof this.type === "string") { + return `${this.type.split(".").slice(-1)}(${content})`; + } + return `${this.type.__name__}(${content})`; + } + }); + this.registerType('megengine.traced_module.pytree.LeafDef', class { + toString() { + let content = ''; + if (this.const_val !== null) { + content += this.const_val; + } else { + content += '['; + for (const t of Object.values(this.type)) { + content += t.__name__; + } + content += ']'; + } + return content; + } + }); + this.registerType('megengine.tensor.Tensor', class { + constructor(data, dtype, device) { + this.data = data; + this.dtype = dtype; + this.device = device; + } + }); + this.registerType('megengine.core.tensor.dtype.QuantDtypeMeta', class { + constructor(name, cname, np_dtype, qmin, qmax, is_signed) { + this.name = name; + this.cname = cname; + this.np_dtype = np_dtype; + this.qmin = qmin; + this.qmax = qmax; + this.is_signed = is_signed; + } + }); + this.registerType('nolearn.lasagne.base.BatchIterator', class {}); + this.registerType('nolearn.lasagne.base.Layers', class {}); + this.registerType('nolearn.lasagne.base.NeuralNet', class {}); + this.registerType('nolearn.lasagne.base.TrainSplit', class {}); + this.registerType('nolearn.lasagne.handlers.PrintLayerInfo', class {}); + this.registerType('nolearn.lasagne.handlers.PrintLog', class {}); + this.registerType('numpy.ndarray', class { + constructor(shape, dtype, buffer, offset, strides, order) { + this.shape = shape; + this.dtype = dtype; + this.data = buffer !== undefined ? buffer : null; + this.offset = offset !== undefined ? offset : 0; + this._strides = strides !== undefined ? strides : null; + this.order = order !== undefined ? 
order : null; + this.flags = {}; + this._read(); + } + __setstate__(state) { + [this.version, this.shape, this.dtype, this.flags.fn, this.data] = state; + this._read(); + } + flatten() { + const size = this.shape.reduce((a, b) => a * b, 1); + const value = execution.invoke('numpy.ndarray', [ + [ size ], this.dtype, this.data, this.offset, this.strides, this.order + ]); + value.flags = this.flags; + return value; + } + tobytes() { + return this.data; + } + tolist() { + if (this.shape.length < 0 || this.shape.length > 1) { + throw new python.Error(`Unsupported shape '${JSON.stringify(this.shape)}'.`); + } + const size = this.shape.reduce((a, b) => a * b, 1); + const list = new Array(size); + switch (this.dtype.kind) { + case 'U': { + const data = new Uint32Array(new Uint8Array(this.data).buffer); + const itemsize = this.dtype.itemsize >> 2; + let offset = 0; + for (let i = 0; i < size; i++) { + const buffer = data.subarray(offset, offset + itemsize); + const index = buffer.indexOf(0); + list[i] = Array.from(index >= 0 ? buffer.subarray(0, index) : buffer).map((c) => String.fromCodePoint(c)).join(''); + offset += itemsize; + } + return list; + } + case 'S': { + const data = this.data; + const itemsize = this.dtype.itemsize; + const decoder = new TextDecoder('utf-8'); + let offset = 0; + for (let i = 0; i < size; i++) { + const buffer = data.subarray(offset, offset + itemsize); + const index = buffer.indexOf(0); + list[i] = decoder.decode(index >= 0 ? buffer.subarray(0, index) : buffer); + offset += itemsize; + } + return list; + } + case 'O': { + return this.data; + } + default: { + throw new python.Error(`Type kind '${this.dtype.kind}' not implemented.`); + } + } + } + get itemsize() { + return this.dtype.itemsize; + } + get size() { + return (this.shape || []).reduce((a, b) => a * b, 1); + } + get strides() { + if (!this._strides) { + const shape = this.shape; + const strides = new Array(shape.length); + let stride = this.itemsize; + for (let i = shape.length - 1; i >= 0; i--) { + strides[i] = stride; + stride *= shape[i]; + } + return strides; + } + return this._strides; + } + _read() { + if (this.data) { + const length = this.dtype.itemsize * this.size; + if (typeof this.data == 'string') { + this.data = this._unescape(this.data, length); + if (this.data.length != length) { + throw new python.Error('Invalid string array data size.'); + } + } else if (this.data.length != length) { + // throw new python.Error('Invalid array data size.'); + } + } + } + _unescape(token, size) { + const length = token.length; + const a = new Uint8Array(length); + if (size && size == length) { + for (let p = 0; p < size; p++) { + a[p] = token.charCodeAt(p); + } + return a; + } + let i = 0; + let o = 0; + while (i < length) { + let c = token.charCodeAt(i++); + if (c !== 0x5C || i >= length) { + a[o++] = c; + } else { + c = token.charCodeAt(i++); + switch (c) { + case 0x27: a[o++] = 0x27; break; // ' + case 0x5C: a[o++] = 0x5C; break; // \\ + case 0x22: a[o++] = 0x22; break; // " + case 0x72: a[o++] = 0x0D; break; // \r + case 0x6E: a[o++] = 0x0A; break; // \n + case 0x74: a[o++] = 0x09; break; // \t + case 0x62: a[o++] = 0x08; break; // \b + case 0x58: // x + case 0x78: { // X + const xsi = i - 1; + const xso = o; + for (let xi = 0; xi < 2; xi++) { + if (i >= length) { + i = xsi; + o = xso; + a[o] = 0x5c; + break; + } + let xd = token.charCodeAt(i++); + xd = xd >= 65 && xd <= 70 ? xd - 55 : xd >= 97 && xd <= 102 ? xd - 87 : xd >= 48 && xd <= 57 ? 
xd - 48 : -1; + if (xd === -1) { + i = xsi; + o = xso; + a[o] = 0x5c; + break; + } + a[o] = a[o] << 4 | xd; + } + o++; + break; + } + default: + if (c < 48 || c > 57) { // 0-9 + a[o++] = 0x5c; + a[o++] = c; + } else { + i--; + const osi = i; + const oso = o; + for (let oi = 0; oi < 3; oi++) { + if (i >= length) { + i = osi; + o = oso; + a[o] = 0x5c; + break; + } + const od = token.charCodeAt(i++); + if (od < 48 || od > 57) { + i = osi; + o = oso; + a[o] = 0x5c; + break; + } + a[o] = a[o] << 3 | od - 48; + } + o++; + } + break; + } + } + } + return a.slice(0, o); + } + }); + this.registerType('numpy.ma.core.MaskedArray', class extends numpy.ndarray { + constructor(data /*, mask, dtype, copy, subok, ndmin, fill_value, keep_mask, hard_mask, shrink, order */) { + super(data.shape, data.dtype, data.data); + } + }); + this.registerType('numpy.core.memmap.memmap', class extends numpy.ndarray { + constructor(shape, dtype) { + super(shape, dtype); + } + }); + this.registerType('pathlib.Path', class {}); + this.registerType('pathlib.PosixPath', class {}); + this.registerType('pathlib.WindowsPath', class {}); + this.registerType('sklearn._loss.link.BaseLink', class {}); + this.registerType('sklearn._loss.link.MultinomialLogit', class extends sklearn._loss.link.BaseLink {}); + this.registerType('sklearn._loss.loss.BaseLoss', class {}); + this.registerType('sklearn._loss.loss.HalfMultinomialLoss', class extends sklearn._loss.loss.BaseLoss {}); + this.registerType('sklearn.base.BaseEstimator', class {}); + this.registerType('sklearn.base.TransformerMixin', class {}); + this.registerType('sklearn.calibration._CalibratedClassifier', class {}); + this.registerType('sklearn.calibration._SigmoidCalibration', class {}); + this.registerType('sklearn.calibration.CalibratedClassifierCV', class {}); + this.registerType('sklearn.cluster._agglomerative.FeatureAgglomeration', class {}); + this.registerType('sklearn.cluster._dbscan.DBSCAN', class {}); + this.registerType('sklearn.cluster._kmeans.KMeans', class {}); + this.registerType('sklearn.cluster.k_means_.MiniBatchKMeans', class {}); + this.registerType('sklearn.compose._column_transformer.ColumnTransformer', class {}); + this.registerType('sklearn.compose._target.TransformedTargetRegressor', class {}); + this.registerType('sklearn.cross_decomposition._pls.PLSRegression', class {}); + this.registerType('sklearn.decomposition._fastica.FastICA', class {}); + this.registerType('sklearn.decomposition._pca.PCA', class {}); + this.registerType('sklearn.decomposition._truncated_svd.TruncatedSVD', class {}); + this.registerType('sklearn.decomposition.pca.PCA', class {}); + this.registerType('sklearn.decomposition.PCA', class {}); + this.registerType('sklearn.decomposition.truncated_svd.TruncatedSVD', class {}); + this.registerType('sklearn.discriminant_analysis.LinearDiscriminantAnalysis', class {}); + this.registerType('sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis', class {}); + this.registerType('sklearn.dummy.DummyClassifier', class {}); + this.registerType('sklearn.dummy.DummyRegressor', class {}); + this.registerType('sklearn.externals.joblib.numpy_pickle.NumpyArrayWrapper', class { + constructor(/* subtype, shape, dtype */) { + } + __setstate__(state) { + this.subclass = state.subclass; + this.dtype = state.dtype; + this.shape = state.shape; + this.order = state.order; + this.allow_mmap = state.allow_mmap; + } + __read__(unpickler) { + if (this.dtype.__name__ == 'object') { + return unpickler.load(); + } + const size = this.dtype.itemsize * 
this.shape.reduce((a, b) => a * b, 1); + this.data = unpickler.read(size); + return execution.invoke(this.subclass, [ this.shape, this.dtype, this.data ]); + } + }); + this.registerType('sklearn.externals.joblib.numpy_pickle.NDArrayWrapper', class { + constructor(/* subtype, shape, dtype */) { + } + __setstate__(state) { + this.subclass = state.subclass; + this.filename = state.state; + this.allow_mmap = state.allow_mmap; + } + __read__(/* unpickler */) { + return this; // return execution.invoke(this.subclass, [ this.shape, this.dtype, this.data ]); + } + }); + this.registerType('sklearn.ensemble._bagging.BaggingClassifier', class {}); + this.registerType('sklearn.ensemble._bagging.BaggingRegressor', class {}); + this.registerType('sklearn.ensemble._forest.RandomForestClassifier', class {}); + this.registerType('sklearn.ensemble._forest.RandomForestRegressor', class {}); + this.registerType('sklearn.ensemble._forest.ExtraTreesClassifier', class {}); + this.registerType('sklearn.ensemble._forest.ExtraTreesRegressor', class {}); + this.registerType('sklearn.ensemble._gb_losses.BinomialDeviance', class {}); + this.registerType('sklearn.ensemble._gb_losses.LeastSquaresError', class {}); + this.registerType('sklearn.ensemble._gb_losses.MultinomialDeviance', class {}); + this.registerType('sklearn.ensemble._gb.GradientBoostingClassifier', class {}); + this.registerType('sklearn.ensemble._gb.GradientBoostingRegressor', class {}); + this.registerType('sklearn.ensemble._hist_gradient_boosting.binning._BinMapper', class {}); + this.registerType('sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor', class {}); + this.registerType('sklearn.ensemble._hist_gradient_boosting.loss.LeastSquares', class {}); + this.registerType('sklearn.ensemble._hist_gradient_boosting.predictor.TreePredictor', class {}); + this.registerType('sklearn.ensemble._iforest.IsolationForest', class {}); + this.registerType('sklearn.ensemble._stacking.StackingClassifier', class {}); + this.registerType('sklearn.ensemble._voting.VotingClassifier', class {}); + this.registerType('sklearn.ensemble._voting.VotingRegressor', class {}); + this.registerType('sklearn.ensemble._weight_boosting.AdaBoostClassifier', class {}); + this.registerType('sklearn.ensemble._weight_boosting.AdaBoostRegressor', class {}); + this.registerType('sklearn.ensemble.forest.RandomForestClassifier', class {}); + this.registerType('sklearn.ensemble.forest.RandomForestRegressor', class {}); + this.registerType('sklearn.ensemble.forest.ExtraTreesClassifier', class {}); + this.registerType('sklearn.ensemble.gradient_boosting.BinomialDeviance', class {}); + this.registerType('sklearn.ensemble.gradient_boosting.GradientBoostingClassifier', class {}); + this.registerType('sklearn.ensemble.gradient_boosting.LogOddsEstimator', class {}); + this.registerType('sklearn.ensemble.gradient_boosting.MultinomialDeviance', class {}); + this.registerType('sklearn.ensemble.gradient_boosting.PriorProbabilityEstimator', class {}); + this.registerType('sklearn.ensemble.weight_boosting.AdaBoostClassifier', class {}); + this.registerType('sklearn.feature_extraction._hashing.FeatureHasher', class {}); + this.registerType('sklearn.feature_extraction.text.CountVectorizer', class {}); + this.registerType('sklearn.feature_extraction.text.HashingVectorizer', class {}); + this.registerType('sklearn.feature_extraction.text.TfidfTransformer', class {}); + this.registerType('sklearn.feature_extraction.text.TfidfVectorizer', class {}); + 
this.registerType('sklearn.feature_selection._from_model.SelectFromModel', class {}); + this.registerFunction('sklearn.feature_selection._univariate_selection.chi2', function() { + throw new python.Error("'sklearn.feature_selection._univariate_selection.chi2' not implemented."); + }); + this.registerType('sklearn.feature_selection._univariate_selection.GenericUnivariateSelect', class {}); + this.registerType('sklearn.feature_selection._univariate_selection.SelectKBest', class {}); + this.registerType('sklearn.feature_selection._univariate_selection.SelectPercentile', class {}); + this.registerType('sklearn.feature_selection._variance_threshold.VarianceThreshold', class {}); + this.registerType('sklearn.feature_selection.univariate_selection.SelectKBest', class {}); + this.registerType('sklearn.feature_selection.variance_threshold.VarianceThreshold', class {}); + this.registerType('sklearn.gaussian_process._gpr.GaussianProcessRegressor', class {}); + this.registerType('sklearn.gaussian_process.gpc.GaussianProcessClassifier', class {}); + this.registerType('sklearn.gaussian_process.kernels.ConstantKernel', class {}); + this.registerType('sklearn.gaussian_process.kernels.DotProduct', class {}); + this.registerType('sklearn.gaussian_process.kernels.Product', class {}); + this.registerType('sklearn.gaussian_process.kernels.RBF', class {}); + this.registerType('sklearn.gaussian_process.kernels.Sum', class {}); + this.registerType('sklearn.gaussian_process.kernels.WhiteKernel', class {}); + this.registerType('sklearn.grid_search._CVScoreTuple', class {}); + this.registerType('sklearn.grid_search.GridSearchCV', class {}); + this.registerType('sklearn.impute._base.SimpleImputer', class {}); + this.registerType('sklearn.impute.SimpleImputer', class {}); + this.registerType('sklearn.isotonic.IsotonicRegression', class {}); + this.registerType('sklearn.linear_model._base.LinearRegression', class {}); + this.registerType('sklearn.linear_model._bayes.BayesianRidge', class {}); + this.registerType('sklearn.linear_model._coordinate_descent.ElasticNetCV', class {}); + this.registerType('sklearn.linear_model._coordinate_descent.ElasticNet', class {}); + this.registerType('sklearn.linear_model._coordinate_descent.Lasso', class {}); + this.registerType('sklearn.linear_model._least_angle.LassoLarsCV', class {}); + this.registerType('sklearn.linear_model._logistic.LogisticRegression', class {}); + this.registerType('sklearn.linear_model._quantile.QuantileRegressor', class {}); + this.registerType('sklearn.linear_model._ridge.Ridge', class {}); + this.registerType('sklearn.linear_model._ridge.RidgeClassifier', class {}); + this.registerType('sklearn.linear_model._sgd_fast.Hinge', class {}); + this.registerType('sklearn.linear_model._sgd_fast.Log', class {}); + this.registerType('sklearn.linear_model._sgd_fast.ModifiedHuber', class {}); + this.registerType('sklearn.linear_model._sgd_fast.SquaredHinge', class {}); + this.registerType('sklearn.linear_model._stochastic_gradient.SGDClassifier', class {}); + this.registerType('sklearn.linear_model.base.LinearRegression', class {}); + this.registerType('sklearn.linear_model.sgd_fast.Hinge', class {}); + this.registerType('sklearn.linear_model.LogisticRegression', class {}); + this.registerType('sklearn.linear_model.logistic.LogisticRegression', class {}); + this.registerType('sklearn.linear_model.logistic.LogisticRegressionCV', class {}); + this.registerType('sklearn.linear_model.LassoLars', class {}); + this.registerType('sklearn.linear_model.ridge.Ridge', class {});
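Nearly every estimator in this run of registrations is an empty `class {}`, and that is enough: the GLOBAL and STACK_GLOBAL opcodes only need a constructible type to resolve to, and the later BUILD opcode copies the pickled attribute dict onto the instance, so fitted parameters such as `coef_` and `intercept_` survive without any scikit-learn logic. A minimal sketch of that path, using a local `Map` in place of this file's registry:

```javascript
// Why empty stubs suffice: resolve a dotted name to a type (GLOBAL),
// construct it, then merge the pickled state onto it (BUILD).
const registry = new Map();
registry.set('sklearn.linear_model._logistic.LogisticRegression', class {});

function findClass(module, name) {
    const cls = registry.get(`${module}.${name}`);
    if (!cls) {
        throw new Error(`Unknown type '${module}.${name}'.`);
    }
    return cls;
}

const Cls = findClass('sklearn.linear_model._logistic', 'LogisticRegression');
const estimator = new Cls();
Object.assign(estimator, { coef_: [[0.4, -1.1]], intercept_: [0.2] });
console.log(estimator.coef_); // [ [ 0.4, -1.1 ] ]
```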
+ this.registerType('sklearn.linear_model.sgd_fast.Log', class {}); + this.registerType('sklearn.linear_model.stochastic_gradient.SGDClassifier', class {}); + this.registerType('sklearn.manifold._t_sne.TSNE', class {}); + this.registerType('sklearn.metrics._dist_metrics.EuclideanDistance', class {}); + this.registerType('sklearn.metrics._dist_metrics.EuclideanDistance64', class {}); + this.registerType('sklearn.metrics._scorer._PassthroughScorer', class {}); + this.registerType('sklearn.metrics._scorer._PredictScorer', class {}); + this.registerType('sklearn.metrics.scorer._PredictScorer', class {}); + this.registerType('sklearn.metrics._scorer._ThresholdScorer', class {}); + this.registerType('sklearn.mixture._bayesian_mixture.BayesianGaussianMixture', class {}); + this.registerType('sklearn.mixture._gaussian_mixture.GaussianMixture', class {}); + this.registerType('sklearn.model_selection._search.GridSearchCV', class {}); + this.registerType('sklearn.model_selection._search.RandomizedSearchCV', class {}); + this.registerType('sklearn.model_selection._split.KFold', class {}); + this.registerType('sklearn.model_selection._split.StratifiedKFold', class {}); + this.registerType('sklearn.multiclass.OneVsRestClassifier', class {}); + this.registerType('sklearn.multioutput.MultiOutputClassifier', class {}); + this.registerType('sklearn.multioutput.MultiOutputRegressor', class {}); + this.registerType('sklearn.naive_bayes.BernoulliNB', class {}); + this.registerType('sklearn.naive_bayes.ComplementNB', class {}); + this.registerType('sklearn.naive_bayes.GaussianNB', class {}); + this.registerType('sklearn.naive_bayes.MultinomialNB', class {}); + this.registerType('sklearn.neighbors.ball_tree.BallTree', class {}); + this.registerType('sklearn.neighbors._classification.KNeighborsClassifier', class {}); + this.registerType('sklearn.neighbors._dist_metrics.newObj', class {}); + this.registerType('sklearn.neighbors._dist_metrics.EuclideanDistance', class {}); + this.registerType('sklearn.neighbors._kd_tree.KDTree', class {}); + this.registerType('sklearn.neighbors._kd_tree.newObj', class {}); + this.registerType('sklearn.neighbors._regression.KNeighborsRegressor', class {}); + this.registerType('sklearn.neighbors._unsupervised.NearestNeighbors', class {}); + this.registerType('sklearn.neighbors.classification.KNeighborsClassifier', class {}); + this.registerType('sklearn.neighbors.dist_metrics.newObj', class {}); + this.registerType('sklearn.neighbors.dist_metrics.EuclideanDistance', class {}); + this.registerType('sklearn.neighbors.kd_tree.newObj', class {}); + this.registerType('sklearn.neighbors.kd_tree.KDTree', class {}); + this.registerType('sklearn.neighbors.KNeighborsClassifier', class {}); + this.registerType('sklearn.neighbors.KNeighborsRegressor', class {}); + this.registerType('sklearn.neighbors.regression.KNeighborsRegressor', class {}); + this.registerType('sklearn.neighbors.unsupervised.NearestNeighbors', class {}); + this.registerType('sklearn.neural_network._multilayer_perceptron.MLPClassifier', class {}); + this.registerType('sklearn.neural_network._multilayer_perceptron.MLPRegressor', class {}); + this.registerType('sklearn.neural_network._stochastic_optimizers.AdamOptimizer', class {}); + this.registerType('sklearn.neural_network._stochastic_optimizers.SGDOptimizer', class {}); + this.registerType('sklearn.neural_network.rbm.BernoulliRBM', class {}); + this.registerType('sklearn.neural_network.multilayer_perceptron.MLPClassifier', class {}); + 
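Many of the names above come in pairs, for example `sklearn.neighbors.kd_tree.KDTree` next to `sklearn.neighbors._kd_tree.KDTree`: a pickle records whatever import path a class had when the model was saved, so the public pre-0.22 paths and the private `_`-prefixed paths introduced later both have to resolve. In sketch form, one stub can serve every historical path:

```javascript
// One stub class registered under two historical import paths, so
// pickles from old and new scikit-learn releases resolve to the same type.
const registry = new Map();
class KDTree {}
registry.set('sklearn.neighbors.kd_tree.KDTree', KDTree);  // older pickles
registry.set('sklearn.neighbors._kd_tree.KDTree', KDTree); // newer pickles
console.log(registry.get('sklearn.neighbors.kd_tree.KDTree') ===
    registry.get('sklearn.neighbors._kd_tree.KDTree')); // true
```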
this.registerType('sklearn.neural_network.multilayer_perceptron.MLPRegressor', class {}); + this.registerType('sklearn.neural_network.stochastic_gradient.SGDClassifier', class {}); + this.registerType('sklearn.pipeline.Pipeline', class {}); + this.registerType('sklearn.pipeline.FeatureUnion', class {}); + this.registerType('sklearn.preprocessing._data.MinMaxScaler', class {}); + this.registerType('sklearn.preprocessing._data.MaxAbsScaler', class {}); + this.registerType('sklearn.preprocessing._data.Normalizer', class {}); + this.registerType('sklearn.preprocessing._data.PolynomialFeatures', class {}); + this.registerType('sklearn.preprocessing._data.PowerTransformer', class {}); + this.registerType('sklearn.preprocessing._data.QuantileTransformer', class {}); + this.registerType('sklearn.preprocessing._data.RobustScaler', class {}); + this.registerType('sklearn.preprocessing._data.StandardScaler', class {}); + this.registerType('sklearn.preprocessing._discretization.KBinsDiscretizer', class {}); + this.registerType('sklearn.preprocessing._encoders.OneHotEncoder', class {}); + this.registerType('sklearn.preprocessing._encoders.OrdinalEncoder', class {}); + this.registerType('sklearn.preprocessing._function_transformer.FunctionTransformer', class {}); + this.registerType('sklearn.preprocessing._label.LabelBinarizer', class {}); + this.registerType('sklearn.preprocessing._label.LabelEncoder', class {}); + this.registerType('sklearn.preprocessing._label.MultiLabelBinarizer', class {}); + this.registerType('sklearn.preprocessing._polynomial.PolynomialFeatures', class {}); + this.registerType('sklearn.preprocessing.data.Binarizer', class {}); + this.registerType('sklearn.preprocessing.data.MaxAbsScaler', class {}); + this.registerType('sklearn.preprocessing.data.MinMaxScaler', class {}); + this.registerType('sklearn.preprocessing.data.Normalizer', class {}); + this.registerType('sklearn.preprocessing.data.OneHotEncoder', class {}); + this.registerType('sklearn.preprocessing.data.PolynomialFeatures', class {}); + this.registerType('sklearn.preprocessing.data.PowerTransformer', class {}); + this.registerType('sklearn.preprocessing.data.RobustScaler', class {}); + this.registerType('sklearn.preprocessing.data.QuantileTransformer', class {}); + this.registerType('sklearn.preprocessing.data.StandardScaler', class {}); + this.registerType('sklearn.preprocessing.imputation.Imputer', class {}); + this.registerType('sklearn.preprocessing.label.LabelBinarizer', class {}); + this.registerType('sklearn.preprocessing.label.LabelEncoder', class {}); + this.registerType('sklearn.preprocessing.label.MultiLabelBinarizer', class {}); + this.registerType('sklearn.svm._classes.LinearSVC', class {}); + this.registerType('sklearn.svm._classes.NuSVC', class {}); + this.registerType('sklearn.svm._classes.OneClassSVM', class {}); + this.registerType('sklearn.svm._classes.SVC', class {}); + this.registerType('sklearn.svm._classes.SVR', class {}); + this.registerType('sklearn.svm.classes.LinearSVC', class {}); + this.registerType('sklearn.svm.classes.OneClassSVM', class {}); + this.registerType('sklearn.svm.classes.SVC', class {}); + this.registerType('sklearn.svm.classes.SVR', class {}); + this.registerType('sklearn.tree._classes.DecisionTreeClassifier', class {}); + this.registerType('sklearn.tree._classes.DecisionTreeRegressor', class {}); + this.registerType('sklearn.tree._classes.ExtraTreeClassifier', class {}); + this.registerType('sklearn.tree._classes.ExtraTreeRegressor', class {}); + 
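Most registrations here carry no behavior, but the `sklearn.tree._tree.Tree` type just below is one of the few with a real `__setstate__`, because tree state should be restored selectively rather than merged blindly. The BUILD opcode implemented later in this diff dispatches between the two cases roughly as in this sketch (its callable and `__read__` special cases omitted):

```javascript
// Sketch of BUILD's dispatch: prefer __setstate__, else merge the state.
function build(obj, state) {
    if (typeof obj.__setstate__ === 'function') {
        obj.__setstate__(state);
    } else {
        Object.assign(obj, state);
    }
    return obj;
}

class Tree {
    __setstate__(state) {
        this.max_depth = state.max_depth;
        this.node_count = state.node_count;
    }
}
console.log(build(new Tree(), { max_depth: 3, node_count: 15 }).max_depth); // 3
console.log(build({}, { alpha: 0.5 }).alpha); // 0.5
```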
this.registerType('sklearn.tree._tree.Tree', class { + constructor(n_features, n_classes, n_outputs) { + this.n_features = n_features; + this.n_classes = n_classes; + this.n_outputs = n_outputs; + } + __setstate__(state) { + this.max_depth = state.max_depth; + this.node_count = state.node_count; + this.nodes = state.nodes; + this.values = state.values; + } + }); + this.registerType('sklearn.tree.tree.DecisionTreeClassifier', class {}); + this.registerType('sklearn.tree.tree.DecisionTreeRegressor', class {}); + this.registerType('sklearn.tree.tree.ExtraTreeClassifier', class {}); + this.registerType('sklearn.utils._bunch.Bunch', class {}); + this.registerType('sklearn.utils.deprecation.DeprecationDict', class {}); + this.registerType('pickle.Unpickler', class { + constructor(data) { + this._reader = data instanceof Uint8Array ? new python.BinaryReader(data) : new python.StreamReader(data); + this.persistent_load = () => { + throw new python.Error('Unsupported persistent id.'); + }; + } + load() { + const reader = this._reader; + const marker = []; + let stack = []; + const memo = {}; + let size = 0; + while (reader.position < reader.length) { + const opcode = reader.byte(); + // console.log((reader.position - 1).toString() + ' ' + Object.entries(OpCode).find(([, value]) => value === opcode)[0]); + // https://svn.python.org/projects/python/trunk/Lib/pickletools.py + // https://github.com/python/cpython/blob/master/Lib/pickle.py + switch (opcode) { + case 128: { // PROTO + const version = reader.byte(); + if (version > 5) { + throw new python.Error(`Unsupported protocol version '${version}'.`); + } + break; + } + case 99: { // GLOBAL 'c' + const module = reader.line(); + const name = reader.line(); + stack.push(this.find_class(module, name)); + break; + } + case 147: { // STACK_GLOBAL '\x93' (Protocol 4) + const name = stack.pop(); + const module = stack.pop(); + stack.push(this.find_class(module, name)); + break; + } + case 111: { // OBJ 'o' + const args = stack; + const cls = args.pop(); + stack = marker.pop(); + const obj = this._instantiate(cls, args); + stack.push(obj); + break; + } + case 112 : { // PUT 'p' + const index = parseInt(reader.line(), 10); + memo[index] = stack[stack.length - 1]; + size++; + break; + } + case 103: { // GET 'g' + const index = parseInt(reader.line(), 10); + stack.push(memo[index]); + break; + } + case 48: // POP '0' + stack.pop(); + break; + case 49: // POP_MARK '1' + stack = marker.pop(); + break; + case 50: // DUP '2' + stack.push(stack[stack.length-1]); + break; + case 80: // PERSID 'P' + stack.push(this.persistent_load(reader.line())); + break; + case 81: // BINPERSID 'Q' + stack.push(this.persistent_load(stack.pop())); + break; + case 82: { // REDUCE 'R' + const args = stack.pop(); + const func = stack.pop(); + stack.push(this._reduce(func, args)); + break; + } + case 129: { // NEWOBJ + const args = stack.pop(); + const cls = stack.pop(); + const obj = this._newobj(cls, args); + stack.push(obj); + break; + } + case 146: { // NEWOBJ_EX '\x92' (Protocol 4) + const kwargs = stack.pop(); + const args = stack.pop(); + const cls = stack.pop(); + if (Object.entries(kwargs).length > 0) { + throw new python.Error("Unpickle 'NEWOBJ_EX' not implemented."); + } + const obj = this._newobj(cls, args); + stack.push(obj); + break; + } + case 104: // BINGET 'h' + stack.push(memo[reader.byte()]); + break; + case 105: { // INST 'i' + const module = reader.line(); + const name = reader.line(); + const args = stack; + const cls = `${module}.${name}`; + stack = marker.pop(); 
+ // TODO + // cls = this.find_class(module, name) + const obj = this._instantiate(cls, args); + stack.push(obj); + break; + } + case 106: // LONG_BINGET 'j' + stack.push(memo[reader.uint32()]); + break; + case 113: // BINPUT 'q' + memo[reader.byte()] = stack[stack.length - 1]; + size++; + break; + case 114: // LONG_BINPUT 'r' + memo[reader.uint32()] = stack[stack.length - 1]; + size++; + break; + case 74: // BININT 'J' + stack.push(reader.int32()); + break; + case 75: // BININT1 'K' + stack.push(reader.byte()); + break; + case 76: // LONG 'L' + stack.push(parseInt(reader.line(), 10)); + break; + case 77: // BININT2 'M' + stack.push(reader.uint16()); + break; + case 66: // BINBYTES 'B' (Protocol 3) + stack.push(reader.read(reader.int32())); + break; + case 67: // SHORT_BINBYTES 'C' (Protocol 3) + stack.push(reader.read(reader.byte())); + break; + case 142: // BINBYTES8 '\x8e' (Protocol 4) + stack.push(reader.read(reader.int64())); + break; + case 70: // FLOAT 'F' + stack.push(parseFloat(reader.line())); + break; + case 71: // BINFLOAT 'G' + stack.push(reader.float64()); + break; + case 73: { // INT 'I' + const value = reader.line(); + if (value == '01') { + stack.push(true); + } else if (value == '00') { + stack.push(false); + } else { + stack.push(parseInt(value, 10)); + } + break; + } + case 93: // EMPTY_LIST ']' + stack.push(execution.invoke('builtins.list', [])); + break; + case 41: // EMPTY_TUPLE ')' + stack.push([]); + break; + case 143: // EMPTY_SET '\x8f' (Protocol 4) + stack.push([]); + break; + case 144: { // ADDITEMS '\x90' (Protocol 4) + const items = stack; + stack = marker.pop(); + const obj = stack[stack.length - 1]; + for (let i = 0; i < items.length; i++) { + obj.push(items[i]); + } + break; + } + case 145: { // FROZENSET '\x91' (Protocol 4) + const items = stack; + stack = marker.pop(); + stack.push(items); + break; + } + case 100: { // DICT 'd' + const items = stack; + stack = marker.pop(); + const dict = {}; + for (let i = 0; i < items.length; i += 2) { + dict[items[i]] = items[i + 1]; + } + stack.push(dict); + break; + } + case 108: { // LIST 'l' + const items = stack; + stack = marker.pop(); + stack.push(items); + break; + } + case 116: { // TUPLE 't' + const items = stack; + stack = marker.pop(); + stack.push(items); + break; + } + case 133: { // TUPLE1 // '\x85' + stack.push([ stack.pop() ]); + break; + } + case 134: { // TUPLE2 '\x86' + const b = stack.pop(); + const a = stack.pop(); + stack.push([ a, b ]); + break; + } + case 135: { // TUPLE3 '\x87' + const c = stack.pop(); + const b = stack.pop(); + const a = stack.pop(); + stack.push([ a, b, c ]); + break; + } + case 115: { // SETITEM 's' + const value = stack.pop(); + const key = stack.pop(); + const obj = stack[stack.length - 1]; + if (obj.__setitem__) { + obj.__setitem__(key, value); + } else { + obj[key] = value; + } + break; + } + case 117: { // SETITEMS 'u' + const items = stack; + stack = marker.pop(); + const obj = stack[stack.length - 1]; + for (let i = 0; i < items.length; i += 2) { + if (obj.__setitem__) { + obj.__setitem__(items[i], items[i + 1]); + } else { + obj[items[i]] = items[i + 1]; + } + } + break; + } + case 125: // EMPTY_DICT '}' + stack.push({}); + break; + case 97: { // APPEND 'a' + const append = stack.pop(); + stack[stack.length-1].push(append); + break; + } + case 101: { // APPENDS 'e' + const appends = stack; + stack = marker.pop(); + const list = stack[stack.length - 1]; + list.push.apply(list, appends); + break; + } + case 83: { // STRING 'S' + const str = reader.line(); + 
stack.push(str.substr(1, str.length - 2)); + break; + } + case 84: // BINSTRING 'T' + stack.push(reader.string(reader.uint32())); + break; + case 85: // SHORT_BINSTRING 'U' + stack.push(reader.string(reader.byte())); + break; + case 86: // UNICODE 'V' + stack.push(reader.line()); + break; + case 88: // BINUNICODE 'X' + stack.push(reader.string(reader.uint32(), 'utf-8')); + break; + case 140: // SHORT_BINUNICODE '\x8c' (Protocol 4) + stack.push(reader.string(reader.byte(), 'utf-8')); + break; + case 98: { // BUILD 'b' + const state = stack.pop(); + let obj = stack.pop(); + if (obj.__setstate__) { + if (obj.__setstate__.__call__) { + obj.__setstate__.__call__([ obj, state ]); + } else { + obj.__setstate__(state); + } + } else if (ArrayBuffer.isView(state) || Object(state) !== state) { + obj.__state__ = state; + } else if (obj instanceof Map) { + for (const key in state) { + obj.set(key, state[key]); + } + } else { + Object.assign(obj, state); + } + if (obj.__read__) { + obj = obj.__read__(this); + } + stack.push(obj); + break; + } + case 40: // MARK '(' + marker.push(stack); + stack = []; + break; + case 136: // NEWTRUE '\x88' + stack.push(true); + break; + case 137: // NEWFALSE '\x89' + stack.push(false); + break; + case 138: { // LONG1 '\x8a' + const data = reader.read(reader.byte()); + let number = 0; + switch (data.length) { + case 0: number = 0; break; + /* eslint-disable prefer-destructuring */ + case 1: number = data[0]; break; + /* eslint-enable prefer-destructuring */ + case 2: number = data[1] << 8 | data[0]; break; + case 3: number = data[2] << 16 | data[1] << 8 | data[0]; break; + case 4: number = data[3] << 24 | data[2] << 16 | data[1] << 8 | data[0]; break; + case 5: number = data[4] * 0x100000000 + ((data[3] << 24 | data[2] << 16 | data[1] << 8 | data[0]) >>> 0); break; + default: number = Array.prototype.slice.call(data, 0); break; + } + stack.push(number); + break; + } + case 139: // LONG4 '\x8b' + // TODO decode LONG4 + stack.push(reader.read(reader.uint32())); + break; + case 148: // MEMOIZE '\x94' (Protocol 4) + memo[size++] = stack[stack.length - 1]; + break; + case 149: // FRAME '\x95' (Protocol 4) + reader.read(8); + break; + case 150: { // BYTEARRAY8 '\x96' (Protocol 5) + stack.push(reader.read(reader.int64())); + break; + } + case 78: // NONE 'N' + stack.push(null); + break; + case 46: // STOP '.' + return stack.pop(); + case 141: // BINUNICODE8 '\x8d' (Protocol 4) + case 151: // NEXT_BUFFER '\x97' (Protocol 5) + case 152: // READONLY_BUFFER '\x98' (Protocol 5) + default: + throw new python.Error(`Unknown opcode ${opcode} at position ${(reader.position - 1)}.`); + } + } + throw new python.Error('Unexpected end of file.'); + } + find_class(module, name) { + execution.__import__(module); + return execution.resolve(`${module}.${name}`); + } + _instantiate(cls, args) { + return execution.invoke(cls, args); + } + _newobj(cls, args) { + // cls.__new__(cls, args) + return execution.invoke(cls, args); + } + _reduce(func, args) { + return execution.invoke(func, args); + } + read(size) { + return this._reader.read(size); + } + stream(size) { + return this._reader.stream(size); + } + });
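With the opcode loop complete, the unpickler can be driven end to end. A usage sketch against the class registered above, assuming the surrounding `execution` registry, with a hand-assembled protocol-2 stream for `{'answer': 42}` (memo opcodes omitted to keep the bytes short):

```javascript
// PROTO 2, EMPTY_DICT, BINUNICODE 'answer', BININT1 42, SETITEM, STOP.
const data = Uint8Array.from([
    0x80, 0x02,                   // PROTO 2
    0x7d,                         // EMPTY_DICT '}'
    0x58, 0x06, 0x00, 0x00, 0x00, // BINUNICODE 'X', uint32 length 6
    ...[...'answer'].map((c) => c.charCodeAt(0)),
    0x4b, 0x2a,                   // BININT1 'K' -> 42
    0x73,                         // SETITEM 's'
    0x2e                          // STOP '.'
]);
const unpickler = execution.invoke('pickle.Unpickler', [ data ]);
console.log(unpickler.load()); // { answer: 42 }
```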
+ this.registerType('random.Random', class {}); + this.registerType('re.Pattern', class { + constructor(pattern, flags) { + this.pattern = pattern; + this.flags = flags; + } + }); + this.registerType('spacy._ml.PrecomputableAffine', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('spacy.syntax._parser_model.ParserModel', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('theano.compile.function_module._constructor_Function', class {}); + this.registerType('theano.compile.function_module._constructor_FunctionMaker', class {}); + this.registerType('theano.compile.function_module.Function', class {}); + this.registerType('theano.compile.function_module.Supervisor', class {}); + this.registerType('theano.compile.io.In', class {}); + this.registerType('theano.compile.io.SymbolicOutput', class {}); + this.registerType('theano.compile.mode.Mode', class {}); + this.registerType('theano.compile.ops.OutputGuard', class {}); + this.registerType('theano.compile.ops.Shape', class {}); + this.registerType('theano.compile.ops.Shape_i', class {}); + this.registerType('theano.gof.destroyhandler.DestroyHandler', class {}); + this.registerType('theano.gof.fg.FunctionGraph', class {}); + this.registerType('theano.gof.graph.Apply', class {}); + this.registerType('theano.gof.link.Container', class {}); + this.registerType('theano.gof.opt._metadict', class {}); + this.registerType('theano.gof.opt.ChangeTracker', class {}); + this.registerType('theano.gof.opt.MergeFeature', class {}); + this.registerType('theano.gof.optdb.Query', class {}); + this.registerType('theano.gof.toolbox.PreserveVariableAttributes', class {}); + this.registerType('theano.gof.toolbox.ReplaceValidate', class {}); + this.registerType('theano.gof.utils.scratchpad', class {}); + this.registerType('theano.misc.ordered_set.Link', class {}); + this.registerType('theano.misc.ordered_set.OrderedSet', class {}); + this.registerType('theano.sandbox.cuda.basic_ops.HostFromGpu', class {}); + this.registerType('theano.sandbox.cuda.type.CudaNdarray_unpickler', class {}); + this.registerType('theano.sandbox.cuda.type.CudaNdarrayType', class {}); + this.registerType('theano.sandbox.cuda.var.CudaNdarraySharedVariable', class {}); + this.registerType('theano.scalar.basic.Abs', class {}); + this.registerType('theano.scalar.basic.Add', class {}); + this.registerType('theano.scalar.basic.Cast', class {}); + this.registerType('theano.scalar.basic.Composite', class {}); + this.registerType('theano.scalar.basic.EQ', class {}); + this.registerType('theano.scalar.basic.GE', class {}); + this.registerType('theano.scalar.basic.Identity', class 
{}); + this.registerType('theano.scalar.basic.IntDiv', class {}); + this.registerType('theano.scalar.basic.Inv', class {}); + this.registerType('theano.scalar.basic.LE', class {}); + this.registerType('theano.scalar.basic.LT', class {}); + this.registerType('theano.scalar.basic.Mul', class {}); + this.registerType('theano.scalar.basic.Neg', class {}); + this.registerType('theano.scalar.basic.Scalar', class {}); + this.registerType('theano.scalar.basic.ScalarConstant', class {}); + this.registerType('theano.scalar.basic.ScalarVariable', class {}); + this.registerType('theano.scalar.basic.Second', class {}); + this.registerType('theano.scalar.basic.Sgn', class {}); + this.registerType('theano.scalar.basic.specific_out', class {}); + this.registerType('theano.scalar.basic.Sub', class {}); + this.registerType('theano.scalar.basic.Switch', class {}); + this.registerType('theano.scalar.basic.Tanh', class {}); + this.registerType('theano.scalar.basic.transfer_type', class {}); + this.registerType('theano.scalar.basic.TrueDiv', class {}); + this.registerType('theano.tensor.basic.Alloc', class {}); + this.registerType('theano.tensor.basic.Dot', class {}); + this.registerType('theano.tensor.basic.MaxAndArgmax', class {}); + this.registerType('theano.tensor.basic.Reshape', class {}); + this.registerType('theano.tensor.basic.ScalarFromTensor', class {}); + this.registerType('theano.tensor.blas.Dot22', class {}); + this.registerType('theano.tensor.blas.Dot22Scalar', class {}); + this.registerType('theano.tensor.blas.Gemm', class {}); + this.registerType('theano.tensor.elemwise.DimShuffle', class {}); + this.registerType('theano.tensor.elemwise.Elemwise', class {}); + this.registerType('theano.tensor.elemwise.Sum', class {}); + this.registerType('theano.tensor.nnet.abstract_conv.AbstractConv2d', class {}); + this.registerType('theano.tensor.nnet.abstract_conv.AbstractConv2d_gradInputs', class {}); + this.registerType('theano.tensor.nnet.abstract_conv.AbstractConv2d_gradWeights', class {}); + this.registerType('theano.tensor.nnet.corr.CorrMM', class {}); + this.registerType('theano.tensor.nnet.corr.CorrMM_gradInputs', class {}); + this.registerType('theano.tensor.nnet.corr.CorrMM_gradWeights', class {}); + this.registerType('theano.tensor.nnet.nnet.CrossentropyCategorical1Hot', class {}); + this.registerType('theano.tensor.nnet.nnet.CrossentropyCategorical1HotGrad', class {}); + this.registerType('theano.tensor.nnet.nnet.CrossentropySoftmax1HotWithBiasDx', class {}); + this.registerType('theano.tensor.nnet.nnet.CrossentropySoftmaxArgmax1HotWithBias', class {}); + this.registerType('theano.tensor.nnet.nnet.Softmax', class {}); + this.registerType('theano.tensor.nnet.nnet.SoftmaxGrad', class {}); + this.registerType('theano.tensor.nnet.nnet.SoftmaxWithBias', class {}); + this.registerType('theano.tensor.opt.MakeVector', class {}); + this.registerType('theano.tensor.opt.ShapeFeature', class {}); + this.registerType('theano.tensor.sharedvar.TensorSharedVariable', class {}); + this.registerType('theano.tensor.signal.pool.MaxPoolGrad', class {}); + this.registerType('theano.tensor.signal.pool.Pool', class {}); + this.registerType('theano.tensor.subtensor.Subtensor', class {}); + this.registerType('theano.tensor.type.TensorType', class {}); + this.registerType('theano.tensor.var.TensorConstant', class {}); + this.registerType('theano.tensor.var.TensorConstantSignature', class {}); + this.registerType('theano.tensor.var.TensorVariable', class {}); + this.registerType('thinc.describe.Biases', class { + 
__setstate__(state) { + Object.assign(this, state); + } + }); + this.registerType('thinc.describe.Dimension', class { + __setstate__(state) { + Object.assign(this, state); + } + }); + this.registerType('thinc.describe.Gradient', class { + __setstate__(state) { + Object.assign(this, state); + } + }); + this.registerType('thinc.describe.Weights', class { + __setstate__(state) { + Object.assign(this, state); + } + }); + this.registerType('thinc.describe.Synapses', class { + __setstate__(state) { + Object.assign(this, state); + } + }); + this.registerType('thinc.neural._classes.affine.Affine', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('thinc.neural._classes.convolution.ExtractWindow', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('thinc.neural._classes.feature_extracter.FeatureExtracter', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('thinc.neural._classes.feed_forward.FeedForward', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('thinc.neural._classes.function_layer.FunctionLayer', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('thinc.neural._classes.hash_embed.HashEmbed', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('thinc.neural._classes.layernorm.LayerNorm', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('thinc.neural._classes.maxout.Maxout', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('thinc.neural._classes.resnet.Residual', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('thinc.neural._classes.softmax.Softmax', class { + __setstate__(state) { + Object.assign(this, new pickle.Unpickler(state).load()); + } + }); + this.registerType('thinc.neural.mem.Memory', class { + }); + this.registerType('thinc.neural.ops.NumpyOps', class { + }); + this.registerType('__main__.BYOLState', class { + constructor(dict) { + Object.assign(this, dict); + } + }); + this.registerType('types.GenericAlias', class {}); + this.registerType('types.SimpleNamespace', class {}); + this.register('types').ObjectType = builtins.object; + this.register('types').ModuleType = builtins.module; + this.register('types').MethodType = builtins.method; + this.register('types').FunctionType = builtins.function; + this.register('types').TypeType = builtins.type; + this.register('types').CodeType = builtins.code; + this.registerType('xgboost.compat.XGBoostLabelEncoder', class {}); + this.registerType('xgboost.core.Booster', class {}); + this.registerType('xgboost.sklearn.XGBClassifier', class {}); + this.registerType('xgboost.sklearn.XGBRegressor', class {}); + this.registerFunction('_codecs.encode', function(obj, encoding) { + return execution.invoke('builtins.bytearray', [ obj, encoding ]); + }); + this.registerType('builtins.bytearray', class extends Uint8Array { + constructor(source, encoding /*, errors */) { + source = builtins.bytes.__encode__(source, encoding); + super(Number.isInteger(source) ? 
source : source.length); + if (Array.isArray(source)) { + for (let i = 0; i < source.length; i++) { + this[i] = source[i]; + } + } else if (source instanceof Uint8Array) { + this.set(source, 0); + } else if (typeof source === 'string') { + for (let i = 0; i < source.length; i++) { + this[i] = source.charCodeAt(i); + } + } + } + static __encode__(source, encoding) { + if (source === undefined) { + return 0; + } + if (Number.isInteger(source)) { + return source; + } + if (Array.isArray(source) || source instanceof Uint8Array) { + return source; + } + if (typeof source === 'string') { + switch (encoding) { + case 'latin1': + case 'latin-1': + return source; + case 'utf8': + case 'utf-8': + return new TextEncoder('utf-8').encode(source); + case undefined: + throw new python.Error('Unsupported string argument without an encoding.'); + default: + throw new python.Error(`Unsupported encoding '${encoding}'.`); + } + } + throw new python.Error('Unsupported source.'); + } + }); + this.registerType('builtins.bytes', class extends Uint8Array { + constructor(source, encoding /*, errors */) { + source = builtins.bytes.__encode__(source, encoding); + super(Number.isInteger(source) ? source : source.length); + if (Array.isArray(source)) { + for (let i = 0; i < source.length; i++) { + this[i] = source[i]; + } + } else if (source instanceof Uint8Array) { + this.set(source, 0); + } else if (typeof source === 'string') { + for (let i = 0; i < source.length; i++) { + this[i] = source.charCodeAt(i); + } + } + } + static __encode__(source, encoding) { + if (source === undefined) { + return 0; + } + if (Number.isInteger(source)) { + return source; + } + if (Array.isArray(source) || source instanceof Uint8Array) { + return source; + } + if (typeof source === 'string') { + switch (encoding) { + case 'latin1': + case 'latin-1': + return source; + case 'utf8': + case 'utf-8': + return new TextEncoder('utf-8').encode(source); + case undefined: + throw new python.Error('Unsupported string argument without an encoding.'); + default: + throw new python.Error(`Unsupported encoding '${encoding}'.`); + } + } + throw new python.Error('Unsupported source.'); + } + }); + this.registerType('builtins.frozenset', class extends Set { + constructor(iterable) { + super(); + if (iterable) { + for (const item of iterable) { + this.add(item); + } + } + } + }); + this.registerFunction('builtins.issubclass', function(obj, type) { + const name = `${type.__module__}.${type.__name__}`; + if (obj.__module__ && obj.__name__) { + if (name === `${obj.__module__}.${obj.__name__}`) { + return true; + } + } + if (obj.__bases__) { + for (const base of obj.__bases__) { + if (builtins.issubclass(base, type)) { + return true; + } + } + } + return false; + }); + this.registerFunction('builtins.isinstance', function(obj, type) { + return obj.__class__ ? 
builtins.issubclass(obj.__class__, type) : false; + }); + this.registerFunction('builtins.hasattr', function(obj, name) { + return Object.prototype.hasOwnProperty.call(obj, name); + }); + this.registerFunction('builtins.getattr', function(obj, name, defaultValue) { + if (Object.prototype.hasOwnProperty.call(obj, name)) { + return obj[name]; + } + return defaultValue; + }); + this.registerFunction('builtins.setattr', function(obj, name, value) { + obj[name] = value; + }); + this.registerType('builtins.set', class extends Set { + constructor(iterable) { + super(iterable); + } + }); + this.registerType('builtins.slice', class { + constructor(start, stop, step) { + this.start = start; + this.stop = stop; + this.step = step; + } + }); + this.registerFunction('builtins.hash', function(/* obj */) { + throw new python.Error("'builtins.hash' not implemented."); + }); + this.registerFunction('cloudpickle.cloudpickle._builtin_type', function(name) { + return name; + }); + this.registerType('collections.Counter', class {}); + this.registerFunction('collections.defaultdict', function(/* default_factory */) { + return {}; + }); + this.registerFunction('copy_reg._reconstructor', function(cls, base, state) { + // copyreg._reconstructor in Python 3 + if (base === '__builtin__.object' || base === self._builtins.object) { + return self.invoke(cls, []); + } else if (base === '__builtin__.tuple' || base === self._builtins.tuple) { + const obj = self.invoke(cls, []); + for (let i = 0; i < state.length; i++) { + obj[i] = state[i]; + } + return obj; + } + throw new python.Error(`Unsupported copy_reg._reconstructor base type '${base}'.`); + }); + this.registerFunction('copy.deepcopy', function(/* x */) { + throw new python.Error('Unsupported copy.deepcopy().'); + }); + this.registerFunction('dill._dill._create_array', function(f, args, state, npdict) { + const array = f(...args); + if (array.__setstate__) { + array.__setstate__(state); + } + if (npdict) { + throw new python.Error("'dill._dill._create_array::npdict' not implemented."); + } + return array; + }); + this.registerFunction('dill._dill._create_cell', function(/* args */) { + return function() { + // TODO + }; + }); + this.registerFunction('dill._dill._create_code', function(args) { + return self.invoke('types.CodeType', [ args ]); + }); + this.registerFunction('dill._dill._create_function', function(/* fcode, fglobals, fname, fdefaults, fclosure, fdict, fkwdefaults */) { + return function() { + // TODO + }; + }); + this.registerFunction('dill._dill._create_namedtuple', function(name, fieldnames, modulename /*, defaults */) { + const obj = execution.invoke('dill._dill._import_module', [ `${modulename}.${name}` ]); + if (obj) { + return obj; + } + return undefined; + }); + this.registerFunction('dill._dill._create_type', function(/* typeobj */) { + // return execution.invoke(typeobj, Array.from(arguments).slice(1)); + throw new python.Error("'dill._dill._create_type' not implemented."); + }); + this.registerFunction('dill._dill._eval_repr', function(/* repr_str */) { + throw new python.Error("'dill._dill._eval_repr' not implemented."); + }); + this.registerFunction('dill._dill._get_attr', function(self, name) { + if (Object.prototype.hasOwnProperty.call(self, name)) { + return self[name]; + } + return undefined; + }); + this.registerFunction('dill._dill._import_module', function(import_name, safe) { + try { + if (import_name.startsWith('__runtime__.')) { + return execution.module(import_name); + } else if (import_name.indexOf('.') === -1) { + return 
execution.__import__(import_name); + } + return execution.resolve(import_name); + } catch (err) { + if (safe) { + return null; + } + throw err; + } + }); + this.registerFunction('dill._dill._load_type', function(name) { + const _dill = self.register('dill._dill'); + if (!_dill._reverse_typemap) { + _dill._reverse_typemap = new Map(); + for (const name of [ '__builtin__', 'types' ]) { + const module = self.register(name); + for (const [name, obj] of Object.entries(module)) { + if (obj.__module__ === 'builtins' && + obj.__class__ === builtins.type) { + _dill._reverse_typemap.set(name, obj); + } + } + } + _dill._reverse_typemap.set('PartialType', functools.partial); + _dill._reverse_typemap.set('CellType', builtins.cell); + } + if (!_dill._reverse_typemap.has(name)) { + throw new python.Error(`Unknown type name '${name}' in 'dill._dill._load_type'.`); + } + return _dill._reverse_typemap.get(name); + }); + this.registerFunction('keras.saving.pickle_utils.deserialize_model_from_bytecode', function(/* serialized_model */) { + return null; // throw new python.Error("'keras.saving.pickle_utils.deserialize_model_from_bytecode' not implemented."); + }); + this.registerFunction('keras.src.saving.pickle_utils.deserialize_model_from_bytecode', keras.saving.pickle_utils.deserialize_model_from_bytecode); + this.registerFunction('lasagne.nonlinearities.rectify', function() { + throw new python.Error("'lasagne.nonlinearities.rectify' not implemented."); + }); + this.registerFunction('lasagne.nonlinearities.softmax', function() { + throw new python.Error("'lasagne.nonlinearities.softmax' not implemented."); + }); + this.registerFunction('lasagne.objectives.categorical_crossentropy', function() { + throw new python.Error("'lasagne.objectives.categorical_crossentropy' not implemented."); + }); + this.registerFunction('lasagne.updates.nesterov_momentum', function() { + throw new python.Error("'lasagne.updates.nesterov_momentum' not implemented."); + }); + this.registerFunction('msgpack.unpackb', function(packed, ext_hook) { + const BinaryReader = class { + constructor(buffer, ext_hook) { + // https://github.com/msgpack/msgpack-javascript/blob/master/src/Decoder.ts + // https://github.com/msgpack/msgpack-python/blob/main/msgpack/_unpacker.pyx + this._buffer = buffer; + this._position = 0; + this._view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._ext_hook = ext_hook; + } + value() { + const c = this._view.getUint8(this.skip(1)); + if (c >= 0xe0) { + return c - 0x100; + } + if (c < 0xC0) { + if (c < 0x80) { + return c; + } + if (c < 0x90) { + return this.map(c - 0x80); + } + if (c < 0xa0) { + return this.array(c - 0x90); + } + return this.string(c - 0xa0); + } + switch (c) { + case 0xC0: return null; + case 0xC2: return false; + case 0xC3: return true; + case 0xC4: return this.read(this._view.getUint8(this.skip(1))); + case 0xC5: return this.read(this._view.getUint16(this.skip(2))); + case 0xC6: return this.read(this._view.getUint32(this.skip(4))); + case 0xC7: return this.extension(this._view.getUint8(this.skip(1))); + case 0xC8: return this.extension(this._view.getUint16(this.skip(2))); + case 0xC9: return this.extension(this._view.getUint32(this.skip(4))); + case 0xCA: return this._view.getFloat32(this.skip(4)); + case 0xCB: return this._view.getFloat64(this.skip(8)); + case 0xCC: return this._view.getUint8(this.skip(1)); + case 0xCD: return this._view.getUint16(this.skip(2)); + case 0xCE: return this._view.getUint32(this.skip(4)); + case 0xCF: return 
this._view.getUint64(this.skip(8)); + case 0xD0: return this._view.getInt8(this.skip(1)); + case 0xD1: return this._view.getInt16(this.skip(2)); + case 0xD2: return this._view.getInt32(this.skip(4)); + case 0xD3: return this._view.getInt64(this.skip(8)); + case 0xD4: return this.extension(1); + case 0xD5: return this.extension(2); + case 0xD6: return this.extension(4); + case 0xD7: return this.extension(8); + case 0xD8: return this.extension(16); + case 0xD9: return this.string(this._view.getUint8(this.skip(1))); + case 0xDA: return this.string(this._view.getUint16(this.skip(2))); + case 0xDB: return this.string(this._view.getUint32(this.skip(4))); + case 0xDC: return this.array(this._view.getUint16(this.skip(2))); + case 0xDD: return this.array(this._view.getUint32(this.skip(4))); + case 0xDE: return this.map(this._view.getUint16(this.skip(2))); + case 0xDF: return this.map(this._view.getUint32(this.skip(4))); + default: throw new python.Error(`Invalid code '${c}'.`); + } + } + map(size) { + const map = {}; + for (let i = 0; i < size; i++) { + const key = this.value(); + const value = this.value(); + map[key] = value; + } + return map; + } + array(size) { + const array = new Array(size); + for (let i = 0; i < size; i++) { + array[i] = this.value(); + } + return array; + } + extension(size) { + const code = this._view.getUint8(this.skip(1)); + const data = this.read(size); + return this._ext_hook(code, data); + } + skip(offset) { + const position = this._position; + this._position += offset; + if (this._position > this._buffer.length) { + throw new python.Error(`Expected ${this._position - this._buffer.length} more bytes. The file might be corrupted. Unexpected end of file.`); + } + return position; + } + read(size) { + const data = this._buffer.subarray(this._position, this._position + size); + this._position += size; + return data; + } + string(size) { + const buffer = this.read(size); + this._decoder = this._decoder || new TextDecoder('utf8'); + return this._decoder.decode(buffer); + } + }; + return new BinaryReader(packed, ext_hook).value(); + }); + this.registerFunction('nolearn.lasagne.base.objective', function() { + throw new python.Error("'nolearn.lasagne.base.objective' not implemented."); + }); + this.registerFunction('numpy._reconstruct', function(subtype, shape, dtype) { + return self.invoke(subtype, [ shape, dtype ]); + }); + this.registerFunction('numpy.core._DType_reconstruct', function(/* scalar_type */) { + throw new python.Error("'numpy.core._DType_reconstruct' not implemented."); + }); + this.registerFunction('numpy.core._multiarray_umath._reconstruct', function(subtype, shape, dtype) { + return self.invoke(subtype, [ shape, dtype ]); + }); + this.registerFunction('numpy.core.multiarray._reconstruct', function(subtype, shape, dtype) { + return self.invoke(subtype, [ shape, dtype ]); + }); + this.registerFunction('numpy.core.multiarray.scalar', function(dtype, rawData) { + let data = rawData; + if (typeof rawData === 'string' || rawData instanceof String) { + data = new Uint8Array(rawData.length); + for (let i = 0; i < rawData.length; i++) { + data[i] = rawData.charCodeAt(i); + } + } + switch (dtype.kind) { + case 'b': { + const view = new DataView(data.buffer, data.byteOffset, data.byteLength); + switch (dtype.itemsize) { + case 1: return view.getInt8(0) ? 
true : false; + default: throw new python.Error(`Unsupported scalar dtype boolean itemsize '${dtype.itemsize}'.`); + } + } + case 'f': { + const view = new DataView(data.buffer, data.byteOffset, data.byteLength); + switch (dtype.itemsize) { + case 2: return view.getFloat16(0, dtype.byteorder === '<'); + case 4: return view.getFloat32(0, dtype.byteorder === '<'); + case 8: return view.getFloat64(0, dtype.byteorder === '<'); + default: throw new python.Error(`Unsupported scalar dtype float itemsize '${dtype.itemsize}'.`); + } + } + case 'i': { + const view = new DataView(data.buffer, data.byteOffset, data.byteLength); + switch (dtype.itemsize) { + case 1: return view.getInt8(0); + case 2: return view.getInt16(0, dtype.byteorder === '<'); + case 4: return view.getInt32(0, dtype.byteorder === '<'); + case 8: return view.getInt64(0, dtype.byteorder === '<'); + default: throw new python.Error(`Unsupported scalar dtype int itemsize '${dtype.itemsize}'.`); + } + } + case 'u': { + const view = new DataView(data.buffer, data.byteOffset, data.byteLength); + switch (dtype.itemsize) { + case 1: return view.getUint8(0); + case 2: return view.getUint16(0, dtype.byteorder === '<'); + case 4: return view.getUint32(0, dtype.byteorder === '<'); + case 8: return view.getUint64(0, dtype.byteorder === '<'); + default: throw new python.Error(`Unsupported scalar dtype uint itemsize '${dtype.itemsize}'.`); + } + } + case 'U': { + const view = new DataView(data.buffer, data.byteOffset, data.byteLength); + const list = []; + for (let i = 0; i < dtype.itemsize; i += 4) { + list.push(String.fromCodePoint(view.getUint32(i, true))); + } + return list.join(''); + } + default: { + throw new python.Error(`Unsupported scalar dtype kind '${dtype.kind}'.`); + } + } + }); + this.registerFunction('numpy.core._multiarray_umath.scalar', function(dtype, rawData) { + let data = rawData; + if (typeof rawData === 'string') { + data = new Uint8Array(rawData.length); + for (let i = 0; i < rawData.length; i++) { + data[i] = rawData.charCodeAt(i); + } + } + const dataView = new DataView(data.buffer, data.byteOffset, data.byteLength); + switch (dtype.__name__) { + case 'uint8': + return dataView.getUint8(0); + case 'float32': + return dataView.getFloat32(0, true); + case 'float64': + return dataView.getFloat64(0, true); + case 'int8': + return dataView.getInt8(0, true); + case 'int16': + return dataView.getInt16(0, true); + case 'int32': + return dataView.getInt32(0, true); + case 'int64': + return dataView.getInt64(0, true); + default: + throw new python.Error(`Unsupported scalar type '${dtype.__name__}'.`); + } + }); + this.registerFunction('numpy.load', function(file) { + // https://github.com/numpy/numpy/blob/main/numpy/lib/format.py + const signature = [ 0x93, 0x4E, 0x55, 0x4D, 0x50, 0x59 ]; + if (!file.read(6).every((v, i) => v == signature[i])) { + throw new python.Error('Invalid signature.'); + } + const version = file.read(2); + const [major, minor] = version; + if (major > 3) { + throw new python.Error(`Invalid version '${[ major, minor ].join('.')}'.`); + } + const buffer = new Uint8Array([ 0, 0, 0, 0 ]); + buffer.set(file.read(major >= 2 ? 4 : 2), 0); + const header_length = buffer[3] << 24 | buffer[2] << 16 | buffer[1] << 8 | buffer[0]; + let header = file.read(header_length); + const decoder = new TextDecoder(major >= 3 ? 
'utf-8' : 'ascii'); + header = decoder.decode(header); + header = JSON.parse(header.replace(/\(/,'[').replace(/\)/,']').replace('[,','[1,]').replace(',]',']').replace(/'/g, '"').replace(/:\s*False\s*,/,':false,').replace(/:\s*True\s*,/,':true,').replace(/,\s*\}/, ' }')); + if (!header.descr || header.descr.length < 2) { + throw new python.Error("Missing property 'descr'."); + } + if (!header.shape) { + throw new python.Error("Missing property 'shape'."); + } + const shape = header.shape; + const dtype = self.invoke('numpy.dtype', [ header.descr.substring(1) ]); + dtype.byteorder = header.descr.substring(0, 1); + let data = null; + switch (dtype.byteorder) { + case '|': { + data = file.read(); + if (dtype.kind === 'O') { + const unpickler = execution.invoke('pickle.Unpickler', [ data ]); + return unpickler.load(); + } + break; + } + case '>': + case '<': { + if (header.descr.length !== 3) { + throw new python.Error(`Unsupported data type '${header.descr}'.`); + } + const count = shape.length === 0 ? 1 : shape.reduce((a, b) => a * b, 1); + data = file.read(dtype.itemsize * count); + break; + } + default: { + throw new python.Error(`Unsupported data type '${header.descr}'.`); + } + } + if (header.fortran_order) { + data = null; + } + return self.invoke('numpy.ndarray', [ shape, dtype, data ]); + }); + this.registerFunction('numpy.save', function(file, arr) { + const descr = arr.dtype.str; + if (descr[0] !== '<' && descr[0] !== '>') { + throw new python.Error(`Unsupported byte order '${descr}'.`); + } + if (descr.length !== 3 || (descr[1] !== 'f' && descr[1] !== 'i' && descr[1] !== 'u' && descr[1] !== 'c' && descr.substring(1) !== 'b1')) { + throw new python.Error(`Unsupported data type '${descr}'.`); + } + let shape = ''; + switch (arr.shape.length) { + case 0: shape = '()'; break; + case 1: shape = `(${arr.shape[0]},)`; break; + default: shape = `(${arr.shape.map((dimension) => dimension.toString()).join(', ')})`; break; + } + const properties = [ + `'descr': '${descr}'`, + "'fortran_order': False", + `'shape': ${shape}` + ]; + let header = `{ ${properties.join(', ')} }`; + header += `${' '.repeat(64 - ((header.length + 2 + 8 + 1) & 0x3f))}\n`; + const encoder = new TextEncoder('ascii'); + file.write([ 0x93, 0x4E, 0x55, 0x4D, 0x50, 0x59, 0x01, 0x00 ]); // '\\x93NUMPY' + version + file.write([ header.length & 0xff, (header.length >> 8) & 0xff ]); + file.write(encoder.encode(header)); + file.write(arr.tobytes()); + }); + this.registerFunction('numpy.asarray', function(a, dtype) { + const encode = (context, data, dim) => { + const size = context.shape[dim]; + const littleendian = context.littleendian; + if (dim == context.shape.length - 1) { + for (let i = 0; i < size; i++) { + switch (context.dtype) { + case 'f2': + context.view.setFloat16(context.position, data[i], littleendian); + break; + case 'f4': + context.view.setFloat32(context.position, data[i], littleendian); + break; + case 'f8': + context.view.setFloat64(context.position, data[i], littleendian); + break; + case 'i1': + context.view.setInt8(context.position, data[i], littleendian); + break; + case 'i2': + context.view.setInt16(context.position, data[i], littleendian); + break; + case 'i4': + context.view.setInt32(context.position, data[i], littleendian); + break; + case 'i8': + context.view.setInt64(context.position, data[i], littleendian); + break; + case 'u1': + context.view.setUint8(context.position, data[i], littleendian); + break; + case 'u2': + context.view.setUint16(context.position, data[i], littleendian); + break; + case 
'u4': + context.view.setUint32(context.position, data[i], littleendian); + break; + case 'u8': + context.view.setUint64(context.position, data[i], littleendian); + break; + case 'c8': + context.view.setComplex64(context.position, data[i], littleendian); + break; + case 'c16': + context.view.setComplex128(context.position, data[i], littleendian); + break; + case 'b1': + context.view.setInt8(context.position, data[i] ? 1 : 0); + break; + default: + throw new python.Error(`Unsupported tensor data type '${context.dtype}'.`); + } + context.position += context.itemsize; + } + } else { + for (let j = 0; j < size; j++) { + encode(context, data[j], dim + 1); + } + } + }; + const array_size = (value) => { + if (value.every((item) => Array.isArray(item))) { + const dims = value.map((item) => array_size(item)); + const [dim] = dims; + for (let i = 1; i < dims.length; i++) { + if (dim.length === dims[i].length) { + if (!dims[i].every((value, i) => value === dim[i])) { + throw new python.Error('Invalid array shape.'); + } + } + } + return [ value.length ].concat(dim); + } + return [ value.length ]; + }; + const shape = Array.isArray(a) ? array_size(a) : []; + const size = dtype.itemsize * shape.reduce((a, b) => a * b, 1); + const context = { + position: 0, + itemsize: dtype.itemsize, + dtype: dtype.str.substring(1), + littleendian: dtype.str[0] === '<', + shape: shape, + data: new Uint8Array(size) + }; + context.view = new DataView(context.data.buffer, context.data.byteOffset, size); + encode(context, a, 0); + return self.invoke('numpy.ndarray', [ shape, dtype, context.data ]); + }); + this.registerFunction('numpy.mean', function() { + throw new python.Error("'numpy.mean' not implemented."); + }); + this.registerFunction('numpy.ma.core._mareconstruct', function(subtype, baseclass, baseshape, basetype) { + const data = self.invoke(baseclass, [ baseshape, basetype ]); + // = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) + const mask = self.invoke('numpy.ndarray', [ baseshape, '' ]); + return self.invoke(subtype, [ data, mask, basetype ]); + }); + this.registerFunction('numpy.random.__RandomState_ctor', function() { + return {}; + }); + this.registerFunction('numpy.random._pickle.__randomstate_ctor', function() { + return {}; + }); + this.registerFunction('numpy.random._pickle.__bit_generator_ctor', function() { + throw new python.Error("'numpy.random._pickle.__bit_generator_ctor' not implemented."); + }); + this.registerFunction('numpy.random._pickle.__generator_ctor', function() { + throw new python.Error("'numpy.random._pickle.__generator_ctor' not implemented."); + }); + this.registerFunction('numpy.reshape', function() { + throw new python.Error("'numpy.reshape' not implemented."); + }); + this.registerFunction('numpy.core.numeric._frombuffer', function(/* buf, dtype, shape, order */) { + return {}; + });
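The scalar helpers and `numpy.asarray` above are two halves of the same convention: element bytes pass through a `DataView`, with the dtype's byteorder character ('<' or '>') selecting endianness. A standalone sketch of the float32 case, assuming a dtype-like `{ kind, itemsize, byteorder }` record:

```javascript
// Decode one float32 scalar the way numpy.core.multiarray.scalar does.
const dtype = { kind: 'f', itemsize: 4, byteorder: '<' };
const bytes = Uint8Array.from([0x00, 0x00, 0x80, 0x3f]); // 1.0, little-endian
const view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength);
console.log(view.getFloat32(0, dtype.byteorder === '<')); // 1
```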
python.Error("'sklearn.metrics._classification.f1_score' not implemented."); + }); + this.registerFunction('sklearn.metrics._classification.precision_score', function() { + throw new python.Error("'sklearn.metrics._classification.precision_score' not implemented."); + }); + this.registerFunction('sklearn.metrics._classification.recall_score', function() { + throw new python.Error("'sklearn.metrics._classification.recall_score' not implemented."); + }); + this.registerFunction('sklearn.metrics._dist_metrics.newObj', function() { + throw new python.Error("'sklearn.metrics._dist_metrics.newObj' not implemented."); + }); + this.registerFunction('sklearn.metrics._regression.mean_absolute_error', function() { + throw new python.Error("'sklearn.metrics._regression.mean_absolute_error' not implemented."); + }); + this.registerFunction('sklearn.metrics._regression.mean_squared_error', function() { + throw new python.Error("'sklearn.metrics._regression.mean_squared_error' not implemented."); + }); + this.registerFunction('re._compile', function(pattern, flags) { + return self.invoke('re.Pattern', [ pattern, flags ]); + }); + this.registerFunction('srsly.cloudpickle.cloudpickle._builtin_type', function(name) { + return function() { + return self.invoke(`types.${name}`, arguments); + }; + }); + this.registerFunction('theano.scalar.basic.same_out', function() { + throw new python.Error("'theano.scalar.basic.same_out' not implemented."); + }); + this.registerFunction('theano.scalar.basic.same_out_nocomplex', function() { + throw new python.Error("'theano.scalar.basic.same_out_nocomplex' not implemented."); + }); + this.registerFunction('theano.scalar.basic.upcast_out', function() { + throw new python.Error("'theano.scalar.basic.upcast_out' not implemented."); + }); + this.registerFunction('theano.scalar.basic.upgrade_to_float', function() { + throw new python.Error("'theano.scalar.basic.upgrade_to_float' not implemented."); + }); + this.registerFunction('theano.tensor.nnet.conv2d', function() { + throw new python.Error("'theano.tensor.nnet.conv2d' not implemented."); + }); + this.registerFunction('theano.tensor.type.values_eq_approx_remove_inf_nan', function() { + throw new python.Error("'theano.tensor.type.values_eq_approx_remove_inf_nan' not implemented."); + }); + this.registerFunction('theano.tensor.type.values_eq_approx_remove_nan', function() { + throw new python.Error("'theano.tensor.type.values_eq_approx_remove_nan' not implemented."); + }); + this.registerType('torch.nn.modules.module.Module', class { + constructor() { + this._modules = execution.invoke('collections.OrderedDict', []); + this._parameters = execution.invoke('collections.OrderedDict', []); + this._buffers = execution.invoke('collections.OrderedDict', []); + } + __setattr__(name, value) { + if (value instanceof torch.nn.modules.module.Module) { + this._modules.set(name, value); + } else { + this[name] = value; + } + } + __getattr__(name) { + if (this._modules.has(name)) { + return this._modules.get(name); + } + return this[name]; + } + }); + torch.nn.Module = torch.nn.modules.module.Module; + torch.nn.modules.Module = torch.nn.modules.module.Module; + this.registerType('torch.ao.quantization.fake_quantize.FakeQuantize', class {}); + this.registerType('torch.ao.quantization.fake_quantize.FusedMovingAvgObsFakeQuantize', class {}); + this.registerType('torch.ao.quantization.observer._PartialWrapper', class {}); + this.registerType('torch.ao.quantization.observer.HistogramObserver', class {}); + 
this.registerType('torch.ao.quantization.observer.MovingAverageMinMaxObserver', class {}); + this.registerType('torch.ao.quantization.observer.MinMaxObserver', class {}); + this.registerType('torch.ao.quantization.observer.PerChannelMinMaxObserver', class {}); + this.registerType('torch.ao.quantization.observer.PlaceholderObserver', class {}); + this.registerType('torch.ao.quantization.qconfig.QConfig', class {}); + this.registerType('torch.ao.quantization.stubs.DeQuantStub', class {}); + this.registerType('torch.ao.quantization.stubs.QuantStub', class {}); + this.registerType('torch.ao.quantization.stubs.QuantWrapper', class {}); + this.registerFunction('torch.ao.quantization.fx.graph_module._save_packed_weight', function() { + throw new python.Error("'torch.ao.quantization.fx.graph_module._save_packed_weight' not implemented."); + }); + this.registerFunction('torch.ao.quantization.fx._lower_to_native_backend._load_packed_weight', function() { + throw new python.Error("'torch.ao.quantization.fx._lower_to_native_backend._load_packed_weight' not implemented."); + }); + this.registerFunction('torch.ao.quantization.fx._lower_to_native_backend._save_packed_weight', function() { + throw new python.Error("'torch.ao.quantization.fx._lower_to_native_backend._save_packed_weight' not implemented."); + }); + this.registerFunction('torch.ao.quantization.quantization_mappings._get_special_act_post_process', function() { + throw new python.Error("'torch.ao.quantization.quantization_mappings._get_special_act_post_process' not implemented."); + }); + this.registerFunction('torch.ao.quantization.quantization_mappings.get_default_dynamic_quant_module_mappings', function() { + throw new python.Error("'torch.ao.quantization.quantization_mappings.get_default_dynamic_quant_module_mappings' not implemented."); + }); + this.registerFunction('torch.ao.quantization.quantization_mappings.get_default_qat_module_mappings', function() { + throw new python.Error("'torch.ao.quantization.quantization_mappings.get_default_qat_module_mappings' not implemented."); + }); + this.registerFunction('torch.ao.quantization.quantization_mappings.get_default_qconfig_propagation_list', function() { + throw new python.Error("'torch.ao.quantization.quantization_mappings.get_default_qconfig_propagation_list' not implemented."); + }); + this.registerFunction('torch.ao.quantization.quantization_mappings.get_default_static_quant_module_mappings', function() { + throw new python.Error("'torch.ao.quantization.quantization_mappings.get_default_static_quant_module_mappings' not implemented."); + }); + this.registerFunction('torch.ao.quantization.quantization_mappings.get_default_static_quant_reference_module_mappings', function() { + throw new python.Error("'torch.ao.quantization.quantization_mappings.get_default_static_quant_reference_module_mappings' not implemented."); + }); + this.registerFunction('torch.ao.quantization.quantization_mappings.no_observer_set', function() { + throw new python.Error("'torch.ao.quantization.quantization_mappings.no_observer_set' not implemented."); + }); + this.registerFunction('torch.ao.quantization.quantization_mappings._has_special_act_post_process', function() { + throw new python.Error("'torch.ao.quantization.quantization_mappings._has_special_act_post_process' not implemented."); + }); + this.registerFunction('torch.ao.quantization.utils.get_qparam_dict', function() { + throw new python.Error("'torch.ao.quantization.utils.get_qparam_dict' not implemented."); + }); + 
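// These 'not implemented' stubs are intentional: unpickling only needs the global names to resolve, so each unsupported helper is bound to a function that raises python.Error lazily, at call time rather than at load time. A checkpoint that merely references, say, get_qparam_dict still loads; only executing it fails. + 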
this.registerFunction('torch.ao.quantization.utils.has_no_children_ignoring_parametrizations', function() { + throw new python.Error("'torch.ao.quantization.utils.has_no_children_ignoring_parametrizations' not implemented."); + }); + this.registerType('torch.autograd.variable.Variable', class {}); + this.registerType('torch.backends.cudnn.rnn.Unserializable', class {}); + this.registerFunction('torch.distributed._shard.sharded_tensor.pre_load_state_dict_hook', function() { + throw new python.Error("'torch.distributed._shard.sharded_tensor.pre_load_state_dict_hook' not implemented."); + }); + this.registerFunction('torch.distributed._shard.sharded_tensor.state_dict_hook', function() { + throw new python.Error("'torch.distributed._shard.sharded_tensor.state_dict_hook' not implemented."); + }); + this.registerType('torch.distributed.algorithms.join._JoinConfig', class {}); + this.registerType('torch.distributions.bernoulli.Bernoulli', class {}); + this.registerType('torch.distributions.categorical.Categorical', class {}); + this.registerType('torch.distributions.constraints._LowerCholesky', class {}); + this.registerType('torch.distributions.constraints._Real', class {}); + this.registerType('torch.distributions.multivariate_normal.MultivariateNormal', class {}); + this.registerType('torch.distributions.normal.Normal', class {}); + this.registerType('torch.distributions.transforms._InverseTransform', class {}); + this.registerType('torch.distributions.transforms.AffineTransform', class {}); + this.registerType('torch.distributions.transforms.ComposeTransform', class {}); + this.registerType('torch.distributions.transforms.ExpTransform', class {}); + this.registerType('torch.distributions.transforms.LowerCholeskyTransform', class {}); + this.registerType('torch.distributions.uniform.Uniform', class {}); + this.registerType('torch.nn.backends.thnn._get_thnn_function_backend', class {}); + this.registerType('torch.nn.intrinsic.modules.fused.ConvBnReLU2d', class {}); + this.registerType('torch.nn.intrinsic.modules.fused.ConvReLU2d', class {}); + this.registerType('torch.nn.intrinsic.modules.fused.BNReLU2d', class {}); + this.registerType('torch.nn.intrinsic.qat.modules.conv_fused.ConvBn2d', class {}); + this.registerType('torch.nn.intrinsic.qat.modules.conv_fused.ConvBnReLU2d', class {}); + this.registerType('torch.nn.intrinsic.qat.modules.conv_fused.ConvReLU2d', class {}); + this.registerType('torch.nn.intrinsic.quantized.modules.conv_relu.ConvReLU2d', class {}); + this.registerType('torch.nn.intrinsic.quantized.modules.linear_relu.LinearReLU', class {}); + this.registerType('torch.nn.modules.activation.CELU', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.ELU', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.GELU', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.GLU', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Hardtanh', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Hardshrink', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Hardsigmoid', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Hardswish', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.LeakyReLU', class extends 
torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.LogSigmoid', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.LogSoftmax', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Mish', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.MultiheadAttention', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.ReLU', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.ReLU6', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.PReLU', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.RReLU', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.SELU', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Sigmoid', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.SiLU', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Softmax', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Softmax2d', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Softmin', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Softplus', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Softshrink', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Softsign', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Tanh', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Tanhshrink', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.activation.Threshold', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.adaptive.AdaptiveLogSoftmaxWithLoss', class {}); + this.registerType('torch.nn.modules.batchnorm._NormBase', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.batchnorm._BatchNorm', class extends torch.nn.modules.batchnorm._NormBase {}); + this.registerType('torch.nn.modules.batchnorm.BatchNorm1d', class extends torch.nn.modules.batchnorm._BatchNorm {}); + this.registerType('torch.nn.modules.batchnorm.BatchNorm2d', class extends torch.nn.modules.batchnorm._BatchNorm {}); + this.registerType('torch.nn.modules.batchnorm.BatchNorm3d', class extends torch.nn.modules.batchnorm._BatchNorm {}); + this.registerType('torch.nn.modules.batchnorm.LazyBatchNorm1d', class {}); + this.registerType('torch.nn.modules.batchnorm.LazyBatchNorm2d', class {}); + this.registerType('torch.nn.modules.batchnorm.LazyBatchNorm3d', class {}); + this.registerType('torch.nn.modules.batchnorm.SyncBatchNorm', class {}); + this.registerType('torch.nn.modules.channelshuffle.ChannelShuffle', class {}); + this.registerType('torch.nn.modules.container.ModuleDict', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.container.ModuleList', class extends torch.nn.modules.module.Module {}); + 
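// Registering concrete layers as subclasses of the Module shim (rather than as bare 'class {}' placeholders) propagates the __setattr__/__getattr__ routing, so containers such as Sequential and ModuleList collect their children in '_modules' during deserialization; leaf types that never hold child modules can stay as empty classes. + 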
this.registerType('torch.nn.modules.container.ParameterDict', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.container.ParameterList', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.container.Sequential', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.conv._ConvNd', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.conv.Conv1d', class extends torch.nn.modules.conv._ConvNd {}); + this.registerType('torch.nn.modules.conv.Conv2d', class extends torch.nn.modules.conv._ConvNd {}); + this.registerType('torch.nn.modules.conv.Conv3d', class extends torch.nn.modules.conv._ConvNd {}); + this.registerType('torch.nn.modules.conv._ConvTransposeNd', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.conv.ConvTranspose1d', class extends torch.nn.modules.conv._ConvTransposeNd {}); + this.registerType('torch.nn.modules.conv.ConvTranspose2d', class extends torch.nn.modules.conv._ConvTransposeNd {}); + this.registerType('torch.nn.modules.conv.ConvTranspose3d', class extends torch.nn.modules.conv._ConvTransposeNd {}); + this.registerType('torch.nn.modules.conv.LazyConv1d', class {}); + this.registerType('torch.nn.modules.conv.LazyConv2d', class {}); + this.registerType('torch.nn.modules.conv.LazyConv3d', class {}); + this.registerType('torch.nn.modules.conv.LazyConvTranspose2d', class {}); + this.registerType('torch.nn.modules.distance.CosineSimilarity', class {}); + this.registerType('torch.nn.modules.dropout._DropoutNd', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.dropout.AlphaDropout', class extends torch.nn.modules.dropout._DropoutNd {}); + this.registerType('torch.nn.modules.dropout.Dropout', class extends torch.nn.modules.dropout._DropoutNd {}); + this.registerType('torch.nn.modules.dropout.Dropout1d', class extends torch.nn.modules.dropout._DropoutNd {}); + this.registerType('torch.nn.modules.dropout.Dropout2d', class extends torch.nn.modules.dropout._DropoutNd {}); + this.registerType('torch.nn.modules.dropout.Dropout3d', class extends torch.nn.modules.dropout._DropoutNd {}); + this.registerType('torch.nn.modules.dropout.FeatureAlphaDropout', class extends torch.nn.modules.dropout._DropoutNd {}); + this.registerType('torch.nn.modules.fold.Fold', class {}); + this.registerType('torch.nn.modules.fold.Unfold', class {}); + this.registerType('torch.nn.modules.flatten.Flatten', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.flatten.Unflatten', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.instancenorm.InstanceNorm1d', class {}); + this.registerType('torch.nn.modules.instancenorm.InstanceNorm2d', class {}); + this.registerType('torch.nn.modules.instancenorm.InstanceNorm3d', class {}); + this.registerType('torch.nn.modules.instancenorm.LazyInstanceNorm2d', class {}); + this.registerType('torch.nn.modules.linear._LinearWithBias', class {}); + this.registerType('torch.nn.modules.linear.Bilinear', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.linear.Identity', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.linear.LazyLinear', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.linear.Linear', class extends torch.nn.modules.module.Module {}); + 
this.registerType('torch.nn.modules.linear.NonDynamicallyQuantizableLinear', class extends torch.nn.modules.linear.Linear {}); + this.registerType('torch.nn.modules.loss._Loss', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.loss._WeightedLoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.BCELoss', class extends torch.nn.modules.loss._WeightedLoss {}); + this.registerType('torch.nn.modules.loss.BCEWithLogitsLoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.CrossEntropyLoss', class extends torch.nn.modules.loss._WeightedLoss {}); + this.registerType('torch.nn.modules.loss.CosineEmbeddingLoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.CTCLoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.GaussianNLLLoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.HuberLoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.HingeEmbeddingLoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.KLDivLoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.L1Loss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.MarginRankingLoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.MultiLabelMarginLoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.MultiLabelSoftMarginLoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.MultiMarginLoss', class extends torch.nn.modules.loss._WeightedLoss {}); + this.registerType('torch.nn.modules.loss.MSELoss', class extends torch.nn.modules.loss._Loss {}); + this.registerType('torch.nn.modules.loss.NLLLoss', class extends torch.nn.modules.loss._WeightedLoss {}); + this.registerType('torch.nn.modules.loss.NLLLoss2d', class extends torch.nn.modules.loss.NLLLoss {}); + this.registerType('torch.nn.modules.loss.PoissonNLLLoss', class {}); + this.registerType('torch.nn.modules.loss.SmoothL1Loss', class {}); + this.registerType('torch.nn.modules.loss.SoftMarginLoss', class {}); + this.registerType('torch.nn.modules.loss.TripletMarginLoss', class {}); + this.registerType('torch.nn.modules.loss.TripletMarginWithDistanceLoss', class {}); + this.registerType('torch.nn.modules.module._IncompatibleKeys', class {}); + this.registerType('torch.nn.modules.module._WrappedHook', class {}); + this.registerType('torch.nn.modules.module.PatchForward', class {}); + this.registerType('torch.nn.modules.normalization.CrossMapLRN2d', class {}); + this.registerType('torch.nn.modules.normalization.GroupNorm', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.normalization.LayerNorm', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.normalization.LocalResponseNorm', class {}); + this.registerType('torch.nn.modules.padding._ReflectionPadNd', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.padding.ReflectionPad1d', class extends torch.nn.modules.padding._ReflectionPadNd {}); + this.registerType('torch.nn.modules.padding.ReflectionPad2d', class extends torch.nn.modules.padding._ReflectionPadNd {}); + 
this.registerType('torch.nn.modules.padding.ReflectionPad3d', class extends torch.nn.modules.padding._ReflectionPadNd {}); + this.registerType('torch.nn.modules.padding._ReplicationPadNd', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.padding.ReplicationPad1d', class extends torch.nn.modules.padding._ReplicationPadNd {}); + this.registerType('torch.nn.modules.padding.ReplicationPad2d', class extends torch.nn.modules.padding._ReplicationPadNd {}); + this.registerType('torch.nn.modules.padding.ReplicationPad3d', class extends torch.nn.modules.padding._ReplicationPadNd {}); + this.registerType('torch.nn.modules.padding.ZeroPad2d', class {}); + this.registerType('torch.nn.modules.padding.ConstantPad1d', class {}); + this.registerType('torch.nn.modules.padding.ConstantPad2d', class {}); + this.registerType('torch.nn.modules.padding.ConstantPad3d', class {}); + this.registerType('torch.nn.modules.pixelshuffle.PixelShuffle', class {}); + this.registerType('torch.nn.modules.pixelshuffle.PixelUnshuffle', class {}); + this.registerType('torch.nn.modules.pooling._AdaptiveAvgPoolNd', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.pooling.AdaptiveAvgPool1d', class extends torch.nn.modules.pooling._AdaptiveAvgPoolNd {}); + this.registerType('torch.nn.modules.pooling.AdaptiveAvgPool2d', class extends torch.nn.modules.pooling._AdaptiveAvgPoolNd {}); + this.registerType('torch.nn.modules.pooling.AdaptiveAvgPool3d', class extends torch.nn.modules.pooling._AdaptiveAvgPoolNd {}); + this.registerType('torch.nn.modules.pooling.AdaptiveMaxPool1d', class {}); + this.registerType('torch.nn.modules.pooling.AdaptiveMaxPool2d', class {}); + this.registerType('torch.nn.modules.pooling.AdaptiveMaxPool3d', class {}); + this.registerType('torch.nn.modules.pooling.AvgPool1d', class {}); + this.registerType('torch.nn.modules.pooling.AvgPool2d', class {}); + this.registerType('torch.nn.modules.pooling.AvgPool3d', class {}); + this.registerType('torch.nn.modules.pooling.FractionalMaxPool2d', class {}); + this.registerType('torch.nn.modules.pooling.LPPool2d', class {}); + this.registerType('torch.nn.modules.pooling.MaxPool1d', class {}); + this.registerType('torch.nn.modules.pooling.MaxPool2d', class {}); + this.registerType('torch.nn.modules.pooling.MaxPool3d', class {}); + this.registerType('torch.nn.modules.pooling.MaxUnpool1d', class {}); + this.registerType('torch.nn.modules.pooling.MaxUnpool2d', class {}); + this.registerType('torch.nn.modules.pooling.MaxUnpool3d', class {}); + this.registerType('torch.nn.modules.rnn.GRU', class {}); + this.registerType('torch.nn.modules.rnn.GRUCell', class {}); + this.registerType('torch.nn.modules.rnn.LSTM', class {}); + this.registerType('torch.nn.modules.rnn.LSTMCell', class {}); + this.registerType('torch.nn.modules.rnn.RNNBase', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.rnn.RNN', class extends torch.nn.modules.rnn.RNNBase {}); + this.registerType('torch.nn.modules.rnn.RNNCellBase', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.nn.modules.rnn.RNNCell', class extends torch.nn.modules.rnn.RNNCellBase {}); + this.registerType('torch.nn.modules.sparse.Embedding', class {}); + this.registerType('torch.nn.modules.sparse.EmbeddingBag', class {}); + this.registerType('torch.nn.modules.transformer.Transformer', class {}); + this.registerType('torch.nn.modules.transformer.TransformerDecoder', class {}); + 
this.registerType('torch.nn.modules.transformer.TransformerDecoderLayer', class {}); + this.registerType('torch.nn.modules.transformer.TransformerEncoder', class {}); + this.registerType('torch.nn.modules.transformer.TransformerEncoderLayer', class {}); + this.registerType('torch.nn.modules.upsampling.Upsample', class {}); + this.registerType('torch.nn.modules.upsampling.UpsamplingBilinear2d', class {}); + this.registerType('torch.nn.modules.upsampling.UpsamplingNearest2d', class {}); + this.registerType('torch.nn.parallel.data_parallel.DataParallel', class {}); + this.registerType('torch.nn.parallel.distributed._DDPUnevenInputsConfig', class {}); + this.registerType('torch.nn.parallel.distributed.DistributedDataParallel', class {}); + this.registerType('torch.nn.qat.modules.conv.Conv2d', class {}); + this.registerType('torch.nn.qat.modules.linear.Linear', class {}); + this.registerType('torch.nn.quantized.modules.activation.ReLU', class {}); + this.registerType('torch.nn.quantized.modules.activation.LeakyReLU', class {}); + this.registerType('torch.nn.quantized.dynamic.modules.linear.Linear', class {}); + this.registerType('torch.nn.quantized.dynamic.modules.rnn.GRU', class {}); + this.registerType('torch.nn.quantized.dynamic.modules.rnn.LSTM', class {}); + this.registerType('torch.nn.quantized.dynamic.modules.rnn.LSTMCell', class {}); + this.registerType('torch.nn.quantized.dynamic.modules.rnn.PackedParameter', class {}); + this.registerType('torch.nn.quantized.modules.activation.ReLU6', class {}); + this.registerType('torch.nn.quantized.modules.batchnorm.BatchNorm2d', class {}); + this.registerType('torch.nn.quantized.modules.conv.Conv1d', class {}); + this.registerType('torch.nn.quantized.modules.conv.Conv2d', class {}); + this.registerType('torch.nn.quantized.modules.conv.ConvTranspose2d', class {}); + this.registerType('torch.nn.quantized.modules.DeQuantize', class {}); + this.registerType('torch.nn.quantized.modules.dropout.Dropout', class {}); + this.registerType('torch.nn.quantized.modules.embedding_ops.Embedding', class {}); + this.registerType('torch.nn.quantized.modules.embedding_ops.EmbeddingPackedParams', class {}); + this.registerType('torch.nn.quantized.modules.functional_modules.FloatFunctional', class {}); + this.registerType('torch.nn.quantized.modules.functional_modules.QFunctional', class {}); + this.registerType('torch.nn.quantized.modules.linear.Linear', class {}); + this.registerType('torch.nn.quantized.modules.linear.LinearPackedParams', class {}); + this.registerType('torch.nn.quantized.modules.normalization.InstanceNorm2d', class {}); + this.registerType('torch.nn.quantized.modules.normalization.GroupNorm', class extends torch.nn.modules.normalization.GroupNorm {}); + this.registerType('torch.nn.quantized.modules.normalization.LayerNorm', class extends torch.nn.modules.normalization.LayerNorm {}); + this.registerType('torch.nn.quantized.modules.Quantize', class {}); + this.registerType('torch.ao.nn.quantizable.modules.activation.MultiheadAttention', class extends torch.nn.modules.activation.MultiheadAttention {}); + this.registerType('torch.ao.nn.quantizable.modules.rnn.LSTM', class {}); + this.registerType('torch.ao.nn.quantized.modules.activation.MultiheadAttention', class extends torch.ao.nn.quantizable.modules.activation.MultiheadAttention {}); + this.registerType('torch.ao.nn.quantized.modules.activation.ReLU6', class extends torch.nn.modules.activation.ReLU {}); + this.registerType('torch.ao.nn.quantized.modules.activation.LeakyReLU', class extends 
torch.nn.modules.activation.LeakyReLU {}); + this.registerType('torch.ao.nn.quantized.modules.utils.WeightedQuantizedModule', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.ao.nn.quantized.modules.batchnorm._BatchNorm', class extends torch.nn.modules.batchnorm._BatchNorm {}); + this.registerType('torch.ao.nn.quantized.modules.batchnorm.BatchNorm2d', class extends torch.ao.nn.quantized.modules.batchnorm._BatchNorm {}); + this.registerType('torch.ao.nn.quantized.modules.conv.Conv1d', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.ao.nn.quantized.modules.conv.Conv2d', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.ao.nn.quantized.modules.conv._ConvNd', class extends torch.ao.nn.quantized.modules.utils.WeightedQuantizedModule {}); + this.registerType('torch.ao.nn.quantized.modules.conv._ConvTransposeNd', class extends torch.ao.nn.quantized.modules.conv._ConvNd {}); + this.registerType('torch.ao.nn.quantized.modules.conv.ConvTranspose2d', class extends torch.ao.nn.quantized.modules.conv._ConvTransposeNd {}); + this.registerType('torch.ao.nn.quantized.modules.Quantize', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.ao.nn.quantized.modules.DeQuantize', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.ao.nn.quantized.modules.dropout.Dropout', class extends torch.nn.modules.dropout.Dropout {}); + this.registerType('torch.ao.nn.quantized.modules.functional_modules.FloatFunctional', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.ao.nn.quantized.modules.functional_modules.QFunctional', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.ao.nn.quantized.modules.functional_modules.FXFloatFunctional', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.ao.nn.quantized.modules.linear.Linear', class extends torch.ao.nn.quantized.modules.utils.WeightedQuantizedModule {}); + this.registerType('torch.ao.nn.quantized.modules.linear.LinearPackedParams', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.ao.nn.quantized.modules.normalization.LayerNorm', class extends torch.nn.modules.normalization.LayerNorm {}); + this.registerType('torch.ao.nn.quantized.modules.rnn.LSTM', class {}); + this.registerType('torch.ao.nn.quantized.dynamic.modules.linear.Linear', class extends torch.ao.nn.quantized.modules.linear.Linear {}); + this.registerType('torch.ao.nn.quantized.dynamic.modules.rnn.PackedParameter', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.ao.nn.quantized.dynamic.modules.rnn.RNNBase', class extends torch.nn.modules.module.Module {}); + this.registerType('torch.ao.nn.quantized.dynamic.modules.rnn.GRU', class extends torch.ao.nn.quantized.dynamic.modules.rnn.RNNBase {}); + this.registerType('torch.ao.nn.quantized.reference.modules.conv.Conv2d', class {}); + this.registerType('torch.ao.nn.quantized.reference.modules.linear.Linear', class {}); + this.registerType('torch.ao.nn.qat.modules.conv.Conv2d', class {}); + this.registerType('torch.ao.nn.intrinsic.quantized.modules.conv_relu.ConvReLU2d', class extends torch.ao.nn.quantized.modules.conv.Conv2d {}); + this.registerType('torch.ao.nn.intrinsic.quantized.modules.linear_relu.LinearReLU', class extends torch.ao.nn.quantized.modules.linear.Linear {}); + this.registerType('torch.ao.nn.intrinsic.modules.fused._FusedModule', class extends torch.nn.modules.container.Sequential {}); + 
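// Quantized modules are registered twice on purpose: once under the legacy 'torch.nn.quantized.*' / 'torch.nn.intrinsic.*' paths and once under 'torch.ao.nn.*', reflecting PyTorch's migration of quantization code into the 'torch.ao' namespace; checkpoints pickled before and after that move both resolve to a type here. + 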
this.registerType('torch.ao.nn.intrinsic.modules.fused.ConvBn2d', class extends torch.ao.nn.intrinsic.modules.fused._FusedModule {}); + this.registerType('torch.ao.nn.intrinsic.modules.fused.ConvReLU2d', class extends torch.ao.nn.intrinsic.modules.fused._FusedModule {}); + this.registerType('torch.ao.nn.intrinsic.modules.fused.LinearReLU', class extends torch.ao.nn.intrinsic.modules.fused._FusedModule {}); + this.registerType('torch.ao.nn.intrinsic.modules.fused.ConvBnReLU2d', class extends torch.ao.nn.intrinsic.modules.fused._FusedModule {}); + this.registerType('torch.nn.utils.prune.L1Unstructured', class {}); + this.registerType('torch.nn.utils.spectral_norm.SpectralNorm', class {}); + this.registerType('torch.nn.utils.spectral_norm.SpectralNormStateDictHook', class {}); + this.registerType('torch.nn.utils.spectral_norm.SpectralNormLoadStateDictPreHook', class {}); + this.registerType('torch.nn.utils.weight_norm.WeightNorm', class {}); + this.registerFunction('torch.nn.utils.parametrize.type_before_parametrizations', function() { + throw new python.Error("'torch.nn.utils.parametrize.type_before_parametrizations' not implemented."); + }); + this.registerType('torch.torch_version.TorchVersion', class extends String {}); + this.registerType('torch.optim.optimizer.Optimizer', class {}); + this.registerType('torch.optim.adam.Adam', class extends torch.optim.optimizer.Optimizer {}); + this.registerType('torch.optim.adamw.AdamW', class {}); + this.registerType('torch.optim.adagrad.Adagrad', class {}); + this.registerType('torch.optim.adadelta.Adadelta', class {}); + this.registerType('torch.optim.lbfgs.LBFGS', class {}); + this.registerType('torch.optim.lr_scheduler.CosineAnnealingLR', class {}); + this.registerType('torch.optim.lr_scheduler.CyclicLR', class {}); + this.registerType('torch.optim.lr_scheduler.ExponentialLR', class {}); + this.registerType('torch.optim.lr_scheduler.LambdaLR', class {}); + this.registerType('torch.optim.lr_scheduler.LinearLR', class {}); + this.registerType('torch.optim.lr_scheduler.MultiStepLR', class {}); + this.registerType('torch.optim.lr_scheduler.OneCycleLR', class {}); + this.registerType('torch.optim.lr_scheduler.ReduceLROnPlateau', class {}); + this.registerType('torch.optim.lr_scheduler.StepLR', class {}); + this.registerType('torch.optim.optimizer._RequiredParameter', class {}); + this.registerType('torch.optim.radam.RAdam', class extends torch.optim.optimizer.Optimizer {}); + this.registerType('torch.optim.rmsprop.RMSprop', class {}); + this.registerType('torch.optim.sgd.SGD', class {}); + this.registerType('torch.optim.sparse_adam.SparseAdam', class {}); + this.registerType('torch.optim.swa_utils.SWALR', class {}); + torch.optim.RAdam = torch.optim.radam.RAdam; + this.registerType('torch.quantization.fake_quantize.FakeQuantize', class {}); + this.registerType('torch.quantization.observer._PartialWrapper', class {}); + this.registerType('torch.quantization.observer.HistogramObserver', class {}); + this.registerType('torch.quantization.observer.MinMaxObserver', class {}); + this.registerType('torch.quantization.observer.MovingAverageMinMaxObserver', class {}); + this.registerType('torch.quantization.observer.MovingAveragePerChannelMinMaxObserver', class {}); + this.registerType('torch.quantization.qconfig.QConfig', class {}); + this.registerType('torch.quantization.stubs.DeQuantStub', class {}); + this.registerType('torch.quantization.stubs.QuantStub', class {}); + this.registerType('torch.utils._pytree.LeafSpec', class {}); + 
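// The assignment 'torch.optim.RAdam = torch.optim.radam.RAdam' above mirrors Python's re-export pattern: the same constructor stays reachable under both the defining module path and the package-level alias, whichever name a given pickle happens to reference. + 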
this.registerType('torch.utils._pytree.TreeSpec', class {}); + this.registerFunction('torch.utils.data._utils.collate.default_collate', function() { + throw new python.Error("'torch.utils.data._utils.collate.default_collate' not implemented."); + }); + this.registerType('torch.utils.data.dataloader._MultiProcessingDataLoaderIter', class {}); + this.registerType('torch.utils.data.dataloader.DataLoader', class {}); + this.registerType('torch.utils.data.dataset.Subset', class {}); + this.registerType('torch.utils.data.dataset.ConcatDataset', class {}); + this.registerType('torch.utils.data.dataset.TensorDataset', class {}); + this.registerType('torch.utils.data.sampler.BatchSampler', class {}); + this.registerType('torch.utils.data.sampler.RandomSampler', class {}); + this.registerType('torch.utils.data.sampler.SequentialSampler', class {}); + this.registerType('torchvision.datasets.folder.ImageFolder', class {}); + this.registerType('torchvision.datasets.mnist.FashionMNIST', class {}); + this.registerType('torchvision.datasets.mnist.MNIST', class {}); + this.registerType('torchvision.datasets.video_utils.VideoClips', class {}); + this.registerType('torchvision.datasets.vision.StandardTransform', class {}); + this.registerType('torchvision.ops.deform_conv.DeformConv2d', class {}); + this.registerType('torchvision.ops.feature_pyramid_network.FeaturePyramidNetwork', class {}); + this.registerType('torchvision.ops.feature_pyramid_network.LastLevelMaxPool', class {}); + this.registerType('torchvision.ops.feature_pyramid_network.LastLevelP6P7', class {}); + this.registerType('torchvision.ops.misc.Conv2dNormActivation', class {}); + this.registerType('torchvision.ops.misc.ConvNormActivation', class {}); + this.registerType('torchvision.ops.misc.MLP', class extends torch.nn.modules.container.Sequential {}); + this.registerType('torchvision.ops.misc.ConvTranspose2d', class {}); + this.registerType('torchvision.ops.misc.FrozenBatchNorm2d', class {}); + this.registerType('torchvision.ops.misc.Permute', class {}); + this.registerType('torchvision.ops.misc.SqueezeExcitation', class {}); + this.registerType('torchvision.ops.poolers.LevelMapper', class {}); + this.registerType('torchvision.ops.poolers.MultiScaleRoIAlign', class {}); + this.registerType('torchvision.ops.stochastic_depth.StochasticDepth', class {}); + this.registerType('torchvision.models.alexnet.AlexNet', class {}); + this.registerType('torchvision.models.convnext.ConvNeXt', class {}); + this.registerType('torchvision.models.convnext.CNBlock', class {}); + this.registerType('torchvision.models.convnext.LayerNorm2d', class {}); + this.registerType('torchvision.models.densenet.DenseNet', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.models.densenet._DenseBlock', class extends torch.nn.modules.container.ModuleDict {}); + this.registerType('torchvision.models.densenet._DenseLayer', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.models.densenet._Transition', class extends torch.nn.modules.container.Sequential {}); + this.registerType('torchvision.models.detection._utils.BalancedPositiveNegativeSampler', class {}); + this.registerType('torchvision.models.detection._utils.BoxCoder', class {}); + this.registerType('torchvision.models.detection._utils.Matcher', class {}); + this.registerType('torchvision.models.detection._utils.SSDMatcher', class {}); + this.registerType('torchvision.models.detection.anchor_utils.AnchorGenerator', class {}); + 
this.registerType('torchvision.models.detection.anchor_utils.DefaultBoxGenerator', class {}); + this.registerType('torchvision.models.detection.backbone_utils.BackboneWithFPN', class {}); + this.registerType('torchvision.models.detection.faster_rcnn.FasterRCNN', class {}); + this.registerType('torchvision.models.detection.faster_rcnn.FastRCNNConvFCHead', class {}); + this.registerType('torchvision.models.detection.faster_rcnn.FastRCNNPredictor', class {}); + this.registerType('torchvision.models.detection.faster_rcnn.TwoMLPHead', class {}); + this.registerType('torchvision.models.detection.fcos.FCOS', class {}); + this.registerType('torchvision.models.detection.fcos.FCOSHead', class {}); + this.registerType('torchvision.models.detection.fcos.FCOSClassificationHead', class {}); + this.registerType('torchvision.models.detection.fcos.FCOSRegressionHead', class {}); + this.registerType('torchvision.models.detection._utils.BoxLinearCoder', class {}); + this.registerType('torchvision.models.detection.keypoint_rcnn.KeypointRCNN', class {}); + this.registerType('torchvision.models.detection.keypoint_rcnn.KeypointRCNNHeads', class {}); + this.registerType('torchvision.models.detection.keypoint_rcnn.KeypointRCNNPredictor', class {}); + this.registerType('torchvision.models.detection.mask_rcnn.MaskRCNN', class {}); + this.registerType('torchvision.models.detection.mask_rcnn.MaskRCNNHeads', class {}); + this.registerType('torchvision.models.detection.mask_rcnn.MaskRCNNPredictor', class {}); + this.registerType('torchvision.models.detection.retinanet.RetinaNet', class {}); + this.registerType('torchvision.models.detection.retinanet.RetinaNetClassificationHead', class {}); + this.registerType('torchvision.models.detection.retinanet.RetinaNetHead', class {}); + this.registerType('torchvision.models.detection.retinanet.RetinaNetRegressionHead', class {}); + this.registerType('torchvision.models.detection.roi_heads.RoIHeads', class {}); + this.registerType('torchvision.models.detection.rpn.AnchorGenerator', class {}); + this.registerType('torchvision.models.detection.rpn.RegionProposalNetwork', class {}); + this.registerType('torchvision.models.detection.rpn.RPNHead', class {}); + this.registerType('torchvision.models.detection.ssd.SSD', class {}); + this.registerType('torchvision.models.detection.ssd.SSDFeatureExtractorVGG', class {}); + this.registerType('torchvision.models.detection.ssdlite.SSDLiteClassificationHead', class {}); + this.registerType('torchvision.models.detection.ssdlite.SSDLiteFeatureExtractorMobileNet', class {}); + this.registerType('torchvision.models.detection.ssdlite.SSDLiteHead', class {}); + this.registerType('torchvision.models.detection.ssdlite.SSDLiteRegressionHead', class {}); + this.registerType('torchvision.models.detection.transform.GeneralizedRCNNTransform', class {}); + this.registerType('torchvision.models.efficientnet.EfficientNet', class {}); + this.registerType('torchvision.models.efficientnet.FusedMBConv', class {}); + this.registerType('torchvision.models.efficientnet.MBConv', class {}); + this.registerType('torchvision.models.googlenet.BasicConv2d', class {}); + this.registerType('torchvision.models.googlenet.GoogLeNet', class {}); + this.registerType('torchvision.models.googlenet.Inception', class {}); + this.registerType('torchvision.models.googlenet.InceptionAux', class {}); + this.registerType('torchvision.models.inception.BasicConv2d', class {}); + this.registerType('torchvision.models.inception.Inception3', class {}); + 
this.registerType('torchvision.models.inception.InceptionAux', class {}); + this.registerType('torchvision.models.inception.InceptionA', class {}); + this.registerType('torchvision.models.inception.InceptionB', class {}); + this.registerType('torchvision.models.inception.InceptionC', class {}); + this.registerType('torchvision.models.inception.InceptionD', class {}); + this.registerType('torchvision.models.inception.InceptionE', class {}); + this.registerType('torchvision.models.mnasnet._InvertedResidual', class {}); + this.registerType('torchvision.models.mnasnet.MNASNet', class {}); + this.registerType('torchvision.models.maxvit.MaxVit', class {}); + this.registerType('torchvision.models.maxvit.MaxVitBlock', class {}); + this.registerType('torchvision.models.maxvit.MaxVitLayer', class {}); + this.registerType('torchvision.models.maxvit.MBConv', class {}); + this.registerType('torchvision.models.maxvit.PartitionAttentionLayer', class {}); + this.registerType('torchvision.models.maxvit.RelativePositionalMultiHeadAttention', class {}); + this.registerType('torchvision.models.maxvit.SwapAxes', class {}); + this.registerType('torchvision.models.maxvit.WindowDepartition', class {}); + this.registerType('torchvision.models.mobilenet.ConvBNReLU', class {}); + this.registerType('torchvision.models.mobilenet.MobileNetV2', class {}); + this.registerType('torchvision.models.mobilenet.InvertedResidual', class {}); + this.registerType('torchvision.models.mobilenetv2.ConvBNActivation', class {}); + this.registerType('torchvision.models.mobilenetv2.InvertedResidual', class {}); + this.registerType('torchvision.models.mobilenetv2.MobileNetV2', class {}); + this.registerType('torchvision.models.mobilenetv3.InvertedResidual', class {}); + this.registerType('torchvision.models.mobilenetv3.MobileNetV3', class {}); + this.registerType('torchvision.models.mobilenetv3.SqueezeExcitation', class {}); + this.registerType('torchvision.models.regnet.AnyStage', class extends torch.nn.modules.container.Sequential {}); + this.registerType('torchvision.models.regnet.BottleneckTransform', class {}); + this.registerType('torchvision.models.regnet.ResBottleneckBlock', class {}); + this.registerType('torchvision.models.regnet.RegNet', class {}); + this.registerType('torchvision.models.regnet.SimpleStemIN', class {}); + this.registerType('torchvision.models.resnet.Bottleneck', class {}); + this.registerType('torchvision.models.resnet.BasicBlock', class {}); + this.registerType('torchvision.models.quantization.mobilenet.QuantizableInvertedResidual', class {}); + this.registerType('torchvision.models.quantization.mobilenet.QuantizableMobileNetV2', class {}); + this.registerType('torchvision.models.quantization.mobilenetv2.QuantizableInvertedResidual', class {}); + this.registerType('torchvision.models.quantization.mobilenetv2.QuantizableMobileNetV2', class {}); + this.registerType('torchvision.models.quantization.resnet.QuantizableBasicBlock', class {}); + this.registerType('torchvision.models.quantization.resnet.QuantizableBottleneck', class {}); + this.registerType('torchvision.models.quantization.resnet.QuantizableResNet', class {}); + this.registerType('torchvision.models.segmentation.deeplabv3.ASPP', class {}); + this.registerType('torchvision.models.segmentation.deeplabv3.ASPPConv', class {}); + this.registerType('torchvision.models.segmentation.deeplabv3.ASPPPooling', class {}); + this.registerType('torchvision.models.segmentation.deeplabv3.DeepLabHead', class {}); + 
this.registerType('torchvision.models.segmentation.deeplabv3.DeepLabV3', class {}); + this.registerType('torchvision.models.segmentation.fcn.FCN', class {}); + this.registerType('torchvision.models.segmentation.fcn.FCNHead', class {}); + this.registerType('torchvision.models.shufflenetv2.ShuffleNetV2', class {}); + this.registerType('torchvision.models.shufflenetv2.InvertedResidual', class {}); + this.registerType('torchvision.models.squeezenet.Fire', class {}); + this.registerType('torchvision.models.squeezenet.SqueezeNet', class {}); + this.registerType('torchvision.models.resnet.ResNet', class {}); + this.registerType('torchvision.models.vgg.VGG', class {}); + this.registerType('torchvision.models.video.resnet.BasicBlock', class {}); + this.registerType('torchvision.models.video.resnet.BasicStem', class {}); + this.registerType('torchvision.models.video.resnet.Conv2Plus1D', class {}); + this.registerType('torchvision.models.video.resnet.Conv3DNoTemporal', class {}); + this.registerType('torchvision.models.video.resnet.Conv3DSimple', class {}); + this.registerType('torchvision.models.video.resnet.R2Plus1dStem', class {}); + this.registerType('torchvision.models.video.resnet.VideoResNet', class {}); + this.registerType('torchvision.models.vision_transformer.Encoder', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.models.vision_transformer.EncoderBlock', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.models.vision_transformer.MLPBlock', class extends torchvision.ops.misc.MLP {}); + this.registerType('torchvision.models.vision_transformer.VisionTransformer', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.models._utils.IntermediateLayerGetter', class {}); + this.registerType('torchvision.transforms.functional.InterpolationMode', class {}); + this.registerType('torchvision.transforms.transforms.ColorJitter', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.Compose', class {}); + this.registerType('torchvision.transforms.transforms.CenterCrop', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.Grayscale', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.Lambda', class {}); + this.registerType('torchvision.transforms.transforms.Normalize', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.RandomAffine', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.RandomCrop', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.RandomHorizontalFlip', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.RandomVerticalFlip', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.RandomResizedCrop', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.RandomRotation', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.Resize', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.Scale', class extends torch.nn.modules.module.Module {}); + this.registerType('torchvision.transforms.transforms.ToPILImage', class {}); + 
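// The functions below emulate TorchScript built-ins just far enough for graph reconstruction: builtins.annotate applies light type coercion, ops.prim.NumToTensor wraps a scalar in a torch.Tensor shell, and the other ops.prim.* accessors defer to the corresponding tensor properties. Hypothetical sketch: + // const t = execution.invoke('ops.prim.NumToTensor', [ 3 ]); // t.value === 3 + 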
this.registerType('torchvision.transforms.transforms.ToTensor', class {}); + this.registerFunction('torchvision.models.resnet.resnet18', function() {}); + this.registerFunction('torchvision.models.resnet.resnet34', function() {}); + this.registerFunction('torchvision.models.resnet.resnet50', function() {}); + this.registerFunction('torchvision.models.resnet.resnet101', function() {}); + this.registerFunction('torchvision.models.resnet.resnet152', function() {}); + this.registerFunction('torchvision.models.vision_transformer.vit_h_14', function() {}); + this.registerFunction('torchvision.ops.boxes.box_iou', function (/* boxes1, boxes2 */) { + throw new python.Error("'torchvision.ops.boxes.box_iou' not implemented."); + }); + this.registerFunction('builtins.annotate', function(type, value) { + if (type === self._builtins.int) { + return Number.isInteger(value) ? value : NaN; + } + if (type === self._builtins.float) { + return typeof value === 'number' ? value : NaN; + } + if (type === self._builtins.number) { + // if (pytorch.Utility.isTensor(value)) { + // value.resize_([]); + // } + } + return value; + }); + this.registerFunction('builtins.unchecked_cast', function(type, value) { + return value; + }); + this.registerFunction('builtins.uninitialized', function(/* type */) { + return undefined; + }); + this.registerFunction('ops.prim.data', function(tensor) { + return tensor; + }); + this.registerFunction('ops.prim.device', function(tensor) { + return tensor.device; + }); + this.registerFunction('ops.prim.dtype', function(tensor) { + return tensor.dtype.scalar_type(); + }); + this.registerFunction('ops.prim.is_quantized', function(tensor) { + return tensor.is_quantized; + }); + this.registerFunction('ops.prim.is_cuda', function(/* tensor */) { + return false; + }); + this.registerFunction('ops.prim.is_nested', function(tensor) { + return tensor.is_nested; + }); + this.registerFunction('ops.prim.is_sparse', function(tensor) { + return tensor.is_sparse; + }); + this.registerFunction('ops.prim.unchecked_unwrap_optional', function(value) { + return value; + }); + this.registerFunction('ops.prim.NumToTensor', function(value) { + const tensor = self.invoke('torch.Tensor', []); + tensor.value = value; // TODO + return tensor; + }); + this.registerFunction('ops.prim.min', function(value) { + if (Array.isArray(value)) { + return Math.min.apply(null, value); + } + return Math.min.apply(null, arguments); + }); + this.registerFunction('ops.prim.max', function(value) { + if (Array.isArray(value)) { + return Math.max.apply(null, value); + } + return Math.max.apply(null, arguments); + }); + this.registerFunction('ops.prim.shape', function(tensor) { + return tensor && tensor.size ? 
tensor.size() : undefined; + }); + this.registerFunction('ops.quantized.conv_prepack', function(weight, bias, stride, padding, dilation, groups) { + const params = self.invoke('__torch__.torch.classes.quantized.Conv2dPackedParamsBase', []); + params.weight = weight; + params.bias = bias; + params.stride = stride; + params.padding = padding; + params.dilation = dilation; + params.groups = groups; + return params; + }); + this.registerFunction('ops.quantized.conv1d_prepack', function(weight, bias, stride, padding, dilation, groups) { + const params = self.invoke('__torch__.torch.classes.quantized.Conv2dPackedParamsBase', []); + params.weight = weight; + params.bias = bias; + params.stride = stride; + params.padding = padding; + params.dilation = dilation; + params.groups = groups; + return params; + }); + this.registerFunction('ops.quantized.conv2d_prepack', function(weight, bias, stride, padding, dilation, groups) { + const params = self.invoke('__torch__.torch.classes.quantized.Conv2dPackedParamsBase', []); + params.weight = weight; + params.bias = bias; + params.stride = stride; + params.padding = padding; + params.dilation = dilation; + params.groups = groups; + return params; + }); + this.registerFunction('ops.quantized.conv3d_prepack', function(weight, bias, stride, padding, dilation, groups) { + const params = self.invoke('__torch__.torch.classes.quantized.Conv3dPackedParamsBase', []); + params.weight = weight; + params.bias = bias; + params.stride = stride; + params.padding = padding; + params.dilation = dilation; + params.groups = groups; + return params; + }); + this.registerFunction('ops.quantized.conv_transpose1d_prepack', function(weight, bias, stride, padding, output_padding, dilation, groups) { + const params = self.invoke('__torch__.torch.classes.quantized.Conv2dPackedParamsBase', []); + params.weight = weight; + params.bias = bias; + params.stride = stride; + params.padding = padding; + params.output_padding = output_padding; + params.dilation = dilation; + params.groups = groups; + return params; + }); + this.registerFunction('ops.quantized.conv_transpose2d_prepack', function(weight, bias, stride, padding, output_padding, dilation, groups) { + const params = self.invoke('__torch__.torch.classes.quantized.Conv2dPackedParamsBase', []); + params.weight = weight; + params.bias = bias; + params.stride = stride; + params.padding = padding; + params.output_padding = output_padding; + params.dilation = dilation; + params.groups = groups; + return params; + }); + this.registerFunction('ops.quantized.linear_prepack', function(weight, bias) { + const params = self.invoke('__torch__.torch.classes.quantized.LinearPackedParamsBase', []); + params.weight = weight; + params.bias = bias; + return params; + }); + this.registerFunction('ops.prim.RaiseException', function(message) { + throw new python.Error(message); + }); + this.registerFunction('builtins.range', function(start, stop, step) { + if (stop === undefined && step === undefined) { + if (Number.isInteger(start)) { + return Array(start).keys(); + } + if (isNaN(start)) { + return []; + } + } + throw new python.Error(`Unsupported range(${JSON.stringify(start)}, ${JSON.stringify(stop)}, ${JSON.stringify(step)})`); + }); + this.registerFunction('torch._C._nn.gelu', function() { + throw new python.Error("'torch._C._nn.gelu' not implemented."); + }); + this.registerFunction('torch._utils._rebuild_sparse_tensor', function(layout, data) { + if (layout === torch.sparse_coo) { + return self.invoke('torch._sparse_coo_tensor_unsafe', data); + } + 
throw new python.Error(`Unsupported sparse tensor layout '${layout ? layout.__str__() : ''}'.`); + }); + this.registerFunction('torch.from_numpy', function(obj) { + // dtype table reconstructed from context; the entries below are an assumed representative subset of numpy dtype strings + const dtypes = new Map([ + [ '<f2', torch.float16 ], + [ '<f4', torch.float32 ], + [ '<f8', torch.float64 ], + [ '<i2', torch.int16 ], + [ '<i4', torch.int32 ], + [ '<i8', torch.int64 ], + [ '|u1', torch.uint8 ], + [ '|b1', torch.bool ] + ]); + if (!dtypes.has(obj.dtype.str)) { + throw new python.Error(`Unsupported numpy data type '${obj.dtype.str}'.`); + } + const dtype = dtypes.get(obj.dtype.str); + // numpy strides are expressed in bytes, torch strides in elements + const strides = obj.strides.map((stride) => stride / obj.itemsize); + const storage = execution.invoke('torch.storage._TypedStorage', [ obj.size, dtype ]); + storage._set_cdata(obj.data); + const tensor = execution.invoke('torch.Tensor', []); + tensor.__setstate__([ storage, 0, obj.shape, strides ]); + return tensor; + }); + this.registerFunction('torch._utils._rebuild_device_tensor_from_numpy', function(data, dtype, device, requires_grad) { + const tensor = execution.invoke('torch.from_numpy', [ data ]); + // tensor = tensor.to(dtype, device) + tensor.requires_grad = requires_grad; + return tensor; + }); + this.registerFunction('torch._sparse_coo_tensor_unsafe', function(indices, values, size) { + const tensor = self.invoke('torch.Tensor', []); + tensor._layout = torch.sparse_coo; + tensor._indices = indices; + tensor._values = values; + tensor._shape = size; + return tensor; + }); + this.registerFunction('torch._utils._rebuild_tensor', function (storage, storage_offset, size, stride) { + if (Array.isArray(storage) && storage.length === 5 && storage[0] === 'storage') { + const [, storage_type, , , size] = storage; + storage = new storage_type(size); + } + const name = `${storage.__class__.__module__}.${storage.__class__.__name__.replace('Storage', 'Tensor')}`; + const tensor = self.invoke(name, []); + tensor.__setstate__([ storage, storage_offset, size, stride ]); + return tensor; + }); + this.registerFunction('torch._utils._rebuild_tensor_v2', function (storage, storage_offset, size, stride, requires_grad, backward_hooks) { + const tensor = execution.invoke('torch._utils._rebuild_tensor', [ storage, storage_offset, size, stride ]); + tensor.requires_grad = requires_grad; + tensor.backward_hooks = backward_hooks; + return tensor; + }); + this.registerFunction('torch._utils._rebuild_parameter', function(data, requires_grad, backward_hooks) { + const param = self.invoke('torch.nn.parameter.Parameter', [ data, requires_grad ]); + param.backward_hooks = backward_hooks; + return param; + }); + this.registerFunction('torch._utils._rebuild_parameter_with_state', function(data, requires_grad, backward_hooks, state) { + const _set_obj_state = (obj, state) => { + const [dict_state, slots_state] = Array.isArray(state) ? 
state : [state, null]; + if (dict_state) { + for (const [k, v] of Object.entries(dict_state)) { + self.invoke('builtins.setattr', [ obj, k, v ]); + } + } + if (slots_state) { + for (const [k, v] of Object.entries(slots_state)) { + self.invoke('builtins.setattr', [ obj, k, v ]); + } + } + }; + const param = self.invoke('torch.nn.parameter.Parameter', [ data, requires_grad ]); + param._backward_hooks = backward_hooks; + _set_obj_state(param, state); + return param; + }); + this.registerFunction('torch._utils._rebuild_qtensor', function(storage, storage_offset, size, stride, quantizer_params, requires_grad, backward_hooks) { + const tensor = execution.invoke('torch._utils._rebuild_tensor_v2', [ storage, storage_offset, size, stride, requires_grad, backward_hooks ]); + tensor.quantizer_params = quantizer_params; + return tensor; + }); + this.registerFunction('torch._utils._set_obj_state', function(obj, state) { + let dict_state = state; + let slots_state = null; + if (state instanceof self.builtins.tuple) { + if (state.length !== 2) { + throw new python.Error(`Invalid serialized state: '${state}'.`); + } + [dict_state, slots_state] = state; + } + if (dict_state) { + for (const [name, value] of Object.entries(dict_state)) { + execution.invoke('builtins.setattr', [ obj, name, value ]); + } + } + if (slots_state) { + for (const [name, value] of Object.entries(slots_state)) { + execution.invoke('builtins.setattr', [ obj, name, value ]); + } + } + return obj; + }); + this.registerFunction('torch._set_item', function(dict, key, value) { + dict[key] = value; + }); + this.registerFunction('torch._tensor._rebuild_from_type_v2', function(func, new_type, args, state) { + let ret = func.apply(null, args); + if (ret.__class__ !== new_type) { + // ret = ret.as_subclass(new_type); + } + const setstate = execution.invoke('builtins.getattr', [ ret.__class__, '__setstate__', torch.Tensor.__setstate__ ]); + if (setstate !== torch.Tensor.__setstate__) { + ret.__setstate__(state); + } else { + ret = execution.invoke('torch._utils._set_obj_state', [ ret, state ]); + } + return ret; + }); + this.registerFunction('torch.__and__', function(left, right) { + return left && right; + }); + this.registerFunction('torch.__contains__', function(dict, key) { + return dict[key] !== undefined; + }); + this.registerFunction('torch.__derive_index', function(index, start, step) { + return start + index * step; + }); + this.registerFunction('torch.__is__', function(left, right) { + if (left === null && right === null) { + return true; + } + if ((left !== null && right === null) || (left === null && right !== null)) { + return false; + } + throw new python.Error("Unsupported 'torch.__is__' expression type."); + }); + this.registerFunction('torch.__isnot__', function(left, right) { + if (left === null && right === null) { + return false; + } + if ((left !== null && right === null) || (left === null && right !== null)) { + return true; + } + throw new python.Error("Unsupported 'torch.__isnot__' expression type."); + }); + this.registerFunction('torch.__not__', function(value) { + if (typeof value === 'boolean') { + return !value; + } + throw new python.Error("Unsupported 'torch.__not__' expression type."); + }); + this.registerFunction('torch.__range_length', function(lo, hi, step) { + if (step === 0) { + throw new python.Error('range() arg 3 must not be zero'); + } + // integer range length: floor the division so partial steps do not overcount + if (step > 0 && lo < hi) { + return 1 + Math.floor((hi - 1 - lo) / step); + } else if (step < 0 && lo > hi) { + return 1 + Math.floor((lo - 1 - hi) / (0 - step)); + } + return 0; + }); + 
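// The torch._utils._rebuild_* helpers above replay the tensor pickle reduce protocol: a real torch.Tensor reduces to (_rebuild_tensor_v2, (storage, storage_offset, size, stride, requires_grad, backward_hooks)), and calling the registered function with that tuple recreates a tensor shell over the deserialized storage. Hypothetical replay ('storage' assumed already deserialized): + // const t = execution.invoke('torch._utils._rebuild_tensor_v2', [ storage, 0, [ 2, 3 ], [ 3, 1 ], false, null ]); + 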
+        this.registerFunction('torch._unwrap_optional', function(value) {
+            return value; // TODO
+        });
+        this.registerFunction('torch.empty_strided', function(/* size, stride, dtype, layout, device, pin_memory, requires_grad */) {
+            return null;
+            // TODO throw new python.Error("'torch.empty_strided' not implemented.");
+        });
+        this.registerFunction('torch.add', function(left, right) {
+            if (typeof left === 'number' && typeof right === 'number') {
+                return left + right;
+            }
+            if (Array.isArray(left) && Array.isArray(right)) {
+                return left.concat(right);
+            }
+            if (typeof left === 'string' && typeof right === 'string') {
+                return left + right;
+            }
+            throw new python.Error("Unsupported 'torch.add' expression type.");
+        });
+        this.registerFunction('torch.append', function(list, value) {
+            list.push(value);
+            return value;
+        });
+        this.registerFunction('torch.extend', function(list, value) {
+            list.push(...value);
+        });
+        this.registerFunction('torch.insert', function(list, index, value) {
+            list.splice(index, 0, value);
+            return value;
+        });
+        this.registerFunction('torch.clear', function(value) {
+            if (Object(value) === value) {
+                for (const key of Object.keys(value)) {
+                    delete value[key];
+                }
+            }
+        });
+        this.registerFunction('torch.replace', function(value) {
+            return value;
+        });
+        this.registerFunction('torch.dict', function(args) {
+            const obj = {};
+            if (args) {
+                if (Array.isArray(args)) {
+                    for (const [key, value] of args) {
+                        obj[key] = value;
+                    }
+                } else {
+                    throw new python.Error("'torch.dict' arguments not supported.");
+                }
+            }
+            return obj;
+        });
+        this.registerFunction('torch.dim', function(tensor) {
+            if (tensor && tensor.size) {
+                const size = tensor.size();
+                if (size) {
+                    return size.length;
+                }
+            }
+            return NaN; // TODO
+        });
+        this.registerFunction('torch.numel', function(tensor) {
+            if (tensor && tensor.size) {
+                const size = tensor.size();
+                if (size) {
+                    return size.reduce((a, b) => a * b, 1);
+                }
+            }
+            return NaN;
+        });
+        this.registerFunction('torch.eq', function(left, right) {
+            if (typeof left === 'string' && typeof right === 'string') {
+                return left === right;
+            }
+            if (typeof left === 'number' && typeof right === 'number') {
+                if (isNaN(left) && isNaN(right)) {
+                    return true;
+                }
+                return left === right;
+            }
+            if (left === undefined || right === undefined) {
+                return true;
+            }
+            if (Array.isArray(left) && Array.isArray(right)) {
+                return left.length === right.length && left.every((item, index) => item === right[index]);
+            }
+            throw new python.Error("Unsupported 'torch.eq' expression type.");
+        });
+        this.registerFunction('torch.floor', function(value) {
+            return Math.floor(value);
+        });
+        this.registerFunction('torch.ceil', function(value) {
+            return Math.ceil(value);
+        });
+        this.registerFunction('torch.floordiv', function(left, right) {
+            return Math.floor(left / right);
+        });
+        this.registerFunction('torch.format', function() {
+            const args = Array.from(arguments);
+            const list = args.shift().split(/({}D?)/);
+            return list.map((text) => {
+                if (text === '{}' || text === '{}D') {
+                    const arg = args.shift();
+                    return Array.isArray(arg) ? `[${arg.map((item) => item.toString()).join(', ')}]` : arg ?
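+                        // Render null or undefined arguments as a '?' placeholder.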
+                        arg.toString() : '?';
+                }
+                return text;
+            }).join('');
+        });
+        this.registerFunction('torch.gt', function(left, right) {
+            if (typeof left === 'number' && typeof right === 'number') {
+                if (!isNaN(left) && !isNaN(right)) {
+                    return left > right;
+                }
+            }
+            if (isNaN(left) && !isNaN(right)) {
+                return true;
+            }
+            throw new python.Error("Unsupported 'torch.gt' expression type.");
+        });
+        this.registerFunction('torch.ge', function(left, right) {
+            if (typeof left === 'number' && typeof right === 'number') {
+                if (!isNaN(left) && !isNaN(right)) {
+                    return left >= right;
+                }
+            }
+            if (isNaN(left) && !isNaN(right)) {
+                return true;
+            }
+            throw new python.Error("Unsupported 'torch.ge' expression type.");
+        });
+        this.registerFunction('torch.is_floating_point', function(tensor) {
+            const type = tensor.dtype.scalar_type();
+            return (type === 5 || type === 6 || type === 7);
+        });
+        this.registerFunction('torch.is_grad_enabled', function() {
+            return false;
+        });
+        this.registerFunction('torch.set_grad_enabled', function(/* value */) {
+        });
+        this.registerFunction('torch.serialization._get_layout', function(name) {
+            const value = name.startsWith('torch.') ? torch[name.split('.')[1]] : null;
+            return value instanceof torch.layout ? value : null;
+        });
+        this.registerFunction('torch.storage._load_from_bytes', function(b) {
+            return torch.load(b);
+        });
+        this.registerFunction('torch.jit._pickle.build_boollist', function(data) {
+            return data;
+        });
+        this.registerFunction('torch.jit._pickle.build_doublelist', function(data) {
+            return data;
+        });
+        this.registerFunction('torch.jit._pickle.build_intlist', function(data) {
+            return data;
+        });
+        this.registerFunction('torch.jit._pickle.build_tensorlist', function(data) {
+            return data;
+        });
+        this.registerFunction('torch.jit._pickle.build_tensor_from_id', function(data) {
+            return self.builtins.CONSTANTS[`c${data}`];
+        });
+        this.registerFunction('torch.jit._pickle.restore_type_tag', function(value /*, type_str */) {
+            return value;
+        });
+        this.registerFunction('torch.keys', function(dict) {
+            return Object.keys(dict);
+        });
+        this.registerFunction('torch.len', function(value) {
+            if (Array.isArray(value)) {
+                return value.length;
+            }
+            if (value && value.shape && value.__len__) {
+                return value.__len__();
+            }
+            return NaN;
+        });
+        this.registerFunction('torch.le', function(left, right) {
+            if (typeof left === 'number' && typeof right === 'number') {
+                if (isNaN(left) || isNaN(right)) {
+                    return false;
+                }
+                return left <= right;
+            }
+            if (left === undefined || right === undefined) {
+                return true;
+            }
+            throw new python.Error("Unsupported 'torch.le' expression type.");
+        });
+        this.registerFunction('torch.list', function(args) {
+            return args;
+        });
+        this.registerFunction('torch.list_with_default', function(size /*, defaults */) {
+            return size;
+        });
+        this.registerType('torch.PyTorchFileReader', class {
+            constructor(entries) {
+                let prefix = 0;
+                const paths = Array.from(entries.keys()).map((path) => path.replace(/\\/g, '/').split('/').reverse());
+                for (let set = new Set(); set && paths.length > 0;) {
+                    set = new Set(paths.map((path) => path.length > 1 ? path.pop() : null));
+                    set = set.size > 1 || set.keys().next().value === null ? null : set;
+                    prefix += set ?
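+                        // Strip the common root folder PyTorch zip archives prepend to entry
+                        // names: extend the prefix by each shared path segment plus its '/'.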
+                        set.keys().next().value.length + 1 : 0;
+                }
+                this._records = new Map(Array.from(entries).map(([name, value]) => [ name.substring(prefix), value ]));
+                this._version = '0';
+                const stream = this.get_record('.data/version') || this.get_record('version') || null;
+                if (stream) {
+                    const decoder = new TextDecoder('utf-8');
+                    const buffer = stream.peek();
+                    const text = decoder.decode(buffer);
+                    this._version = text.split('\n').shift().trim();
+                }
+            }
+            has_record(name) {
+                return this._records.has(name);
+            }
+            get_record(name) {
+                return this._records.get(name);
+            }
+            get_all_records() {
+                return Array.from(this._records.keys());
+            }
+            version() {
+                return this._version;
+            }
+        });
+        this.registerFunction('torch.load', function(f) {
+            const legacy_load = (entries) => {
+                const deserialized_objects = {};
+                if (entries.has('storages')) {
+                    const data = entries.get('storages');
+                    const unpickler = execution.invoke('pickle.Unpickler', [ data ]);
+                    const num_storages = unpickler.load();
+                    for (let i = 0; i < num_storages; i++) {
+                        const args = unpickler.load();
+                        const [key, , storage_type] = args;
+                        const obj = storage_type._new_with_file(unpickler);
+                        deserialized_objects[key] = obj;
+                    }
+                    /*
+                    let storage_views = unpickler.load();
+                    for target_cdata, root_cdata, offset, size in storage_views:
+                        root = deserialized_objects[root_cdata]
+                        deserialized_objects[target_cdata] = root[offset:offset + size]
+                    */
+                }
+                if (entries.has('tensors')) {
+                    const data = entries.get('tensors');
+                    const unpickler = execution.invoke('pickle.Unpickler', [ data ]);
+                    const num_tensors = unpickler.load();
+                    const int32 = (unpickler) => {
+                        const buffer = unpickler.read(4);
+                        return buffer[0] + (buffer[1] << 8) + (buffer[2] << 16) + (buffer[3] << 24);
+                    };
+                    const int64 = (unpickler) => {
+                        const buffer = unpickler.read(8);
+                        if (buffer[6] !== 0 || buffer[7] !== 0) {
+                            throw new python.Error('Unsigned 64-bit value exceeds safe integer range.');
+                        }
+                        return buffer[0] + (buffer[1] << 8) + (buffer[2] << 16) + (buffer[3] << 24) + (buffer[4] * 4294967296) + (buffer[5] * 1099511627776);
+                    };
+                    for (let i = 0; i < num_tensors; i++) {
+                        const args = unpickler.load();
+                        const [key, storage_id] = args;
+                        const storage = deserialized_objects[storage_id];
+                        const ndim = int32(unpickler);
+                        unpickler.read(4);
+                        const shape = Array.from(new Array(ndim)).map(() => int64(unpickler));
+                        const stride = Array.from(new Array(ndim)).map(() => int64(unpickler));
+                        const storage_offset = int64(unpickler);
+                        const tensor = execution.invoke('torch._utils._rebuild_tensor', [ storage, storage_offset, shape, stride ]);
+                        deserialized_objects[key] = tensor;
+                    }
+                }
+                const data = entries.get('pickle');
+                const unpickler = execution.invoke('pickle.Unpickler', [ data ]);
+                unpickler.persistent_load = (saved_id) => deserialized_objects[saved_id];
+                return unpickler.load();
+            };
+            const _legacy_load = () => {
+                const unpickler = execution.invoke('pickle.Unpickler', [ f ]);
+                unpickler.load(); // magic_number
+                const protocol_version = unpickler.load();
+                if (protocol_version != 1001) {
+                    throw new python.Error(`Unsupported protocol version '${protocol_version}'.`);
+                }
+                const sys_info = unpickler.load();
+                if (sys_info.protocol_version != 1001) {
+                    throw new python.Error(`Unsupported protocol version '${sys_info.protocol_version}'.`);
+                }
+                if (sys_info.little_endian === false) {
+                    throw new python.Error("Unsupported big-endian storage data.");
+                }
+                const module_source_map = new Map();
+                const deserialized_objects = new Map();
+                unpickler.persistent_load
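+                    // Persistent ids reference out-of-band data: source modules and tensor storages.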
+                    = (saved_id) => {
+                        switch (saved_id[0]) {
+                            case 'module': {
+                                const [, module, , source] = saved_id;
+                                module_source_map.set(module, source);
+                                return saved_id[1];
+                            }
+                            case 'storage': {
+                                const [, storage_type, key, , size, view_metadata] = saved_id;
+                                if (!deserialized_objects.has(key)) {
+                                    const obj = new storage_type(size);
+                                    deserialized_objects.set(key, obj);
+                                }
+                                if (view_metadata) {
+                                    const view_key = view_metadata.shift();
+                                    view_metadata.shift(); // view_offset
+                                    view_metadata.shift(); // view_size
+                                    if (!deserialized_objects.has(view_key)) {
+                                        const view = null; // storage.slice(view_offset, view_offset + view_size);
+                                        deserialized_objects.set(view_key, view);
+                                    }
+                                    return deserialized_objects.get(view_key);
+                                }
+                                return deserialized_objects.get(key);
+                            }
+                            default: {
+                                throw new python.Error(`Unsupported persistent load type '${saved_id[0]}'.`);
+                            }
+                        }
+                    };
+                const obj = unpickler.load();
+                const deserialized_storage_keys = unpickler.load();
+                for (const deserialized_storage_key of deserialized_storage_keys) {
+                    const storage = deserialized_objects.get(deserialized_storage_key);
+                    storage._set_from_file(unpickler);
+                }
+                if (!obj) {
+                    throw new python.Error('File format is not PyTorch.');
+                }
+                if (obj === 'None') {
+                    throw new python.Error("File contains 'None' root object.");
+                }
+                return obj;
+            };
+            const _load = (entries) => {
+                if (entries.has('constant.pkl')) {
+                    throw new python.Error("TorchScript 'torch.load' not supported.");
+                }
+                const loaded_storages = new Map();
+                const persistent_load = (saved_id) => {
+                    switch (saved_id[0]) {
+                        case 'storage': {
+                            const [, storage_type, key, , numel] = saved_id;
+                            if (!loaded_storages.has(key)) {
+                                const storage = new storage_type(numel);
+                                const name = `data/${key}`;
+                                const stream = entries.get(name);
+                                storage._set_cdata(stream);
+                                loaded_storages.set(key, storage);
+                            }
+                            return loaded_storages.get(key);
+                        }
+                        default: {
+                            throw new python.Error(`Unsupported persistent load type '${saved_id[0]}'.`);
+                        }
+                    }
+                };
+                const data_file = entries.get('data.pkl');
+                const unpickler = execution.invoke('pickle.Unpickler', [ data_file ]);
+                unpickler.persistent_load = persistent_load;
+                const result = unpickler.load();
+                return result;
+            };
+            if (f instanceof Map) {
+                const reader = new torch.PyTorchFileReader(f);
+                const records = reader.get_all_records().map((name) => [ name, reader.get_record(name) ]);
+                f = new Map(records);
+                if (f.has('pickle')) {
+                    return legacy_load(f);
+                }
+                if (f.has('data.pkl')) {
+                    return _load(f);
+                }
+                throw new python.Error(`Unsupported 'torch.load' input '${JSON.stringify(Array.from(f.keys()))}'.`);
+            }
+            return _legacy_load(f);
+        });
+        this.registerFunction('torch.log10', function(/* x */) {
+            throw new python.Error("'torch.log10' not implemented.");
+        });
+        this.registerFunction('torch.lt', function(left, right) {
+            if (typeof left === 'number' && typeof right === 'number') {
+                return left < right;
+            }
+            throw new python.Error("Unsupported 'torch.lt' expression type.");
+        });
+        this.registerFunction('torch.mul', function(left, right) {
+            if (typeof left === 'number' && typeof right === 'number') {
+                return left * right;
+            }
+            if (isNaN(left) || isNaN(right)) {
+                return NaN;
+            }
+            if (Array.isArray(left) && left.every((value) => typeof value === 'number') && typeof right === 'number') {
+                return left.map((value) => value * right);
+            }
+            throw new python.Error("Unsupported 'torch.mul' expression type.");
+        });
+        this.registerFunction('torch.div', function(left, right) {
+            if (typeof left === 'number' && typeof
right === 'number') { + return left / right; + } + if (isNaN(left) || isNaN(right)) { + return NaN; + } + throw new python.Error("Unsupported 'torch.div' expression type."); + }); + this.registerFunction('torch.round', function(value) { + if (typeof value === 'number') { + return Math.round(value); + } + if (isNaN(value)) { + return value; + } + throw new python.Error("Unsupported 'torch.round' expression type."); + }); + this.registerFunction('torch.remainder', function(left, right) { + if (typeof left === 'number' && typeof right === 'number') { + return left % right; + } + if (isNaN(left) || isNaN(right)) { + return NaN; + } + throw new python.Error("Unsupported 'torch.remainder' expression type."); + }); + this.registerFunction('torch.ne', function(left, right) { + if (typeof left === 'boolean' && typeof right === 'boolean') { + return left !== right; + } + if (typeof left === 'number' && typeof right === 'number') { + if (isNaN(left) || isNaN(right)) { + return false; + } + return left !== right; + } + if (Array.isArray(left) && Array.isArray(right) && left.length === right.length) { + return false; + } + if (typeof left === 'string' && typeof right === 'string') { + return left !== right; + } + if (left === undefined || right === undefined) { + return true; + } + throw new python.Error("Unsupported 'torch.ne' expression type."); + }); + this.registerFunction('torch.neg', function(value) { + if (typeof value === 'number') { + return -value; + } + throw new python.Error("Unsupported 'torch.neg' expression type."); + }); + this.registerFunction('torch.pow', function(left, right) { + if (typeof left === 'number' && typeof right === 'number') { + return Math.pow(left, right); + } + throw new python.Error("Unsupported 'torch.pow' expression type."); + }); + this.registerFunction('torch.q_scale', function(/* tensor */) { + return -1; // TODO + }); + this.registerFunction('torch.t', function(tensor) { + return tensor; + }); + this.registerFunction('torch.size', function(tensor, dim) { + if (tensor && tensor.size) { + const size = tensor.size(); + if (Array.isArray(size)) { + if (dim === undefined) { + return size; + } + if (Number.isInteger(dim)) { + if (dim >= 0 && dim < size.length) { + return size[dim]; + } + if (dim < 0 && -dim < size.length) { + return size[size.length + dim]; + } + } + throw new python.Error(`Dimension out of range (expected to be in range of ${JSON.stringify(size)}, but got ${JSON.stringify(dim)}).`); + } + } + if (Number.isInteger(dim)) { + return NaN; + } + return []; + }); + this.registerFunction('torch.sqrt', function(x) { + return Math.sqrt(x); + }); + this.registerFunction('torch.slice', function(l, start, end, step) { + if (!Array.isArray(l)) { + throw new python.Error('Slicing expected array'); + } + step = step || 1; + if (step !== 1) { + throw new python.Error('Slicing only supports step=1'); + } + start = Math.max(0, start >= 0 ? 
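+                // Negative indices count back from the end of the list, as in Python slicing.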
+                start : l.length + start);
+            end = end === undefined || end === null ? l.length : (end < 0 ? l.length + end : Math.min(l.length, end));
+            return l.slice(start, end);
+        });
+        this.registerFunction('torch.sub', function(left, right) {
+            if (typeof left === 'number' && typeof right === 'number') {
+                return left - right;
+            }
+            throw new python.Error("Unsupported 'torch.sub' expression type.");
+        });
+        this.registerFunction('torch.functional.einsum', function() {
+            throw new python.Error("'torch.functional.einsum' not implemented.");
+        });
+        this.registerFunction('torch.functional.split', function() {
+            throw new python.Error("'torch.functional.split' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.adaptive_avg_pool2d', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.adaptive_avg_pool2d' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.cross_entropy', function() {
+            throw new python.Error("'torch.nn.functional.cross_entropy' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.elu', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.elu' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.gelu', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.gelu' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.hardsigmoid', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.hardsigmoid' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.hardswish', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.hardswish' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.hardtanh', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.hardtanh' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.interpolate', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.interpolate' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.leaky_relu', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.leaky_relu' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.linear', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.linear' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional._max_pool2d', function(/* input */) {
+            throw new python.Error("'torch.nn.functional._max_pool2d' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.max_pool2d_with_indices', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.max_pool2d_with_indices' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.mse_loss', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.mse_loss' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.pad', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.pad' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.relu', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.relu' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.relu6', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.relu6' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.sigmoid', function(/* input */) {
+            throw new python.Error("'torch.nn.functional.sigmoid' not implemented.");
+        });
+        this.registerFunction('torch.nn.functional.silu', function(/* input */) {
+            throw new
python.Error("'torch.nn.functional.silu' not implemented."); + }); + this.registerFunction('torch.nn.functional.softmax', function(/* input */) { + throw new python.Error("'torch.nn.functional.softmax' not implemented."); + }); + this.registerFunction('torch.nn.functional.tanh', function(/* input */) { + throw new python.Error("'torch.nn.functional.tanh' not implemented."); + }); + this.registerFunction('torch.values', function(dict) { + return Object.values(dict); + }); + this.registerFunction('torch.warn', function() { + }); + this.registerType('torch._ops.OperatorBase', class {}); + this.registerType('torch._ops.HigherOrderOperator', class extends torch._ops.OperatorBase {}); + this.registerType('torch._ops.OpOverload', class extends torch._ops.OperatorBase {}); + this.registerType('torch.export.unflatten.UnflattenedModule', class extends torch.nn.modules.module.Module { + constructor(/* export_module, flat_args_adapter */) { + super(); + } + }); + this.registerType('torch.export.exported_program.ExportedProgram', class { + constructor(/* root, graph, graph_signature, state_dict, range_constraints, module_call_graph, example_inputs, verifier, tensor_constants */) { + } + }); + this.registerFunction('torch.export.unflatten', function(/* module, flat_args_adapter */) { + throw new python.Error("'torch.export.unflatten' not implemented."); + }); + this.registerFunction('torch._export.exported_program._create_graph_module_for_export', function(root, graph) { + return new torch.fx.graph_module.GraphModule(root, graph); + }); + this.registerType('torch._export.serde.serialize.SerializedArtifact', class { + constructor(exported_program, state_dict, constants) { + this.exported_program = exported_program; + this.state_dict = state_dict; + this.constants = constants; + } + }); + this.registerType('torch.fx.experimental.symbolic_shapes.ShapeEnv', class { + constructor() { + } + create_symintnode(/* sym, hint, source */) { + return new torch.SymInt(); + } + }); + this.registerFunction('torch.fx.graph_module._deserialize_graph_module', function(/* forward, body */) { + return execution.invoke('torch.fx.graph_module.GraphModule', []); + }); + this.registerFunction('torch.fx.graph_module._forward_from_src', function(src, globals /*, co_fields */) { + globals = Object.assign({}, globals); + const context = new python.Execution.Context(globals, null); + execution.exec(src, context); + const forward_fn = globals.forward; + delete globals.forward; + return forward_fn; + }); + this.registerFunction('torch.fx.graph_module.reduce_graph_module', function(body, import_block) { + // https://github.com/pytorch/pytorch/blob/master/torch/fx/graph_module.py + const fn_src = body._code || body.code; + const forward = execution.invoke('torch.fx.graph_module._forward_from_src', [ import_block + fn_src, {} ]); + return execution.invoke('torch.fx.graph_module._deserialize_graph_module', [ forward, body ]); + }); + this.registerFunction('torch.fx.graph_module.reduce_package_graph_module', function(importer, body, generated_module_name) { + const forward = importer.import_module(generated_module_name).forward; + return execution.invoke('torch.fx.graph_module._deserialize_graph_module', [ forward, body ]); + }); + this.registerType('torch.fx.graph.CodeGen', class {}); + this.registerType('torch.fx.graph._Namespace', class { + constructor() { + this._obj_to_name = new Map(); + this._unassociated_names = new Set(); + this._used_names = new Set(); + this._base_count = {}; + } + create_name(candidate, obj) { + if (obj && 
+                this._obj_to_name.has(obj)) {
+                    return this._obj_to_name.get(obj);
+                }
+                candidate = candidate || '_unnamed';
+                candidate = /^\d+$/.test(candidate) ? `_${candidate}` : candidate;
+                candidate = candidate.replace(/[^0-9a-zA-Z_]+/g, '_');
+                const match = candidate.match(/(.*)_(\d+)$/);
+                let base = candidate;
+                let num = null;
+                if (match) {
+                    [, base] = match;
+                    num = parseInt(match[2], 10);
+                }
+                candidate = num ? `${base}_${num}` : base;
+                if (!num) {
+                    num = this._base_count[base] || 0;
+                }
+                while (this._used_names.has(candidate) || this._is_illegal_name(candidate, obj)) {
+                    num += 1;
+                    candidate = `${base}_${num}`;
+                }
+                this._used_names.add(candidate);
+                this._base_count[base] = num;
+                if (obj) {
+                    this._obj_to_name.set(obj, candidate);
+                } else {
+                    this._unassociated_names.add(candidate);
+                }
+                return candidate;
+            }
+            _is_illegal_name(/* name, obj */) {
+                /*
+                if name in keyword.kwlist:
+                    return True
+                if name in builtins.__dict__:
+                    return obj is not builtins.__dict__[name]
+                if name in _custom_builtins:
+                    return obj is not _custom_builtins[name].obj
+                */
+                return false;
+            }
+            associate_name_with_obj(name, obj) {
+                // Bind a name handed out by create_name to the node that now owns it.
+                this._unassociated_names.delete(name);
+                this._obj_to_name.set(obj, name);
+            }
+        });
+        this.registerType('torch.fx.node.Node', class {
+            constructor(graph, name, op, target, args, kwargs, return_type) {
+                this.graph = graph;
+                this.name = name;
+                this.op = op;
+                this.target = target;
+                this._input_nodes = new Map();
+                this.users = new Map();
+                this.type = return_type;
+                this._prev = this;
+                this._next = this;
+                this._erased = false;
+                this._repr_fn = null;
+                this.meta = {};
+            }
+            prepend(x) {
+                x._remove_from_list();
+                const p = this._prev;
+                p._next = x;
+                x._prev = p;
+                x._next = this;
+                this._prev = x;
+            }
+            _remove_from_list() {
+                const p = this._prev;
+                const n = this._next;
+                p._next = n;
+                n._prev = p;
+            }
+        });
+        torch.fx.Node = torch.fx.node.Node;
+        this.registerType('torch.fx.graph.Graph', class {
+            constructor() {
+                this._root = new torch.fx.node.Node(this, '', 'root', '', [], {});
+                this._used_names = new Map();
+                this._len = 0;
+                this._graph_namespace = new torch.fx.graph._Namespace();
+                // this._owning_module = owning_module
+                // this._tracer_cls = tracer_cls
+                // this._tracer_extras = tracer_extras
+                // this._codegen = CodeGen()
+                // this._co_fields = {}
+            }
+            placeholder(name, type_expr /*, default_value */) {
+                const args = []; // () if default_value is inspect.Signature.empty else (default_value,)
+                return this.create_node('placeholder', name, args, {}, null, type_expr);
+            }
+            create_node(op, target, args, kwargs, name, type_expr) {
+                args = args || [];
+                kwargs = kwargs || {};
+                const candidate = name || this._target_to_str(target);
+                name = this._graph_namespace.create_name(candidate, null);
+                const n = new torch.fx.Node(this, name, op, target, args, kwargs, type_expr);
+                this._graph_namespace.associate_name_with_obj(name, n);
+                this._insert(n);
+                this._len += 1;
+                return n;
+            }
+            _insert(n) {
+                this._root.prepend(n);
+            }
+            _target_to_str(target) {
+                if (typeof target === 'string') {
+                    if (target.startsWith('__') && target.endsWith('__')) {
+                        target = target.substring(2, target.length - 2);
+                    }
+                } else {
+                    target = target.__name__;
+                }
+                return this._snake_case(target);
+            }
+            _snake_case(s) {
+                // Insert '_' at lower-to-upper case transitions, e.g. 'GraphModule' -> 'graph_module'.
+                const chars = [];
+                let prev_lower = false;
+                for (const c of s) {
+                    const x = c.toLowerCase();
+                    if (prev_lower && x !== c) {
+                        chars.push('_');
+                    }
+                    prev_lower = x === c;
+                    chars.push(x);
+                }
+                return chars.join('');
+            }
+        });
+        torch.fx.Graph = torch.fx.graph.Graph;
+        this.registerType('torch.fx.graph_module.GraphModule', class extends
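+            // A GraphModule is an nn.Module whose forward pass is described by an fx Graph.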
torch.nn.modules.module.Module { + constructor(root, graph) { + super(); + this.graph = graph; + } + }); + this.registerFunction('torch.fx._symbolic_trace.wrap', function(fn_or_name) { + return fn_or_name; + }); + this.registerType('torch.fx._symbolic_trace.Tracer', class {}); + this.registerFunction('torch._export.load', function(f, expected_opset_version) { + const serialized_exported_program = f.get('serialized_exported_program.json'); + const serialized_state_dict = f.get('serialized_state_dict.pt'); + const serialized_constants = f.get('serialized_constants.pt'); + const artifact = new torch._export.serde.serialize.SerializedArtifact(serialized_exported_program, serialized_state_dict, serialized_constants); + return torch._export.serde.serialize.deserialize(artifact, expected_opset_version); + }); + this.registerFunction('torch._export.serde.serialize._dict_to_dataclass', function(cls, data) { + if (data === null) { + return data; + } + if (data.$type) { + const res = {}; + res[data.$type] = data.$value; + return res; + } + if (Array.isArray(data)) { + for (let i = 0; i < data.length; i++) { + data[i] = torch._export.serde.serialize._dict_to_dataclass(null, data[i]); + } + return data; + } + if (data === Object(data)) { + for (const key of Object.keys(data)) { + data[key] = torch._export.serde.serialize._dict_to_dataclass(null, data[key]); + } + return data; + } + return data; + }); + this.registerFunction('torch._export.serde.serialize.deserialize', function(artifact, expected_opset_version) { + artifact.exported_program = torch._export.serde.serialize._dict_to_dataclass(null, artifact.exported_program); + return new torch._export.serde.serialize.ExportedProgramDeserializer(expected_opset_version).deserialize(artifact); + }); + this.registerType('torch._export.serde.serialize.ExportedProgramDeserializer', class { + constructor(expected_opset_version) { + this.expected_opset_version = expected_opset_version; + } + deserialize(serialized_artifact) { + const symbol_name_to_range = new Map(Object.entries(serialized_artifact.exported_program.range_constraints)); + /* + symbol_name_to_range = { + k: symbolic_shapes.ValueRanges(_int_to_sympy_int(v.min_val), _int_to_sympy_int(v.max_val)) + for k, v in serialized_artifact.exported_program.range_constraints.items() + } + */ + const constants = serialized_artifact.constants ? torch.load(serialized_artifact.constants) : null; + const tensor_constants = constants ? new Map(Object.entries(constants).filter(([, tensor]) => tensor instanceof torch.Tensor)) : null; + const deserializer = new torch._export.serde.serialize.GraphModuleDeserializer(); + const res = deserializer.deserialize(serialized_artifact.exported_program.graph_module, symbol_name_to_range, constants); + const range_constraints = null; + /* + range_constraints = self.deserialize_range_constraints( + symbol_name_to_range, res.names_to_symbols, + ) + model_opset_version: Optional[Dict[str, int]] = serialized_artifact.exported_program.opset_version + self._validate_model_opset_version(model_opset_version) + upgrader = GraphModuleOpUpgrader(self.expected_opset_version, model_opset_version) + */ + const state_dict = serialized_artifact.state_dict ? 
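+                    // state_dict and constants are nested torch.save payloads inside the archive.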
+                    torch.load(serialized_artifact.state_dict) : null;
+                const exported_program = new torch.export.exported_program.ExportedProgram(
+                    res.graph_module, res.graph_module.graph, res.signature,
+                    state_dict, range_constraints, res.module_call_graph, null,
+                    null, // verifier=load_verifier(serialized_artifact.exported_program.dialect),
+                    tensor_constants);
+                return exported_program;
+                // return upgrader.upgrade(exported_program)
+            }
+        });
+        this.registerType('torch._export.serde.serialize.GraphModuleDeserializer', class {
+            constructor() {
+                this.serialized_name_to_node = new Map();
+                this.serialized_name_to_meta = new Map();
+                this.graph = new torch.fx.Graph();
+                this.module = new torch.nn.Module();
+                this._SYM_INT_OPS = new Set([
+                    operator.mul, operator.add, operator.sub, operator.floordiv, operator.mod,
+                    torch.sym_sqrt, torch.sym_int, torch.sym_ite, torch.sym_max, torch.sym_min
+                ]);
+                this._SYM_BOOL_OPS = new Set([
+                    operator.eq, operator.ne, operator.le, operator.ge, operator.lt, operator.gt,
+                    torch.sym_not
+                ]);
+            }
+            deserialize_graph_output(/* output */) {
+                /* TODO
+                if (output.type == 'as_tensor') {
+                    return self.serialized_name_to_node[output.as_tensor.name]
+                }
+                else if (output.type == 'as_sym_int') {
+                    return self.serialized_name_to_node[output.as_sym_int.as_name]
+                }
+                elif output.type == 'as_sym_bool':
+                    return self.serialized_name_to_node[output.as_sym_bool.as_name]
+                else:
+                    raise SerializeError(f'Unable to deserialize output node {output}')
+                */
+            }
+            deserialize_graph(serialized_graph) {
+                if (serialized_graph.constants) {
+                    this.constants = new Map(Object.entries(serialized_graph.constants).map(([k, v]) => [ k, torch.load(v) ]));
+                }
+                for (const [name, tensor_value] of Object.entries(serialized_graph.tensor_values)) {
+                    const meta_val = this.deserialize_tensor_meta(tensor_value.meta || tensor_value, this.fake_tensor_mode);
+                    this.serialized_name_to_meta.set(name, meta_val);
+                }
+                for (const [name, sym_int_value] of Object.entries(serialized_graph.sym_int_values)) {
+                    this.serialized_name_to_meta.set(name, this.deserialize_sym_int(sym_int_value));
+                }
+                for (const [name, sym_bool_value] of Object.entries(serialized_graph.sym_bool_values)) {
+                    this.serialized_name_to_meta.set(name, this.deserialize_sym_bool(sym_bool_value));
+                }
+                for (const input of serialized_graph.inputs) {
+                    const placeholder_node = this.graph.placeholder(input.as_tensor.name);
+                    this.sync_fx_node(input.as_tensor.name, placeholder_node);
+                }
+                for (const serialized_node of serialized_graph.nodes) {
+                    const target = this.deserialize_operator(serialized_node.target);
+                    this.deserialize_node(serialized_node, target);
+                }
+                const outputs = [];
+                for (const output of serialized_graph.outputs) {
+                    outputs.push(this.deserialize_graph_output(output));
+                }
+            }
+            deserialize_operator(serialized_target) {
+                let target = null;
+                if (serialized_target.startsWith('_operator')) {
+                    target = operator;
+                } else if (serialized_target.startsWith('torch')) {
+                    target = torch;
+                } else {
+                    return serialized_target;
+                }
+                const serialized_target_names = serialized_target.split('.').slice(1);
+                for (const name of serialized_target_names) {
+                    target = target[name];
+                    if (!target) {
+                        return serialized_target;
+                    }
+                }
+                return target;
+            }
+            deserialize_node(serialized_node, target) {
+                let fx_node = null;
+                if (this._SYM_BOOL_OPS.has(target) || this._SYM_INT_OPS.has(target)) {
+                    /*
+                    const name = serialized_node.outputs[0].value.as_name;
+                    const args =
+                    self.deserialize_sym_op_inputs(serialized_node.inputs);
+                    fx_node = self.graph.create_node("call_function", target, args, {}, name);
+                    self.deserialize_sym_op_outputs(serialized_node, fx_node);
+                    */
+                } else if (builtins.isinstance(target, torch._ops.HigherOrderOperator)) {
+                    // assert(len(serialized_node.outputs) == 1 && serialized_node.outputs[0].type in ('as_tensors', 'as_tensor')), 'Only single tensor output or list of tensor output is supported for higher order operators.')
+                    const [output] = serialized_node.outputs;
+                    const name = output.type == 'as_tensor' ? output.value.name : null;
+                    const args = serialized_node.inputs.map((input) => this.deserialize_input(input.arg));
+                    fx_node = this.graph.create_node('call_function', target, args, {}, name);
+                    if (output.as_tensor !== null) {
+                        this.sync_fx_node(name, fx_node);
+                    }
+                    if (output.as_tensors !== null) {
+                        this.deserialize_multiple_outputs(serialized_node, fx_node);
+                    }
+                } else if (builtins.isinstance(target, torch._ops.OpOverload)) {
+                    const name = this._is_single_tensor_return(target) ? serialized_node.outputs[0].as_tensor.name : null;
+                    const [args, kwargs] = this.deserialize_inputs(target, serialized_node);
+                    fx_node = this.graph.create_node('call_function', target, args, kwargs, name);
+                    this.deserialize_outputs(serialized_node, fx_node);
+                } else {
+                    // TODO
+                    // throw new python.Error(`Unsupported target type '${target}'.`);
+                }
+                fx_node && Object.assign(fx_node.meta, this.deserialize_metadata(serialized_node.metadata));
+            }
+            deserialize(serialized_graph_module, symbol_name_to_range, constants) {
+                this.shape_env = new torch.fx.experimental.symbolic_shapes.ShapeEnv(/* assume_static_by_default = True */);
+                /*
+                this.fake_tensor_mode = FakeTensorMode(
+                    allow_fallback_kernels=False,
+                    allow_non_fake_inputs=True,
+                    shape_env=self.shape_env,
+                )
+                */
+                this.symbol_name_to_symbol = new Map();
+                this.symbol_name_to_range = symbol_name_to_range || new Map();
+                this.constants = constants || new Map();
+                this.deserialize_graph(serialized_graph_module.graph);
+                const sig = null; // self.deserialize_signature(serialized_graph_module.signature)
+                const module_call_graph = null; // self.deserialize_module_call_graph(serialized_graph_module.module_call_graph)
+                return {
+                    graph_module: torch._export.exported_program._create_graph_module_for_export(this.module, this.graph),
+                    signature: sig,
+                    module_call_graph: module_call_graph,
+                    names_to_symbols: this.symbol_name_to_symbol
+                };
+            }
+            sync_fx_node(name, fx_node) {
+                if (this.serialized_name_to_node.has(name)) {
+                    throw new python.Error(`Node ${name} has already been deserialized before.`);
+                }
+                this.serialized_name_to_node.set(name, fx_node);
+                fx_node.meta['val'] = this.serialized_name_to_meta.get(name);
+            }
+            deserialize_sym_op_inputs(inputs) {
+                return inputs.map((input) => this.deserialize_input(input.arg));
+            }
+            deserialize_inputs(target /* , serialized_node */) {
+                const schema_args = target._schema.arguments;
+                const actual_args = null;
+                /*
+                actual_args = {
+                    input.name: self.deserialize_input(input.arg) for input in serialized_node.inputs
+                }
+                */
+                const args = [];
+                const kwargs = {};
+                for (const schema_arg of schema_args) {
+                    const is_positional = !schema_arg.has_default_value() && !schema_arg.kwarg_only;
+                    if (is_positional) {
+                        args.push(actual_args[schema_arg.name]);
+                    } else if (schema_arg.name in actual_args) {
+                        kwargs[schema_arg.name] = actual_args[schema_arg.name];
+                    }
+                }
+                return [ args, kwargs ];
+            }
+            deserialize_input(/* inp */) {
+                /*
+                value =
inp.value + typ_ = inp.type + if typ_ == 'as_none': + # None should converted as None, but is encoded as bool in serialized + # Convert serialized object to torch equivalent + return None + elif typ_ == 'as_tensor': + return self.serialized_name_to_node[inp.as_tensor.name] + elif typ_ == 'as_scalar_type': + return _SERIALIZE_TO_TORCH_DTYPE[inp.as_scalar_type] + elif typ_ == 'as_memory_format': + return _SERIALIZE_TO_TORCH_MEMORY_FORMAT[inp.as_memory_format] + elif typ_ == 'as_layout': + return _SERIALIZE_TO_TORCH_LAYOUT[inp.as_layout] + elif typ_ == 'as_graph': + assert isinstance(value, GraphArgument) + with self.save_graph_module(): + self.deserialize_graph(value.graph) + submodule = torch._export.exported_program._create_graph_module_for_export(self.module, self.graph) + self.module.register_module(value.name, submodule) + return self.graph.create_node( + 'get_attr', + value.name, + name=value.name, + ) + elif typ_ == 'as_device': + return deserialize_device(inp.as_device) + elif typ_ == 'as_int': + return inp.as_int + elif typ_ == 'as_float': + return inp.as_float + elif typ_ == 'as_bool': + return inp.as_bool + elif typ_ == 'as_string': + return inp.as_string + elif typ_ == 'as_sym_int': + return self.deserialize_sym_argument(inp.as_sym_int) + elif typ_ == 'as_sym_bool': + return self.deserialize_sym_argument(inp.as_sym_bool) + elif isinstance(value, list): + if len(value) == 0: + return [] + elif isinstance(value[0], TensorArgument): + result = [] + for arg in value: + result.append(self.serialized_name_to_node[arg.name]) + return result + elif isinstance(value[0], (int, float, bool)): + # convert from serialized.python.types.List to python list + return list(value) + elif isinstance(value[0], (SymIntArgument, SymBoolArgument)): + return [self.deserialize_sym_argument(arg) for arg in value] + elif isinstance(value[0], OptionalTensorArgument): + def deserialize_optional_tensor_args(a): + if a.type == 'as_none': + return None + elif a.type == 'as_tensor': + return self.serialized_name_to_node[a.value] + else: + raise SerializeError(f'Unhandled argument {inp}') + return list(map(deserialize_optional_tensor_args, value)) + else: + raise SerializeError(f'Unhandled argument {inp}') + elif typ_ == 'as_custom_obj': + return self.constants[inp.as_custom_obj.name] + else { + raise SerializeError(`Unhandled argument ${inp}.`); + } + */ + } + deserialize_metadata(metadata) { + const ret = {}; + const stack_trace = metadata['stack_trace']; + if (stack_trace) { + ret['stack_trace'] = stack_trace; + } + const deserialize_meta_func = (serialized_target) => { + let module = null; + let serialized_target_names = []; + if (serialized_target.startsWith('torch.nn')) { + module = torch.nn; + serialized_target_names = serialized_target.split('.').slice(1); + } else if (serialized_target.startsWith('torch')) { + module = torch; + serialized_target_names = serialized_target.split('.').slice(1); + } else { + return this.deserialize_operator(serialized_target); + } + let target = module; + for (const name of serialized_target_names) { + if (!builtins.hasattr(target, name)) { + return serialized_target; + } + target = builtins.getattr(target, name); + } + return target; + }; + const nn_module_stack_str = metadata['nn_module_stack']; + if (nn_module_stack_str) { + const import_nn_module_stack = (key, path, ty) => { + return [ key, [ path, ty ] ]; + }; + const nn_module_stack = new Map(nn_module_stack_str.split(';').map((item) => import_nn_module_stack(...item.split(',')))); + ret['nn_module_stack'] = 
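+                    // nn_module_stack is serialized as 'key,module_path,module_type;...' and parsed back into a map.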
nn_module_stack; + } + const source_fn_st_str = metadata['source_fn_stack']; + if (source_fn_st_str) { + const source_fn_st = []; + for (const source_fn_str of source_fn_st_str.split(';')) { + const [name, target_str] = source_fn_str.split(','); + source_fn_st.push([ name, deserialize_meta_func(target_str) ]); + } + ret['source_fn_stack'] = source_fn_st; + } + return ret; + } + deserialize_tensor_meta(tensor_meta) { + const sizes = tensor_meta.sizes.map((val) => this.deserialize_sym_int(val)); + const strides = tensor_meta.strides.map((val) => this.deserialize_sym_int(val)); + const device = this.deserialize_device(tensor_meta.device); + const dtype = null; // TODO _SERIALIZE_TO_TORCH_DTYPE[tensor_meta.dtype], + return torch.empty_strided(sizes, strides, device, dtype); + } + deserialize_sym_int(s) { + if (s.as_expr !== undefined && s.as_expr !== null) { + let sym; + if (this.symbol_name_to_symbol.has(s.as_expr.expr_str)) { + sym = this.symbol_name_to_symbol.get(s.as_expr.expr_str); + } else { + sym = {}; + /* TODO + sym = sympy.sympify(val.expr_str, locals=self.symbol_name_to_symbol) + if isinstance(sym, sympy.Symbol) { + self.symbol_name_to_symbol[val.expr_str] = sym + if vr := self.symbol_name_to_range.get(val.expr_str): + symbolic_shapes._constrain_symbol_range( + self.shape_env, + sym, + compiler_min=vr.lower, # type: ignore[arg-type] + compiler_max=vr.upper, # type: ignore[arg-type] + runtime_min=vr.lower, # type: ignore[arg-type] + runtime_max=vr.upper # type: ignore[arg-type] + ) + } + */ + } + const hint = s.as_expr.hint || null; + return this.shape_env.create_symintnode(sym, hint); + } else if (s.as_int !== undefined && s.as_int !== null) { + return s.as_int; + } + throw new python.Error('SymInt has invalid field type.'); + } + deserialize_device(d) { + if (d.index !== undefined) { + return new torch.device(d.type, d.index); + } + return new torch.device(d.type); + } + }); + this.registerFunction('torch_utils.persistence._reconstruct_persistent_obj', function(meta) { + const name = `_imported_module_${Math.floor(Math.random() * 10000)}`; + const module = execution.invoke('types.ModuleType', [ name ]); + execution.register('sys').modules.set(name, module); + const context = new python.Execution.Context(module, null); + execution.exec(meta.module_src, context); + const obj = execution.invoke(`${name}.${meta.class_name}`, []); + if (meta.state) { + if (obj.__setstate__) { + obj.__setstate__(meta.state); + } else { + Object.assign(obj, meta.state); + } + } + return obj; + }); + this.registerFunction('torch_utils.misc.assert_shape', function(/* tensor, ref_shape */) {}); + this.registerFunction('torch_utils.ops.conv2d_resample.conv2d_resample', function(/* x, w, f, up, down, padding, groups, flip_weight, flip_filter */) {}); + this.registerFunction('torch_utils.ops.upfirdn2d.setup_filter', function(/* x, f, up, down, padding, flip_filter, gain, impl */) {}); + this.registerFunction('torch_utils.ops.bias_act', function(/* x, b, dim, act, alpha, gain, clamp, impl */) {}); + this.registerFunction('torch_utils.ops.fma.fma', function(/* a, b, c */) {}); + this.registerType('torch.device', class { + constructor(type, index) { + this.type = type; + if (index) { + this.index = index; + } + } + }); + this.registerType('torch.dtype', class { + constructor(scalar_type, name, itemsize) { + this._scalar_type = scalar_type; + this._name = name; + this._itemsize = itemsize; + } + scalar_type() { + return this._scalar_type; + } + itemsize() { + return this._itemsize; + } + __reduce__() { + return 
+                this._name;
+            }
+            __str__() {
+                return `torch.${this._name}`;
+            }
+            toString() {
+                return this.__str__();
+            }
+        });
+        this.registerType('torch.layout', class {
+            constructor(name) {
+                this._name = name;
+            }
+            __str__() {
+                return this._name;
+            }
+            toString() {
+                return this.__str__();
+            }
+        });
+        this.registerType('torch.qscheme', class {
+            constructor(name) {
+                this._name = name;
+            }
+            __str__() {
+                return this._name;
+            }
+            toString() {
+                return this.__str__();
+            }
+        });
+        this.registerType('torch.utils.hooks.RemovableHandle', class {
+            __setstate__(state) {
+                [this.hooks_dict_ref, this.id] = state;
+                this.hooks_dict_ref = this.hooks_dict_ref || new Map();
+            }
+        });
+        this.registerType('torch.storage._StorageBase', class {
+            constructor(size, dtype) {
+                this._size = size;
+                this._dtype = dtype;
+                this._device = null;
+            }
+            get device() {
+                return this._device;
+            }
+            get dtype() {
+                return this._dtype;
+            }
+            element_size() {
+                return this._dtype.itemsize();
+            }
+            size() {
+                return this._size;
+            }
+            get data() {
+                return this._cdata;
+            }
+            _set_cdata(data) {
+                const length = this.size() * this.dtype.itemsize();
+                if (length !== data.length) {
+                    throw new python.Error('Storage data size mismatch.');
+                }
+                this._cdata = data;
+            }
+            _set_from_file(unpickler) {
+                const buffer = unpickler.read(8);
+                const size = buffer.reverse().reduce((a, b) => (a * 256) + b, 0);
+                if (size !== this.size()) {
+                    throw new python.Error('Storage size mismatch.');
+                }
+                const itemsize = this.dtype.itemsize();
+                const data = unpickler.stream(itemsize * size);
+                this._set_cdata(data);
+            }
+            static _new_with_file(unpickler) {
+                const buffer = unpickler.read(8);
+                const size = buffer.reverse().reduce((a, b) => (a * 256) + b, 0);
+                const storage = new this(size);
+                const itemsize = storage.dtype.itemsize();
+                const data = unpickler.stream(itemsize * size);
+                storage._set_cdata(data);
+                return storage;
+            }
+        });
+        this.registerType('torch.storage._UntypedStorage', class extends torch.storage._StorageBase {
+            constructor() {
+                super();
+                throw new python.Error('_UntypedStorage not implemented.');
+            }
+        });
+        this.registerType('torch.storage._TypedStorage', class {
+            constructor() {
+                if (arguments.length >= 2 && Number.isInteger(arguments[0]) && arguments[1] instanceof torch.dtype) {
+                    if (arguments[3] instanceof torch.device) {
+                        [this._size, this._dtype, , this._device] = arguments;
+                    } else {
+                        [this._size, this._dtype] = arguments;
+                    }
+                } else {
+                    throw new python.Error(`Unsupported _TypedStorage arguments '${JSON.stringify(arguments)}'.`);
+                }
+            }
+            get device() {
+                return this._device;
+            }
+            get dtype() {
+                return this._dtype;
+            }
+            element_size() {
+                return this._dtype.itemsize();
+            }
+            size() {
+                return this._size;
+            }
+            get data() {
+                return this._cdata;
+            }
+            _set_cdata(data) {
+                const length = this.size() * this.dtype.itemsize();
+                if (length !== data.length) {
+                    throw new python.Error('Storage data size mismatch.');
+                }
+                this._cdata = data;
+            }
+            _set_from_file(unpickler) {
+                const buffer = unpickler.read(8);
+                const size = buffer.reverse().reduce((a, b) => (a * 256) + b, 0);
+                if (size !== this.size()) {
+                    throw new python.Error('Storage size mismatch.');
+                }
+                const itemsize = this.dtype.itemsize();
+                const data = unpickler.stream(itemsize * size);
+                this._set_cdata(data);
+            }
+            static _new_with_file(unpickler) {
+                const buffer = unpickler.read(8);
+                const size = buffer.reverse().reduce((a, b) => (a * 256) + b, 0);
+                const storage = new this(size);
+                const itemsize = storage.dtype.itemsize();
+                const data =
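+                    // Legacy storages are written as an 8-byte element count followed by the raw data bytes.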
+                    unpickler.stream(itemsize * size);
+                storage._set_cdata(data);
+                return storage;
+            }
+        });
+        this.registerType('torch.storage._LegacyStorage', class extends torch.storage._TypedStorage {
+            constructor() {
+                super();
+                throw new python.Error('_LegacyStorage not implemented.');
+            }
+        });
+        this.registerType('torch.BoolStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.bool);
+            }
+        });
+        this.registerType('torch.ByteStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.uint8);
+            }
+        });
+        this.registerType('torch.CharStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.int8);
+            }
+        });
+        this.registerType('torch.ShortStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.int16);
+            }
+        });
+        this.registerType('torch.IntStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.int32);
+            }
+        });
+        this.registerType('torch.LongStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.int64);
+            }
+        });
+        this.registerType('torch.HalfStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.float16);
+            }
+        });
+        this.registerType('torch.FloatStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.float32);
+            }
+        });
+        this.registerType('torch.DoubleStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.float64);
+            }
+        });
+        this.registerType('torch.ComplexHalfStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.complex32);
+            }
+        });
+        this.registerType('torch.ComplexFloatStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.complex64);
+            }
+        });
+        this.registerType('torch.ComplexDoubleStorage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.complex128);
+            }
+        });
+        this.registerType('torch.QInt8Storage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.qint8);
+            }
+        });
+        this.registerType('torch.QUInt8Storage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.quint8);
+            }
+        });
+        this.registerType('torch.QInt32Storage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.qint32);
+            }
+        });
+        this.registerType('torch.BFloat16Storage', class extends torch.storage._StorageBase {
+            constructor(size) {
+                super(size, torch.bfloat16);
+            }
+        });
+        this.registerType('torch.Size', class extends Array {
+            constructor(size) {
+                super(size.length);
+                for (let i = 0; i < size.length; i++) {
+                    this[i] = size[i];
+                }
+            }
+            __len__() {
+                return this.length;
+            }
+        });
+        this.registerType('torch.Tensor', class {
+            constructor() {
+                this._layout = torch.strided;
+            }
+            get device() {
+                return this.storage().device;
+            }
+            get dtype() {
+                if (this._layout === torch.sparse_coo) {
+                    return this._values.dtype;
+                }
+                return this.storage().dtype;
+            }
+            get shape() {
+                return this._shape;
+            }
+            get layout() {
+                return this._layout;
+            }
+            get values() {
+                if (this._layout === torch.sparse_coo) {
+                    return this._values;
+                }
+                throw new python.Error(`Unsupported values in layout '${this._layout.__str__()}'.`);
+            }
+            get indices() {
+                if (this._layout === torch.sparse_coo) {
+                    return this._indices;
+                }
+                throw new python.Error(`Unsupported indices in
+                layout '${this._layout.__str__()}'.`);
+            }
+            get is_quantized() {
+                return this.__quantized__ === true;
+            }
+            get is_nested() {
+                return this.__nested__ === true;
+            }
+            get is_sparse() {
+                return this.layout !== torch.strided;
+            }
+            size() {
+                return this._shape;
+            }
+            storage() {
+                if (!this._storage) {
+                    const name = this.__class__.__name__ === 'Tensor' ? 'FloatStorage' : this.__class__.__name__.replace('Tensor', 'Storage');
+                    this._storage = self.invoke(`${this.__class__.__module__}.${name}`, []);
+                }
+                return this._storage;
+            }
+            storage_offset() {
+                return this._storage_offset;
+            }
+            stride() {
+                return this._stride;
+            }
+            resize_(shape) {
+                this._shape = shape;
+            }
+            __len__() {
+                return this._shape[0];
+            }
+            __setstate__(state) {
+                switch (state.length) {
+                    case 3:
+                        break;
+                    case 4:
+                        [this._storage, this._storage_offset, this._shape, this._stride] = state;
+                        break;
+                    case 5:
+                        [this.data, , this._backward_hooks, this.requires_grad] = state;
+                        break;
+                    default:
+                        throw new python.Error(`Unsupported tensor state length '${state.length}'.`);
+                }
+            }
+            __bool__() {
+                return true;
+            }
+            __int__() {
+                const storage = this.storage();
+                if (storage && storage.dtype.__reduce__() === 'int64' && storage.data.length === 8) {
+                    const buffer = storage.data.peek ? storage.data.peek() : storage.data;
+                    const view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
+                    return view.getInt64(0, true);
+                }
+                return NaN;
+            }
+            __float__() {
+                const storage = this.storage();
+                if (storage && storage.dtype.__reduce__() === 'float32') {
+                    if (storage.size() !== undefined && storage.data.length === 4) {
+                        const buffer = storage.data.peek ? storage.data.peek() : storage.data;
+                        const view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
+                        return view.getFloat32(0, true);
+                    }
+                }
+                return NaN;
+            }
+            __str__() {
+                return 'tensor(...)';
+            }
+        });
+        this.registerType('torch.nn.parameter.Parameter', class extends torch.Tensor {
+            constructor(data, requires_grad) {
+                super();
+                if (!data) {
+                    data = self.invoke('torch.Tensor', [[]]);
+                }
+                this.data = data;
+                this.requires_grad = requires_grad !== undefined ?
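+                    // Default to requires_grad=true, as torch.nn.parameter.Parameter does.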
requires_grad : true; + } + }); + this.registerType('torch.nn.parameter.UninitializedParameter', class extends torch.nn.parameter.Parameter { + constructor(requires_grad /*, device, dtype */) { + super(undefined, requires_grad); + } + }); + this.registerType('torch.nn.parameter.UninitializedBuffer', class extends torch.Tensor {}); + this.registerType('torch.BoolTensor', class extends torch.Tensor {}); + this.registerType('torch.ByteTensor', class extends torch.Tensor {}); + this.registerType('torch.CharTensor', class extends torch.Tensor {}); + this.registerType('torch.ShortTensor', class extends torch.Tensor {}); + this.registerType('torch.IntTensor', class extends torch.Tensor {}); + this.registerType('torch.LongTensor', class extends torch.Tensor {}); + this.registerType('torch.HalfTensor', class extends torch.Tensor {}); + this.registerType('torch.FloatTensor', class extends torch.Tensor {}); + this.registerType('torch.DoubleTensor', class extends torch.Tensor {}); + this.registerType('torch.ComplexFloatTensor', class extends torch.Tensor {}); + this.registerType('torch.ComplexDoubleTensor', class extends torch.Tensor {}); + this.registerType('torch.QInt8Tensor', class extends torch.Tensor {}); + this.registerType('torch.QUInt8Tensor', class extends torch.Tensor {}); + this.registerType('torch.QInt32Tensor', class extends torch.Tensor {}); + this.registerType('torch.BFloat16Tensor', class extends torch.Tensor {}); + this.registerType('torch.cuda.FloatTensor', class extends torch.Tensor {}); + this.registerType('torch.cuda.DoubleTensor', class extends torch.Tensor {}); + this.registerType('torch.SymInt', class { + constructor(node) { + this.node = node; + } + }); + this.registerType('torch._C._TensorBase', class {}); + this.registerType('torch._C._VariableFunctionsClass', class {}); + this.register('torch.nn').Module = this.register('torch.nn.modules.module').Module; + this.register('torch.optim').Adam = this.register('torch.optim.adam').Adam; + this.register('torch.nn').ReLU = this.register('torch.nn.modules.activation').ReLU; + this.register('sklearn.utils').Bunch = this.register('sklearn.utils._bunch').Bunch; + torch.uint8 = torch.ByteStorage.dtype = new torch.dtype(0, 'uint8', 1); + torch.int8 = torch.CharStorage.dtype = new torch.dtype(1, 'int8', 1); + torch.int16 = torch.ShortStorage.dtype = new torch.dtype(2, 'int16', 2); + torch.int32 = torch.IntStorage.dtype = new torch.dtype(3, 'int32', 4); + torch.int64 = torch.LongStorage.dtype = new torch.dtype(4, 'int64', 8); + torch.float16 = torch.HalfStorage.dtype = new torch.dtype(5, 'float16', 2); + torch.float32 = torch.FloatStorage.dtype = new torch.dtype(6, 'float32', 4); + torch.float64 = torch.DoubleStorage.dtype = new torch.dtype(7, 'float64', 8); + torch.complex32 = torch.ComplexHalfStorage.dtype = new torch.dtype(8, 'complex32', 4); + torch.complex64 = torch.ComplexFloatStorage.dtype = new torch.dtype(9, 'complex64', 8); + torch.complex128 = torch.ComplexDoubleStorage.dtype = new torch.dtype(10, 'complex128', 16); + torch.bool = torch.BoolStorage.dtype = new torch.dtype(11, 'boolean', 1); + torch.qint8 = torch.QInt8Storage.dtype = new torch.dtype(12, 'qint8', 1); + torch.quint8 = torch.QUInt8Storage.dtype = new torch.dtype(13, 'quint8', 1); + torch.qint32 = torch.QInt32Storage.dtype = new torch.dtype(14, 'qint32', 4); + torch.bfloat16 = torch.BFloat16Storage.dtype = new torch.dtype(15, 'bfloat16', 2); + torch.quint4x2 = new torch.dtype(16, 'quint4x2'); + torch.strided = new torch.layout('torch.strided'); + torch.sparse_coo = 
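+        // Remaining layout and quantization-scheme singletons, mirroring torch's module-level constants.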
new torch.layout('torch.sparse_coo'); + torch.sparse_csr = new torch.layout('torch.sparse_csr'); + torch.sparse_csc = new torch.layout('torch.sparse_csc'); + torch.sparse_bsr = new torch.layout('torch.sparse_bsr'); + torch.sparse_bsc = new torch.layout('torch.sparse_bsc'); + torch._mkldnn = new torch.layout('torch._mkldnn'); + torch.per_tensor_affine = new torch.qscheme('torch.per_tensor_affine'); + torch.per_channel_affine = new torch.qscheme('torch.per_channel_affine'); + torch.per_tensor_symmetric = new torch.qscheme('torch.per_tensor_symmetric'); + torch.per_channel_symmetric = new torch.qscheme('torch.per_channel_symmetric'); + torch.per_channel_affine_float_qparams = new torch.qscheme('torch.per_channel_affine_float_qparams'); + torch.inf = this.register('math').inf; + this.registerType('fastcore.basics.fastuple', class {}); + this.registerType('fastcore.basics.GetAttr', class {}); + this.registerType('fastcore.dispatch._TypeDict', class {}); + this.registerType('fastcore.dispatch.TypeDispatch', class {}); + this.registerType('fastcore.foundation.L', class {}); + this.registerType('fastcore.transform.Pipeline', class {}); + this.registerType('fastcore.transform.Transform', class {}); + this.registerType('fastcore.transform.ItemTransform', class extends fastcore.transform.Transform {}); + this.registerType('fastai.callback.core.TrainEvalCallback', class {}); + this.registerType('fastai.callback.hook._hook_inner', class {}); + this.registerType('fastai.callback.hook.Hook', class {}); + this.registerType('fastai.callback.hook.Hooks', class {}); + this.registerType('fastai.callback.progress.ProgressCallback', class {}); + this.registerType('fastai.callback.core.Callback', class extends fastcore.basics.GetAttr {}); + this.registerType('fastai.data.core.DataLoaders', class extends fastcore.basics.GetAttr {}); + this.registerType('fastai.data.core.Datasets', class {}); + this.registerType('fastai.data.load.DataLoader', class extends fastcore.basics.GetAttr {}); + this.registerType('fastai.data.core.TfmdDL', class extends fastai.data.load.DataLoader {}); + this.registerType('fastai.data.core.TfmdLists', class {}); + this.registerType('fastai.data.load._FakeLoader', class {}); + this.registerFunction('fastai.data.load._wif', function() { + throw new python.Error("'fastai.data.load._wif' not implemented."); + }); + this.registerType('fastai.data.transforms.Categorize', class {}); + this.registerType('fastai.data.transforms.Category', class {}); + this.registerType('fastai.data.transforms.CategoryMap', class {}); + this.registerType('fastai.data.transforms.IntToFloatTensor', class {}); + this.registerType('fastai.data.transforms.Normalize', class {}); + this.registerType('fastai.data.transforms.parent_label', class {}); + this.registerType('fastai.data.transforms.RegressionSetup', class {}); + this.registerType('fastai.data.transforms.ToTensor', class {}); + this.registerType('fastai.imports.noop', class {}); + this.registerType('fastai.layers.AdaptiveConcatPool2d', class {}); + this.registerType('fastai.layers.ConvLayer', class {}); + this.registerType('fastai.layers.Embedding', class {}); + this.registerType('fastai.layers.Flatten', class {}); + this.registerType('fastai.layers.LinBnDrop', class {}); + this.registerType('fastai.layers.MergeLayer', class {}); + this.registerType('fastai.layers.PixelShuffle_ICNR', class {}); + this.registerType('fastai.layers.ResBlock', class {}); + this.registerType('fastai.layers.SelfAttention', class {}); + this.registerType('fastai.layers.SigmoidRange', 
class {}); + this.registerType('fastai.learner.Metric', class {}); + this.registerType('fastai.learner.AvgLoss', class extends fastai.learner.Metric {}); + this.registerType('fastai.learner.AvgMetric', class extends fastai.learner.Metric {}); + this.registerType('fastai.learner.AvgSmoothLoss', class extends fastai.learner.Metric {}); + this.registerType('fastai.learner.CastToTensor', class extends fastai.callback.core.Callback {}); + this.registerType('fastai.learner.Learner', class extends fastcore.basics.GetAttr {}); + this.registerType('fastai.learner.Recorder', class {}); + this.registerType('fastai.losses.BaseLoss', class {}); + this.registerType('fastai.losses.CrossEntropyLossFlat', class {}); + this.registerType('fastai.metrics.AccumMetric', class extends fastai.learner.Metric {}); + this.registerFunction('fastai.metrics._rmse', function() { + throw new python.Error("'fastai.metrics._rmse' not implemented."); + }); + this.registerFunction('fastai.metrics.accuracy', function() { + throw new python.Error("'fastai.metrics.accuracy' not implemented."); + }); + this.registerFunction('fastai.metrics.error_rate', function() { + throw new python.Error("'fastai.metrics.error_rate' not implemented."); + }); + this.registerFunction('fastai.optimizer.Adam', function() { + throw new python.Error("'fastai.optimizer.Adam' not implemented."); + }); + + this.registerType('fastai.tabular.core.Categorify', class {}); + this.registerType('fastai.tabular.core.FillMissing', class {}); + this.registerType('fastai.tabular.core.FillStrategy', class {}); + this.registerType('fastai.tabular.core.ReadTabBatch', class extends fastcore.transform.ItemTransform {}); + this.registerType('fastai.tabular.core.TabDataLoader', class extends fastai.data.core.TfmdDL {}); + this.registerType('fastai.tabular.data.TabularDataLoaders', class extends fastai.data.core.DataLoaders {}); + this.registerType('fastai.tabular.core.Tabular', class {}); + this.registerType('fastai.tabular.core.TabularPandas', class extends fastai.tabular.core.Tabular {}); + this.registerType('fastai.tabular.learner.TabularLearner', class extends fastai.learner.Learner {}); + this.registerType('fastai.tabular.model.TabularModel', class {}); + this.registerFunction('fastai.torch_core._fa_rebuild_tensor', function(cls) { + const tensor = self.invoke('torch._utils._rebuild_tensor_v2', Array.from(arguments).slice(1)); + return self.invoke(cls, [ tensor ]); + }); + this.registerType('fastai.torch_core.TensorBase', class extends torch.Tensor { + constructor(x) { + super(); + Object.assign(this, x); + } + }); + this.registerType('fastai.torch_core.TensorCategory', class {}); + this.registerType('fastai.torch_core.TensorImage', class {}); + this.registerFunction('fastai.torch_core.trainable_params', function() { + throw new python.Error("'fastai.torch_core.trainable_params' not implemented."); + }); + this.registerFunction('fastai.torch_core._rebuild_from_type', function(func, type, args, dict) { + const tensor = self.invoke(type, [ func(...args) ]); + Object.assign(tensor, dict); + return tensor; + }); + this.registerType('fastai.vision.augment._BrightnessLogit', class {}); + this.registerType('fastai.vision.augment._ContrastLogit', class {}); + this.registerType('fastai.vision.augment._WarpCoord', class {}); + this.registerType('fastai.vision.augment.Brightness', class {}); + this.registerType('fastai.vision.augment.flip_mat', class {}); + this.registerType('fastai.vision.augment.Flip', class {}); + 
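// Note: the fastai types registered above and below are deserialization stubs. Unpickling only needs a constructor with the matching qualified name to rebuild the object graph, so none of the fastai Python behavior is emulated here; non-trivial entries (e.g. _fa_rebuild_tensor) only reconstruct tensor data. + 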
this.registerType('fastai.vision.augment.RandomResizedCropGPU', class {}); + this.registerType('fastai.vision.augment.Resize', class {}); + this.registerType('fastai.vision.augment.rotate_mat', class {}); + this.registerType('fastai.vision.augment.zoom_mat', class {}); + this.registerType('fastai.vision.core.PILImage', class {}); + this.registerType('fastai.vision.learner._resnet_split', class {}); + this.registerType('fastai.vision.models.unet.DynamicUnet', class {}); + this.registerType('fastai.vision.models.unet.ResizeToOrig', class {}); + this.registerType('fastai.vision.models.unet.UnetBlock', class {}); + } + + get builtins() { + return this._builtins; + } + + source(file) { + return this._sources.has(file) ? this._sources.get(file) : null; + } + + debug(/* file */) { + } + + exec(code, context) { + const reader = new python.Parser(code, '', null); + const program = reader.parse(); + if (!program) { + throw new python.Error(`Module '?' parse error.`); + } + this.block(program.body, context); + } + + parse(file) { + const buffer = this.source(file); + if (buffer) { + const debug = this.debug(file); + const code = this._utf8Decoder.decode(buffer); + const parser = new python.Parser(code, file, debug); + const program = parser.parse(); + if (!program) { + throw new python.Error(`Module '${file}' parse error.`); + } + return program; + } + return null; + } + + import(name, current, level) { + if (level) { + let bits = current.split('.'); + if (bits.length < level) { + throw new python.Error('Invalid relative import beyond top-level package.'); + } + bits = bits.slice(0, bits.length - level); + const base = bits.join('.'); + name = name ? [ base, name ].join('.') : base; + } + const index = name.lastIndexOf('.'); + let parent = null; + let child = null; + if (index > 0) { + parent = name.substring(0, index); + child = name.substring(index + 1); + this.import(parent); + } + if (!this._modules.has(name)) { + const module = this._registry.get(name) || new this._builtins.module(name); + module.__package__ = name; + this._modules.set(name, module); + const path = name.split('.').join('/'); + module.__path__ = [ path ]; + const file = `${path}.py`; + const program = this.parse(file); + if (program) { + module.__file__ = file; + for (const [name, value] of Object.entries(this.builtins)) { + switch (name) { + case '__class__': + case '__package__': + case '__module__': + case '__name__': + case '__path__': + case '__file__': + break; + default: + module[name] = value; + break; + } + } + const context = new python.Execution.Context(module, null); + if (name !== 'builtins') { + context.set('__builtins__', this._modules.get('builtins')); + } + this.block(program.body, context); + } + if (parent) { + const parent_module = this._modules.get(parent); + parent_module[child] = module; + } + } + return this._modules.get(name); + } + + __import__(name, globals, locals, fromlist, level) { + let module = null; + level = level || 0; + if (level === 0) { + module = this.import(name); + } else { + globals = globals || {}; + let current = globals.__package__; + if (!current) { + const spec = globals.__spec__; + if (spec) { + current = spec.parent; + } else { + const name = globals.__name__; + const bits = name.split('.'); + bits.pop(); + current = bits.join('.'); + } + } + module = this.import(name, current, level); + } + if (!fromlist) { + if (level === 0) { + return this.import(name.split('.')[0]); + } else if (name) { + throw new python.Error(`Unsupported relative import '${name}'.`); + // 
cut_off = len(name) - len(name.partition('.')[0]) + // return sys.modules[module.__name__[:len(module.__name__)-cut_off]] + } + } else if (module.__path__) { + const handle_fromlist = (module, fromlist, recursive) => { + for (const name of fromlist) { + if (name == '*') { + if (!recursive && module.__all__) { + handle_fromlist(module, module.__all__, true); + } + } else if (!module[name]) { + this.import(`${module.__name__}.${name}`); + } + } + return module; + }; + handle_fromlist(module, fromlist); + } + return module; + } + + module(name) { + return this._modules.get(name); + } + + resolve(name) { + const index = name.lastIndexOf('.'); + const memberName = index === -1 ? name : name.substring(index + 1, name.length); + const moduleName = index === -1 ? '' : name.substring(0, index); + const module = this.import(moduleName); + let type = module ? module[memberName] : null; + if (!type) { + if (!this._unresolved.has(name)) { + const moduleName = name.split('.').shift(); + if (this._registry.has(moduleName) && moduleName !== '__main__') { + this.emit('resolve', name); + } + const type = this._createType(name, class {}); + this._unresolved.set(name, type); + } + type = this._unresolved.get(name); + } + return type; + } + + invoke(target, args) { + if (typeof target === 'string') { + target = this.resolve(target); + } + if (target) { + if (target.__class__ === this._builtins.type) { + if (target.prototype && target.prototype.__class__ === target) { + return Reflect.construct(target, args); + } + const obj = Object.create(target); + if (obj.__init__ && typeof obj.__init__ === 'function') { + obj.__init__.apply(obj, args); + } + return obj; + } else if (target.__class__ === this._builtins.function) { + if (target.__call__) { + return target.__call__(args); + } + return target.apply(null, args); + } + } + throw new python.Error('Unsupported invoke target.'); + } + + call(target, name, args, context) { + const callTarget = this.target(target, context); + const callArguments = args.map((argument) => this.expression(argument, context)); + if (!callTarget || (name !== null && !callTarget[name])) { + if (name === '__new__' && callArguments.length === 1 && callArguments[0] == callTarget) { + name = null; + callArguments.shift(); + } else { + const format = (expression) => { + if (expression.type == 'id') { + return expression.value; + } + if (expression.type == '.') { + return `${format(expression.target)}.${format(expression.member)}`; + } + return null; + }; + const targetName = `${format(target)}.${name}`; + throw new python.Error(`Unknown function '${targetName}'.`); + } + } + const func = name ? 
callTarget[name] : callTarget; + if (func.__class__ === this._builtins.type) { + if (func.prototype && func.prototype.__class__ === func) { + return Reflect.construct(func, callArguments); + } + const obj = Object.create(func); + obj.__class__ = func; + if (obj.__init__ && typeof obj.__init__ === 'function') { + obj.__init__.apply(obj, callArguments); + } + return obj; + } + if (func.__class__ === this._builtins.function) { + if (func.__call__) { + return func.__call__(callArguments); + } + } + if (func.__class__ === this._builtins.method) { + if (func.__call__) { + return func.__call__([ callTarget ].concat(callArguments)); + } + } + if (typeof func === 'function') { + return func.apply(callTarget, callArguments); + } + throw new python.Error("Unsupported call expression."); + } + + apply(method, args, context) { + const locals = Array.prototype.slice.call(args); + context = new python.Execution.Context(context.globals, {}); + for (const parameter of method.parameters) { + let value = locals.shift(); + if (value === undefined && parameter.initializer) { + value = this.expression(parameter.initializer, context); + } + context.set(parameter.name, value); + } + return this.block(method.body.statements, context); + } + + block(statements, context) { + statements = Array.prototype.slice.call(statements); + while (statements.length > 0) { + const statement = statements.shift(); + const value = this.statement(statement, context); + if (value !== undefined) { + return value; + } + } + return undefined; + } + + statement(statement, context) { + switch (statement.type) { + case 'pass': { + break; + } + case 'return': { + return this.expression(statement.expression, context); + } + case 'def': { + const module = context.get('__name__'); + const self = this; + const parent = context.get('__class__'); + const type = (parent === this._builtins.module) ? this._builtins.function : this._builtins.method; + const func = { + __class__: type, + __globals__: context, + __module__: module, + __name__: statement.name, + __code__: statement, + __call__: function(args) { + return self.apply(this.__code__, args, this.__globals__); + } + }; + context.set(statement.name, func); + break; + } + case 'class': { + const bases = statement.bases.map((arg) => this.expression(arg, context)); + if (bases.length > 1) { + throw new python.Error(`Unsupported multiple bases for class '${statement.name}'.`); + } + const base = bases.length === 1 ? bases[0] : null; + const name = `${context.get('__name__')}.${statement.name}`; + const value = this._createType(name, base ? class extends base {} : class {}); + value.__bases__ = bases; + context.set(statement.name, value); + this.block(statement.body.statements, new python.Execution.Context(context.globals, value.prototype)); + break; + } + case 'var': { + context.set(statement.name, statement.initializer ? 
this.expression(statement.initializer, context) : undefined); + break; + } + case '=': { + this.expression(statement, context); + break; + } + case 'if': { + const condition = this.expression(statement.condition, context); + if (condition === true || condition) { + const value = this.block(statement.then.statements, context); + if (value !== undefined) { + return value; + } + break; + } else if (condition === false) { + const value = this.block(statement.else.statements, context); + if (value !== undefined) { + return value; + } + break; + } + throw new python.Error("Unsupported condition."); + } + case 'for': { + if (statement.target.length == 1 && + statement.variable.length === 1 && statement.variable[0].type === 'id') { + const range = this.expression(statement.target[0], context); + const [variable] = statement.variable; + for (const current of range) { + this.statement({ type: '=', target: variable, expression: { type: 'number', value: current } }, context); + const value = this.block(statement.body.statements, context); + if (value !== undefined) { + return value; + } + } + break; + } + throw new python.Error("Unsupported 'for' statement."); + } + case 'while': { + const condition = this.expression(statement.condition, context); + if (condition) { + const value = this.block(statement.body.statements, context); + if (value !== undefined) { + return value; + } + } + break; + } + case 'with': { + const items = []; + for (const item of statement.item) { + items.push(this.expression(item.expression, context)); + } + for (const item of items) { + if (item.__enter__ && item.__enter__.__call__) { + item.__enter__.__call__([ item ]); + } + } + const value = this.block(statement.body.statements, context); + for (const item of items) { + if (item.__exit__ && item.__exit__.__call__) { + item.__exit__.__call__([ item ]); + } + } + if (value !== undefined) { + return value; + } + break; + } + case 'call': { + this.expression(statement, context); + break; + } + case 'import': { + for (const alias of statement.names) { + let module = this.__import__(alias.name, context); + if (alias.asname) { + const bits = alias.name.split('.').reverse(); + bits.pop(); + while (bits.length > 0) { + module = module[bits.pop()]; + } + context.set(alias.asname, module); + } else { + context.set(alias.name.split('.')[0], module); + } + } + break; + } + case 'import_from': { + const fromlist = statement.names.map((name) => name.name); + const module = this.__import__(statement.module, context.globals, context.locals, fromlist, statement.level); + for (const entry of statement.names) { + const name = entry.name; + const asname = entry.asname ? entry.asname : null; + if (!module[name]) { + throw new python.Error(`Cannot import '${name}' from '${statement.module}'.`); + } + context.set(asname ? 
asname : name, module[name]); + } + break; + } + case 'string': { + break; + } + default: { + throw new python.Error(`Unsupported statement '${statement.type}'.`); + } + } + return undefined; + } + + expression(expression, context) { + const self = context.get('self'); + switch (expression.type) { + case '=': { + const target = expression.target; + if (target.type === 'id') { + context.set(target.value, this.expression(expression.expression, context)); + return undefined; + } else if (target.type === '[]') { + if (target.target.type === 'id' && + target.arguments.type === 'list' && + target.arguments.value.length === 1) { + const index = this.expression(target.arguments.value[0], context); + if (target.target.value === '__annotations__') { + context.set(target.target.value, context.get(target.target.value) || {}); + } + const obj = context.get(target.target.value); + const value = this.expression(expression.expression, context); + if (obj instanceof Map) { + obj.set(index, value); + } else { + obj[index] = value; + } + return undefined; + } + } else if (target.type === '.' && + target.member.type === 'id') { + this.expression(target.target, context)[target.member.value] = this.expression(expression.expression, context); + return undefined; + } else if (target.type === 'tuple') { + context.target.push(target.value); + const value = this.expression(expression.expression, context); + context.target.pop(); + if (target.value.every((item) => item.type === 'id')) { + if (target.value.length < value.length) { + throw new python.Error(`ValueError: too many values to unpack (expected ${target.value.length}, actual ${value.length}).`); + } + if (target.value.length > value.length) { + throw new python.Error(`ValueError: not enough values to unpack (expected ${target.value.length}, actual ${value.length}).`); + } + for (let i = 0; i < value.length; i++) { + context.set(target.value[i].value, value[i]); + } + return undefined; + } + } + break; + } + case 'list': { + return expression.value.map((item) => this.expression(item, context)); + } + case 'string': { + return expression.value.substring(1, expression.value.length - 1); + } + case 'number': { + return Number(expression.value); + } + case '[]': { + if (expression.target.type === 'id' && + expression.arguments.type === 'list' && + expression.arguments.value.length === 1) { + if (context.get(expression.target.value)) { + const index = this.expression(expression.arguments.value[0], context); + const target = context.get(expression.target.value); + if (target instanceof Map) { + return target.get(index); + } + return target[index < 0 ? target.length + index : index]; + } + } + const target = this.expression(expression.target, context); + if (target && expression.arguments.type === 'list' && + (target.__class__ === this._typing._TupleType || + target.__class__ === this._typing._SpecialGenericAlias || + target.__class__ === this._typing._SpecialForm)) { + const type = Object.assign({}, target); + type.__args__ = expression.arguments.value.map((arg) => this.expression(arg, context)); + return type; + } + if (expression.arguments.type === 'list' && expression.arguments.value.length === 1) { + const index = this.expression(expression.arguments.value[0], context); + if (target instanceof Map) { + return target.get(index); + } + return target[index < 0 ? 
target.length + index : index]; + } + break; + } + case '.': { + if (expression.member.type == 'id') { + const target = this.target(expression.target, context); + return target[expression.member.value]; + } + throw new python.Error("Unsupported field expression."); + } + case 'call': { + if (expression.target.type === '.') { + return this.call(expression.target.target, expression.target.member.value, expression.args, context); + } + return this.call(expression.target, null, expression.args, context); + } + case 'id': { + switch (expression.value) { + case 'self': return self; + case 'None': return null; + case 'True': return true; + case 'False': return false; + default: { + const type = (value) => { + return value && + (value.__class__ === this._builtins.type || + value.__class__ === this._typing._TupleType || + value.__class__ === this._typing._SpecialGenericAlias || + value.__class__ === this._typing._SpecialForm); + }; + const builtin = this._builtins[expression.value]; + if (type(builtin)) { + return builtin; + } + const value = context.get(expression.value); + if (value === undefined) { + const typing = this._typing[expression.value]; + if (type(typing)) { + return typing; + } + } + return value; + } + } + } + case 'tuple': { + return expression.value.map((expression) => this.expression(expression, context)); + } + case 'dict': { + const dict = {}; + for (const pair of expression.value) { + if (pair.type !== 'pair') { + throw new python.Error(`Unsupported dict item type '${pair.type}'.`); + } + const key = this.expression(pair.key, context); + const value = this.expression(pair.value, context); + dict[key] = value; + } + return dict; + } + case 'unary': { + switch (expression.op) { + case '-': { + return -this.expression(expression.operand, context); + } + default: { + throw new python.Error(`Unsupported unary expression '${expression.op}'.`); + } + } + } + default: { + throw new python.Error(`Unsupported expression '${expression.type}'.`); + } + } + return undefined; + } + + target(expression, context) { + let current = expression; + let path = []; + for (;;) { + if (current.type === '.' && current.member && current.member.type === 'id') { + path.push(current.member.value); + current = current.target; + } else if (current.type === 'id' && current.value !== 'self' && current.value !== 'CONSTANTS') { + path.push(current.value); + break; + } else { + path = null; + break; + } + } + if (path) { + let target = null; + for (let i = path.length - 1; i >= 0; i--) { + target = target ? 
target[path[i]] : context.get(path[i]); + if (!target) { + break; + } + } + if (!target) { + path.reverse(); + const name = path.join('.'); + const file = `${path.join('/')}.py`; + if (this._sources.has(file)) { + target = this.import(name); + } else { + target = this.resolve(name); + } + } + return target; + } + return this.expression(expression, context); + } + + add(name, source) { + this._sources.set(name, source); + } + + on(event, listener) { + const value = this._events.get(event) || []; + value.push(listener); + this._events.set(event, value); + } + + emit(event, ...args) { + if (this._events.has(event)) { + for (const callback of this._events.get(event)) { + callback(this, ...args); + } + } + } + + register(name, value) { + if (!this._registry.has(name)) { + value = value || new (this._registry.get('builtins').module)(name); + this._registry.set(name, value); + let current = name; + for (;;) { + const index = current.lastIndexOf('.'); + if (index === -1) { + break; + } + const child = current.substring(index + 1); + current = current.substring(0, index); + const parent = this.register(current); + parent[child] = value; + value = parent; + } + } + return this._registry.get(name); + } + + registerFunction(name, value) { + const parts = name.split('.'); + value.__class__ = this._builtins.function; + value.__name__ = parts.pop(); + value.__module__ = parts.join('.'); + const module = this.register(value.__module__); + if (module[value.__name__]) { + throw new python.Error(`Function '${name}' is already registered.`); + } + module[value.__name__] = value; + return value; + } + + _createType(name, value) { + const parts = name.split('.'); + value.__class__ = this._builtins.type; + value.__name__ = parts.pop(); + value.__module__ = parts.join('.'); + value.prototype.__class__ = value; + return value; + } + + registerType(name, value) { + value = this._createType(name, value); + const parts = name.split('.'); + const memberName = parts.pop(); + const moduleName = parts.join('.'); + const module = this.register(moduleName); + if (module[memberName]) { + throw new python.Error(`Class '${memberName}' is already registered.`); + } + module[memberName] = value; + return value; + } +}; + +python.Execution.Context = class { + + constructor(globals, locals) { + this.globals = globals; + this.locals = locals; + } + + set(name, value) { + if (this.locals) { + this.locals[name] = value; + } else { + this.globals[name] = value; + } + } + + get(name) { + if (this.locals && name in this.locals) { + return this.locals[name]; + } + if (name in this.globals) { + return this.globals[name]; + } + return undefined; + } + + get target() { + this._target = this._target || []; + return this._target; + } +}; + +python.BinaryReader = class { + + constructor(buffer) { + this._buffer = buffer; + this._length = buffer.length; + this._position = 0; + this._view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._utf8Decoder = new TextDecoder('utf-8'); + this._asciiDecoder = new TextDecoder('ascii'); + } + + get position() { + return this._position; + } + + get length() { + return this._length; + } + + seek(position) { + this._position = position >= 0 ? position : this._length + position; + if (this._position > this._buffer.length) { + throw new python.Error(`Expected ${this._position - this._buffer.length} more bytes. The file might be corrupted. 
Unexpected end of file.`); + } + } + + skip(offset) { + this._position += offset; + if (this._position > this._buffer.length) { + throw new python.Error(`Expected ${this._position - this._buffer.length} more bytes. The file might be corrupted. Unexpected end of file.`); + } + } + + stream(length) { + const buffer = this.read(length); + return new python.BinaryReader(buffer); + } + + peek(length) { + const position = this._position; + length = length !== undefined ? length : this._length - this._position; + this.skip(length); + const end = this._position; + this.skip(-length); + if (position === 0 && length === this._length) { + return this._buffer; + } + return this._buffer.subarray(position, end); + } + + read(length) { + const position = this._position; + length = length !== undefined ? length : this._length - this._position; + this.skip(length); + if (position === 0 && length === this._length) { + return this._buffer; + } + return this._buffer.subarray(position, this._position); + } + + byte() { + const position = this._position; + this.skip(1); + return this._view.getUint8(position); + } + + uint16() { + const position = this._position; + this.skip(2); + return this._view.getUint16(position, true); + } + + int32() { + const position = this._position; + this.skip(4); + return this._view.getInt32(position, true); + } + + uint32() { + const position = this._position; + this.skip(4); + return this._view.getUint32(position, true); + } + + int64() { + const position = this._position; + this.skip(8); + return this._view.getInt64(position, true).toNumber(); + } + + float64() { + const position = this._position; + this.skip(8); + return this._view.getFloat64(position, false); + } + + string(size, encoding) { + const data = this.read(size); + return (encoding == 'utf-8') ? + this._utf8Decoder.decode(data) : + this._asciiDecoder.decode(data); + } + + line() { + const index = this._buffer.indexOf(0x0A, this._position); + if (index == -1) { + throw new python.Error("Could not find end of line."); + } + const size = index - this._position; + const text = this.string(size, 'ascii'); + this.skip(1); + return text; + } +}; + +python.StreamReader = class { + + constructor(stream) { + this._stream = stream; + this._length = stream.length; + this._position = 0; + this._utf8Decoder = new TextDecoder('utf-8'); + this._asciiDecoder = new TextDecoder('ascii'); + } + + get position() { + return this._position; + } + + get length() { + return this._length; + } + + seek(position) { + this._stream.seek(position); + this._position = this._stream.position; + } + + skip(offset) { + this._position += offset; + if (this._position > this._length) { + throw new python.Error(`Expected ${this._position - this._length} more bytes. The file might be corrupted. 
Unexpected end of file.`); + } + } + + stream(length) { + this._stream.seek(this._position); + this.skip(length); + return this._stream.stream(length); + } + + peek(length) { + this._stream.seek(this._position); + return this._stream.peek(length); + } + + read(length) { + this._stream.seek(this._position); + this.skip(length); + return this._stream.read(length); + } + + byte() { + const position = this._fill(1); + return this._view.getUint8(position); + } + + uint16() { + const position = this._fill(2); + return this._view.getUint16(position, true); + } + + int32() { + const position = this._fill(4); + return this._view.getInt32(position, true); + } + + uint32() { + const position = this._fill(4); + return this._view.getUint32(position, true); + } + + int64() { + const position = this._fill(8); + return this._view.getInt64(position, true).toNumber(); + } + + float64() { + const position = this._fill(8); + return this._view.getFloat64(position, false); + } + + string(size, encoding) { + const data = this.read(size); + return (encoding == 'utf-8') ? + this._utf8Decoder.decode(data) : + this._asciiDecoder.decode(data); + } + + line() { + let position = this._fill(0); + let index = this._buffer.indexOf(0x0A, position); + if (index == -1) { + const size = Math.min(0x1000000, this._stream.length - this._position); + this._fill(size); + this.skip(-size); + position = this._fill(0); + index = this._buffer.indexOf(0x0A, position); + if (index == -1) { + throw new python.Error("Could not find end of line."); + } + } + const size = index - position; + const text = this.string(size, 'ascii'); + this.skip(1); + return text; + } + + _fill(length) { + if (this._position + length > this._length) { + throw new python.Error(`Expected ${this._position + length - this._length} more bytes. The file might be corrupted. 
Unexpected end of file.`); + } + if (!this._buffer || this._position < this._offset || this._position + length > this._offset + this._buffer.length) { + this._offset = this._position; + this._stream.seek(this._offset); + this._buffer = this._stream.read(Math.min(0x10000000, this._length - this._offset)); + this._view = new DataView(this._buffer.buffer, this._buffer.byteOffset, this._buffer.byteLength); + } + const position = this._position; + this._position += length; + return position - this._offset; + } +}; + +python.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Python Error'; + } +}; + +export const Execution = python.Execution; diff --git a/pytorch-metadata.json b/pytorch-metadata.json new file mode 100755 index 00000000000..a5f31d71b67 --- /dev/null +++ b/pytorch-metadata.json @@ -0,0 +1,15911 @@ +[ + { + "name": "_caffe2::BBoxTransform", + "inputs": [ + { "name": "rois", "type": "Tensor" }, + { "name": "deltas", "type": "Tensor" }, + { "name": "im_info", "type": "Tensor" }, + { "name": "weights", "type": "float32[]" }, + { "name": "apply_scale", "type": "boolean" }, + { "name": "rotated", "type": "boolean" }, + { "name": "angle_bound_on", "type": "boolean" }, + { "name": "angle_bound_lo", "type": "int64" }, + { "name": "angle_bound_hi", "type": "int64" }, + { "name": "clip_angle_thresh", "type": "float32" }, + { "name": "legacy_plus_one", "type": "boolean" } + ], + "outputs": [ + { "name": "output_0", "type": "Tensor" }, + { "name": "output_1", "type": "Tensor" } + ] + }, + { + "name": "_caffe2::BatchPermutation", + "inputs": [ + { "name": "X", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "_caffe2::BoxWithNMSLimit", + "inputs": [ + { "name": "scores", "type": "Tensor" }, + { "name": "boxes", "type": "Tensor" }, + { "name": "batch_splits", "type": "Tensor" }, + { "name": "score_thresh", "type": "float32" }, + { "name": "nms", "type": "float32" }, + { "name": "detections_per_im", "type": "int64" }, + { "name": "soft_nms_enabled", "type": "boolean" }, + { "name": "soft_nms_method", "type": "string" }, + { "name": "soft_nms_sigma", "type": "float32" }, + { "name": "soft_nms_min_score_thres", "type": "float32" }, + { "name": "rotated", "type": "boolean" }, + { "name": "cls_agnostic_bbox_reg", "type": "boolean" }, + { "name": "input_boxes_include_bg_cls", "type": "boolean" }, + { "name": "output_classes_include_bg_cls", "type": "boolean" }, + { "name": "legacy_plus_one", "type": "boolean" } + ], + "outputs": [ + { "name": "scores", "type": "Tensor" }, + { "name": "boxes", "type": "Tensor" }, + { "name": "classes", "type": "Tensor" }, + { "name": "batch_splits", "type": "Tensor" }, + { "name": "keeps", "type": "Tensor" }, + { "name": "keeps_size", "type": "Tensor" } + ] + }, + { + "name": "_caffe2::CollectRpnProposals", + "inputs": [ + { "name": "input_list", "type": "Tensor[]" }, + { "name": "rpn_max_level", "type": "int64" }, + { "name": "rpn_min_level", "type": "int64" }, + { "name": "rpn_post_nms_topN", "type": "int64" } + ], + "outputs": [ + { "name": "rois", "type": "Tensor" } + ] + }, + { + "name": "_caffe2::CopyCPUToGPU", + "inputs": [ + { "name": "input", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "_caffe2::CopyGPUToCPU", + "inputs": [ + { "name": "input", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "_caffe2::DistributeFpnProposals", + "inputs": [ + { "name": "rois", "type": 
"Tensor" }, + { "name": "roi_canonical_scale", "type": "int64" }, + { "name": "roi_canonical_level", "type": "int64" }, + { "name": "roi_max_level", "type": "int64" }, + { "name": "roi_min_level", "type": "int64" }, + { "name": "legacy_plus_one", "type": "boolean" } + ], + "outputs": [ + { "name": "rois_fpn2", "type": "Tensor" }, + { "name": "rois_fpn3", "type": "Tensor" }, + { "name": "rois_fpn4", "type": "Tensor" }, + { "name": "rois_fpn5", "type": "Tensor" }, + { "name": "rois_idx_restore_int32", "type": "Tensor" } + ] + }, + { + "name": "_caffe2::GenerateProposals", + "inputs": [ + { "name": "scores", "type": "Tensor" }, + { "name": "bbox_deltas", "type": "Tensor" }, + { "name": "im_info", "type": "Tensor" }, + { "name": "anchors", "type": "Tensor" }, + { "name": "spatial_scale", "type": "float32" }, + { "name": "pre_nms_topN", "type": "int64" }, + { "name": "post_nms_topN", "type": "int64" }, + { "name": "nms_thresh", "type": "float32" }, + { "name": "min_size", "type": "float32" }, + { "name": "angle_bound_on", "type": "boolean" }, + { "name": "angle_bound_lo", "type": "int64" }, + { "name": "angle_bound_hi", "type": "int64" }, + { "name": "clip_angle_thresh", "type": "float32" }, + { "name": "legacy_plus_one", "type": "boolean" } + ], + "outputs": [ + { "name": "output_0", "type": "Tensor" }, + { "name": "output_1", "type": "Tensor" } + ] + }, + { + "name": "_caffe2::RoIAlign", + "inputs": [ + { "name": "features", "type": "Tensor" }, + { "name": "rois", "type": "Tensor" }, + { "name": "order", "type": "string" }, + { "name": "spatial_scale", "type": "float32" }, + { "name": "pooled_h", "type": "int64" }, + { "name": "pooled_w", "type": "int64" }, + { "name": "sampling_ratio", "type": "int64" }, + { "name": "aligned", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__and__.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__and__.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__iand__.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__iand__.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__ilshift__.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__ilshift__.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__interpolate", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "size", "type": "int64", "optional": true, "default": null }, + { "name": "scale_factor", "type": "float32", "optional": true, "default": null }, + { "name": "mode", "type": "string", "default": "nearest" }, + { "name": "align_corners", "type": "boolean", "optional": true, "default": null }, + { "name": "recompute_scale_factor", "type": "boolean", "optional": true, "default": null }, + { "name": "antialias", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + 
"name": "aten::__interpolate.scale_list", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "size", "type": "int64", "optional": true, "default": null }, + { "name": "scale_factor", "type": "float32[]", "optional": true, "default": null }, + { "name": "mode", "type": "string", "default": "nearest" }, + { "name": "align_corners", "type": "boolean", "optional": true, "default": null }, + { "name": "recompute_scale_factor", "type": "boolean", "optional": true, "default": null }, + { "name": "antialias", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__interpolate.size_list", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "size", "type": "int64[]", "optional": true, "default": null }, + { "name": "scale_factor", "type": "float32", "optional": true, "default": null }, + { "name": "mode", "type": "string", "default": "nearest" }, + { "name": "align_corners", "type": "boolean", "optional": true, "default": null }, + { "name": "recompute_scale_factor", "type": "boolean", "optional": true, "default": null }, + { "name": "antialias", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__interpolate.size_list_scale_list", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "size", "type": "int64[]", "optional": true, "default": null }, + { "name": "scale_factor", "type": "float32[]", "optional": true, "default": null }, + { "name": "mode", "type": "string", "default": "nearest" }, + { "name": "align_corners", "type": "boolean", "optional": true, "default": null }, + { "name": "recompute_scale_factor", "type": "boolean", "optional": true, "default": null }, + { "name": "antialias", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__irshift__.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__irshift__.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__ixor__.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__ixor__.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__lshift__.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__lshift__.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__or__.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__or__.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__rshift__.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::__rshift__.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__upsample", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "size", "type": "int64", "optional": true, "default": null }, + { "name": "scale_factor", "type": "int64", "optional": true, "default": null }, + { "name": "mode", "type": "string", "default": "nearest" }, + { "name": "align_corners", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__upsample.size_list", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "size", "type": "int64[]", "optional": true, "default": null }, + { "name": "scale_factor", "type": "int64", "optional": true, "default": null }, + { "name": "mode", "type": "string", "default": "nearest" }, + { "name": "align_corners", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__upsample_bilinear", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "size", "type": "int64", "optional": true, "default": null }, + { "name": "scale_factor", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__upsample_bilinear.scale_list", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "size", "type": "int64", "optional": true, "default": null }, + { "name": "scale_factor", "type": "int64[]", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__upsample_bilinear.size_list", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "size", "type": "int64[]", "optional": true, "default": null }, + { "name": "scale_factor", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__upsample_bilinear.size_list_scale_list", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "size", "type": "int64[]", "optional": true, "default": null }, + { "name": "scale_factor", "type": "int64[]", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__xor__.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::__xor__.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_add_relu.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_add_relu.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_add_relu.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_add_relu_.Scalar", + "inputs": [ + { "name": "self", 
"type": "Tensor" }, + { "name": "other", "type": "Scalar" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_add_relu_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_aminmax", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "name": "min", "type": "Tensor" }, + { "name": "max", "type": "Tensor" } + ] + }, + { + "name": "aten::_aminmax.dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "min", "type": "Tensor" }, + { "name": "max", "type": "Tensor" } + ] + }, + { + "name": "aten::_autocast_to_reduced_precision", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "cuda_enabled", "type": "boolean" }, + { "name": "cpu_enabled", "type": "boolean" }, + { "name": "cuda_dtype", "type": "ScalarType" }, + { "name": "cpu_dtype", "type": "ScalarType" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_cast_Byte", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "non_blocking", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_cast_Char", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "non_blocking", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_cast_Double", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "non_blocking", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_cast_Float", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "non_blocking", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_cast_Half", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "non_blocking", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_cast_Int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "non_blocking", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_cast_Long", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "non_blocking", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_cast_Short", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "non_blocking", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_coalesce", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_conj", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_convolution", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "SymInt[]", "default": 1 }, + { "name": "padding", "type": "SymInt[]", "default": 0 }, + { "name": "dilation", "type": "SymInt[]", 
"default": 1 }, + { "name": "transposed", "type": "boolean", "default": false }, + { "name": "output_padding", "type": "SymInt[]", "default": 0 }, + { "name": "groups", "type": "SymInt", "default": 1 }, + { "name": "benchmark", "type": "boolean", "visible": false }, + { "name": "deterministic", "type": "boolean", "visible": false }, + { "name": "cudnn_enabled", "type": "boolean", "visible": false }, + { "name": "allow_tf32", "type": "boolean", "visible": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_convolution.deprecated", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "SymInt[]", "default": 1 }, + { "name": "padding", "type": "SymInt[]", "default": 0 }, + { "name": "dilation", "type": "SymInt[]", "default": 1 }, + { "name": "transposed", "type": "boolean", "default": false }, + { "name": "output_padding", "type": "int64[]", "default": 0 }, + { "name": "groups", "type": "SymInt", "default": 1 }, + { "name": "benchmark", "type": "boolean", "visible": false }, + { "name": "deterministic", "type": "boolean", "visible": false }, + { "name": "cudnn_enabled", "type": "boolean", "visible": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_convolution_mode", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "SymInt[]" }, + { "name": "padding", "type": "string" }, + { "name": "dilation", "type": "SymInt[]" }, + { "name": "groups", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_ctc_loss", + "inputs": [ + { "name": "log_probs", "type": "Tensor" }, + { "name": "targets", "type": "Tensor" }, + { "name": "input_lengths", "type": "int64[]" }, + { "name": "target_lengths", "type": "int64[]" }, + { "name": "blank", "type": "int64", "default": 0 }, + { "name": "zero_infinity", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::_ctc_loss.Tensor", + "inputs": [ + { "name": "log_probs", "type": "Tensor" }, + { "name": "targets", "type": "Tensor" }, + { "name": "input_lengths", "type": "Tensor" }, + { "name": "target_lengths", "type": "Tensor" }, + { "name": "blank", "type": "int64", "default": 0 }, + { "name": "zero_infinity", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::_dim_arange", + "inputs": [ + { "name": "like", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_fake_quantize_learnable_per_tensor_affine", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "scale", "type": "Tensor" }, + { "name": "zero_point", "type": "Tensor" }, + { "name": "quant_min", "type": "int64" }, + { "name": "quant_max", "type": "int64" }, + { "name": "grad_factor", "type": "float32", "default": 1.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_fake_quantize_learnable_per_tensor_affine_backward", + "inputs": [ + { "name": "grad", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "scale", "type": "Tensor" }, + { "name": "zero_point", "type": "Tensor" }, + { "name": "quant_min", "type": "int64" }, + { "name": 
"quant_max", "type": "int64" }, + { "name": "grad_factor", "type": "float32", "default": 1.0 } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::_make_per_tensor_quantized_tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "scale", "type": "float32" }, + { "name": "zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_pack_padded_sequence", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "lengths", "type": "Tensor" }, + { "name": "batch_first", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::_pad_packed_sequence", + "inputs": [ + { "name": "data", "type": "Tensor" }, + { "name": "batch_sizes", "type": "Tensor" }, + { "name": "batch_first", "type": "boolean" }, + { "name": "padding_value", "type": "Scalar" }, + { "name": "total_length", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::_shape_as_tensor", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_sparse_coo_tensor_unsafe", + "category": "Tensor", + "inputs": [ + { "name": "indices", "type": "Tensor" }, + { "name": "values", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "is_coalesced", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "name": "outputs", "type": "Tensor" } + ] + }, + { + "name": "aten::_test_serialization_subcmul", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_thnn_fused_gru_cell", + "inputs": [ + { "name": "input_gates", "type": "Tensor" }, + { "name": "hidden_gates", "type": "Tensor" }, + { "name": "hx", "type": "Tensor" }, + { "name": "input_bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "hidden_bias", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "output2", "type": "Tensor" } + ] + }, + { + "name": "aten::_thnn_fused_lstm_cell", + "inputs": [ + { "name": "input_gates", "type": "Tensor" }, + { "name": "hidden_gates", "type": "Tensor" }, + { "name": "cx", "type": "Tensor" }, + { "name": "input_bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "hidden_bias", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "output2", "type": "Tensor" }, + { "name": "output3", "type": "Tensor" } + ] + }, + { + "name": "aten::_unique2", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "sorted", "type": "boolean", "default": true }, + { "name": "return_inverse", "type": "boolean", "default": false }, + { "name": "return_counts", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::_weight_norm", + "inputs": [ + 
{ "name": "v", "type": "Tensor" }, + { "name": "g", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::_weight_norm_differentiable_backward", + "inputs": [ + { "name": "grad_w", "type": "Tensor" }, + { "name": "saved_v", "type": "Tensor" }, + { "name": "saved_g", "type": "Tensor" }, + { "name": "saved_norms", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::_weight_norm_interface", + "inputs": [ + { "name": "v", "type": "Tensor" }, + { "name": "g", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::_weight_norm_interface_backward", + "inputs": [ + { "name": "grad_w", "type": "Tensor" }, + { "name": "saved_v", "type": "Tensor" }, + { "name": "saved_g", "type": "Tensor" }, + { "name": "saved_norms", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::abs", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::abs.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::abs_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::acos", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::acos.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::acos_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::acosh", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::acosh.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::acosh_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::adaptive_avg_pool1d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "int64[1]", "visible": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::adaptive_avg_pool2d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]", "visible": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::adaptive_avg_pool2d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::adaptive_avg_pool3d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[3]", "visible": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::adaptive_avg_pool3d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[3]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::adaptive_max_pool1d", + "category": "Pool", + 
"inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "int64[1]", "visible": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::adaptive_max_pool2d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "int64[2]", "visible": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::adaptive_max_pool2d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "int64[2]" } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::adaptive_max_pool3d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "int64[3]", "visible": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::adaptive_max_pool3d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "int64[3]" } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::add.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::add.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::add.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::add.t", + "inputs": [ + { "name": "a", "type": "t[]" }, + { "name": "b", "type": "t[]" } + ], + "outputs": [ + { "type": "t[]" } + ] + }, + { + "name": "aten::add_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::add_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::add_.t", + "inputs": [ + { "name": "self", "type": "t[]" }, + { "name": "b", "type": "t[]" } + ], + "outputs": [ + { "type": "t[]" } + ] + }, + { + "name": "aten::addcmul", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "tensor1", "type": "Tensor" }, + { "name": "tensor2", "type": "Tensor" }, + { "name": "value", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::addcmul.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "tensor1", "type": "Tensor" }, + { "name": "tensor2", "type": "Tensor" }, + { "name": "value", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::addcmul_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "tensor1", "type": "Tensor" }, + { "name": "tensor2", "type": "Tensor" }, + { "name": "value", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::addmm", + "category": "Layer", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mat1", "type": "Tensor" }, + { "name": "mat2", "type": "Tensor" }, + { "name": "beta", "type": "Scalar", "default": 1 }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::addmm.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mat1", "type": "Tensor" }, + { "name": "mat2", "type": "Tensor" }, + { "name": "beta", "type": "Scalar", "default": 1 }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::addmm_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mat1", "type": "Tensor" }, + { "name": "mat2", "type": "Tensor" }, + { "name": "beta", "type": "Scalar", "default": 1 }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::addmv", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mat", "type": "Tensor" }, + { "name": "vec", "type": "Tensor" }, + { "name": "beta", "type": "Scalar", "default": 1 }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::addmv.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mat", "type": "Tensor" }, + { "name": "vec", "type": "Tensor" }, + { "name": "beta", "type": "Scalar", "default": 1 }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::addmv_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mat", "type": "Tensor" }, + { "name": "vec", "type": "Tensor" }, + { "name": "beta", "type": "Scalar", "default": 1 }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::affine_grid_generator", + "inputs": [ + { "name": "theta", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "align_corners", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::alias", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::alias_copy", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::all", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::all.all_out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::all.bool", + "inputs": [ + { "name": "self", "type": "boolean[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::all.dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::all.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::all.dimname_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + 
], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::all.dims", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[]", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::all.dims_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[]", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::all.float", + "inputs": [ + { "name": "self", "type": "float32[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::all.int", + "inputs": [ + { "name": "self", "type": "int64[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::all.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::alpha_dropout", + "category": "Dropout", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "p", "type": "float32" }, + { "name": "train", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::alpha_dropout_", + "category": "Dropout", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "float32" }, + { "name": "train", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::amax", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "default": [] }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::amax.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "default": [] }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::amin", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "default": [] }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::amin.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "default": [] }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::aminmax", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "min", "type": "Tensor" }, + { "name": "max", "type": "Tensor" } + ] + }, + { + "name": "aten::aminmax.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "min", "type": "Tensor" }, + { "name": "max", "type": "Tensor" } + ] + }, + { + "name": "aten::angle", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::angle.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + 
{ + "name": "aten::any", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::any.all_out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::any.bool", + "inputs": [ + { "name": "self", "type": "boolean[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::any.dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::any.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::any.dimname_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::any.dims", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[]", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::any.dims_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[]", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::any.float", + "inputs": [ + { "name": "self", "type": "float32[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::any.int", + "inputs": [ + { "name": "self", "type": "int64[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::any.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::any.str", + "inputs": [ + { "name": "self", "type": "string[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::arange", + "inputs": [ + { "name": "end", "type": "Scalar" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::arange.out", + "inputs": [ + { "name": "end", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::arange.start", + "inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": "Scalar" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::arange.start_out", + "inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": 
"Scalar" }, + { "name": "step", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::arange.start_out_", + "inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::arange.start_step", + "inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": "Scalar" }, + { "name": "step", "type": "Scalar", "default": 1 }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::arctan", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::arctan.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::arctan_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::argmax", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::argmax.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::argmin", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::argmin.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::argsort", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "descending", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::argsort.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "descending", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::argsort.stable", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "stable", "type": "boolean" }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "descending", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::as_strided", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "stride", "type": "SymInt[]" }, + { "name": "storage_offset", "type": "SymInt", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::as_strided_", + "inputs": [ + { "name": "self", "type": 
"Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "stride", "type": "SymInt[]" }, + { "name": "storage_offset", "type": "SymInt", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::as_strided_copy", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "stride", "type": "SymInt[]" }, + { "name": "storage_offset", "type": "SymInt", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::as_strided_scatter", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "src", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "stride", "type": "SymInt[]" }, + { "name": "storage_offset", "type": "SymInt", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::as_tensor", + "inputs": [ + { "name": "data", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor", "name": "aten" } + ] + }, + { + "name": "aten::as_tensor.bool", + "inputs": [ + { "name": "t", "type": "boolean" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::as_tensor.complex", + "inputs": [ + { "name": "t", "type": "complex" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::as_tensor.float", + "inputs": [ + { "name": "t", "type": "float32" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::as_tensor.int", + "inputs": [ + { "name": "t", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::as_tensor.list", + "inputs": [ + { "name": "data", "type": "t[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::atan", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::atan.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::atan2", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::atan2.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::atan2_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::atan_", + "inputs": [ + { 
"name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::atanh", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::atanh.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::atanh_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::avg_pool1d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[1]" }, + { "name": "stride", "type": "int64[1]", "default": [] }, + { "name": "padding", "type": "int64[1]", "default": 0 }, + { "name": "ceil_mode", "type": "boolean", "default": false }, + { "name": "count_include_pad", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::avg_pool2d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[2]" }, + { "name": "stride", "type": "int64[2]", "default": [] }, + { "name": "padding", "type": "int64[2]", "default": 0 }, + { "name": "ceil_mode", "type": "boolean", "default": false }, + { "name": "count_include_pad", "type": "boolean", "default": true }, + { "name": "divisor_override", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::avg_pool2d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[2]" }, + { "name": "stride", "type": "int64[2]", "default": [] }, + { "name": "padding", "type": "int64[2]", "default": 0 }, + { "name": "ceil_mode", "type": "boolean", "default": false }, + { "name": "count_include_pad", "type": "boolean", "default": true }, + { "name": "divisor_override", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::avg_pool3d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[3]" }, + { "name": "stride", "type": "int64[3]", "default": [] }, + { "name": "padding", "type": "int64[3]", "default": 0 }, + { "name": "ceil_mode", "type": "boolean", "default": false }, + { "name": "count_include_pad", "type": "boolean", "default": true }, + { "name": "divisor_override", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::avg_pool3d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[3]" }, + { "name": "stride", "type": "int64[3]", "default": [] }, + { "name": "padding", "type": "int64[3]", "default": 0 }, + { "name": "ceil_mode", "type": "boolean", "default": false }, + { "name": "count_include_pad", "type": "boolean", "default": true }, + { "name": "divisor_override", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::baddbmm", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "batch1", "type": "Tensor" }, + { "name": "batch2", "type": "Tensor" }, + { "name": "beta", "type": "Scalar", "default": 1 }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::baddbmm.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": 
"batch1", "type": "Tensor" }, + { "name": "batch2", "type": "Tensor" }, + { "name": "beta", "type": "Scalar", "default": 1 }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::baddbmm_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "batch1", "type": "Tensor" }, + { "name": "batch2", "type": "Tensor" }, + { "name": "beta", "type": "Scalar", "default": 1 }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::batch_norm", + "category": "Normalization", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "running_mean", "type": "Tensor", "optional": true }, + { "name": "running_var", "type": "Tensor", "optional": true }, + { "name": "training", "type": "boolean", "visible": false }, + { "name": "momentum", "type": "float32", "default": 0.1 }, + { "name": "eps", "type": "float32", "default": 1e-05 }, + { "name": "cudnn_enabled", "type": "boolean", "visible": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bernoulli", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bernoulli.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bernoulli.p", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "float32", "default": 0.5 }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bernoulli_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Tensor" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bernoulli_.float", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "float32", "default": 0.5 }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bin", + "inputs": [ + { "name": "i", "type": "int64" } + ], + "outputs": [ + { "type": "string" } + ] + }, + { + "name": "aten::binary_cross_entropy", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true, "default": null }, + { "name": "reduction", "type": "int64", "default": "Mean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::binary_cross_entropy.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true, "default": null }, + { "name": "reduction", "type": "int64", "default": "Mean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::binary_cross_entropy_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true, 
"default": null }, + { "name": "reduction", "type": "int64", "default": "Mean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::binary_cross_entropy_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true, "default": null }, + { "name": "reduction", "type": "int64", "default": "Mean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::binary_cross_entropy_with_logits", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true, "default": null }, + { "name": "pos_weight", "type": "Tensor", "optional": true, "default": null }, + { "name": "reduction", "type": "int64", "default": "Mean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bincount", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "weights", "type": "Tensor", "optional": true, "default": null }, + { "name": "minlength", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::binomial", + "inputs": [ + { "name": "count", "type": "Tensor" }, + { "name": "prob", "type": "Tensor" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bitwise_and.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bitwise_and.Scalar_Tensor", + "inputs": [ + { "name": "self", "type": "Scalar" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bitwise_and.Scalar_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bitwise_and.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bitwise_and.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bitwise_and_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bitwise_and_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bitwise_not", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bitwise_not.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bitwise_not_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::block_diag", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bmm", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mat2", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } 
+ ] + }, + { + "name": "aten::bmm.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mat2", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::broadcast_tensors", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::broadcast_to", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bucketize.Scalar", + "inputs": [ + { "name": "self", "type": "Scalar" }, + { "name": "boundaries", "type": "Tensor" }, + { "name": "out_int32", "type": "boolean", "default": false }, + { "name": "right", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bucketize.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "boundaries", "type": "Tensor" }, + { "name": "out_int32", "type": "boolean", "default": false }, + { "name": "right", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::bucketize.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "boundaries", "type": "Tensor" }, + { "name": "out_int32", "type": "boolean", "default": false }, + { "name": "right", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cartesian_prod", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cat", + "category": "Tensor", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cat.names", + "category": "Tensor", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cat.names_out", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cat.out", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cauchy_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "median", "type": "float32", "default": 0 }, + { "name": "sigma", "type": "float32", "default": 1 }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cdist", + "inputs": [ + { "name": "x1", "type": "Tensor" }, + { "name": "x2", "type": "Tensor" }, + { "name": "p", "type": "float32", "default": 2 }, + { "name": "compute_mode", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ceil", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ceil.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ceil_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::celu", + "category": "Activation", + "inputs": [ + { "name": "self", 
"type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::channel_shuffle", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "groups", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::chunk", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "chunks", "type": "int64" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::clamp", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Scalar", "optional": true, "default": null }, + { "name": "max", "type": "Scalar", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Tensor", "optional": true, "default": null }, + { "name": "max", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Tensor", "optional": true, "default": null }, + { "name": "max", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Scalar", "optional": true, "default": null }, + { "name": "max", "type": "Scalar", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Scalar", "optional": true, "default": null }, + { "name": "max", "type": "Scalar", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Tensor", "optional": true, "default": null }, + { "name": "max", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_max", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "max", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_max.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "max", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_max.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "max", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_max.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "max", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_max_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "max", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_max_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "max", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_min", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::clamp_min.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_min.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_min.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_min_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clamp_min_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::classes._nnapi.Compilation", + "inputs": [ + { "name": "serialized_model", "type": "Tensor" }, + { "name": "inputs", "type": "Tensor[]" }, + { "name": "parameter_buffers", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::clip", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Scalar", "optional": true, "default": null }, + { "name": "max", "type": "Scalar", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clip.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Tensor", "optional": true, "default": null }, + { "name": "max", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clip.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Tensor", "optional": true, "default": null }, + { "name": "max", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clip.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Scalar", "optional": true, "default": null }, + { "name": "max", "type": "Scalar", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clip_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Scalar", "optional": true, "default": null }, + { "name": "max", "type": "Scalar", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clip_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min", "type": "Tensor", "optional": true, "default": null }, + { "name": "max", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::clone", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::coalesce", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::col2im", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "kernel_size", "type": "int64[2]" }, + { "name": "dilation", "type": "int64[2]" }, + { "name": "padding", "type": "int64[2]" }, + { "name": "stride", "type": 
"int64[2]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::col2im.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "kernel_size", "type": "int64[2]" }, + { "name": "dilation", "type": "int64[2]" }, + { "name": "padding", "type": "int64[2]" }, + { "name": "stride", "type": "int64[2]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::column_stack", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::column_stack.out", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::complex", + "inputs": [ + { "name": "real", "type": "Tensor" }, + { "name": "imag", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::complex.out", + "inputs": [ + { "name": "real", "type": "Tensor" }, + { "name": "imag", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::concat", + "category": "Tensor", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::concat.names", + "category": "Tensor", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::concat.names_out", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::concat.out", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::concatenate", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::concatenate.names", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::concatenate.names_out", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::concatenate.out", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::conj", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::constant_pad_nd", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "pad", "type": "SymInt[]" }, + { "name": "value", "type": "Scalar", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::contiguous", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "memory_format", "type": "MemoryFormat", "default": "contiguous_format" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::conv1d", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null }, + { "name": 
"stride", "type": "SymInt[1]", "default": 1 }, + { "name": "padding", "type": "SymInt[1]", "default": 0 }, + { "name": "dilation", "type": "SymInt[1]", "default": 1 }, + { "name": "groups", "type": "SymInt", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::conv1d.padding", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "stride", "type": "SymInt[1]", "default": 1 }, + { "name": "padding", "type": "string", "default": "valid" }, + { "name": "dilation", "type": "SymInt[1]", "default": 1 }, + { "name": "groups", "type": "SymInt", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::conv2d", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "stride", "type": "SymInt[2]", "default": 1 }, + { "name": "padding", "type": "SymInt[2]", "default": 0 }, + { "name": "dilation", "type": "SymInt[2]", "default": 1 }, + { "name": "groups", "type": "SymInt", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::conv2d.padding", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "stride", "type": "SymInt[2]", "default": 1 }, + { "name": "padding", "type": "string", "default": "valid" }, + { "name": "dilation", "type": "SymInt[2]", "default": 1 }, + { "name": "groups", "type": "SymInt", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::conv3d", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "stride", "type": "SymInt[3]", "default": 1 }, + { "name": "padding", "type": "SymInt[3]", "default": 0 }, + { "name": "dilation", "type": "SymInt[3]", "default": 1 }, + { "name": "groups", "type": "SymInt", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::conv3d.padding", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "stride", "type": "SymInt[3]", "default": 1 }, + { "name": "padding", "type": "string", "default": "valid" }, + { "name": "dilation", "type": "SymInt[3]", "default": 1 }, + { "name": "groups", "type": "SymInt", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::conv_transpose1d", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "stride", "type": "SymInt[1]", "default": 1 }, + { "name": "padding", "type": "SymInt[1]", "default": 0 }, + { "name": "output_padding", "type": "SymInt[1]", "default": 0 }, + { "name": "groups", "type": "SymInt", "default": 1 }, + { "name": "dilation", "type": "SymInt[1]", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::conv_transpose2d.input", + "category": "Layer", + "inputs": [ + { "name": 
"input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "stride", "type": "SymInt[2]", "default": 1 }, + { "name": "padding", "type": "SymInt[2]", "default": 0 }, + { "name": "output_padding", "type": "SymInt[2]", "default": 0 }, + { "name": "groups", "type": "SymInt", "default": 1 }, + { "name": "dilation", "type": "SymInt[2]", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::conv_transpose3d.input", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "stride", "type": "SymInt[3]", "default": 1 }, + { "name": "padding", "type": "SymInt[3]", "default": 0 }, + { "name": "output_padding", "type": "SymInt[3]", "default": 0 }, + { "name": "groups", "type": "SymInt", "default": 1 }, + { "name": "dilation", "type": "SymInt[3]", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::convolution", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "SymInt[]" }, + { "name": "padding", "type": "SymInt[]" }, + { "name": "dilation", "type": "SymInt[]" }, + { "name": "transposed", "type": "boolean" }, + { "name": "output_padding", "type": "SymInt[]" }, + { "name": "groups", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::convolution_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias_sizes", "type": "SymInt[]", "optional": true }, + { "name": "stride", "type": "SymInt[]" }, + { "name": "padding", "type": "SymInt[]" }, + { "name": "dilation", "type": "SymInt[]" }, + { "name": "transposed", "type": "boolean" }, + { "name": "output_padding", "type": "SymInt[]" }, + { "name": "groups", "type": "SymInt" }, + { "name": "output_mask", "type": "boolean[3]" } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::convolution_backward_overrideable", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "stride", "type": "SymInt[]" }, + { "name": "padding", "type": "SymInt[]" }, + { "name": "dilation", "type": "SymInt[]" }, + { "name": "transposed", "type": "boolean" }, + { "name": "output_padding", "type": "SymInt[]" }, + { "name": "groups", "type": "SymInt" }, + { "name": "output_mask", "type": "boolean[3]" } + ], + "outputs": [ + { "name": "grad_input", "type": "Tensor" }, + { "name": "grad_weight", "type": "Tensor" }, + { "name": "grad_bias", "type": "Tensor" } + ] + }, + { + "name": "aten::convolution_overrideable", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "SymInt[]" }, + { "name": "padding", "type": "SymInt[]" }, + { "name": "dilation", "type": "SymInt[]" }, + { "name": "transposed", "type": "boolean" }, + { "name": "output_padding", "type": "SymInt[]" }, + { "name": "groups", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::copy_", + 
"inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "src", "type": "Tensor" }, + { "name": "non_blocking", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cos", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cos.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cosh", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cosh.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::count_nonzero", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::count_nonzero.dim_IntList", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cpu", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cross", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cross.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cross_entropy_loss", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true, "default": null }, + { "name": "reduction", "type": "int64", "default": "Mean" }, + { "name": "ignore_index", "type": "SymInt", "default": -100 }, + { "name": "label_smoothing", "type": "float32", "default": 0.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ctc_loss.IntList", + "inputs": [ + { "name": "log_probs", "type": "Tensor" }, + { "name": "targets", "type": "Tensor" }, + { "name": "input_lengths", "type": "int64[]" }, + { "name": "target_lengths", "type": "int64[]" }, + { "name": "blank", "type": "int64", "default": 0 }, + { "name": "reduction", "type": "int64", "default": "Mean" }, + { "name": "zero_infinity", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ctc_loss.Tensor", + "inputs": [ + { "name": "log_probs", "type": "Tensor" }, + { "name": "targets", "type": "Tensor" }, + { "name": "input_lengths", "type": "Tensor" }, + { "name": "target_lengths", "type": "Tensor" }, + { "name": "blank", "type": "int64", "default": 0 }, + { "name": "reduction", "type": "int64", "default": "Mean" }, + { "name": "zero_infinity", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cudnn_convolution_add_relu", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "z", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "SymInt[]" }, + { 
"name": "padding", "type": "SymInt[]" }, + { "name": "dilation", "type": "SymInt[]" }, + { "name": "groups", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cudnn_convolution_relu", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "SymInt[]" }, + { "name": "padding", "type": "SymInt[]" }, + { "name": "dilation", "type": "SymInt[]" }, + { "name": "groups", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cummax", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::cummax.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::cummax.dimname_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::cummax.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::cummaxmin_backward", + "inputs": [ + { "name": "grad", "type": "Tensor" }, + { "name": "input", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cumsum", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cumsum.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cumsum.dimname_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cumsum.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cumsum_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::cumsum_.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::dequantize.any", + "inputs": [ + { "name": "tensors", "type": "Any" } + ], + "outputs": [ + { "type": "Any" } + ] + }, + { + "name": 
"aten::dequantize.list", + "inputs": [ + { "name": "qtensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::dequantize.self", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::dequantize.tensor", + "inputs": [ + { "name": "qtensor", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::dequantize.tensors", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::detach", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::detach_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::detach_copy", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::diag", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "diagonal", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::diag.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "diagonal", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::diag_embed", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "offset", "type": "int64", "default": 0 }, + { "name": "dim1", "type": "int64", "default": -2 }, + { "name": "dim2", "type": "int64", "default": -1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::diagflat", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "offset", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::diagonal", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "offset", "type": "int64", "default": 0 }, + { "name": "dim1", "type": "int64", "default": 0 }, + { "name": "dim2", "type": "int64", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::diagonal.Dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "outdim", "type": "Dimname" }, + { "name": "dim1", "type": "Dimname" }, + { "name": "dim2", "type": "Dimname" }, + { "name": "offset", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::diagonal_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "input_sizes", "type": "SymInt[]" }, + { "name": "offset", "type": "int64" }, + { "name": "dim1", "type": "int64" }, + { "name": "dim2", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::diagonal_copy", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "offset", "type": "int64", "default": 0 }, + { "name": "dim1", "type": "int64", "default": 0 }, + { "name": "dim2", "type": "int64", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::diagonal_scatter", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "src", "type": "Tensor" }, + { "name": "offset", "type": "int64", "default": 0 }, + { "name": "dim1", "type": "int64", "default": 0 }, + { "name": "dim2", "type": "int64", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::diff", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { 
"name": "n", "type": "int64", "default": 1 }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "prepend", "type": "Tensor", "optional": true, "default": null }, + { "name": "append", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::diff.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "n", "type": "int64", "default": 1 }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "prepend", "type": "Tensor", "optional": true, "default": null }, + { "name": "append", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::dist", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "p", "type": "Scalar", "default": 2 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::div.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::div.Scalar_mode", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" }, + { "name": "rounding_mode", "type": "string", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::div.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::div.Tensor_mode", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "rounding_mode", "type": "string", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::div.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::div.out_mode", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "rounding_mode", "type": "string", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::div_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::div_.Scalar_mode", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" }, + { "name": "rounding_mode", "type": "string", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::div_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::div_.Tensor_mode", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "rounding_mode", "type": "string", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::divide.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::divide.Scalar_mode", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" }, + { "name": "rounding_mode", "type": "string", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::divide.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::divide.Tensor_mode", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "rounding_mode", "type": "string", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::divide.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::divide.out_mode", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "rounding_mode", "type": "string", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::divide_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::divide_.Scalar_mode", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" }, + { "name": "rounding_mode", "type": "string", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::divide_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::divide_.Tensor_mode", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "rounding_mode", "type": "string", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::divmod.float", + "inputs": [ + { "name": "x", "type": "float32" }, + { "name": "y", "type": "float32" } + ], + "outputs": [ + { "type": "float32" }, + { "type": "float32" } + ] + }, + { + "name": "aten::divmod.int", + "inputs": [ + { "name": "x", "type": "int64" }, + { "name": "y", "type": "int64" } + ], + "outputs": [ + { "type": "int64" }, + { "type": "int64" } + ] + }, + { + "name": "aten::dot", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "tensor", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::dot.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "tensor", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::dropout", + "category": "Dropout", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "p", "type": "float32", "default": 0.5 }, + { "name": "train", "type": "boolean", "visible": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::dropout_", + "category": "Dropout", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "float32", "default": 0.5 }, + { "name": "train", "type": "boolean", "visible": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::einsum", + "inputs": [ + { "name": "equation", "type": "string" }, + { "name": "tensors", "type": "Tensor[]" }, + { "name": "path", "type": "int64[]", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::einsum.sublist", + "inputs": [ + { "name": "a", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::elu", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { 
"name": "alpha", "type": "Scalar", "default": 1 }, + { "name": "scale", "type": "Scalar", "default": 1 }, + { "name": "input_scale", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::elu.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 }, + { "name": "scale", "type": "Scalar", "default": 1 }, + { "name": "input_scale", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::elu_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 }, + { "name": "scale", "type": "Scalar", "default": 1 }, + { "name": "input_scale", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::embedding", + "category": "Transform", + "inputs": [ + { "name": "weight", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "padding_idx", "type": "SymInt", "default": -1 }, + { "name": "scale_grad_by_freq", "type": "boolean", "default": false }, + { "name": "sparse", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::embedding_bag", + "category": "Transform", + "inputs": [ + { "name": "weight", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "offsets", "type": "Tensor" }, + { "name": "scale_grad_by_freq", "type": "boolean", "default": false }, + { "name": "mode", "type": "int64", "default": 0 }, + { "name": "sparse", "type": "boolean", "default": false }, + { "name": "per_sample_weights", "type": "Tensor", "optional": true, "default": null }, + { "name": "include_last_offset", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "output1", "type": "Tensor" }, + { "name": "output2", "type": "Tensor" }, + { "name": "output3", "type": "Tensor" }, + { "name": "output4", "type": "Tensor" } + ] + }, + { + "name": "aten::embedding_bag.padding_idx", + "inputs": [ + { "name": "weight", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "offsets", "type": "Tensor" }, + { "name": "scale_grad_by_freq", "type": "boolean" }, + { "name": "mode", "type": "int64" }, + { "name": "sparse", "type": "boolean" }, + { "name": "per_sample_weights", "type": "Tensor", "optional": true }, + { "name": "include_last_offset", "type": "boolean" }, + { "name": "padding_idx", "type": "int64", "optional": true } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" }, + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::embedding_renorm_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "max_norm", "type": "float32" }, + { "name": "norm_type", "type": "float32" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::empty.memory_format", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::empty.names", + "inputs": [ + { 
"name": "size", "type": "int64[]" }, + { "name": "names", "type": "Dimname[]", "optional": true, "default": null }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::empty.out", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::empty_like", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::eq.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::eq.Scalar_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::eq.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::eq.Tensor_list", + "inputs": [ + { "name": "a", "type": "Tensor[]" }, + { "name": "b", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::eq.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::eq.bool", + "inputs": [ + { "name": "a", "type": "boolean" }, + { "name": "b", "type": "boolean" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::eq.bool_list", + "inputs": [ + { "name": "a", "type": "boolean[]" }, + { "name": "b", "type": "boolean[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::eq.device", + "inputs": [ + { "name": "a", "type": "Device" }, + { "name": "b", "type": "Device" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::eq.enum", + "inputs": [ + { "name": "a", "type": "AnyEnumType" }, + { "name": "b", "type": "AnyEnumType" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::eq.float_list", + "inputs": [ + { "name": "a", "type": "float32[]" }, + { "name": "b", "type": "float32[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::eq.int_list", + "inputs": [ + { "name": "a", "type": "int64[]" }, + { "name": "b", "type": "int64[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::eq.str_list", + "inputs": [ + { "name": "a", "type": "string[]" }, + { "name": "b", "type": "string[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::equal", + "inputs": [ + { 
"name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::erf", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::erf.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::exp", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::exp.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::exp_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::expand", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "implicit", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::expand_as", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::expm1", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::expm1.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::expm1_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::exponential_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "lambd", "type": "float32", "default": 1 }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::eye", + "inputs": [ + { "name": "n", "type": "SymInt" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::eye.m", + "inputs": [ + { "name": "n", "type": "SymInt" }, + { "name": "m", "type": "SymInt" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::eye.m_out", + "inputs": [ + { "name": "n", "type": "SymInt" }, + { "name": "m", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::eye.out", + "inputs": [ + { "name": "n", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fake_quantize_per_channel_affine", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "scale", "type": "Tensor" }, + { "name": "zero_point", "type": "Tensor" }, + { "name": "axis", "type": "int64" }, + { "name": "quant_min", "type": "int64" }, + { "name": "quant_max", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::fake_quantize_per_tensor_affine", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "scale", "type": "float32" }, + { "name": "zero_point", "type": "int64" }, + { "name": "quant_min", "type": "int64" }, + { "name": "quant_max", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fake_quantize_per_tensor_affine.tensor_qparams", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "scale", "type": "Tensor" }, + { "name": "zero_point", "type": "Tensor" }, + { "name": "quant_min", "type": "int64" }, + { "name": "quant_max", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fake_quantize_per_tensor_affine_cachemask", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "scale", "type": "float32" }, + { "name": "zero_point", "type": "int64" }, + { "name": "quant_min", "type": "int64" }, + { "name": "quant_max", "type": "int64" } + ], + "outputs": [ + { "name": "output", "type": "Tensor" }, + { "name": "mask", "type": "Tensor" } + ] + }, + { + "name": "aten::fake_quantize_per_tensor_affine_cachemask_backward", + "inputs": [ + { "name": "grad", "type": "Tensor" }, + { "name": "mask", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::feature_alpha_dropout", + "category": "Dropout", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "p", "type": "float32" }, + { "name": "train", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::feature_alpha_dropout_", + "category": "Dropout", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "float32" }, + { "name": "train", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::feature_dropout", + "category": "Dropout", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "p", "type": "float32" }, + { "name": "train", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::feature_dropout_", + "category": "Dropout", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "float32" }, + { "name": "train", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "signal_ndim", "type": "int64" }, + { "name": "normalized", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_fft", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "n", "type": "SymInt", "optional": true, "default": null }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_fft.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "n", "type": "SymInt", "optional": true, "default": null }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_fft2", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": 
"Tensor" } + ] + }, + { + "name": "aten::fft_fft2.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_fftn", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_fftn.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_fftshift", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_hfft2", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_hfft2.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_hfftn", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_hfftn.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_ifft2", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_ifft2.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_ifftn", + "inputs": [ + { "name": "self", "type": "Tensor" 
}, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_ifftn.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_ifftshift", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_ihfft2", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_ihfft2.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_ihfftn", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_ihfftn.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_irfft2", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_irfft2.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_irfftn", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_irfftn.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + 
{ "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_rfft2", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_rfft2.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_rfftn", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fft_rfftn.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "s", "type": "SymInt[1]", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "norm", "type": "string", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fill_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fill_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "value", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::flatten.DimnameList", + "category": "Shape", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dims", "type": "Dimname[]" }, + { "name": "out_dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::flatten.named_out_dim", + "category": "Shape", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "start_dim", "type": "int64" }, + { "name": "end_dim", "type": "int64" }, + { "name": "out_dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::flatten.using_ints", + "category": "Shape", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "start_dim", "type": "int64", "default": 0 }, + { "name": "end_dim", "type": "int64", "default": -1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::flatten.using_names", + "category": "Shape", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "start_dim", "type": "Dimname" }, + { "name": "end_dim", "type": "Dimname" }, + { "name": "out_dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::flip", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dims", "type": "int64[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::floor", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::floor.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], 
+ "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::floor_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::floor_divide", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::floor_divide.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::floor_divide.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::floor_divide_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::floor_divide_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fmod.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fmod.Scalar_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fmod.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fmod.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::format", + "inputs": [ + { "name": "self", "type": "string" } + ], + "outputs": [ + { "type": "string" } + ] + }, + { + "name": "aten::frobenius_norm.dim", + "category": "Normalization", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::frobenius_norm.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::full", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "fill_value", "type": "Scalar" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::full.names", + "inputs": [ + { "name": "size", "type": "int64[]" }, + { "name": "fill_value", "type": "Scalar" }, + { "name": "names", "type": "Dimname[]", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { 
"type": "Tensor" } + ] + }, + { + "name": "aten::full.out", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "fill_value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::full_like", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "fill_value", "type": "Scalar" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::fused_moving_avg_obs_fake_quant", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "observer_on", "type": "Tensor" }, + { "name": "fake_quant_on", "type": "Tensor" }, + { "name": "running_min", "type": "Tensor" }, + { "name": "running_max", "type": "Tensor" }, + { "name": "scale", "type": "Tensor" }, + { "name": "zero_point", "type": "Tensor" }, + { "name": "averaging_const", "type": "float32" }, + { "name": "quant_min", "type": "int64" }, + { "name": "quant_max", "type": "int64" }, + { "name": "ch_axis", "type": "int64" }, + { "name": "per_row_fake_quant", "type": "boolean", "default": false }, + { "name": "symmetric_quant", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gather", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "sparse_grad", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gather.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "sparse_grad", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gather.dimname_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "sparse_grad", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gather.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "sparse_grad", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gather_backward", + "inputs": [ + { "name": "grad", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "sparse_grad", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gcd", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gcd.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gcd_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + 
"name": "aten::ge.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ge.Scalar_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ge.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ge.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ge_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ge_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gelu", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "approximate", "type": "string", "default": "none" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gelu.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "approximate", "type": "string", "default": "none" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gelu_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "approximate", "type": "string", "default": "none" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gelu_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "approximate", "type": "string", "default": "none" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gelu_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "approximate", "type": "string", "default": "none" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::geometric_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "float32" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::geqrf", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "name": "a", "type": "Tensor" }, + { "name": "tau", "type": "Tensor" } + ] + }, + { + "name": "aten::geqrf.a", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "name": "a", "type": "Tensor" }, + { "name": "tau", "type": "Tensor" } + ] + }, + { + "name": "aten::ger", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "vec2", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ger.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "vec2", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::get_device", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "aten::glu", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": -1 } + ], + 
"outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::glu.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": -1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::greater_equal.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::greater_equal.Scalar_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::greater_equal.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::greater_equal.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::greater_equal_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::greater_equal_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::grid_sampler", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "grid", "type": "Tensor" }, + { "name": "interpolation_mode", "type": "int64" }, + { "name": "padding_mode", "type": "int64" }, + { "name": "align_corners", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::group_norm", + "category": "Normalization", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "num_groups", "type": "int64" }, + { "name": "weight", "type": "Tensor", "optional": true, "default": null }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "eps", "type": "float32", "default": 1e-05 }, + { "name": "cudnn_enabled", "type": "boolean", "visible": true, "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gru.data", + "category": "Layer", + "inputs": [ + { "name": "data", "type": "Tensor" }, + { "name": "batch_sizes", "type": "Tensor" }, + { "name": "hx", "type": "Tensor" }, + { "name": "params", "type": "Tensor[]" }, + { "name": "has_biases", "type": "boolean" }, + { "name": "num_layers", "type": "int64" }, + { "name": "dropout", "type": "float32" }, + { "name": "train", "type": "boolean" }, + { "name": "bidirectional", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::gru.input", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "hx", "type": "Tensor" }, + { "name": "params", "type": "Tensor[]" }, + { "name": "has_biases", "type": "boolean" }, + { "name": "num_layers", "type": "int64" }, + { "name": "dropout", "type": "float32" }, + { "name": "train", "type": "boolean" }, + { "name": "bidirectional", "type": "boolean" }, + { "name": "batch_first", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::gt.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gt.Scalar_out", + 
"inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gt.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::gt.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hamming_window", + "inputs": [ + { "name": "window_length", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hamming_window.periodic", + "inputs": [ + { "name": "window_length", "type": "int64" }, + { "name": "periodic", "type": "boolean" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hamming_window.periodic_alpha", + "inputs": [ + { "name": "window_length", "type": "int64" }, + { "name": "periodic", "type": "boolean" }, + { "name": "alpha", "type": "float32" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hamming_window.periodic_alpha_beta", + "inputs": [ + { "name": "window_length", "type": "int64" }, + { "name": "periodic", "type": "boolean" }, + { "name": "alpha", "type": "float32" }, + { "name": "beta", "type": "float32" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hann_window", + "inputs": [ + { "name": "window_length", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hann_window.periodic", + "inputs": [ + { "name": "window_length", "type": "int64" }, + { "name": "periodic", "type": "boolean" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", 
"type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardsigmoid", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardsigmoid.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardsigmoid_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardswish", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardswish.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardswish_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardswish_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardtanh", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min_val", "type": "Scalar", "default": -1 }, + { "name": "max_val", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardtanh.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min_val", "type": "Scalar", "default": -1 }, + { "name": "max_val", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardtanh_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "min_val", "type": "Scalar", "default": -1 }, + { "name": "max_val", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardtanh_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "min_val", "type": "Scalar" }, + { "name": "max_val", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hardtanh_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "min_val", "type": "Scalar" }, + { "name": "max_val", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hstack", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::hstack.out", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::huber_loss", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64", "default": "Mean" }, + { "name": "delta", "type": "float32", "default": 1.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::huber_loss.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64", "default": "Mean" }, + { "name": "delta", "type": "float32", "default": 1.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::huber_loss_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64" }, + { "name": "delta", "type": "float32" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::huber_loss_backward.out", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64" }, + { "name": "delta", "type": "float32" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::im2col", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[2]" }, + { "name": "dilation", "type": "int64[2]" }, + { "name": "padding", "type": "int64[2]" }, + { "name": "stride", "type": "int64[2]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::im2col.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[2]" }, + { "name": "dilation", "type": "int64[2]" }, + { "name": "padding", "type": "int64[2]" }, + { "name": "stride", "type": "int64[2]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::imag", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor[]", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index.Tensor_hacked_twin", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor[]", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index.str", + "inputs": [ + { "name": "self", "type": "string" }, + { "name": "substr", "type": "string" }, + { "name": "start", "type": "int64", "default": 0 }, + { "name": "end", "type": "int64", "default": -1 } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "aten::index_add", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_add.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_add.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_add_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { 
"type": "Tensor" } + ] + }, + { + "name": "aten::index_copy", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_copy.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_copy.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_copy_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_copy_.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_fill.Dimname_Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_fill.Dimname_Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_fill.int_Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_fill.int_Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_fill_.Dimname_Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_fill_.Dimname_Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_fill_.int_Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_fill_.int_Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + 
"name": "aten::index_put", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor[]", "optional": true }, + { "name": "values", "type": "Tensor" }, + { "name": "accumulate", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_put.hacked_twin", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor[]" }, + { "name": "values", "type": "Tensor" }, + { "name": "accumulate", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_put_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor[]", "optional": true }, + { "name": "values", "type": "Tensor" }, + { "name": "accumulate", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_put_.hacked_twin", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor[]" }, + { "name": "values", "type": "Tensor" }, + { "name": "accumulate", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_reduce", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" }, + { "name": "reduce", "type": "string" }, + { "name": "include_self", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_reduce.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" }, + { "name": "reduce", "type": "string" }, + { "name": "include_self", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_reduce_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "source", "type": "Tensor" }, + { "name": "reduce", "type": "string" }, + { "name": "include_self", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_select", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_select.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_select.dimname_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_select.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::index_select_backward", + "inputs": [ + { "name": "grad", "type": "Tensor" }, + { "name": "self_sizes", "type": "SymInt[]" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::instance_norm", + 
"category": "Normalization", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "running_mean", "type": "Tensor", "optional": true }, + { "name": "running_var", "type": "Tensor", "optional": true }, + { "name": "use_input_stats", "type": "boolean" }, + { "name": "momentum", "type": "float32" }, + { "name": "eps", "type": "float32" }, + { "name": "cudnn_enabled", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::int_repr", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::inverse", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::inverse.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::is_contiguous", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::is_contiguous.memory_format", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "memory_format", "type": "MemoryFormat" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::is_floating_point", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::isfinite", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::isinf", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::isnan", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::istft", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "n_fft", "type": "int64" }, + { "name": "hop_length", "type": "int64", "optional": true, "default": null }, + { "name": "win_length", "type": "int64", "optional": true, "default": null }, + { "name": "window", "type": "Tensor", "optional": true, "default": null }, + { "name": "center", "type": "boolean", "default": true }, + { "name": "normalized", "type": "boolean", "default": false }, + { "name": "onesided", "type": "boolean", "optional": true, "default": null }, + { "name": "length", "type": "int64", "optional": true, "default": null }, + { "name": "return_complex", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::item", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Scalar" } + ] + }, + { + "name": "aten::join", + "inputs": [ + { "name": "self", "type": "string" }, + { "name": "values", "type": "string[]" } + ], + "outputs": [ + { "type": "string" } + ] + }, + { + "name": "aten::kthvalue", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "k", "type": "int64" }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::kthvalue.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "k", "type": "int64" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + 
"outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::kthvalue.dimname_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "k", "type": "int64" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::kthvalue.values", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "k", "type": "int64" }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::l1_loss", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64", "default": "Mean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::layer_norm", + "category": "Normalization", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "normalized_shape", "type": "SymInt[]" }, + { "name": "weight", "type": "Tensor", "optional": true, "default": null }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null }, + { "name": "eps", "type": "float32", "default": 1e-05 }, + { "name": "cudnn_enable", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::le.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::le.Scalar_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::le.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::le.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::leaky_relu", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "negative_slope", "type": "Scalar", "default": 0.01 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::leaky_relu.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "negative_slope", "type": "Scalar", "default": 0.01 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::leaky_relu_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "negative_slope", "type": "Scalar", "default": 0.01 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::lift_fresh", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_cross", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": -1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_cross.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": -1 } + ], + "outputs": [ + { "type": 
"Tensor" } + ] + }, + { + "name": "aten::linalg_inv", + "inputs": [ + { "name": "A", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_inv.out", + "inputs": [ + { "name": "A", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_inv_ex", + "inputs": [ + { "name": "A", "type": "Tensor" }, + { "name": "check_errors", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "inverse", "type": "Tensor" }, + { "name": "info", "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_inv_ex.inverse", + "inputs": [ + { "name": "A", "type": "Tensor" }, + { "name": "check_errors", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "inverse", "type": "Tensor" }, + { "name": "info", "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_norm", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "ord", "type": "Scalar", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_norm.ord_str", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "ord", "type": "string" }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_norm.ord_str_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "ord", "type": "string" }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_norm.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "ord", "type": "Scalar", "optional": true, "default": null }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_solve", + "inputs": [ + { "name": "A", "type": "Tensor" }, + { "name": "B", "type": "Tensor" }, + { "name": "left", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_solve.out", + "inputs": [ + { "name": "A", "type": "Tensor" }, + { "name": "B", "type": "Tensor" }, + { "name": "left", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_solve_ex", + "inputs": [ + { "name": "A", "type": "Tensor" }, + { "name": "B", "type": "Tensor" }, + { "name": "left", "type": "boolean", "default": true }, + { "name": "check_errors", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "result", "type": "Tensor" }, + { "name": "info", "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_solve_ex.out", + "inputs": [ + { "name": "A", "type": "Tensor" }, + { "name": "B", "type": "Tensor" }, + { "name": "left", "type": "boolean", "default": true }, + { "name": 
"check_errors", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "result", "type": "Tensor" }, + { "name": "info", "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_solve_triangular", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "B", "type": "Tensor" }, + { "name": "upper", "type": "boolean" }, + { "name": "left", "type": "boolean", "default": true }, + { "name": "unitriangular", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_solve_triangular.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "B", "type": "Tensor" }, + { "name": "upper", "type": "boolean" }, + { "name": "left", "type": "boolean", "default": true }, + { "name": "unitriangular", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_tensorinv", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "ind", "type": "int64", "default": 2 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_tensorinv.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "ind", "type": "int64", "default": 2 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_tensorsolve", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "dims", "type": "int64[]", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_tensorsolve.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "dims", "type": "int64[]", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_vector_norm", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "ord", "type": "Scalar", "default": 2 }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linalg_vector_norm.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "ord", "type": "Scalar", "default": 2 }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linear", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linear.out", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linear_backward", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "grad_output", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "output_mask", "type": "boolean[3]" } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::linspace", + 
"inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": "Scalar" }, + { "name": "steps", "type": "int64", "default": null }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linspace.Scalar_Tensor", + "inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": "Tensor" }, + { "name": "steps", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linspace.Scalar_Tensor_out", + "inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": "Tensor" }, + { "name": "steps", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linspace.Tensor_Scalar", + "inputs": [ + { "name": "start", "type": "Tensor" }, + { "name": "end", "type": "Scalar" }, + { "name": "steps", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linspace.Tensor_Scalar_out", + "inputs": [ + { "name": "start", "type": "Tensor" }, + { "name": "end", "type": "Scalar" }, + { "name": "steps", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linspace.Tensor_Tensor", + "inputs": [ + { "name": "start", "type": "Tensor" }, + { "name": "end", "type": "Tensor" }, + { "name": "steps", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linspace.Tensor_Tensor_out", + "inputs": [ + { "name": "start", "type": "Tensor" }, + { "name": "end", "type": "Tensor" }, + { "name": "steps", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::linspace.out", + "inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": "Scalar" }, + { "name": "steps", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log10", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log10.out", + "inputs": [ + { "name": "self", 
"type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log10_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log1p", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log1p.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log1p_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log2", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log2.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log2_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log_normal_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mean", "type": "float32", "default": 1 }, + { "name": "std", "type": "float32", "default": 2 }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log_sigmoid", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log_sigmoid.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log_sigmoid_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "buffer", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log_sigmoid_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "buffer", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log_sigmoid_forward", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "name": "output", "type": "Tensor" }, + { "name": "buffer", "type": "Tensor" } + ] + }, + { + "name": "aten::log_sigmoid_forward.output", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::log_softmax.Dimname", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log_softmax.int", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::log_softmax.int_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logaddexp", + "inputs": [ + { "name": 
"self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logaddexp.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logaddexp2", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logaddexp2.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logcumsumexp", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logcumsumexp.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logcumsumexp.dimname_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logcumsumexp.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logdet", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_and", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_and.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_and_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_not", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_not.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_not_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_or", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_or.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_or_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_xor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_xor.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logical_xor_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": 
"Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logit", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "eps", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logit.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "eps", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logit_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "eps", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logit_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "eps", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logit_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "eps", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logspace", + "inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": "Scalar" }, + { "name": "steps", "type": "int64" }, + { "name": "base", "type": "float32", "default": 10.0 }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logspace.Scalar_Tensor", + "inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": "Tensor" }, + { "name": "steps", "type": "int64" }, + { "name": "base", "type": "float32", "default": 10.0 }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logspace.Scalar_Tensor_out", + "inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": "Tensor" }, + { "name": "steps", "type": "int64" }, + { "name": "base", "type": "float32", "default": 10.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logspace.Tensor_Scalar", + "inputs": [ + { "name": "start", "type": "Tensor" }, + { "name": "end", "type": "Scalar" }, + { "name": "steps", "type": "int64" }, + { "name": "base", "type": "float32", "default": 10.0 }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logspace.Tensor_Scalar_out", + "inputs": [ + { "name": "start", "type": "Tensor" }, + { "name": "end", "type": "Scalar" }, + { "name": "steps", "type": "int64" }, + { "name": "base", "type": "float32", 
"default": 10.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logspace.Tensor_Tensor", + "inputs": [ + { "name": "start", "type": "Tensor" }, + { "name": "end", "type": "Tensor" }, + { "name": "steps", "type": "int64" }, + { "name": "base", "type": "float32", "default": 10.0 }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logspace.Tensor_Tensor_out", + "inputs": [ + { "name": "start", "type": "Tensor" }, + { "name": "end", "type": "Tensor" }, + { "name": "steps", "type": "int64" }, + { "name": "base", "type": "float32", "default": 10.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logspace.out", + "inputs": [ + { "name": "start", "type": "Scalar" }, + { "name": "end", "type": "Scalar" }, + { "name": "steps", "type": "int64" }, + { "name": "base", "type": "float32", "default": 10.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logsumexp", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logsumexp.names", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logsumexp.names_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::logsumexp.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::lstm.data", + "category": "Layer", + "inputs": [ + { "name": "data", "type": "Tensor" }, + { "name": "batch_sizes", "type": "Tensor" }, + { "name": "hx", "type": "Tensor[]" }, + { "name": "params", "type": "Tensor[]" }, + { "name": "has_biases", "type": "boolean" }, + { "name": "num_layers", "type": "int64" }, + { "name": "dropout", "type": "float32" }, + { "name": "train", "type": "boolean" }, + { "name": "bidirectional", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::lstm.input", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "hx", "type": "Tensor[]" }, + { "name": "params", "type": "Tensor[]" }, + { "name": "has_biases", "type": "boolean" }, + { "name": "num_layers", "type": "int64" }, + { "name": "dropout", "type": "float32" }, + { "name": "train", "type": "boolean" }, + { "name": "bidirectional", "type": "boolean" }, + { "name": "batch_first", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::lstm_cell", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "hx", "type": "Tensor[]" }, 
+ { "name": "w_ih", "type": "Tensor" }, + { "name": "w_hh", "type": "Tensor" }, + { "name": "b_ih", "type": "Tensor", "optional": true, "default": null }, + { "name": "b_hh", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::lt.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::lt.Scalar_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::lt.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::lt.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::masked_fill.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mask", "type": "Tensor" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::masked_fill.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mask", "type": "Tensor" }, + { "name": "value", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::masked_fill_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mask", "type": "Tensor" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::masked_fill_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mask", "type": "Tensor" }, + { "name": "value", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::masked_scatter_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mask", "type": "Tensor" }, + { "name": "source", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::masked_select", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mask", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::masked_select.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mask", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::matmul", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::matmul.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max.dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::max.dim_max", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", 
"type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::max.names_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::max.names_dim_max", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::max.other", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max.unary_out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max_pool1d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[1]" }, + { "name": "stride", "type": "int64[1]", "default": [] }, + { "name": "padding", "type": "int64[1]", "default": 0 }, + { "name": "dilation", "type": "int64[1]", "default": 1 }, + { "name": "ceil_mode", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max_pool1d_with_indices", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[1]" }, + { "name": "stride", "type": "int64[1]", "default": [] }, + { "name": "padding", "type": "int64[1]", "default": 0 }, + { "name": "dilation", "type": "int64[1]", "default": 1 }, + { "name": "ceil_mode", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::max_pool2d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[2]" }, + { "name": "stride", "type": "int64[2]", "default": [] }, + { "name": "padding", "type": "int64[2]", "default": 0 }, + { "name": "dilation", "type": "int64[2]", "default": 1 }, + { "name": "ceil_mode", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max_pool2d_with_indices", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[2]" }, + { "name": "stride", "type": "int64[2]", "default": [] }, + { "name": "padding", "type": "int64[2]", "default": 0 }, + { "name": "dilation", "type": "int64[2]", "default": 1 }, + { "name": "ceil_mode", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::max_pool2d_with_indices.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[2]" }, + { "name": "stride", "type": "int64[2]", "default": [] }, + { "name": "padding", "type": "int64[2]", "default": 0 }, + { "name": "dilation", "type": "int64[2]", "default": 1 }, + { "name": "ceil_mode", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": 
"Tensor" } + ] + }, + { + "name": "aten::max_pool3d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[3]" }, + { "name": "stride", "type": "int64[3]", "default": [] }, + { "name": "padding", "type": "int64[3]", "default": 0 }, + { "name": "dilation", "type": "int64[3]", "default": 1 }, + { "name": "ceil_mode", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max_pool3d_with_indices", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[3]" }, + { "name": "stride", "type": "int64[3]", "default": [] }, + { "name": "padding", "type": "int64[3]", "default": 0 }, + { "name": "dilation", "type": "int64[3]", "default": 1 }, + { "name": "ceil_mode", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::max_pool3d_with_indices.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[3]" }, + { "name": "stride", "type": "int64[3]", "default": [] }, + { "name": "padding", "type": "int64[3]", "default": 0 }, + { "name": "dilation", "type": "int64[3]", "default": 1 }, + { "name": "ceil_mode", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::max_pool3d_with_indices_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[3]" }, + { "name": "stride", "type": "int64[3]" }, + { "name": "padding", "type": "int64[3]" }, + { "name": "dilation", "type": "int64[3]" }, + { "name": "ceil_mode", "type": "boolean" }, + { "name": "indices", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max_pool3d_with_indices_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "kernel_size", "type": "int64[3]" }, + { "name": "stride", "type": "int64[3]" }, + { "name": "padding", "type": "int64[3]" }, + { "name": "dilation", "type": "int64[3]" }, + { "name": "ceil_mode", "type": "boolean" }, + { "name": "indices", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max_unpool2d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max_unpool2d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max_unpool3d", + "category": "Pool", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[3]" }, + { "name": "stride", "type": "int64[3]" }, + { "name": "padding", "type": "int64[3]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::max_unpool3d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[3]" }, + { "name": "stride", "type": "int64[3]" }, + { "name": "padding", "type": "int64[3]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + 
"name": "aten::maximum", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::maximum.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mean", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mean.dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mean.dtype_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mean.names_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mean.names_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mean.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::median", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::median.dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::median.dim_values", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::median.names_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::median.names_dim_values", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::meshgrid", + "category": "Tensor", + "inputs": [ 
+ { "name": "tensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::meshgrid.indexing", + "category": "Tensor", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "indexing", "type": "string" } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::min", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::min.dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": 0 }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::min.dim_min", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::min.names_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::min.names_dim_min", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::min.other", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::min.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::min.unary_out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::minimum", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::minimum.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mish", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mish.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mish_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mkldnn_reorder_conv2d_weight", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[2]", "default": 0 }, + { "name": "stride", "type": "SymInt[2]", "default": 1 }, + { "name": "dilation", "type": "SymInt[2]", "default": 1 }, + { "name": "groups", "type": "SymInt", "default": 1 }, + { "name": "input_size", "type": "SymInt[]", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mm", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { 
"name": "mat2", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mm.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mat2", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::moveaxis.int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "source", "type": "int64" }, + { "name": "destination", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::moveaxis.intlist", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "source", "type": "int64[]" }, + { "name": "destination", "type": "int64[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::movedim.int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "source", "type": "int64" }, + { "name": "destination", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::movedim.intlist", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "source", "type": "int64[]" }, + { "name": "destination", "type": "int64[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mse_loss", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64", "default": "Mean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mse_loss.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64", "default": "Mean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mse_loss_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mse_loss_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mul.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mul.ScalarT", + "inputs": [ + { "name": "input", "type": "Tensor[]" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::mul.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mul.left_t", + "inputs": [ + { "name": "l", "type": "t[]" }, + { "name": "n", "type": "int64" } + ], + "outputs": [ + { "type": "t[]" } + ] + }, + { + "name": "aten::mul.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mul.right_", + "inputs": [ + { "name": "n", "type": "int64" }, + { "name": "l", "type": "t[]" } + ], + "outputs": [ + { "type": "t[]" } + ] + }, + { + "name": "aten::mul_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mul_.Tensor", + "inputs": [ + { "name": "self", 
"type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mul_.t", + "inputs": [ + { "name": "l", "type": "t[]" }, + { "name": "n", "type": "int64" } + ], + "outputs": [ + { "type": "t[]" } + ] + }, + { + "name": "aten::multinomial", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "num_samples", "type": "int64" }, + { "name": "replacement", "type": "boolean", "default": false }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::multinomial.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "num_samples", "type": "int64" }, + { "name": "replacement", "type": "boolean", "default": false }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::multiply.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::multiply.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::multiply.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::multiply_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::multiply_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mv", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "vec", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mv.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "vec", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mvlgamma", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mvlgamma.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::mvlgamma_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::nan_to_num", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "nan", "type": "float32", "optional": true, "default": null }, + { "name": "posinf", "type": "float32", "optional": true, "default": null }, + { "name": "neginf", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::nan_to_num.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "nan", "type": "float32", "optional": true, "default": null }, + { "name": "posinf", "type": "float32", "optional": true, "default": null }, + { "name": "neginf", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::nan_to_num_", + "inputs": [ + { "name": "self", 
"type": "Tensor" }, + { "name": "nan", "type": "float32", "optional": true, "default": null }, + { "name": "posinf", "type": "float32", "optional": true, "default": null }, + { "name": "neginf", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::narrow", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "start", "type": "SymInt" }, + { "name": "length", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::narrow.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "start", "type": "Tensor" }, + { "name": "length", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::narrow_copy", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "start", "type": "SymInt" }, + { "name": "length", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::narrow_copy.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "start", "type": "SymInt" }, + { "name": "length", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ne.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ne.Scalar_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ne.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ne.Tensor_list", + "inputs": [ + { "name": "a", "type": "Tensor[]" }, + { "name": "b", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::ne.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ne.bool", + "inputs": [ + { "name": "a", "type": "boolean" }, + { "name": "b", "type": "boolean" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::ne.bool_list", + "inputs": [ + { "name": "a", "type": "boolean[]" }, + { "name": "b", "type": "boolean[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::ne.device", + "inputs": [ + { "name": "a", "type": "Device" }, + { "name": "b", "type": "Device" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::ne.enum", + "inputs": [ + { "name": "a", "type": "AnyEnumType" }, + { "name": "b", "type": "AnyEnumType" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::ne.float_list", + "inputs": [ + { "name": "a", "type": "float32[]" }, + { "name": "b", "type": "float32[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::ne.int_list", + "inputs": [ + { "name": "a", "type": "int64[]" }, + { "name": "b", "type": "int64[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::ne.str_list", + "inputs": [ + { "name": "a", "type": "string[]" }, + { "name": "b", "type": "string[]" } + ], + "outputs": [ + { "type": "boolean" } + ] + }, + { + "name": "aten::neg", + "inputs": [ + { "name": 
"self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::neg.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::new_empty", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::new_empty_strided", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "stride", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::new_full", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "fill_value", "type": "Scalar" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::new_ones", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::new_zeros", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::nonzero", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::nonzero.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::nonzero_numpy", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::norm.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar", "default": 2 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::norm.ScalarOpt_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar", "optional": true }, + 
{ "name": "dim", "type": "int64[1]" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::norm.ScalarOpt_dim_dtype", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar", "optional": true }, + { "name": "dim", "type": "int64[1]" }, + { "name": "keepdim", "type": "boolean" }, + { "name": "dtype", "type": "ScalarType" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::norm.ScalarOpt_dtype", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar", "optional": true }, + { "name": "dtype", "type": "ScalarType" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::norm.dtype_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar", "optional": true }, + { "name": "dim", "type": "int64[1]" }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::norm.names_ScalarOpt_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar", "optional": true }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::norm.names_ScalarOpt_dim_dtype", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar", "optional": true }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "keepdim", "type": "boolean" }, + { "name": "dtype", "type": "ScalarType" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::norm.names_dtype_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar", "optional": true }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "keepdim", "type": "boolean" }, + { "name": "dtype", "type": "ScalarType" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::norm.names_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar", "optional": true }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::norm.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar", "optional": true }, + { "name": "dim", "type": "int64[1]" }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::normal.Tensor_Tensor", + "inputs": [ + { "name": "mean", "type": "Tensor" }, + { "name": "std", "type": "Tensor" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::normal.Tensor_Tensor_out", + "inputs": [ + { "name": "mean", "type": "Tensor" }, + { "name": "std", "type": "Tensor" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::normal.Tensor_float", + "inputs": [ + { "name": "mean", "type": "Tensor" }, + { "name": "std", "type": "float32", "default": 1 }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::normal.Tensor_float_out", + "inputs": [ + { "name": "mean", "type": "Tensor" }, + { "name": "std", "type": "float32", "default": 1 }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::normal.float_Tensor", + "inputs": [ + { "name": "mean", "type": "float32" }, + { "name": "std", "type": "Tensor" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::normal.float_Tensor_out", + "inputs": [ + { "name": "mean", "type": "float32" }, + { "name": "std", "type": "Tensor" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::normal.float_float", + "inputs": [ + { "name": "mean", "type": "float32", "default": 0 }, + { "name": "std", "type": "float32" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::normal.float_float_out", + "inputs": [ + { "name": "mean", "type": "float32" }, + { "name": "std", "type": "float32" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::normal_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "mean", "type": "float32", "default": 0 }, + { "name": "std", "type": "float32", "default": 1 }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::numel", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "aten::numpy_T", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::numpy_T.a", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::one_hot", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "num_classes", "type": "int64", "default": -1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ones", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ones.names", + "inputs": [ + { "name": "size", "type": "int64[]" }, + { "name": "names", "type": "Dimname[]", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { 
"name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ones.out", + "inputs": [ + { "name": "size", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::ones_like", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pad", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "pad", "type": "SymInt[]" }, + { "name": "mode", "type": "string", "default": "constant" }, + { "name": "value", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pad_sequence", + "inputs": [ + { "name": "sequences", "type": "Tensor[]" }, + { "name": "batch_first", "type": "boolean", "default": false }, + { "name": "padding_value", "type": "float32", "default": 0.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pairwise_distance", + "inputs": [ + { "name": "x1", "type": "Tensor" }, + { "name": "x2", "type": "Tensor" }, + { "name": "p", "type": "float32", "default": 2 }, + { "name": "eps", "type": "float32", "default": 1e-06 }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pdist", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "float32", "default": 2 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::permute", + "category": "Shape", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dims", "type": "int64[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pin_memory", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "device", "type": "Device", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pinverse", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "rcond", "type": "float32", "default": 1e-15 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pixel_shuffle", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "upscale_factor", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pixel_unshuffle", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "downscale_factor", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pop.t", + "inputs": [ + { "name": "self", "type": "t[]" }, + { "name": "idx", "type": "int64", "default": -1 } + ], + "outputs": [ + { "type": "t" } + ] + }, + { + "name": "aten::pow.Scalar", + "inputs": [ + { "name": "self", "type": "Scalar" }, + { "name": "exponent", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pow.Scalar_out", + "inputs": [ + { "name": "self", "type": "Scalar" }, + { "name": "exponent", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::pow.Tensor_Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "exponent", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pow.Tensor_Scalar_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "exponent", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pow.Tensor_Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "exponent", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pow.Tensor_Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "exponent", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pow.int_to_int", + "inputs": [ + { "name": "a", "type": "int64" }, + { "name": "b", "type": "int64" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "aten::pow_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "exponent", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::pow_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "exponent", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::prelu", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::prod", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::prod.Dimname_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::prod.dim_Dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::prod.dim_int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::prod.int_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::quantile", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "q", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "interpolation", "type": "string", "default": "linear" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::quantile.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "q", "type": "Tensor" }, + { "name": "dim", "type": "int64", 
"optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "interpolation", "type": "string", "default": "linear" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::quantile.scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "q", "type": "float32" }, + { "name": "dim", "type": "int64", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "interpolation", "type": "string", "default": "linear" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::quantile.scalar_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "q", "type": "float32" }, + { "name": "dim", "type": "int64", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "interpolation", "type": "string", "default": "linear" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::quantize_per_channel", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "scales", "type": "Tensor" }, + { "name": "zero_points", "type": "Tensor" }, + { "name": "axis", "type": "int64" }, + { "name": "dtype", "type": "ScalarType" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::quantize_per_tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "scale", "type": "float32" }, + { "name": "zero_point", "type": "int64" }, + { "name": "dtype", "type": "ScalarType" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::quantize_per_tensor.tensor_qparams", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "scale", "type": "Tensor" }, + { "name": "zero_point", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::quantize_per_tensor.tensors", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "scales", "type": "Tensor" }, + { "name": "zero_points", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType" } + ], + "outputs": [ + { "name": "outputs", "type": "Tensor[]" } + ] + }, + { + "name": "aten::quantize_per_tensor_dynamic", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType" }, + { "name": "reduce_range", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::quantized_lstm", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "hx", "type": "Tensor[]" }, + { "name": "params", "type": "Tensor[]" }, + { "name": "has_biases", "type": "boolean" }, + { "name": "num_layers", "type": "int64" }, + { "name": "dropout", "type": "float32" }, + { "name": "train", "type": "boolean" }, + { "name": "bidirectional", "type": "boolean" }, + { "name": "batch_first", "type": "boolean" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "use_dynamic", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::quantized_lstm.data", + "inputs": [ + { "name": "data", "type": "Tensor" }, + { "name": "batch_sizes", "type": "Tensor" }, + { "name": "hx", "type": "Tensor[]" }, + { "name": "params", "type": "Tensor[]" }, + { "name": "has_biases", "type": "boolean" }, + { "name": "num_layers", "type": "int64" }, + { "name": "dropout", "type": "float32" }, + { "name": "train", "type": 
"boolean" }, + { "name": "bidirectional", "type": "boolean" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "use_dynamic", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::quantized_lstm_cell", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "hx", "type": "Tensor[]" }, + { "name": "w_ih", "type": "Tensor" }, + { "name": "w_hh", "type": "Tensor" }, + { "name": "b_ih", "type": "Tensor" }, + { "name": "b_hh", "type": "Tensor" }, + { "name": "packed_ih", "type": "Tensor" }, + { "name": "packed_hh", "type": "Tensor" }, + { "name": "col_offsets_ih", "type": "Tensor" }, + { "name": "col_offsets_hh", "type": "Tensor" }, + { "name": "scale_ih", "type": "Scalar" }, + { "name": "scale_hh", "type": "Scalar" }, + { "name": "zero_point_ih", "type": "Scalar" }, + { "name": "zero_point_hh", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::rand", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::rand.generator", + "inputs": [ + { "name": "size", "type": "SymInt[]", "default": null }, + { "name": "generator", "type": "Generator", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::rand.generator_out", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "generator", "type": "Generator", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::rand.generator_with_names", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "generator", "type": "Generator", "optional": true }, + { "name": "names", "type": "Dimname[]", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::rand.names", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "names", "type": "Dimname[]", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::rand.out", + "inputs": [ + { "name": "size", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::rand_like", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randint", + "inputs": [ + { "name": "high", "type": "SymInt" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": "long" }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randint.generator", + "inputs": [ + { "name": "high", "type": "SymInt" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "generator", "type": "Generator", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": "long" }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randint.generator_out", + "inputs": [ + { "name": "high", "type": "SymInt" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "generator", "type": "Generator", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randint.low", + "inputs": [ + { "name": "low", "type": "SymInt", "default": null }, + { "name": "high", "type": "SymInt", "default": null }, + { "name": "size", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": "long" }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randint.low_generator", + "inputs": [ + { "name": "low", "type": "SymInt", "default": null }, + { "name": "high", "type": "SymInt", "default": null }, + { "name": "size", "type": "SymInt[]" }, + { "name": "generator", "type": "Generator", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": "long" }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randint.low_generator_out", + "inputs": [ + { "name": "low", "type": "SymInt" }, + { "name": "high", "type": "SymInt" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "generator", "type": "Generator", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randint.low_out", + "inputs": [ + { "name": "low", "type": "SymInt" }, + { "name": "high", "type": "SymInt" }, + { "name": "size", "type": "SymInt[]" } + 
], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randint.out", + "inputs": [ + { "name": "high", "type": "SymInt" }, + { "name": "size", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randint_like", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "high", "type": "SymInt" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randint_like.low_dtype", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "low", "type": "SymInt" }, + { "name": "high", "type": "SymInt" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randn", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randn.generator", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "generator", "type": "Generator", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randn.generator_out", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "generator", "type": "Generator", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randn.generator_with_names", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "generator", "type": "Generator", "optional": true }, + { "name": "names", "type": "Dimname[]", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randn.names", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "names", "type": "Dimname[]", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": 
null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randn.out", + "inputs": [ + { "name": "size", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randn_like", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::random_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::random_.from", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "from", "type": "int64" }, + { "name": "to", "type": "int64", "optional": true }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::random_.to", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "to", "type": "int64" }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randperm", + "inputs": [ + { "name": "n", "type": "SymInt" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": "long" }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randperm.generator", + "inputs": [ + { "name": "n", "type": "SymInt" }, + { "name": "generator", "type": "Generator", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": "long" }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randperm.generator_out", + "inputs": [ + { "name": "n", "type": "SymInt" }, + { "name": "generator", "type": "Generator", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::randperm.out", + "inputs": [ + { "name": "n", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::real", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::reciprocal", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::reciprocal.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::reflection_pad1d", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": 
"Tensor" }, + { "name": "padding", "type": "SymInt[2]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::reflection_pad1d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[2]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::reflection_pad2d", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[4]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::reflection_pad2d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[4]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::reflection_pad3d", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[6]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::reflection_pad3d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[6]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::relu", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::relu6", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::relu6_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::relu_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::remainder.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::remainder.Scalar_Tensor", + "inputs": [ + { "name": "self", "type": "Scalar" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::remainder.Scalar_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::remainder.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::remainder.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::remainder_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::remainder_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::renorm", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar" }, + { "name": "dim", "type": "int64" }, + { "name": "maxnorm", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::renorm.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "p", "type": "Scalar" }, + { "name": "dim", "type": "int64" }, + { "name": "maxnorm", "type": "Scalar" } + ], 
+ "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::repeat", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "repeats", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::repeat_interleave.Tensor", + "inputs": [ + { "name": "repeats", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::repeat_interleave.self_Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "repeats", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null }, + { "name": "output_size", "type": "SymInt", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::repeat_interleave.self_int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "repeats", "type": "SymInt" }, + { "name": "dim", "type": "int64", "optional": true, "default": null }, + { "name": "output_size", "type": "SymInt", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::replication_pad1d", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[2]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::replication_pad1d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[2]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::replication_pad2d", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[4]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::replication_pad2d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[4]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::replication_pad3d", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[6]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::replication_pad3d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "SymInt[6]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::requires_grad_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "requires_grad", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::reshape", + "category": "Shape", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "shape", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::reshape_as", + "category": "Shape", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::resize_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::resolve_conj", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::resolve_neg", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": 
"Tensor" } + ] + }, + { + "name": "aten::reverse.t", + "inputs": [ + { "name": "self", "type": "t[]" } + ], + "outputs": [] + }, + { + "name": "aten::rnn_relu.data", + "inputs": [ + { "name": "data", "type": "Tensor" }, + { "name": "batch_sizes", "type": "Tensor" }, + { "name": "hx", "type": "Tensor" }, + { "name": "params", "type": "Tensor[]" }, + { "name": "has_biases", "type": "boolean" }, + { "name": "num_layers", "type": "int64" }, + { "name": "dropout", "type": "float32" }, + { "name": "train", "type": "boolean" }, + { "name": "bidirectional", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::rnn_relu.input", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "hx", "type": "Tensor" }, + { "name": "params", "type": "Tensor[]" }, + { "name": "has_biases", "type": "boolean" }, + { "name": "num_layers", "type": "int64" }, + { "name": "dropout", "type": "float32" }, + { "name": "train", "type": "boolean" }, + { "name": "bidirectional", "type": "boolean" }, + { "name": "batch_first", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::rnn_tanh.data", + "inputs": [ + { "name": "data", "type": "Tensor" }, + { "name": "batch_sizes", "type": "Tensor" }, + { "name": "hx", "type": "Tensor" }, + { "name": "params", "type": "Tensor[]" }, + { "name": "has_biases", "type": "boolean" }, + { "name": "num_layers", "type": "int64" }, + { "name": "dropout", "type": "float32" }, + { "name": "train", "type": "boolean" }, + { "name": "bidirectional", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::rnn_tanh.input", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "hx", "type": "Tensor" }, + { "name": "params", "type": "Tensor[]" }, + { "name": "has_biases", "type": "boolean" }, + { "name": "num_layers", "type": "int64" }, + { "name": "dropout", "type": "float32" }, + { "name": "train", "type": "boolean" }, + { "name": "bidirectional", "type": "boolean" }, + { "name": "batch_first", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::rnn_tanh_cell", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "hx", "type": "Tensor" }, + { "name": "w_ih", "type": "Tensor" }, + { "name": "w_hh", "type": "Tensor" }, + { "name": "b_ih", "type": "Tensor", "optional": true, "default": null }, + { "name": "b_hh", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::roll", + "category": "Layer", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "shifts", "type": "SymInt[1]" }, + { "name": "dims", "type": "int64[1]", "default": [] } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::rot90", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "k", "type": "int64", "default": 1 }, + { "name": "dims", "type": "int64[]", "default": [ 0, 1 ] } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::round", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::round.decimals", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "decimals", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::round.decimals_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "decimals", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::round.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::round_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::round_.decimals", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "decimals", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::rsqrt", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::rsqrt.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::rsub.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::rsub.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scalar_tensor", + "inputs": [ + { "name": "s", "type": "Scalar" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scaled_dot_product_attention", + "inputs": [ + { "name": "query", "type": "Tensor" }, + { "name": "key", "type": "Tensor" }, + { "name": "value", "type": "Tensor" }, + { "name": "attn_mask", "type": "Tensor", "optional": true, "default": null }, + { "name": "dropout_p", "type": "float32", "default": 0.0 }, + { "name": "is_causal", "type": "boolean", "default": false }, + { "name": "scale", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter.dimname_src", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter.dimname_value", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter.reduce", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" }, + { "name": "reduce", "type": "string" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter.reduce_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" }, + { "name": "reduce", "type": "string" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter.src", + "inputs": [ 
+ { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter.src_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter.value", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter.value_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter.value_reduce", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Scalar" }, + { "name": "reduce", "type": "string" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter.value_reduce_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Scalar" }, + { "name": "reduce", "type": "string" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter_.reduce", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" }, + { "name": "reduce", "type": "string" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter_.src", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter_.value", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter_.value_reduce", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "value", "type": "Scalar" }, + { "name": "reduce", "type": "string" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter_add", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter_add.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter_add.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { 
+ "name": "aten::scatter_add_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter_reduce.two", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" }, + { "name": "reduce", "type": "string" }, + { "name": "include_self", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter_reduce.two_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" }, + { "name": "reduce", "type": "string" }, + { "name": "include_self", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::scatter_reduce_.two", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "Tensor" }, + { "name": "src", "type": "Tensor" }, + { "name": "reduce", "type": "string" }, + { "name": "include_self", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::select.Dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "index", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::select.int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::select.t", + "inputs": [ + { "name": "list", "type": "t[]" }, + { "name": "idx", "type": "int64" } + ], + "outputs": [ + { "type": "t" } + ] + }, + { + "name": "aten::select_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "input_sizes", "type": "SymInt[]" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::select_copy.int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::select_scatter", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "src", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "index", "type": "SymInt" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::selu", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::selu_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sigmoid", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sigmoid.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sigmoid_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + 
"name": "aten::sign", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sign.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sign_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::signbit", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::signbit.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::silu", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::silu.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::silu_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::silu_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::silu_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sin", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sin.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sinh", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sinh.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::size", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "int64[]" } + ] + }, + { + "name": "aten::size.Dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "aten::size.int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "aten::slice.Tensor", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": 0 }, + { "name": "start", "type": "SymInt", "optional": true, "default": null }, + { "name": "end", "type": "SymInt", "optional": true, "default": null }, + { "name": "step", "type": "SymInt", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::slice.str", + "inputs": [ + { "name": "string", "type": "string" }, + { "name": "start", "type": "int64", "optional": true, "default": null }, + { "name": "end", "type": "int64", "optional": true, "default": null }, + { "name": "step", "type": "int64", "default": 1 } + ], + "outputs": [ + { "type": "string" } + ] + }, + { + "name": "aten::slice.t", + "inputs": [ + { "name": "l", "type": "t[]" }, + { "name": "start", "type": "int64", "optional": true, "default": null }, + { "name": "end", "type": "int64", "optional": true, "default": null }, + { "name": 
"step", "type": "int64", "default": 1 } + ], + "outputs": [ + { "type": "t[]" } + ] + }, + { + "name": "aten::slice_copy.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": 0 }, + { "name": "start", "type": "SymInt", "optional": true, "default": null }, + { "name": "end", "type": "SymInt", "optional": true, "default": null }, + { "name": "step", "type": "SymInt", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::smooth_l1_loss", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64", "default": "Mean" }, + { "name": "beta", "type": "float32", "default": 1.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::smooth_l1_loss.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64", "default": "Mean" }, + { "name": "beta", "type": "float32", "default": 1.0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::smooth_l1_loss_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64" }, + { "name": "beta", "type": "float32" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::smooth_l1_loss_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "target", "type": "Tensor" }, + { "name": "reduction", "type": "int64" }, + { "name": "beta", "type": "float32" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::softmax.Dimname", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::softmax.int", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::softmax.int_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::softplus", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "beta", "type": "Scalar", "default": 1 }, + { "name": "threshold", "type": "Scalar", "default": 20 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::softplus.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "beta", "type": "Scalar", "default": 1 }, + { "name": "threshold", "type": "Scalar", "default": 20 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::softshrink", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "lambd", "type": "Scalar", "default": 0.5 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::softshrink.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "lambd", "type": "Scalar", "default": 0.5 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::sort", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "descending", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::sort.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor[]" }, + { "name": "reverse", "type": "boolean", "default": false } + ], + "outputs": [] + }, + { + "name": "aten::sort.any", + "inputs": [ + { "name": "self", "type": "t[]" }, + { "name": "reverse", "type": "boolean", "default": false } + ], + "outputs": [] + }, + { + "name": "aten::sort.bool", + "inputs": [ + { "name": "self", "type": "boolean[]" }, + { "name": "reverse", "type": "boolean", "default": false } + ], + "outputs": [] + }, + { + "name": "aten::sort.dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "descending", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::sort.dimname_stable", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "stable", "type": "boolean", "optional": true }, + { "name": "dim", "type": "Dimname" }, + { "name": "descending", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::sort.dimname_values", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "descending", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::sort.dimname_values_stable", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "stable", "type": "boolean", "optional": true }, + { "name": "dim", "type": "Dimname" }, + { "name": "descending", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::sort.float", + "inputs": [ + { "name": "self", "type": "float32[]" }, + { "name": "reverse", "type": "boolean", "default": false } + ], + "outputs": [] + }, + { + "name": "aten::sort.int", + "inputs": [ + { "name": "self", "type": "int64[]" }, + { "name": "reverse", "type": "boolean", "default": false } + ], + "outputs": [] + }, + { + "name": "aten::sort.stable", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "stable", "type": "boolean", "optional": true }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "descending", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::sort.str", + "inputs": [ + { "name": "self", "type": "string[]" }, + { "name": "reverse", "type": "boolean", "default": false } + ], + "outputs": [] + }, + { + "name": "aten::sort.values", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "descending", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::sort.values_stable", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "stable", "type": 
"boolean", "optional": true }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "descending", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::special_expit", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::special_expit.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::split", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "split_sizes", "type": "int64[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::split.Tensor", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "split_size", "type": "SymInt" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "name": "outputs", "type": "Tensor[]" } + ] + }, + { + "name": "aten::split.sizes", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "split_size", "type": "SymInt[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "name": "outputs", "type": "Tensor[]" } + ] + }, + { + "name": "aten::split.str", + "inputs": [ + { "name": "self", "type": "string" }, + { "name": "separator", "type": "string", "optional": true, "default": null }, + { "name": "max", "type": "int64", "default": -1 } + ], + "outputs": [ + { "type": "string[]" } + ] + }, + { + "name": "aten::split_copy.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "split_size", "type": "SymInt" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::split_copy.Tensor_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "split_size", "type": "SymInt" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [] + }, + { + "name": "aten::split_with_sizes", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "split_sizes", "type": "SymInt[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "name": "outputs", "type": "Tensor[]" } + ] + }, + { + "name": "aten::split_with_sizes_copy", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "split_sizes", "type": "SymInt[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::split_with_sizes_copy.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "split_sizes", "type": "SymInt[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [] + }, + { + "name": "aten::splitlines", + "inputs": [ + { "name": "self", "type": "string" }, + { "name": "keepends", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "string[]" } + ] + }, + { + "name": "aten::sqrt", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sqrt.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sqrt_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::square", + "inputs": [ + { "name": "self", "type": 
"Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::square.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::square_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::squeeze", + "category": "Transform", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::squeeze.dim", + "category": "Transform", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::squeeze.dimname", + "category": "Transform", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::squeeze.dims", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::squeeze_", + "category": "Transform", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::squeeze_.dim", + "category": "Transform", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::squeeze_.dimname", + "category": "Transform", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::squeeze_.dims", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::stack", + "category": "Tensor", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::stack.out", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::std", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "unbiased", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::std.correction", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "correction", "type": "Scalar", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::std.correction_names", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "correction", "type": "Scalar", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::std.correction_names_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "correction", "type": "Scalar", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::std.correction_out", + 
"inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "correction", "type": "Scalar", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::std.dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::std.names_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::std.names_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::std.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::std_mean", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "unbiased", "type": "boolean", "default": true } + ], + "outputs": [ + { "name": "result1", "type": "Tensor" }, + { "name": "result2", "type": "Tensor" } + ] + }, + { + "name": "aten::std_mean.correction", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "correction", "type": "Scalar", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::std_mean.correction_names", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "correction", "type": "Scalar", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::std_mean.dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "result1", "type": "Tensor" }, + { "name": "result2", "type": "Tensor" } + ] + }, + { + "name": "aten::std_mean.names_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::stft", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "n_fft", "type": "int64" }, + { "name": "hop_length", "type": "int64", "optional": true, "default": null }, + { "name": "win_length", "type": 
"int64", "optional": true, "default": null }, + { "name": "window", "type": "Tensor", "optional": true, "default": null }, + { "name": "normalized", "type": "boolean", "default": false }, + { "name": "onesided", "type": "boolean", "optional": true, "default": null }, + { "name": "return_complex", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::stft.center", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "n_fft", "type": "int64" }, + { "name": "hop_length", "type": "int64", "optional": true, "default": null }, + { "name": "win_length", "type": "int64", "optional": true, "default": null }, + { "name": "window", "type": "Tensor", "optional": true, "default": null }, + { "name": "center", "type": "boolean", "default": true }, + { "name": "pad_mode", "type": "string", "default": "reflect" }, + { "name": "normalized", "type": "boolean", "default": false }, + { "name": "onesided", "type": "boolean", "optional": true, "default": null }, + { "name": "return_complex", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::stride", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "int64[]" } + ] + }, + { + "name": "aten::stride.Dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "aten::stride.int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "aten::sub.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sub.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sub.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sub_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sub_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sum", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sum.DimnameList_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sum.IntList_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true }, + { 
"name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sum.bool", + "inputs": [ + { "name": "self", "type": "boolean[]" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "aten::sum.complex", + "inputs": [ + { "name": "self", "type": "complex[]" } + ], + "outputs": [ + { "type": "complex" } + ] + }, + { + "name": "aten::sum.dim_DimnameList", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sum.dim_IntList", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true }, + { "name": "keepdim", "type": "boolean", "default": false }, + { "name": "dtype", "type": "ScalarType", "default": null, "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::sum.float", + "inputs": [ + { "name": "self", "type": "float32[]" } + ], + "outputs": [ + { "type": "float32" } + ] + }, + { + "name": "aten::sum.int", + "inputs": [ + { "name": "self", "type": "int64[]" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "aten::swapaxes", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "axis0", "type": "int64" }, + { "name": "axis1", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::swapaxes_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "axis0", "type": "int64" }, + { "name": "axis1", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::take", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "index", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::take.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "index", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::take_along_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::take_along_dim.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "dim", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tan", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tan.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tan_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tanh", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tanh.out", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tanh_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": 
"Tensor" } + ] + }, + { + "name": "aten::tensor", + "inputs": [ + { "name": "data", "type": "t[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "requires_grad", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tensor_split.indices", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "indices", "type": "SymInt[]" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::tensor_split.sections", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "sections", "type": "SymInt" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::tensor_split.tensor_indices_or_sections", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "tensor_indices_or_sections", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::tensordot", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "dims_self", "type": "int64[]" }, + { "name": "dims_other", "type": "int64[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tensordot.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "dims_self", "type": "int64[]" }, + { "name": "dims_other", "type": "int64[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::threshold", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "threshold", "type": "Scalar" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::threshold.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "threshold", "type": "Scalar" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::threshold_", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "threshold", "type": "Scalar" }, + { "name": "value", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tile", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dims", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to.device", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "device", "type": "Device" }, + { "name": "dtype", "type": "ScalarType" }, + { "name": "non_blocking", "type": "boolean", "default": false }, + { "name": "copy", "type": "boolean", "default": false }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to.dtype", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType" }, + { "name": "non_blocking", "type": "boolean", "default": false }, + { "name": "copy", "type": "boolean", "default": false }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to.dtype_layout", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { 
"name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "non_blocking", "type": "boolean", "default": false }, + { "name": "copy", "type": "boolean", "default": false }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to.other", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" }, + { "name": "non_blocking", "type": "boolean", "default": false }, + { "name": "copy", "type": "boolean", "default": false }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to.prim_Device", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "device", "type": "Device", "optional": true }, + { "name": "dtype", "type": "int64", "optional": true, "default": null }, + { "name": "non_blocking", "type": "boolean", "default": false }, + { "name": "copy", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to.prim_dtype", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "int64", "optional": true, "default": null }, + { "name": "non_blocking", "type": "boolean", "default": false }, + { "name": "copy", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to.prim_other", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "non_blocking", "type": "boolean", "default": false }, + { "name": "copy", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to_dense", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "masked_grad", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to_dense_backward", + "inputs": [ + { "name": "grad", "type": "Tensor" }, + { "name": "input", "type": "Tensor" }, + { "name": "masked_grad", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to_mkldnn", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to_mkldnn_backward", + "inputs": [ + { "name": "grad", "type": "Tensor" }, + { "name": "input", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to_padded_tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "padding", "type": "float32" }, + { "name": "output_size", "type": "SymInt[]", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to_sparse", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "blocksize", "type": "int64[2]", "optional": true, "default": null }, + { "name": "dense_dim", "type": "int64", "optional": true, "default": 
null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to_sparse.sparse_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "sparse_dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to_sparse_bsc", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "blocksize", "type": "int64[2]" }, + { "name": "dense_dim", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to_sparse_bsr", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "blocksize", "type": "int64[2]" }, + { "name": "dense_dim", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to_sparse_csc", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dense_dim", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::to_sparse_csr", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dense_dim", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::topk", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "k", "type": "SymInt" }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "largest", "type": "boolean", "default": true }, + { "name": "sorted", "type": "boolean", "default": true } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::topk.values", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "k", "type": "SymInt" }, + { "name": "dim", "type": "int64", "default": -1 }, + { "name": "largest", "type": "boolean", "default": true }, + { "name": "sorted", "type": "boolean", "default": true } + ], + "outputs": [ + { "name": "values", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" } + ] + }, + { + "name": "aten::transpose.Dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim0", "type": "Dimname" }, + { "name": "dim1", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::transpose.int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim0", "type": "int64" }, + { "name": "dim1", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::transpose_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim0", "type": "int64" }, + { "name": "dim1", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::transpose_copy.int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim0", "type": "int64" }, + { "name": "dim1", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tril", + "category": "Layer", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "diagonal", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tril.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "diagonal", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::tril_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "diagonal", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"aten::tril_indices", + "category": "Layer", + "inputs": [ + { "name": "row", "type": "int64" }, + { "name": "col", "type": "int64" }, + { "name": "offset", "type": "int64", "default": 0 }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": "long" }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::triu", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "diagonal", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::triu.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "diagonal", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::triu_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "diagonal", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::triu_indices", + "inputs": [ + { "name": "row", "type": "int64" }, + { "name": "col", "type": "int64" }, + { "name": "offset", "type": "int64", "default": 0 }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": "long" }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::true_divide.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::true_divide.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::true_divide.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::true_divide_.Scalar", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::true_divide_.Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::type_as", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::unbind.Dimname", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::unbind.int", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::unflatten.Dimname", + "category": "Shape", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname" }, + { "name": "sizes", "type": "SymInt[]" }, + { "name": "names", "type": "Dimname[]", "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::unflatten.int", + "category": "Shape", + 
"inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "sizes", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::unfold", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dimension", "type": "int64" }, + { "name": "size", "type": "int64" }, + { "name": "step", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::uniform_", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "from", "type": "float32", "default": 0 }, + { "name": "to", "type": "float32", "default": 1 }, + { "name": "generator", "type": "Generator", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::unique_consecutive", + "category": "Layer", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "return_inverse", "type": "boolean", "default": false }, + { "name": "return_counts", "type": "boolean", "default": false }, + { "name": "dim", "type": "int64", "optional": true, "default": null } + ], + "outputs": [ + { "name": "output1", "type": "Tensor" }, + { "name": "output2", "type": "Tensor" }, + { "name": "output3", "type": "Tensor" } + ] + }, + { + "name": "aten::unique_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "sorted", "type": "boolean", "default": true }, + { "name": "return_inverse", "type": "boolean", "default": false }, + { "name": "return_counts", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::unique_dim_consecutive", + "category": "Layer", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "return_inverse", "type": "boolean", "default": false }, + { "name": "return_counts", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "output1", "type": "Tensor" }, + { "name": "output2", "type": "Tensor" }, + { "name": "output3", "type": "Tensor" } + ] + }, + { + "name": "aten::unsafe_chunk", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "chunks", "type": "int64" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::unsafe_split.Tensor", + "category": "Tensor", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "split_size", "type": "SymInt" }, + { "name": "dim", "type": "int64", "default": 0 } + ], + "outputs": [ + { "name": "outputs", "type": "Tensor[]" } + ] + }, + { + "name": "aten::unsqueeze", + "category": "Transform", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::unsqueeze_", + "category": "Transform", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::unsqueeze_copy", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_bicubic2d", + "category": "Layer", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { 
"name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_bicubic2d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_bicubic2d.vec", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[]", "optional": true }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scale_factors", "type": "float32[]", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_bilinear2d", + "category": "Layer", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_bilinear2d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_bilinear2d.vec", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[]", "optional": true }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scale_factors", "type": "float32[]", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_bilinear2d_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "input_size", "type": "SymInt[4]" }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_bilinear2d_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "input_size", "type": "SymInt[4]" }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_linear1d", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[1]" }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scales", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_linear1d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[1]" }, + { "name": 
"align_corners", "type": "boolean" }, + { "name": "scales", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_linear1d.vec", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[]", "optional": true }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scale_factors", "type": "float32[]", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_nearest1d", + "category": "Layer", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[1]" }, + { "name": "scales", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_nearest1d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[1]" }, + { "name": "scales", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_nearest1d.vec", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[]", "optional": true }, + { "name": "scale_factors", "type": "float32[]", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_nearest2d", + "category": "Layer", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_nearest2d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_nearest2d.vec", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[]", "optional": true }, + { "name": "scale_factors", "type": "float32[]", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_nearest2d_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "input_size", "type": "SymInt[4]" }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_nearest2d_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[2]" }, + { "name": "input_size", "type": "SymInt[4]" }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_nearest3d", + "category": "Layer", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[3]" }, + { "name": "scales_d", "type": "float32", "optional": true, "default": null }, + { 
"name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_nearest3d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[3]" }, + { "name": "scales_d", "type": "float32", "optional": true, "default": null }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_nearest3d.vec", + "category": "Layer", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[]", "optional": true }, + { "name": "scale_factors", "type": "float32[]", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_trilinear3d", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[3]" }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scales_d", "type": "float32", "optional": true, "default": null }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_trilinear3d.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[3]" }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scales_d", "type": "float32", "optional": true, "default": null }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_trilinear3d.vec", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[]", "optional": true }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scale_factors", "type": "float32[]", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_trilinear3d_backward", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[3]" }, + { "name": "input_size", "type": "SymInt[5]" }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scales_d", "type": "float32", "optional": true, "default": null }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::upsample_trilinear3d_backward.grad_input", + "inputs": [ + { "name": "grad_output", "type": "Tensor" }, + { "name": "output_size", "type": "SymInt[3]" }, + { "name": "input_size", "type": "SymInt[5]" }, + { "name": "align_corners", "type": "boolean" }, + { "name": "scales_d", "type": "float32", "optional": true, "default": null }, + { "name": "scales_h", "type": "float32", "optional": true, "default": null }, + { "name": "scales_w", "type": "float32", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::var", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "unbiased", "type": "boolean", "default": true } + ], + 
"outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::var.correction", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "correction", "type": "Scalar", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::var.correction_names", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "correction", "type": "Scalar", "default": null, "optional": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::var.correction_names_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "correction", "type": "Scalar", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::var.correction_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "correction", "type": "Scalar", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::var.dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::var.names_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::var.names_out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::var.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::var_mean", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "unbiased", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::var_mean.correction", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true, "default": null }, + { "name": "correction", "type": "Scalar", "optional": true, "default": null }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::var_mean.correction_names", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "correction", "type": "Scalar", "optional": true, "default": null 
}, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::var_mean.dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "int64[1]", "optional": true }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "aten::var_mean.names_dim", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dim", "type": "Dimname[1]" }, + { "name": "unbiased", "type": "boolean", "default": true }, + { "name": "keepdim", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" }, + { "type": "Tensor" } + ] + }, + { + "name": "aten::vdot", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::vdot.out", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::view", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::view.dtype", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::view_as", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::view_as_complex", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::view_as_complex_copy", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::view_as_real", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::view_as_real_copy", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::view_copy", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "size", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::view_copy.dtype", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::vstack", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::vstack.out", + "inputs": [ + { "name": "tensors", "type": "Tensor[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::wait", + "inputs": [ + { "name": "self", "type": "Future" } + ], + "outputs": [ + { "type": "t" } + ] + }, + { + "name": "aten::where", + "inputs": [ + { "name": "condition", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor[]" } + ] + }, + { + "name": "aten::where.Scalar", + "inputs": [ + { "name": "condition", "type": "Tensor" }, + { "name": "self", "type": "Scalar" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::where.ScalarOther", + "inputs": [ + { "name": "condition", "type": "Tensor" 
}, + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Scalar" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::where.ScalarSelf", + "inputs": [ + { "name": "condition", "type": "Tensor" }, + { "name": "self", "type": "Scalar" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::where.self", + "inputs": [ + { "name": "condition", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::where.self_out", + "inputs": [ + { "name": "condition", "type": "Tensor" }, + { "name": "self", "type": "Tensor" }, + { "name": "other", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::zero_", + "inputs": [ + { "name": "self", "type": "Tensor" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::zeros", + "inputs": [ + { "name": "size", "type": "SymInt[]" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::zeros.names", + "inputs": [ + { "name": "size", "type": "int64[]" }, + { "name": "names", "type": "Dimname[]", "optional": true }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::zeros.out", + "inputs": [ + { "name": "size", "type": "SymInt[]" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "aten::zeros_like", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, + { "name": "layout", "type": "Layout", "optional": true, "default": null }, + { "name": "device", "type": "Device", "optional": true, "default": null }, + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "memory_format", "type": "MemoryFormat", "optional": true, "default": null } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "neuron::forward_v2_1", + "inputs": [ + { "name": "input", "type": "Tensor[]" }, + { "name": "model" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "prepacked::conv2d_clamp_prepack", + "inputs": [ + { "name": "W", "type": "Tensor" }, + { "name": "B", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "int64[2]" }, + { "name": "padding", "type": "int64[2]" }, + { "name": "dilation", "type": "int64[2]" }, + { "name": "groups", "type": "int64" }, + { "name": "output_min", "type": "Scalar", "optional": true, "default": null }, + { "name": "output_max", "type": "Scalar", "optional": true, "default": null } + ], + "outputs": [ + { "type": "__torch__.torch.classes.xnnpack.Conv2dOpContext" } + ] + }, + { + "name": "prepacked::conv2d_clamp_run", + "category": "Layer", + "inputs": [ + { "name": "X", "type": "Tensor" }, + { "name": "W_prepack", "type": 
"__torch__.torch.classes.xnnpack.Conv2dOpContext" } + ], + "outputs": [ + { "name": "Y", "type": "Tensor" } + ] + }, + { + "name": "prepacked::linear_clamp_prepack", + "inputs": [ + { "name": "W", "type": "Tensor" }, + { "name": "B", "type": "Tensor", "optional": true, "default": null }, + { "name": "output_min", "type": "Scalar", "optional": true, "default": null }, + { "name": "output_max", "type": "Scalar", "optional": true, "default": null } + ], + "outputs": [ + { "type": "__torch__.torch.classes.xnnpack.LinearOpContext" } + ] + }, + { + "name": "prepacked::linear_clamp_run", + "category": "Layer", + "inputs": [ + { "name": "X", "type": "Tensor" }, + { "name": "W_prepack", "type": "__torch__.torch.classes.xnnpack.LinearOpContext" } + ], + "outputs": [ + { "name": "Y", "type": "Tensor" } + ] + }, + { + "name": "quantized::add", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { "name": "scale", "type": "float32" }, + { "name": "zero_point", "type": "int64" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::add.Scalar", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::add.Scalar2", + "inputs": [ + { "name": "b", "type": "Scalar" }, + { "name": "qa", "type": "Tensor" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::add.Scalar_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::add.out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_relu", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { "name": "scale", "type": "float32" }, + { "name": "zero_point", "type": "int64" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_relu.Scalar", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_relu.Scalar2", + "inputs": [ + { "name": "b", "type": "Scalar" }, + { "name": "qa", "type": "Tensor" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_relu.Scalar_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_relu.out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_relu_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + 
] + }, + { + "name": "quantized::add_scalar", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_scalar.Tensor", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Tensor" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_scalar_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_scalar_out.Tensor", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Tensor" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_scalar_relu", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_scalar_relu.Tensor", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Tensor" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_scalar_relu_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::add_scalar_relu_out.Tensor", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Tensor" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::batch_norm", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "mean", "type": "Tensor" }, + { "name": "var", "type": "Tensor" }, + { "name": "eps", "type": "float32" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::batch_norm1d", + "category": "Normalization", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "mean", "type": "Tensor" }, + { "name": "var", "type": "Tensor" }, + { "name": "eps", "type": "float32" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::batch_norm1d_relu", + "category": "Normalization", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "mean", "type": "Tensor" }, + { "name": "var", "type": "Tensor" }, + { "name": "eps", "type": "float32" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::batch_norm2d", + "category": "Normalization", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "mean", 
"type": "Tensor" }, + { "name": "var", "type": "Tensor" }, + { "name": "eps", "type": "float32" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::batch_norm2d_relu", + "category": "Normalization", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "mean", "type": "Tensor" }, + { "name": "var", "type": "Tensor" }, + { "name": "eps", "type": "float32" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::batch_norm3d", + "category": "Normalization", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "mean", "type": "Tensor" }, + { "name": "var", "type": "Tensor" }, + { "name": "eps", "type": "float32" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::batch_norm3d_relu", + "category": "Normalization", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "mean", "type": "Tensor" }, + { "name": "var", "type": "Tensor" }, + { "name": "eps", "type": "float32" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::batch_norm_relu", + "category": "Normalization", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "mean", "type": "Tensor" }, + { "name": "var", "type": "Tensor" }, + { "name": "eps", "type": "float32" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::cat", + "category": "Tensor", + "inputs": [ + { "name": "qx", "type": "Tensor[]" }, + { "name": "dim", "type": "int64" }, + { "name": "scale", "type": "float32", "optional": true }, + { "name": "zero_point", "type": "int64", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::cat_relu", + "category": "Tensor", + "inputs": [ + { "name": "qx", "type": "Tensor[]" }, + { "name": "dim", "type": "int64" }, + { "name": "scale", "type": "float32", "optional": true }, + { "name": "zero_point", "type": "int64", "optional": true } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::celu", + "category": "Activation", + "inputs": [ + { "name": "self", "type": "Tensor" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" }, + { "name": "alpha", "type": "Scalar", "default": 1 } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv1d", + "category": "Layer", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, + { "name": "output_scale", "type": "float32" }, + { 
"name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv1d_prepack", + "inputs": [ + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "int64[]" }, + { "name": "padding", "type": "int64[]" }, + { "name": "dilation", "type": "int64[]" }, + { "name": "groups", "type": "int64" } + ], + "outputs": [ + { "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ] + }, + { + "name": "quantized::conv1d_relu", + "category": "Layer", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv2d", + "category": "Layer", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, + { "name": "stride", "type": "int64[]" }, + { "name": "padding", "type": "int64[]" }, + { "name": "dilation", "type": "int64[]" }, + { "name": "groups", "type": "int64" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv2d.new", + "category": "Layer", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv2d_dilation", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "type": "int64[]" } + ] + }, + { + "name": "quantized::conv2d_dynamic", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, + { "name": "reduce_range", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv2d_groups", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "quantized::conv2d_output_padding", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "type": "int64[]" } + ] + }, + { + "name": "quantized::conv2d_padding", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "type": "int64[]" } + ] + }, + { + "name": "quantized::conv2d_prepack", + "inputs": [ + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "int64[]" }, + { "name": "padding", "type": "int64[]" }, + { "name": "dilation", "type": "int64[]" }, + { "name": "groups", "type": "int64" } + ], + "outputs": [ + { "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ] + }, + { + "name": "quantized::conv2d_relu", + "category": "Layer", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": 
"__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, + { "name": "stride", "type": "int64[]" }, + { "name": "padding", "type": "int64[]" }, + { "name": "dilation", "type": "int64[]" }, + { "name": "groups", "type": "int64" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv2d_relu.new", + "category": "Layer", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv2d_stride", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "type": "int64[]" } + ] + }, + { + "name": "quantized::conv2d_transpose", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "quantized::conv2d_unpack", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "name": "unpacked_weights", "type": "Tensor" }, + { "name": "B_origin", "type": "Tensor", "optional": true } + ] + }, + { + "name": "quantized::conv2d_unpack_sizes", + "inputs": [ + { "name": "packed_weights", "type": "Any" } + ], + "outputs": [ + { "type": "Any" } + ] + }, + { + "name": "quantized::conv3d", + "category": "Layer", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "__torch__.torch.classes.quantized.Conv3dPackedParamsBase" }, + { "name": "stride", "type": "int64[]" }, + { "name": "padding", "type": "int64[]", "default": 1 }, + { "name": "dilation", "type": "int64[]", "default": 0 }, + { "name": "groups", "type": "int64", "default": 1 }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv3d.new", + "category": "Layer", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv3dPackedParamsBase" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv3d_prepack", + "inputs": [ + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "int64[]" }, + { "name": "padding", "type": "int64[]" }, + { "name": "dilation", "type": "int64[]" }, + { "name": "groups", "type": "int64" } + ], + "outputs": [ + { "type": "__torch__.torch.classes.quantized.Conv3dPackedParamsBase" } + ] + }, + { + "name": "quantized::conv3d_relu", + "category": "Layer", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "__torch__.torch.classes.quantized.Conv3dPackedParamsBase" }, + { "name": "stride", "type": "int64[]" }, + { "name": "padding", "type": "int64[]", "default": 1 }, + { "name": "dilation", "type": "int64[]", "default": 0 }, + { "name": "groups", "type": "int64", "default": 1 }, + { "name": "output_scale", "type": "float32", "default": 1 }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { 
"type": "Tensor" } + ] + }, + { + "name": "quantized::conv3d_relu.new", + "category": "Layer", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv3dPackedParamsBase" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv_prepack", + "inputs": [ + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "int64[]" }, + { "name": "padding", "type": "int64[]" }, + { "name": "dilation", "type": "int64[]" }, + { "name": "groups", "type": "int64" } + ], + "outputs": [ + { "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ] + }, + { + "name": "quantized::conv_transpose1d_prepack", + "inputs": [ + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "int64[]" }, + { "name": "padding", "type": "int64[]" }, + { "name": "output_padding", "type": "int64[]" }, + { "name": "dilation", "type": "int64[]" }, + { "name": "groups", "type": "int64" } + ], + "outputs": [ + { "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ] + }, + { + "name": "quantized::conv_transpose2d", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv_transpose2d_dilation", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "type": "int64[]" } + ] + }, + { + "name": "quantized::conv_transpose2d_dynamic", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, + { "name": "reduce_range", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::conv_transpose2d_groups", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "quantized::conv_transpose2d_output_padding", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "type": "int64[]" } + ] + }, + { + "name": "quantized::conv_transpose2d_padding", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "type": "int64[]" } + ] + }, + { + "name": "quantized::conv_transpose2d_prepack", + "inputs": [ + { "name": "weight", "type": "Tensor" }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "stride", "type": "int64[]" }, + { "name": "padding", "type": "int64[]" }, + { "name": "output_padding", "type": "int64[]" }, + { "name": "dilation", "type": "int64[]" }, + { "name": "groups", "type": "int64" } + ], + "outputs": [ + { "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ] + }, + { + "name": "quantized::conv_transpose2d_stride", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { 
"type": "int64[]" } + ] + }, + { + "name": "quantized::conv_transpose2d_transpose", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "type": "int64" } + ] + }, + { + "name": "quantized::conv_transpose2d_unpack", + "inputs": [ + { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } + ], + "outputs": [ + { "name": "unpacked_weights", "type": "Tensor" }, + { "name": "B_origin", "type": "Tensor", "optional": true } + ] + }, + { + "name": "quantized::embedding_bag_4bit_rowwise_offsets", + "inputs": [ + { "name": "weight", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "offsets", "type": "Tensor", "optional": true, "default": null }, + { "name": "scale_grad_by_freq", "type": "boolean", "default": false }, + { "name": "mode", "type": "int64", "default": 0 }, + { "name": "pruned_weights", "type": "boolean", "default": false }, + { "name": "per_sample_weights", "type": "Tensor", "optional": true, "default": null }, + { "name": "compressed_indices_mapping", "type": "Tensor", "optional": true, "default": null }, + { "name": "include_last_offset", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::embedding_bag_byte_rowwise_offsets", + "inputs": [ + { "name": "weight", "type": "Tensor" }, + { "name": "indices", "type": "Tensor" }, + { "name": "offsets", "type": "Tensor", "optional": true, "default": null }, + { "name": "scale_grad_by_freq", "type": "boolean", "default": false }, + { "name": "mode", "type": "int64", "default": 0 }, + { "name": "pruned_weights", "type": "boolean", "default": false }, + { "name": "per_sample_weights", "type": "Tensor", "optional": true, "default": null }, + { "name": "compressed_indices_mapping", "type": "Tensor", "optional": true, "default": null }, + { "name": "include_last_offset", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::embedding_byte", + "inputs": [ + { "name": "weight", "type": "__torch__.torch.classes.quantized.EmbeddingPackedParamsBase" }, + { "name": "indices", "type": "Tensor" }, + { "name": "pruned_weights", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::hardswish", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::instance_norm", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "eps", "type": "float32" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::layer_norm", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "normalized_shape", "type": "int64[]" }, + { "name": "weight", "type": "Tensor", "optional": true }, + { "name": "bias", "type": "Tensor", "optional": true }, + { "name": "eps", "type": "float32" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::leaky_relu", + "category": 
"Activation", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "negative_slope", "type": "Scalar", "default": false }, + { "name": "inplace", "type": "boolean" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::linear", + "category": "Layer", + "inputs": [ + { "name": "X", "type": "Tensor" }, + { "name": "W_prepack", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, + { "name": "Y_scale_i", "type": "float32" }, + { "name": "Y_zero_point_i", "type": "int64" } + ], + "outputs": [ + { "name": "Y", "type": "Tensor" } + ] + }, + { + "name": "quantized::linear_dynamic", + "category": "Layer", + "inputs": [ + { "name": "X", "type": "Tensor" }, + { "name": "W_prepack", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, + { "name": "reduce_range", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "Y", "type": "Tensor" } + ] + }, + { + "name": "quantized::linear_prepack_fp16", + "inputs": [ + { "name": "W", "type": "Tensor" }, + { "name": "B", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "name": "W_prepack", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" } + ] + }, + { + "name": "quantized::linear_prepack_fp16_legacy", + "inputs": [ + { "name": "W", "type": "Tensor" }, + { "name": "B", "type": "Tensor", "optional": true, "default": null } + ], + "outputs": [ + { "name": "W_prepack", "type": "Tensor" } + ] + }, + { + "name": "quantized::linear_relu", + "category": "Layer", + "inputs": [ + { "name": "X", "type": "Tensor" }, + { "name": "W_prepack", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, + { "name": "Y_scale_i", "type": "float32" }, + { "name": "Y_zero_point_i", "type": "int64" } + ], + "outputs": [ + { "name": "Y", "type": "Tensor" } + ] + }, + { + "name": "quantized::linear_relu_dynamic", + "category": "Layer", + "inputs": [ + { "name": "X", "type": "Tensor" }, + { "name": "W_prepack", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, + { "name": "reduce_range", "type": "boolean", "default": false } + ], + "outputs": [ + { "name": "Y", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { "name": "scale", "type": "float32" }, + { "name": "zero_point", "type": "int64" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul.Scalar", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul.Scalar2", + "inputs": [ + { "name": "b", "type": "Scalar" }, + { "name": "qa", "type": "Tensor" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul.Scalar_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul.out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { 
"name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_relu", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { "name": "scale", "type": "float32" }, + { "name": "zero_point", "type": "int64" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_relu.Scalar", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_relu.Scalar2", + "inputs": [ + { "name": "b", "type": "Scalar" }, + { "name": "qa", "type": "Tensor" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_relu.Scalar_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_relu.out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_relu_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "qb", "type": "Tensor" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_scalar", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_scalar.Tensor", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Tensor" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_scalar_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_scalar_out.Tensor", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Tensor" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_scalar_relu", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_scalar_relu.Tensor", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Tensor" } + ], + "outputs": [ + { "name": "qc", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_scalar_relu_out", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Scalar" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::mul_scalar_relu_out.Tensor", + "inputs": [ + { "name": "qa", "type": "Tensor" }, + { "name": "b", "type": "Tensor" }, + { "name": "out", "type": "Tensor" } + ], + "outputs": [ + { "name": "out", "type": "Tensor" } + ] + }, + { + "name": "quantized::prelu", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "weight", "type": "Tensor" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": 
"quantized::relu6", + "category": "Activation", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "inplace", "type": "boolean", "default": false } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::sigmoid", + "category": "Activation", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "quantized::softmax", + "inputs": [ + { "name": "qx", "type": "Tensor" }, + { "name": "dim", "type": "int64" }, + { "name": "output_scale", "type": "float32" }, + { "name": "output_zero_point", "type": "int64" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "torch.nn.modules.activation.ELU", + "category": "Activation", + "inputs": [] + }, + { + "name": "torch.nn.modules.activation.GELU", + "category": "Activation", + "inputs": [] + }, + { + "name": "torch.nn.modules.activation.GLU", + "category": "Activation", + "inputs": [] + }, + { + "name": "torch.nn.modules.activation.Hardsigmoid", + "category": "Activation", + "inputs": [] + }, + { + "name": "torch.nn.modules.activation.Hardswish", + "category": "Activation", + "inputs": [] + }, + { + "name": "torch.nn.modules.activation.Hardtanh", + "category": "Activation", + "inputs": [] + }, + { + "name": "torch.nn.modules.activation.LeakyReLU", + "category": "Activation", + "inputs": [] + }, + { + "name": "torch.nn.modules.activation.LogSoftmax", + "category": "Activation" + }, + { + "name": "torch.nn.modules.activation.PReLU", + "category": "Activation", + "inputs": [] + }, + { + "name": "torch.nn.modules.activation.ReLU", + "category": "Activation", + "inputs": [ + { "name": "inplace", "default": false, "visible": false }, + { "name": "threshold", "default": 0 }, + { "name": "value", "default": 0 } + ] + }, + { + "name": "torch.nn.modules.activation.ReLU6", + "category": "Activation" + }, + { + "name": "torch.nn.modules.activation.SiLU", + "category": "Activation" + }, + { + "name": "torch.nn.modules.activation.Sigmoid", + "category": "Activation" + }, + { + "name": "torch.nn.modules.activation.Softmax", + "category": "Activation" + }, + { + "name": "torch.nn.modules.activation.Softmax2d", + "category": "Activation" + }, + { + "name": "torch.nn.modules.activation.Softplus", + "category": "Activation" + }, + { + "name": "torch.nn.modules.activation.Tanh", + "category": "Activation" + }, + { + "name": "torch.nn.modules.batchnorm.BatchNorm1d", + "category": "Normalization", + "inputs": [] + }, + { + "name": "torch.nn.modules.batchnorm.BatchNorm2d", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "running_mean" }, + { "name": "running_var" }, + { "name": "num_batches_tracked", "visible": false }, + { "name": "eps", "default": 1e-05 }, + { "name": "momentum", "default": 0.1 }, + { "name": "affine", "default": true }, + { "name": "track_running_stats", "default": true } + ] + }, + { + "name": "torch.nn.modules.conv.Conv1d", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "output_padding", "visible": false }, + { "name": "in_channels", "visible": false }, + { "name": "out_channels", "visible": false }, + { "name": "groups", "default": 1 }, + { "name": "transposed", "default": false }, + { "name": "padding", "default": [ 0 ] }, + { "name": "dilation", "default": [ 1 ] }, + { "name": "stride", 
"default": [ 1 ] } + ] + }, + { + "name": "torch.nn.modules.conv.Conv2d", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "output_padding", "visible": false }, + { "name": "in_channels", "visible": false }, + { "name": "out_channels", "visible": false }, + { "name": "groups", "default": 1 }, + { "name": "transposed", "default": false }, + { "name": "padding", "default": [ 0, 0 ] }, + { "name": "dilation", "default": [ 1, 1 ] }, + { "name": "stride", "default": [ 1, 1 ] } + ] + }, + { + "name": "torch.nn.modules.conv.Conv3d", + "category": "Layer" + }, + { + "name": "torch.nn.modules.conv.ConvTranspose1d", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "output_padding", "visible": false }, + { "name": "in_channels", "visible": false }, + { "name": "out_channels", "visible": false }, + { "name": "groups", "default": 1 }, + { "name": "transposed", "default": true }, + { "name": "padding", "default": [ 0 ] }, + { "name": "dilation", "default": [ 1 ] }, + { "name": "stride", "default": [ 1 ] } + ] + }, + { + "name": "torch.nn.modules.conv.ConvTranspose2d", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "output_padding", "visible": false }, + { "name": "in_channels", "visible": false }, + { "name": "out_channels", "visible": false }, + { "name": "groups", "default": 1 }, + { "name": "transposed", "default": true }, + { "name": "padding", "default": [ 0, 0 ] }, + { "name": "dilation", "default": [ 1, 1 ] }, + { "name": "stride", "default": [ 1, 1 ] } + ] + }, + { + "name": "torch.nn.modules.conv.ConvTranspose3d", + "category": "Layer" + }, + { + "name": "torch.nn.modules.dropout.Dropout", + "category": "Dropout", + "inputs": [ + { "name": "inplace", "default": false, "visible": false }, + { "name": "p", "default": 0.5 } + ] + }, + { + "name": "torch.nn.modules.dropout.Dropout2d", + "category": "Dropout", + "inputs": [ + { "name": "inplace", "default": false, "visible": false }, + { "name": "p", "default": 0.5 } + ] + }, + { + "name": "torch.nn.modules.instancenorm.InstanceNorm1d" + }, + { + "name": "torch.nn.modules.instancenorm.InstanceNorm2d" + }, + { + "name": "torch.nn.modules.instancenorm.InstanceNorm3d" + }, + { + "name": "torch.nn.modules.linear.Linear", + "category": "Layer", + "inputs": [] + }, + { + "name": "torch.nn.modules.normalization.CrossMapLRN2d", + "category": "Normalization", + "inputs": [ + { "name": "alpha", "default": 0.0001 }, + { "name": "beta", "default": 0.75 }, + { "name": "k", "default": 1 } + ] + }, + { + "name": "torch.nn.modules.normalization.GroupNorm", + "category": "Normalization" + }, + { + "name": "torch.nn.modules.normalization.LayerNorm", + "category": "Normalization" + }, + { + "name": "torch.nn.modules.padding.ConstantPad1d", + "category": "Tensor" + }, + { + "name": "torch.nn.modules.padding.ConstantPad2d", + "category": "Tensor" + }, + { + "name": "torch.nn.modules.padding.ConstantPad3d", + "category": "Tensor" + }, + { + "name": "torch.nn.modules.padding.ReflectionPad1d", + "category": "Tensor" + }, + { + "name": "torch.nn.modules.padding.ReflectionPad2d", + "category": "Tensor" + }, + { + "name": "torch.nn.modules.padding.ReplicationPad1d", + "category": "Tensor" + }, + { + "name": "torch.nn.modules.padding.ReplicationPad2d", + "category": "Tensor" + }, + { + "name": "torch.nn.modules.padding.ReplicationPad3d", + "category": "Tensor" + }, + { + 
"name": "torch.nn.modules.padding.ZeroPad2d", + "category": "Tensor" + }, + { + "name": "torch.nn.modules.pixelshuffle.PixelShuffle" + }, + { + "name": "torch.nn.modules.pooling.AdaptiveAvgPool1d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.pooling.AdaptiveAvgPool2d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.pooling.AdaptiveAvgPool3d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.pooling.AdaptiveMaxPool1d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.pooling.AdaptiveMaxPool2d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.pooling.AdaptiveMaxPool3d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.pooling.AvgPool2d", + "category": "Pool", + "inputs": [ + { "name": "padding", "default": 0 }, + { "name": "count_include_pad", "default": true }, + { "name": "ceil_mode", "visible": false } + ] + }, + { + "name": "torch.nn.modules.pooling.AvgPool3d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.pooling.MaxPool1d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.pooling.MaxPool2d", + "category": "Pool", + "inputs": [ + { "name": "input" }, + { "name": "padding", "default": 0 }, + { "name": "dilation", "default": 1 }, + { "name": "return_indices", "default": false }, + { "name": "ceil_mode", "visible": false } + ] + }, + { + "name": "torch.nn.modules.pooling.MaxPool3d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.pooling.MaxUnpool1d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.pooling.MaxUnpool2d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.pooling.MaxUnpool3d", + "category": "Pool" + }, + { + "name": "torch.nn.modules.rnn.GRU", + "category": "Layer" + }, + { + "name": "torch.nn.modules.rnn.GRUCell", + "category": "Layer" + }, + { + "name": "torch.nn.modules.rnn.LSTM", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weight_ih_l0", "visible": false }, + { "name": "weight_hh_l0", "visible": false }, + { "name": "bias_ih_l0", "visible": false }, + { "name": "bias_hh_l0", "visible": false }, + { "name": "weight_ih_l1", "visible": false }, + { "name": "weight_hh_l1", "visible": false }, + { "name": "bias_ih_l1", "visible": false }, + { "name": "bias_hh_l1", "visible": false }, + { "name": "dropout", "default": 0 }, + { "name": "dropout_state", "default": {} }, + { "name": "num_layers", "default": 1 }, + { "name": "batch_first", "visible": false }, + { "name": "bidirectional", "visible": false }, + { "name": "bias", "visible": false } + ] + }, + { + "name": "torch.nn.modules.rnn.LSTMCell", + "category": "Layer" + }, + { + "name": "torch.nn.modules.rnn.RNN", + "category": "Layer" + }, + { + "name": "torch.nn.modules.sparse.Embedding", + "category": "Transform", + "inputs": [ + { "name": "norm_type", "default": 2 }, + { "name": "scale_grad_by_freq", "default": false }, + { "name": "sparse", "default": false }, + { "name": "max_norm", "default": null }, + { "name": "padding_idx", "default": null } + ] + }, + { + "name": "torch.nn.modules.upsampling.Upsample", + "category": "Data" + }, + { + "name": "torchaudio::sox_effects_apply_effects_tensor", + "inputs": [ + { "name": "tensor", "type": "Tensor" }, + { "name": "sample_rate", "type": "int64" }, + { "name": "effects", "type": "string[][]" }, + { "name": "channels_first", "type": "boolean", "default": true } + ], + "outputs": [ + { "type": "Tensor" }, + { "name": "?", "type": "Tensor" } + ] + }, + { + "name": "torchvision::nms", + "inputs": [ + { "name": "dets", "type": "Tensor" }, + { 
"name": "scores", "type": "Tensor" }, + { "name": "iou_threshold", "type": "float32" } + ], + "outputs": [ + { "type": "Tensor" } + ] + }, + { + "name": "torchvision::roi_align", + "inputs": [ + { "name": "input", "type": "Tensor" }, + { "name": "rois", "type": "Tensor" }, + { "name": "spatial_scale", "type": "float32" }, + { "name": "pooled_height", "type": "int64" }, + { "name": "pooled_width", "type": "int64" }, + { "name": "sampling_ratio", "type": "int64" }, + { "name": "aligned", "type": "boolean" } + ], + "outputs": [ + { "type": "Tensor" } + ] + } +] \ No newline at end of file diff --git a/pytorch-schema.js b/pytorch-schema.js new file mode 100644 index 00000000000..bb5cd626355 --- /dev/null +++ b/pytorch-schema.js @@ -0,0 +1,761 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const $root = flatbuffers.get('torch'); + +$root.torch = $root.torch || {}; + +$root.torch.jit = $root.torch.jit || {}; + +$root.torch.jit.mobile = $root.torch.jit.mobile || {}; + +$root.torch.jit.mobile.serialization = $root.torch.jit.mobile.serialization || {}; + +$root.torch.jit.mobile.serialization.Int = class Int { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Int(); + $.int_val = reader.int64(position + 0); + return $; + } +}; + +$root.torch.jit.mobile.serialization.Bool = class Bool { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Bool(); + $.bool_val = reader.bool(position + 0); + return $; + } +}; + +$root.torch.jit.mobile.serialization.Double = class Double { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Double(); + $.double_val = reader.float64(position + 0); + return $; + } +}; + +$root.torch.jit.mobile.serialization.PerTensorAffineSchema = class PerTensorAffineSchema { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.PerTensorAffineSchema(); + $.q_scale = reader.float64(position + 0); + $.q_zero_point = reader.int32(position + 4); + return $; + } +}; + +$root.torch.jit.mobile.serialization.QuantizedSchema = class QuantizedSchema { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.QuantizedSchema(); + $.qscheme = reader.int8_(position, 4, 0); + $.scale = reader.float64_(position, 6, 0); + $.zero_point = reader.int32_(position, 8, 0); + $.scales = reader.table(position, 10, $root.torch.jit.mobile.serialization.TensorMetadata.decode); + $.zero_points = reader.table(position, 12, $root.torch.jit.mobile.serialization.TensorMetadata.decode); + $.axis = reader.int32_(position, 14, 0); + return $; + } +}; + +$root.torch.jit.mobile.serialization.TensorMetadata = class TensorMetadata { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.TensorMetadata(); + $.storage_location_index = reader.uint32_(position, 4, 0); + $.scalar_type = reader.int8_(position, 6, 0); + $.storage_offset = reader.int32_(position, 8, 0); + $.sizes = reader.typedArray(position, 10, Int32Array); + $.strides = reader.typedArray(position, 12, Int32Array); + $.requires_grad = reader.bool_(position, 14, false); + $.quantized_schema = reader.table(position, 16, $root.torch.jit.mobile.serialization.QuantizedSchema.decode); + return $; + } +}; + +$root.torch.jit.mobile.serialization.String = class String { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.String(); + $.data = reader.string_(position, 4, null); + return $; + } +}; + 
+$root.torch.jit.mobile.serialization.Device = class Device { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Device(); + $.str = reader.string_(position, 4, null); + return $; + } +}; + +$root.torch.jit.mobile.serialization.List = class List { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.List(); + $.items = reader.typedArray(position, 4, Uint32Array); + $.annotation_str = reader.string_(position, 6, null); + return $; + } +}; + +$root.torch.jit.mobile.serialization.IntList = class IntList { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.IntList(); + $.items = reader.int64s_(position, 4); + return $; + } +}; + +$root.torch.jit.mobile.serialization.DoubleList = class DoubleList { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.DoubleList(); + $.items = reader.typedArray(position, 4, Float64Array); + return $; + } +}; + +$root.torch.jit.mobile.serialization.BoolList = class BoolList { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.BoolList(); + $.items = reader.bools_(position, 4); + return $; + } +}; + +$root.torch.jit.mobile.serialization.Tuple = class Tuple { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Tuple(); + $.items = reader.typedArray(position, 4, Uint32Array); + return $; + } +}; + +$root.torch.jit.mobile.serialization.Dict = class Dict { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Dict(); + $.keys = reader.typedArray(position, 4, Uint32Array); + $.values = reader.typedArray(position, 6, Uint32Array); + $.annotation_str = reader.string_(position, 8, null); + return $; + } +}; + +$root.torch.jit.mobile.serialization.TypeType = { + UNSET: 0, + CLASS_WITH_FIELD: 1, + CUSTOM_CLASS: 2, + CLASS_WITH_SETSTATE: 3, + NON_OBJ: 4 +}; + +$root.torch.jit.mobile.serialization.ObjectType = class ObjectType { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.ObjectType(); + $.type_name = reader.string_(position, 4, null); + $.type = reader.uint8_(position, 6, 0); + $.attr_names = reader.strings_(position, 8); + return $; + } +}; + +$root.torch.jit.mobile.serialization.Object = class Object { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Object(); + $.type_index = reader.uint32_(position, 4, 0); + $.state = reader.uint32_(position, 6, 0); + $.attrs = reader.typedArray(position, 8, Uint32Array); + $.setstate_func = reader.uint32_(position, 10, 0); + return $; + } +}; + +$root.torch.jit.mobile.serialization.ComplexDouble = class ComplexDouble { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.ComplexDouble(); + $.real = reader.float64(position + 0); + $.imag = reader.float64(position + 8); + return $; + } +}; + +$root.torch.jit.mobile.serialization.EnumValue = class EnumValue { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.EnumValue(); + $.type_name = reader.string_(position, 4, null); + $.value = reader.uint32_(position, 6, 0); + return $; + } +}; + +$root.torch.jit.mobile.serialization.Instruction = class Instruction { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Instruction(); + $.op = reader.int8(position + 0); + $.n = reader.uint16(position + 2); + $.x = 
reader.int32(position + 4); + return $; + } +}; + +$root.torch.jit.mobile.serialization.Operator = class Operator { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Operator(); + $.name = reader.string_(position, 4, null); + $.overload_name = reader.string_(position, 6, null); + $.num_args_serialized = reader.int32_(position, 8, -1); + return $; + } +}; + +$root.torch.jit.mobile.serialization.Arg = class Arg { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Arg(); + $.name = reader.string_(position, 4, null); + $.type = reader.string_(position, 6, null); + $.default_value = reader.uint32_(position, 8, 0); + return $; + } +}; + +$root.torch.jit.mobile.serialization.Schema = class Schema { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Schema(); + $.arguments = reader.tableArray(position, 4, $root.torch.jit.mobile.serialization.Arg.decode); + $.returns = reader.tableArray(position, 6, $root.torch.jit.mobile.serialization.Arg.decode); + return $; + } +}; + +$root.torch.jit.mobile.serialization.DebugInfo = class DebugInfo { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.DebugInfo(); + $.debug_handle = reader.int64s_(position, 4); + return $; + } +}; + +$root.torch.jit.mobile.serialization.Function = class Function { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Function(); + $.qn = reader.string_(position, 4, null); + $.instructions = reader.structArray(position, 6, $root.torch.jit.mobile.serialization.Instruction.decode); + $.operators = reader.tableArray(position, 8, $root.torch.jit.mobile.serialization.Operator.decode); + $.constants = reader.typedArray(position, 10, Uint32Array); + $.type_annotations = reader.strings_(position, 12); + $.register_size = reader.int32_(position, 14, 0); + $.schema = reader.table(position, 16, $root.torch.jit.mobile.serialization.Schema.decode); + $.debug_info = reader.table(position, 18, $root.torch.jit.mobile.serialization.DebugInfo.decode); + $.class_type = reader.uint32_(position, 20, 0); + return $; + } +}; + +$root.torch.jit.mobile.serialization.StorageData = class StorageData { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.StorageData(); + $.data = reader.typedArray(position, 4, Uint8Array); + return $; + } +}; + +$root.torch.jit.mobile.serialization.IValueUnion = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.torch.jit.mobile.serialization.Int.decode(reader, position); + case 2: return $root.torch.jit.mobile.serialization.Bool.decode(reader, position); + case 3: return $root.torch.jit.mobile.serialization.Double.decode(reader, position); + case 4: return $root.torch.jit.mobile.serialization.ComplexDouble.decode(reader, position); + case 5: return $root.torch.jit.mobile.serialization.TensorMetadata.decode(reader, position); + case 6: return $root.torch.jit.mobile.serialization.String.decode(reader, position); + case 7: return $root.torch.jit.mobile.serialization.List.decode(reader, position); + case 8: return $root.torch.jit.mobile.serialization.Tuple.decode(reader, position); + case 9: return $root.torch.jit.mobile.serialization.Dict.decode(reader, position); + case 10: return $root.torch.jit.mobile.serialization.Object.decode(reader, position); + case 11: return $root.torch.jit.mobile.serialization.IntList.decode(reader, position); + case 12: 
return $root.torch.jit.mobile.serialization.DoubleList.decode(reader, position); + case 13: return $root.torch.jit.mobile.serialization.BoolList.decode(reader, position); + case 14: return $root.torch.jit.mobile.serialization.Device.decode(reader, position); + case 15: return $root.torch.jit.mobile.serialization.EnumValue.decode(reader, position); + case 16: return $root.torch.jit.mobile.serialization.Function.decode(reader, position); + default: return undefined; + } + } +}; + +$root.torch.jit.mobile.serialization.IValue = class IValue { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.IValue(); + $.val = reader.union(position, 4, $root.torch.jit.mobile.serialization.IValueUnion.decode); + return $; + } +}; + +$root.torch.jit.mobile.serialization.ExtraFile = class ExtraFile { + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.ExtraFile(); + $.name = reader.string_(position, 4, null); + $.content = reader.string_(position, 6, null); + return $; + } +}; + +$root.torch.jit.mobile.serialization.Module = class Module { + + static identifier(reader) { + return reader.identifier === 'PTMF'; + } + + static create(reader) { + return $root.torch.jit.mobile.serialization.Module.decode(reader, reader.root); + } + + static decode(reader, position) { + const $ = new $root.torch.jit.mobile.serialization.Module(); + $.bytecode_version = reader.uint32_(position, 4, 0); + $.extra_files = reader.tableArray(position, 6, $root.torch.jit.mobile.serialization.ExtraFile.decode); + $.methods = reader.typedArray(position, 8, Uint32Array); + $.state_obj = reader.uint32_(position, 10, 0); + $.ivalues = reader.tableArray(position, 12, $root.torch.jit.mobile.serialization.IValue.decode); + $.storage_data_size = reader.int32_(position, 14, 0); + $.storage_data = reader.tableArray(position, 16, $root.torch.jit.mobile.serialization.StorageData.decode); + $.object_types = reader.tableArray(position, 18, $root.torch.jit.mobile.serialization.ObjectType.decode); + $.jit_sources = reader.tableArray(position, 20, $root.torch.jit.mobile.serialization.ExtraFile.decode); + $.jit_constants = reader.typedArray(position, 22, Uint32Array); + $.operator_version = reader.uint32_(position, 24, 0); + $.mobile_ivalue_size = reader.uint32_(position, 26, 0); + return $; + } +}; + +$root.executorch_flatbuffer = $root.executorch_flatbuffer || {}; + +$root.executorch_flatbuffer.ScalarType = { + BYTE: 0, + CHAR: 1, + SHORT: 2, + INT: 3, + LONG: 4, + FLOAT: 6, + DOUBLE: 7, + BOOL: 11, + QINT8: 12, + QUINT8: 13, + QINT32: 14, + QUINT4X2: 16, + QUINT2X4: 17 +}; + +$root.executorch_flatbuffer.ContainerMetadata = class ContainerMetadata { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.ContainerMetadata(); + $.encoded_inp_str = reader.string_(position, 4, null); + $.encoded_out_str = reader.string_(position, 6, null); + return $; + } +}; + +$root.executorch_flatbuffer.Null = class Null { + + static decode(/* reader, position */) { + const $ = new $root.executorch_flatbuffer.Null(); + return $; + } +}; + +$root.executorch_flatbuffer.AllocationDetails = class AllocationDetails { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.AllocationDetails(); + $.memory_id = reader.uint32_(position, 4, 0); + $.memory_offset = reader.uint32_(position, 6, 0); + return $; + } +}; + +$root.executorch_flatbuffer.TensorShapeDynamism = { + STATIC: 0, + DYNAMIC_BOUND: 1, + DYNAMIC_UNBOUND: 2 +}; + 
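+// The executorch_flatbuffer.* decoders below appear to mirror the tables of
+// ExecuTorch's program schema. A Tensor either references constant data via
+// constant_buffer_idx (an index into Program.constant_buffer) or carries an
+// AllocationDetails record describing a runtime allocation, and shape_dynamism
+// tags the shape as STATIC, DYNAMIC_BOUND or DYNAMIC_UNBOUND per the enum
+// above.
+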
+$root.executorch_flatbuffer.Tensor = class Tensor { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.Tensor(); + $.scalar_type = reader.int8_(position, 4, 0); + $.storage_offset = reader.int32_(position, 6, 0); + $.sizes = reader.typedArray(position, 8, Int32Array); + $.dim_order = reader.typedArray(position, 10, Uint8Array); + $.requires_grad = reader.bool_(position, 12, false); + $.constant_buffer_idx = reader.uint32_(position, 14, 0); + $.allocation_info = reader.table(position, 16, $root.executorch_flatbuffer.AllocationDetails.decode); + $.layout = reader.int8_(position, 18, 0); + $.shape_dynamism = reader.int8_(position, 20, 0); + return $; + } +}; + +$root.executorch_flatbuffer.Int = class Int { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.Int(); + $.int_val = reader.int64_(position, 4, 0); + return $; + } +}; + +$root.executorch_flatbuffer.Bool = class Bool { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.Bool(); + $.bool_val = reader.bool_(position, 4, false); + return $; + } +}; + +$root.executorch_flatbuffer.Double = class Double { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.Double(); + $.double_val = reader.float64_(position, 4, 0); + return $; + } +}; + +$root.executorch_flatbuffer.String = class String { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.String(); + $.string_val = reader.string_(position, 4, null); + return $; + } +}; + +$root.executorch_flatbuffer.IntList = class IntList { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.IntList(); + $.items = reader.int64s_(position, 4); + return $; + } +}; + +$root.executorch_flatbuffer.DoubleList = class DoubleList { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.DoubleList(); + $.items = reader.typedArray(position, 4, Float64Array); + return $; + } +}; + +$root.executorch_flatbuffer.BoolList = class BoolList { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.BoolList(); + $.items = reader.bools_(position, 4); + return $; + } +}; + +$root.executorch_flatbuffer.TensorList = class TensorList { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.TensorList(); + $.items = reader.typedArray(position, 4, Int32Array); + return $; + } +}; + +$root.executorch_flatbuffer.OptionalTensorList = class OptionalTensorList { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.OptionalTensorList(); + $.items = reader.typedArray(position, 4, Int32Array); + return $; + } +}; + +$root.executorch_flatbuffer.KernelTypes = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.executorch_flatbuffer.Null.decode(reader, position); + case 2: return $root.executorch_flatbuffer.Int.decode(reader, position); + case 3: return $root.executorch_flatbuffer.Bool.decode(reader, position); + case 4: return $root.executorch_flatbuffer.Double.decode(reader, position); + case 5: return $root.executorch_flatbuffer.Tensor.decode(reader, position); + case 6: return $root.executorch_flatbuffer.String.decode(reader, position); + case 7: return $root.executorch_flatbuffer.IntList.decode(reader, position); + case 8: return $root.executorch_flatbuffer.DoubleList.decode(reader, position); + case 9: return $root.executorch_flatbuffer.BoolList.decode(reader, position); + case 10: return 
$root.executorch_flatbuffer.TensorList.decode(reader, position); + case 11: return $root.executorch_flatbuffer.OptionalTensorList.decode(reader, position); + default: return undefined; + } + } +}; + +$root.executorch_flatbuffer.EValue = class EValue { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.EValue(); + $.val = reader.union(position, 4, $root.executorch_flatbuffer.KernelTypes.decode); + return $; + } +}; + +$root.executorch_flatbuffer.Operator = class Operator { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.Operator(); + $.name = reader.string_(position, 4, null); + $.overload = reader.string_(position, 6, null); + return $; + } +}; + +$root.executorch_flatbuffer.KernelCall = class KernelCall { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.KernelCall(); + $.op_index = reader.int32_(position, 4, 0); + $.args = reader.typedArray(position, 6, Int32Array); + return $; + } +}; + +$root.executorch_flatbuffer.DelegateCall = class DelegateCall { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.DelegateCall(); + $.delegate_index = reader.int32_(position, 4, 0); + $.args = reader.typedArray(position, 6, Int32Array); + return $; + } +}; + +$root.executorch_flatbuffer.MoveCall = class MoveCall { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.MoveCall(); + $.move_from = reader.int32_(position, 4, 0); + $.move_to = reader.int32_(position, 6, 0); + return $; + } +}; + +$root.executorch_flatbuffer.JumpFalseCall = class JumpFalseCall { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.JumpFalseCall(); + $.cond_value_index = reader.int32_(position, 4, 0); + $.destination_instruction = reader.int32_(position, 6, 0); + return $; + } +}; + +$root.executorch_flatbuffer.FreeCall = class FreeCall { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.FreeCall(); + $.value_index = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.executorch_flatbuffer.InstructionArguments = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.executorch_flatbuffer.KernelCall.decode(reader, position); + case 2: return $root.executorch_flatbuffer.DelegateCall.decode(reader, position); + case 3: return $root.executorch_flatbuffer.MoveCall.decode(reader, position); + case 4: return $root.executorch_flatbuffer.JumpFalseCall.decode(reader, position); + case 5: return $root.executorch_flatbuffer.FreeCall.decode(reader, position); + default: return undefined; + } + } +}; + +$root.executorch_flatbuffer.Instruction = class Instruction { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.Instruction(); + $.instr_args = reader.union(position, 4, $root.executorch_flatbuffer.InstructionArguments.decode); + return $; + } +}; + +$root.executorch_flatbuffer.Frame = class Frame { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.Frame(); + $.filename = reader.string_(position, 4, null); + $.lineno = reader.int32_(position, 6, 0); + $.name = reader.string_(position, 8, null); + $.context = reader.string_(position, 10, null); + return $; + } +}; + +$root.executorch_flatbuffer.FrameList = class FrameList { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.FrameList(); + $.items = reader.tableArray(position, 4, $root.executorch_flatbuffer.Frame.decode); + return $; + } +}; 
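+// Delegate payloads are referenced indirectly: BackendDelegate.processed
+// (below) holds a BackendDelegateDataReference whose location field selects
+// between Program.backend_delegate_data (INLINE) and Program.segments
+// (SEGMENT), with index choosing the entry within the selected list.
+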
+ +$root.executorch_flatbuffer.DataLocation = { + INLINE: 0, + SEGMENT: 1 +}; + +$root.executorch_flatbuffer.BackendDelegateDataReference = class BackendDelegateDataReference { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.BackendDelegateDataReference(); + $.location = reader.int8_(position, 4, 0); + $.index = reader.uint32_(position, 6, 0); + return $; + } +}; + +$root.executorch_flatbuffer.CompileSpec = class CompileSpec { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.CompileSpec(); + $.key = reader.string_(position, 4, null); + $.value = reader.typedArray(position, 6, Uint8Array); + return $; + } +}; + +$root.executorch_flatbuffer.BackendDelegate = class BackendDelegate { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.BackendDelegate(); + $.id = reader.string_(position, 4, null); + $.processed = reader.table(position, 6, $root.executorch_flatbuffer.BackendDelegateDataReference.decode); + $.compile_specs = reader.tableArray(position, 8, $root.executorch_flatbuffer.CompileSpec.decode); + return $; + } +}; + +$root.executorch_flatbuffer.Chain = class Chain { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.Chain(); + $.inputs = reader.typedArray(position, 4, Int32Array); + $.outputs = reader.typedArray(position, 6, Int32Array); + $.instructions = reader.tableArray(position, 8, $root.executorch_flatbuffer.Instruction.decode); + $.stacktrace = reader.tableArray(position, 10, $root.executorch_flatbuffer.FrameList.decode); + return $; + } +}; + +$root.executorch_flatbuffer.ExecutionPlan = class ExecutionPlan { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.ExecutionPlan(); + $.name = reader.string_(position, 4, null); + $.container_meta_type = reader.table(position, 6, $root.executorch_flatbuffer.ContainerMetadata.decode); + $.values = reader.tableArray(position, 8, $root.executorch_flatbuffer.EValue.decode); + $.inputs = reader.typedArray(position, 10, Int32Array); + $.outputs = reader.typedArray(position, 12, Int32Array); + $.chains = reader.tableArray(position, 14, $root.executorch_flatbuffer.Chain.decode); + $.operators = reader.tableArray(position, 16, $root.executorch_flatbuffer.Operator.decode); + $.delegates = reader.tableArray(position, 18, $root.executorch_flatbuffer.BackendDelegate.decode); + $.non_const_buffer_sizes = reader.int64s_(position, 20); + return $; + } +}; + +$root.executorch_flatbuffer.Buffer = class Buffer { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.Buffer(); + $.storage = reader.typedArray(position, 4, Uint8Array); + return $; + } +}; + +$root.executorch_flatbuffer.BackendDelegateInlineData = class BackendDelegateInlineData { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.BackendDelegateInlineData(); + $.data = reader.typedArray(position, 4, Uint8Array); + return $; + } +}; + +$root.executorch_flatbuffer.DataSegment = class DataSegment { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.DataSegment(); + $.offset = reader.uint64_(position, 4, 0); + $.size = reader.uint64_(position, 6, 0); + return $; + } +}; + +$root.executorch_flatbuffer.SubsegmentOffsets = class SubsegmentOffsets { + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.SubsegmentOffsets(); + $.segment_index = reader.uint32_(position, 4, 0); + $.offsets = reader.uint64s_(position, 6); + return $; + 
} +}; + +$root.executorch_flatbuffer.Program = class Program { + + static identifier(reader) { + return reader.identifier === 'ET12'; + } + + static create(reader) { + return $root.executorch_flatbuffer.Program.decode(reader, reader.root); + } + + static decode(reader, position) { + const $ = new $root.executorch_flatbuffer.Program(); + $.version = reader.uint32_(position, 4, 0); + $.execution_plan = reader.tableArray(position, 6, $root.executorch_flatbuffer.ExecutionPlan.decode); + $.constant_buffer = reader.tableArray(position, 8, $root.executorch_flatbuffer.Buffer.decode); + $.backend_delegate_data = reader.tableArray(position, 10, $root.executorch_flatbuffer.BackendDelegateInlineData.decode); + $.segments = reader.tableArray(position, 12, $root.executorch_flatbuffer.DataSegment.decode); + $.constant_segment = reader.table(position, 14, $root.executorch_flatbuffer.SubsegmentOffsets.decode); + return $; + } +}; diff --git a/pytorch.js b/pytorch.js new file mode 100644 index 00000000000..8d77151fca9 --- /dev/null +++ b/pytorch.js @@ -0,0 +1,4275 @@ + +// Experimental + +import * as base from './base.js'; +import * as flatbuffers from './flatbuffers.js'; +import * as python from './python.js'; + +const pytorch = {}; + +pytorch.ModelFactory = class { + + match(context) { + return pytorch.Container.open(context); + } + + async open(context, target) { + const metadata = await pytorch.Metadata.open(context); + target.on('resolve', (_, name) => { + context.exception(new pytorch.Error(`Unknown type name '${name}'.`), false); + }); + await target.read(metadata); + return new pytorch.Model(metadata, target); + } +}; + +pytorch.Model = class { + + constructor(metadata, target) { + this.format = target.format; + this.producer = target.producer || ''; + this.graphs = []; + for (const [name, value] of target.modules) { + const graph = new pytorch.Graph(metadata, name, value); + this.graphs.push(graph); + } + } +}; + +pytorch.Graph = class { + + constructor(metadata, name, module) { + this.nodes = []; + this.inputs = []; + this.outputs = []; + this.groups = true; + this.name = name || ''; + const values = new Map(); + values.map = (name, type, tensor) => { + if (tensor) { + return new pytorch.Value(name, type, null, tensor); + } + if (!values.has(name)) { + values.set(name, new pytorch.Value(name, type, null, tensor)); + } else if (type || tensor) { + throw new pytorch.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + const createNode = (groups, key, obj, args, output) => { + let type = obj.__class__ && obj.__class__.__module__ && obj.__class__.__name__ ? `${obj.__class__.__module__}.${obj.__class__.__name__}` : 'Module'; + if (type === 'torch.jit._script.RecursiveScriptModule' && obj._c && obj._c.qualified_name) { + type = obj._c.qualified_name; + } + const schema = metadata.type(type); + const inputSchema = schema && schema.inputs && schema.inputs.length > 0 ? schema.inputs.slice() : [ { name: 'input' } ]; + const inputName = inputSchema.shift().name; + const inputs = []; + if (args.length > 0) { + const argument = new pytorch.Argument(inputName, args.map((argument) => values.map(argument))); + inputs.push(argument); + } + const group = groups.join('/'); + const name = group ? (`${group}/${key}`) : key; + const outputs = output ? 
[ new pytorch.Argument('output', [ values.map(name) ]) ] : []; + const item = { + name: name, + type: type, + obj: obj, + inputs: inputs, + outputs: outputs + }; + const node = new pytorch.Node(metadata, group, item, {}, values); + this.nodes.push(node); + return [ node.name ]; + }; + const loadModule = (current, groups, inputs) => { + if (!current._modules || current._modules.size == 0) { + createNode(groups, '', current, inputs, false); + } else { + const sequential = current.__class__ && current.__class__.__module__ === 'torch.nn.modules.container' && current.__class__.__name__ === 'Sequential'; + for (const [key, value] of current._modules) { + if (value) { + const type = value.__class__ ? `${value.__class__.__module__}.${value.__class__.__name__}` : null; + switch (type) { + case 'torch.nn.modules.container.Sequential': + groups.push(key); + inputs = loadModule(value, groups, sequential ? inputs : []); + groups.pop(key); + break; + default: { + inputs = createNode(groups, key, value, sequential ? inputs : [], sequential); + break; + } + } + } + } + } + return inputs; + }; + const getSubmodules = (module) => { + const submodules = []; + if (module && module.__class__ && module.__class__.__module__ && module.__class__.__name__) { + for (const [key, value] of Object.entries(module)) { + if (!key.startsWith('__')) { + if (value && value.__class__ && value.__class__.__module__ && value.__class__.__name__ && !pytorch.Utility.isTensor(value)) { + submodules.push(value); + } + } + } + } + return submodules; + }; + const loadScriptModule = (module, initializers) => { + if (module) { + if (pytorch.Graph._getParameters(module).size > 0 && !module.__hide__) { + const item = { module: module }; + this.nodes.push(new pytorch.Node(metadata, '', item, initializers, values)); + } + const submodules = getSubmodules(module); + for (const submodule of submodules) { + loadScriptModule(submodule, initializers); + } + } + }; + const type = module && module.__class__ && module.__class__.__module__ && module.__class__.__name__ ? 
`${module.__class__.__module__}.${module.__class__.__name__}` : null; + if ((type === 'torch.ScriptModule' || type === 'torch.jit._script.ScriptModule' || type === 'torch.jit._script.RecursiveScriptModule') && module.graph) { + const initializers = new Map(); + const graph = module.graph; + const constants = module.code_with_constants[1].const_mapping; + if (constants) { + for (const [key, value] of constants) { + const name = `CONSTANTS.${key}`; + if (pytorch.Utility.isTensor(value)) { + initializers.set(value, new pytorch.Tensor(name, value)); + } else if (value && value.__class__ && value.__class__.__module__ && value.__class__.__name__) { + const type = `${value.__class__.__module__}.${value.__class__.__name__}`; + switch (type) { + case '__torch__.torch.classes.xnnpack.LinearOpContext': + case '__torch__.torch.classes.xnnpack.Conv2dOpContext': + case '__torch__.torch.classes.quantized.LinearPackedParamsBase': + case '__torch__.torch.classes.quantized.Conv2dPackedParamsBase': { + for (const [key, tensor] of Object.entries(value)) { + if (pytorch.Utility.isTensor(tensor)) { + initializers.set(value, new pytorch.Tensor(`${name}.${key}`, tensor)); + } + } + break; + } + default: { + throw new pytorch.Error(`Unsupported constant context '${type}'.`); + } + } + } else { + throw new pytorch.Error('Unsupported constant.'); + } + } + } + const queue = [ module.data ]; + while (queue.length > 0) { + const module = queue.shift(); + if (module.__class__ && module.__class__.__module__ === '__torch__.torch.classes._nnapi' && module.__class__.__name__ === 'Compilation') { + continue; + } + for (const [key, obj] of Object.entries(module)) { + if (key !== '__module__' && key !== '__name__' && key !== '__class__' && key !== '__parent__') { + if (!Array.isArray(obj) && obj === Object(obj)) { + if (pytorch.Utility.isTensor(obj)) { + const parameter = obj; + parameter.__parent__ = module; + if (parameter.storage()) { + if (parameter.__count__ === undefined || parameter.__count__ === 1) { + initializers.set(parameter, new pytorch.Tensor(parameter.name, parameter)); + } + } + } else if (obj && obj.__class__) { + obj.__parent__ = module; + obj.__name__ = obj.__name__ || key; + queue.push(obj); + } + } + } + } + } + for (const value of graph.inputs()) { + const identifier = value.unique().toString(); + const name = value.debugName() || identifier; + this.inputs.push(new pytorch.Argument(name, [ values.map(identifier) ])); + } + for (const value of graph.outputs()) { + const identifier = value.unique().toString(); + this.outputs.push(new pytorch.Argument(identifier, [ values.map(identifier) ])); + } + for (const node of graph.nodes()) { + if (node === graph.param_node() || + node === graph.return_node()) { + continue; + } + if (node.kind() === 'prim::ListConstruct' && + node.outputs().length === 1 && + node.outputs().every((output) => output.uses().length === 1) && + node.inputs().every((input) => pytorch.Utility.isTensor(input.value))) { + continue; + } + if (node.kind() === 'prim::ListUnpack' && + node.inputs().length === 1 && + node.inputs().every((input) => input.uses().length === 1) && + node.outputs().every((output) => pytorch.Utility.isTensor(output.value))) { + continue; + } + const item = { + type: node.kind(), + node: node + }; + this.nodes.push(new pytorch.Node(metadata, '', item, initializers, values)); + } + if (module) { + loadScriptModule(module.data, initializers); + } + } else if (Array.isArray(module) && module.every((module) => module && module._modules !== undefined)) { + for (const value 
of module) { + loadModule(value, [], []); + } + } else { + this.type = (module.__module__ && module.__name__) ? (`${module.__module__}.${module.__name__}`) : ''; + loadModule(module, [], []); + } + } + + static _getParameters(module) { + const parameters = new Map(); + if (module && module.__class__.__module__ && module.__class__.__name__) { + for (const [key, value] of Object.entries(module)) { + if (pytorch.Utility.isTensor(value)) { + parameters.set(key, value); + } + } + } + return parameters; + } +}; + +pytorch.Argument = class { + + constructor(name, value, type, visible) { + this.name = name; + this.value = value; + if (type) { + this.type = type; + } + if (visible === false) { + this.visible = visible; + } + } +}; + +pytorch.Value = class { + + constructor(name, type, quantization, initializer) { + if (typeof name !== 'string') { + throw new pytorch.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = initializer && initializer.type ? initializer.type : type || null; + this.quantization = quantization; + this.initializer = initializer || null; + } +}; + +pytorch.Node = class { + + constructor(metadata, group, item, initializers, values, stack) { + this.group = group || ''; + this.name = item.name || ''; + this.nodes = []; + const type = (metadata, name) => { + if (name instanceof pytorch.nnapi.Graph) { + return name; + } + const type = Object.assign({}, metadata.type(name) || { name: name }); + type.identifier = type.name; + type.name = type.name.indexOf('::') !== -1 ? type.name.split('::').pop().split('.')[0] : type.name; + return type; + }; + const createAttribute = (metadata, name, value) => { + let visible = true; + let type = null; + if (name === 'training') { + visible = false; + type = 'boolean'; + } else if (metadata) { + if (metadata.type) { + type = metadata.type; + } + if (metadata.visible === false) { + visible = false; + } else if (metadata.default !== undefined) { + if (Array.isArray(value)) { + if (Array.isArray(metadata.default)) { + visible = value.length !== metadata.default.length || !value.every((item, index) => item == metadata.default[index]); + } else { + visible = !value.every((item) => item == metadata.default); + } + } else { + visible = value !== metadata.default; + } + } + } + if (Array.isArray(value) && value.length > 0 && value.every((obj) => obj && obj.__class__ && obj.__class__.__module__ && obj.__class__.__module__.startsWith('torch.nn'))) { + value = '?'; + } + return new pytorch.Argument(name, value, type, visible); + }; + const isObject = (obj) => { + if (obj && typeof obj === 'object') { + const proto = Object.getPrototypeOf(obj); + return proto === Object.prototype || proto === null; + } + return false; + }; + if (!item.module && !item.node) { + this.type = type(metadata, item.type); + this.inputs = item.inputs || []; + this.outputs = item.outputs || []; + let obj = item.obj; + if (obj && obj.__class__ && obj.__class__.__module__ === 'builtins' && obj.__class__.__name__ === 'function') { + this.type = { name: `${obj.__module__}.${obj.__name__}` }; + obj = {}; + } + const parameters = new Map(); + const entries = []; + const attributes = new Map(); + stack = stack || new Set(); + for (const [name, value] of Object.entries(obj)) { + if (name === '__class__') { + continue; + } else if (name === '_parameters' && value instanceof Map) { + for (const [name, parameter] of Array.from(value)) { + parameters.set(name, parameter); + } + } else if (name === '_buffers' && value instanceof Map) { + for (const [name, 
buffer] of Array.from(value)) { + parameters.set(name, buffer); + } + } else if (Array.isArray(value) && value.every((tensor) => pytorch.Utility.isTensor(tensor))) { + parameters.set(name, value); + } else if (pytorch.Utility.isTensor(value)) { + parameters.set(name, value); + } else if (value && value.__class__ && value.__class__.__module__ === 'collections' && value.__class__.__name__ === 'OrderedDict' && + value instanceof Map && value.size === 0) { + continue; + } else if (value && value.__class__ && value.__class__.__module__ === 'builtins' && value.__class__.__name__ === 'set' && + value instanceof Set && value.size === 0) { + continue; + } else if (value && value.__class__ && value.__class__.__module__ === 'builtins' && value.__class__.__name__ === 'list' && + Array.isArray(value) && value.length === 0) { + continue; + } else { + entries.push([name, value]); + } + } + for (const [name, value] of entries) { + if (!parameters.has(name)) { + attributes.set(name, value); + } + } + const inputs = new Map(Array.isArray(this.type.inputs) ? this.type.inputs.map((input) => [ input.name, input ]) : []); + for (const [name, value] of parameters) { + const list = Array.isArray(value) ? value.map((item) => pytorch.Utility.toTensor(item)) : [ pytorch.Utility.toTensor(value) ]; + const visible = inputs.has(name) ? inputs.get(name).visible || true : true; + const values = list.filter((value) => value !== null).map((value) => { + const identifier = value && value.name ? value.name : ''; + const tensor = value ? new pytorch.Tensor(identifier, value) : null; + return new pytorch.Value(identifier, null, null, tensor); + }); + const argument = new pytorch.Argument(name, values, null, visible); + this.inputs.push(argument); + } + this.attributes = Array.from(attributes).map(([name, value]) => { + const type = this.type.identifier; + if (pytorch.Utility.isTensor(value)) { + const tensor = new pytorch.Tensor('', value); + return new pytorch.Argument(name, tensor, 'tensor'); + } else if (Array.isArray(value) && value.every((value) => pytorch.Utility.isTensor(value))) { + const tensors = value.map((value) => new pytorch.Tensor('', value)); + return new pytorch.Argument(name, tensors, 'tensor[]'); + } else if (Array.isArray(value) && value.every((value) => typeof value === 'string')) { + return new pytorch.Argument(name, value, 'string[]'); + } else if (Array.isArray(value) && value.every((value) => typeof value === 'number')) { + return new pytorch.Argument(name, value); + } else if (name === '_modules' && value && value.__class__ && value.__class__.__module__ === 'collections' && value.__class__.__name__ === 'OrderedDict' && + value instanceof Map && Array.from(value).every(([, value]) => value.__class__)) { + const values = Array.from(value).map(([name, value]) => { + const item = { + name: name, + type: `${value.__class__.__module__}.${value.__class__.__name__}`, + obj: value + }; + return new pytorch.Node(metadata, group, item); + }); + return new pytorch.Argument(name, values, 'object[]'); + } else if (value && Array.isArray(value) && value.length > 0 && value.every((obj) => obj && (obj.__class__ || obj === Object(obj)))) { + const values = value.filter((value) => !stack.has(value)); + const nodes = values.map((value) => { + stack.add(value); + const item = { + type: value.__class__ ? 
`${value.__class__.__module__}.${value.__class__.__name__}` : 'builtins.object', + obj: value + }; + const node = new pytorch.Node(metadata, group, item, initializers, values, stack); + stack.delete(value); + return node; + }); + return new pytorch.Argument(name, nodes, 'object[]'); + } else if (value && (value.__class__ || isObject(value))) { + const item = { + type: value.__class__ ? `${value.__class__.__module__}.${value.__class__.__name__}` : 'builtins.object', + obj: value + }; + const node = new pytorch.Node(metadata, group, item, initializers, values, stack); + return new pytorch.Argument(name, node, 'object'); + } + return createAttribute(metadata.attribute(type, name), name, value); + }); + } else { + this.attributes = []; + this.inputs = []; + this.outputs = []; + let module = item.module; + if (module) { + this.type = { name: 'torch.nn.modules.module.Module' }; + for (const [name, tensor] of pytorch.Graph._getParameters(module)) { + const initializer = initializers.get(tensor) || (tensor ? new pytorch.Tensor('', tensor) : null); + const value = values.map('', null, initializer || null); + this.inputs.push(new pytorch.Argument(name, [ value ])); + if (tensor.__variable__) { + const value = values.map(tensor.__variable__); + const argument = new pytorch.Argument(name, [ value ]); + this.outputs.push(argument); + } + } + } + const node = item.node; + if (node) { + this.type = type(metadata, item.type); + module = null; + let match = true; + let count = 0; + for (const input of node.inputs()) { + const value = input.value; + const name = value && value.__class__ && value.__class__.__module__ && value.__class__.__name__ ? `${value.__class__.__module__}.${value.__class__.__name__}` : ''; + let values = []; + switch (name) { + case '__torch__.torch.classes.quantized.Conv2dPackedParamsBase': + case '__torch__.torch.classes.quantized.Conv3dPackedParamsBase': + case '__torch__.torch.classes.quantized.LinearPackedParamsBase': + case '__torch__.torch.classes.xnnpack.Conv2dOpContext': + case '__torch__.torch.classes.xnnpack.LinearOpContext': { + values = Object.values(value); + break; + } + default: { + if (pytorch.Utility.isTensor(value)) { + values = [ value ]; + } + if (input.node() && + input.node().kind() === 'prim::ListConstruct' && + input.uses().length === 1 && + input.node().inputs().every((input) => pytorch.Utility.isTensor(input.value))) { + values = input.node().inputs().map((input) => input.value); + } + break; + } + } + for (const value of values) { + const parameter = initializers.get(value); + if (parameter) { + if (value.__parent__ && (module == null || module == value.__parent__)) { + module = value.__parent__; + count++; + } else if (value.__name__ && value.__name__.startsWith('CONSTANTS.c')) { + count++; + } else { + match = false; + break; + } + } + } + if (!match) { + break; + } + } + if (module) { + const parameters = pytorch.Graph._getParameters(module); + parameters.delete('num_batches_tracked'); + if (parameters.size == count && match) { + module.__hide__ = true; + } else { + module = null; + } + } + const inputs = node.inputs(); + for (let i = 0; i < inputs.length; i++) { + const input = inputs[i]; + const metadata = this.type && this.type.inputs && i < this.type.inputs.length ? this.type.inputs[i] : null; + const name = metadata && metadata.name ? metadata.name : i.toString(); + const type = metadata && metadata.type ? 
metadata.type : null; + switch (type) { + case '__torch__.torch.classes.quantized.Conv2dPackedParamsBase': + case '__torch__.torch.classes.quantized.Conv3dPackedParamsBase': + case '__torch__.torch.classes.quantized.LinearPackedParamsBase': + case '__torch__.torch.classes.xnnpack.Conv2dOpContext': + case '__torch__.torch.classes.xnnpack.LinearOpContext': { + for (const [key, value] of Object.entries(input.value)) { + if (key.startsWith('__') && key.endsWith('__')) { + continue; + } + if (pytorch.Utility.isTensor(value)) { + const initializer = initializers.get(value); + const identifier = initializer ? initializer.name : input.unique().toString(); + const argument = new pytorch.Argument(key, [ values.map(identifier, null, initializer) ]); + this.inputs.push(argument); + } else { + const attribute = createAttribute(null, key, value); + this.attributes.push(attribute); + } + } + break; + } + default: { + if (pytorch.Utility.isTensor(input.value) || input.value === undefined || input.value === null) { + let list = [ input ]; + if (input.node() && + input.node().kind() === 'prim::ListConstruct' && + input.uses().length === 1 && + input.node().inputs().every((input) => pytorch.Utility.isTensor(input.value))) { + list = input.node().inputs(); + } + const args = list.map((input) => { + let initializer = null; + let identifier = input.unique().toString(); + if (input.value) { + const value = input.value; + const hide = value.__parent__ ? value.__parent__.__hide__ : true; + initializer = hide ? initializers.get(value) : null; + identifier = initializer ? initializer.name : identifier; + } + if (initializer) { + return new pytorch.Value(identifier, null, null, initializer); + } + return values.map(identifier); + }); + const argument = new pytorch.Argument(name, args); + this.inputs.push(argument); + } else { + const attribute = createAttribute(metadata, metadata.name, input.value); + this.attributes.push(attribute); + } + break; + } + } + } + const outputs = node.outputs(); + for (let i = 0; i < outputs.length; i++) { + const output = outputs[i]; + const metadata = this.type && this.type.outputs && i < this.type.outputs.length ? this.type.outputs[i] : null; + const name = metadata && metadata.name ? metadata.name : i === 0 ? 'output' : `output${i}`; + let list = [ output ]; + if (output.uses().length === 1 && + output.uses()[0].user && + output.uses()[0].user.kind() == 'prim::ListUnpack' && + output.uses()[0].user.outputs().every((output) => pytorch.Utility.isTensor(output.value))) { + list = output.uses()[0].user.outputs(); + } + const args = list.map((output) => values.map(output.unique().toString())); + const argument = new pytorch.Argument(name, args); + this.outputs.push(argument); + } + } + if (module) { + if (module.__name__) { + let current = module; + this.name = current.__name__; + while (current.__parent__ != null) { + current = current.__parent__; + if (!current.__parent__ && !current.__name__) { + break; + } + this.name = [ current.__name__, this.name ].join('.'); + } + } + } + } + } +}; + +pytorch.Tensor = class { + + constructor(name, tensor) { + this.name = name || ''; + const layout = tensor.layout ? 
tensor.layout.__str__() : null; + const storage = tensor.storage(); + const size = tensor.size() || []; + if (layout && layout.startsWith('torch.sparse_')) { + this.type = new pytorch.TensorType(storage.dtype.__reduce__(), new pytorch.TensorShape(size), layout.split('.').pop().replace('_', '.')); + this.indices = new pytorch.Tensor('', tensor.indices); + this._values = new pytorch.Tensor('', tensor.values); + } else if (!layout || layout === 'torch.strided') { + this.type = new pytorch.TensorType(storage.dtype.__reduce__(), new pytorch.TensorShape(size)); + this._data = storage.data; + this.encoding = '<'; + this.indices = null; + this.stride = tensor.stride(); + const stride = this.stride; + const offset = tensor.storage_offset(); + const length = stride ? size.every((v) => v !== 0) ? size.reduce((a, v, i) => a + stride[i] * (v - 1), 1) : 0 : storage.size(); + if (offset !== 0 || length !== storage.size()) { + const itemsize = storage.dtype.itemsize(); + this._offset = itemsize * offset; + this._length = itemsize * length; + } + } else { + throw new pytorch.Error(`Unsupported tensor layout '${layout}'.`); + } + } + + get values() { + const type = this.type.layout; + if (type && type.startsWith('sparse.')) { + return this._values; + } + if (this._data instanceof Uint8Array) { + return this._data; + } + if (this._offset !== undefined) { + const stream = this._data; + const position = stream.position; + stream.seek(this._offset); + const values = stream.peek(this._length); + stream.seek(position); + return values; + } + if (this._data) { + return this._data.peek(); + } + return null; + } + + decode() { + if (this.encoding !== '<') { + throw new pytorch.Error(`Tensor encoding '${this.encoding}' not implemented.`); + } + const type = this.type; + const data = this.values; + const view = new DataView(data.buffer, data.byteOffset, data.byteLength); + switch (type.dataType) { + case 'int16': { + const array = new Uint16Array(data.length >> 1); + for (let i = 0; i < array.length; i++) { + array[i] = view.getInt16(i << 1, true); + } + return array; + } + case 'int64': { + const array = new Uint32Array(data.length >> 3); + for (let i = 0; i < array.length; i++) { + array[i] = view.getUint32(i << 3, true); + if (view.getUint32((i << 3) + 4, true) !== 0) { + throw new pytorch.Error('Signed 64-bit value exceeds 32-bit range.'); + } + } + return array; + } + default: { + throw new pytorch.Error(`Tensor data type '${type.dataType}' not implemented.`); + } + } + } +}; + +pytorch.TensorType = class { + + constructor(dataType, shape, layout) { + this.dataType = dataType; + this.shape = shape; + this.layout = layout; + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +pytorch.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions || []; + } + + toString() { + if (this.dimensions && this.dimensions.length > 0) { + return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } + return ''; + } +}; + +pytorch.Container = class { + + static open(context) { + const zip = pytorch.Container.Zip.open(context); + if (zip) { + return zip; + } + const pickle = pytorch.Container.Pickle.open(context); + if (pickle) { + return pickle; + } + const tar = pytorch.Container.Tar.open(context); + if (tar) { + return tar; + } + const data = pytorch.Container.data_pkl.open(context); + if (data) { + return data; + } + const torch_utils = pytorch.Container.torch_utils.open(context); + if (torch_utils) { + return torch_utils; + } + const mobile = 
pytorch.Container.Mobile.open(context); + if (mobile) { + return mobile; + } + const index = pytorch.Container.Index.open(context); + if (index) { + return index; + } + const dynamo = pytorch.Container.ExportedProgram.open(context); + if (dynamo) { + return dynamo; + } + const executorch = pytorch.Container.ExecuTorch.open(context); + if (executorch) { + return executorch; + } + return null; + } + + constructor() { + this._events = []; + } + + async read() { + } + + on(event, callback) { + this._events.push([ event, callback ]); + } + + get format() { + throw new pytorch.Error('Container format not implemented.'); + } + + get modules() { + throw new pytorch.Error('Container modules not implemented.'); + } +}; + +pytorch.Container.Tar = class extends pytorch.Container { + + static open(context) { + const entries = context.peek('tar'); + if (entries instanceof Map && entries.has('pickle')) { + return new pytorch.Container.Tar(entries); + } + return null; + } + + constructor(entries) { + super(); + this._entries = entries; + } + + async read() { + const entries = this._entries; + delete this._entries; + const execution = new pytorch.Execution(); + for (const event of this._events) { + execution.on(event[0], event[1]); + } + const torch = execution.__import__('torch'); + const obj = torch.load(entries); + this._modules = pytorch.Utility.findWeights(obj); + if (!this._modules) { + throw new pytorch.Error('File does not contain root module or state dictionary.'); + } + } + + get format() { + return 'PyTorch v0.1.1'; + } + + get modules() { + return this._modules; + } +}; + +pytorch.Container.Pickle = class extends pytorch.Container { + + static open(context) { + const stream = context.stream; + const signature = [ 0x80, undefined, 0x8a, 0x0a, 0x6c, 0xfc, 0x9c, 0x46, 0xf9, 0x20, 0x6a, 0xa8, 0x50, 0x19 ]; + if (stream && signature.length <= stream.length && stream.peek(signature.length).every((value, index) => signature[index] === undefined || signature[index] === value)) { + return new pytorch.Container.Pickle(stream); + } + return null; + } + + constructor(stream) { + super(); + this._stream = stream; + } + + async read() { + const data = this._stream.length < 0x7ffff000 ? 
this._stream.peek() : this._stream; + delete this._stream; + const execution = new pytorch.Execution(); + for (const event of this._events) { + execution.on(event[0], event[1]); + } + const torch = execution.__import__('torch'); + const obj = torch.load(data); + this._modules = pytorch.Utility.find(obj); + } + + get format() { + return 'PyTorch v0.1.10'; + } + + get modules() { + return this._modules; + } +}; + +pytorch.Container.data_pkl = class extends pytorch.Container { + + static open(context) { + const obj = context.peek('pkl'); + if (obj) { + if (obj.__class__ && obj.__class__.__module__ && obj.__class__.__name__) { + const name = `${obj.__class__.__module__}.${obj.__class__.__name__}`; + if (name.startsWith('__torch__.')) { + return new pytorch.Container.data_pkl('', obj); + } + } + if (pytorch.Utility.isTensor(obj)) { + return new pytorch.Container.data_pkl('tensor', obj); + } + if (obj instanceof Map) { + const entries = Array.from(obj).filter(([name, value]) => name === '_metadata' || pytorch.Utility.isTensor(value)); + if (entries.length > 0) { + return new pytorch.Container.data_pkl('tensor<>', obj); + } + } else if (!Array.isArray(obj)) { + const entries = Object.entries(obj).filter(([name, value]) => name === '_metadata' || pytorch.Utility.isTensor(value)); + if (entries.length > 0) { + return new pytorch.Container.data_pkl('tensor<>', obj); + } + } + for (const key of [ '', 'model', 'net' ]) { + const module = key === '' ? obj : obj[key]; + if (module && module._modules && pytorch.Utility.isInstance(module._modules, 'collections.OrderedDict')) { + return new pytorch.Container.data_pkl('module', module); + } + } + } + return null; + } + + constructor(type, data) { + super(); + this._type = type; + this._data = data; + } + + get format() { + switch (this._type) { + case 'module': return 'PyTorch'; + case 'tensor': return 'PyTorch Tensor'; + case 'tensor<>': return 'PyTorch Pickle Weights'; + default: return 'PyTorch Pickle'; + } + } + + get modules() { + switch (this._type) { + case 'module': { + if (this._data) { + this._modules = pytorch.Utility.findModule(this._data); + delete this._data; + } + if (!this._modules) { + throw new pytorch.Error('File does not contain root module or state dictionary.'); + } + return this._modules; + } + case 'tensor': + case 'tensor<>': { + if (this._data) { + this._modules = pytorch.Utility.findWeights(this._data); + delete this._data; + } + if (!this._modules) { + throw new pytorch.Error('File does not contain root module or state dictionary.'); + } + return this._modules; + } + default: { + throw new pytorch.Error("PyTorch standalone 'data.pkl' not supported."); + } + } + + } +}; + +pytorch.Container.torch_utils = class extends pytorch.Container { + + static open(context) { + const stream = context.stream; + if (stream && stream.length > 1) { + const buffer = stream.peek(Math.min(1024, stream.length)); + if (buffer[0] === 0x80) { + const content = String.fromCharCode.apply(null, buffer); + if (content.indexOf('torch_utils') !== -1) { + const obj = context.peek('pkl'); + if (obj && Object.entries(obj).some(([, value]) => pytorch.Utility.isInstance(value, 'torch.nn.modules.module.Module'))) { + return new pytorch.Container.torch_utils(obj); + } + } + } + } + return null; + } + + constructor(obj) { + super(); + this._obj = obj; + } + + async read() { + this._modules = pytorch.Utility.find(this._obj); + delete this._obj; + } + + get format() { + return 'PyTorch torch_utils'; + } + + get modules() { + return this._modules; + } +}; + 
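+// Each container below follows the same probe-then-parse contract as the
+// classes above: a static open(context) inspects a cheap signature (pickle
+// magic, zip entry names, flatbuffers file identifier) and returns an instance
+// only on a match, deferring the expensive deserialization to read(). The two
+// flatbuffers-based containers key off the 4-byte file identifier: 'PTMF' for
+// TorchScript mobile modules and 'ET12' for ExecuTorch programs.
+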
+pytorch.Container.Mobile = class extends pytorch.Container { + + static open(context) { + const tags = context.tags('flatbuffers'); + if (tags.get('file_identifier') === 'PTMF') { + return new pytorch.Container.Mobile(context); + } + return null; + } + + constructor(context) { + super(); + this._context = context; + } + + async read(metadata) { + await this._context.require('./pytorch-schema'); + this._modules = new Map(); + const execution = new pytorch.jit.Execution(null, metadata); + for (const event of this._events) { + execution.on(event[0], event[1]); + } + const stream = this._context.stream; + const torch = execution.__import__('torch'); + const module = torch.jit.jit_module_from_flatbuffer(stream); + const version = module._c._bytecode_version.toString(); + this._format = pytorch.Utility.format('PyTorch Mobile', version); + if (module && module.forward) { + this._modules = new Map([ ['', module] ]); + } else { + this._modules = pytorch.Utility.find(module); + } + delete this._context; + } + + get format() { + return this._format; + } + + get modules() { + return this._modules; + } +}; + +pytorch.Container.ExecuTorch = class extends pytorch.Container { + + static open(context) { + const tags = context.tags('flatbuffers'); + if (tags.get('file_identifier') === 'ET12') { + return new pytorch.Container.ExecuTorch(context); + } + return null; + } + + constructor(context) { + super(); + this._context = context; + } + + async read() { + await this._context.require('./pytorch-schema'); + pytorch.executorch = flatbuffers.get('torch').executorch_flatbuffer; + const stream = this._context.stream; + const reader = flatbuffers.BinaryReader.open(stream); + /* const program = */ pytorch.executorch.Program.create(reader); + throw new pytorch.Error('Invalid file content. File contains executorch.Program data.'); + } +}; + +pytorch.Container.Zip = class extends pytorch.Container { + + static open(context) { + const entries = context.peek('zip'); + if (entries instanceof Map && entries.size > 0) { + let prefix = 0; + const paths = Array.from(entries.keys()).map((path) => path.replace(/\\/g, '/').split('/').reverse()); + for (let set = new Set(); set && paths.length > 0;) { + set = new Set(paths.map((path) => path.length > 1 ? path.pop() : null)); + set = set.size > 1 || set.keys().next().value === null ? null : set; + prefix += set ? 
set.keys().next().value.length + 1 : 0; + } + const records = new Map(Array.from(entries).map(([name, value]) => [ name.substring(prefix), value ])); + if (records.has('model.json')) { + try { + const stream = records.get('model.json'); + const buffer = stream.peek(); + const decoder = new TextDecoder('utf-8'); + const content = decoder.decode(buffer); + const model = JSON.parse(content); + if (model.mainModule) { + return new pytorch.Container.Zip(entries, model); + } + } catch (error) { + // continue regardless of error + } + } + if (records.has('data.pkl')) { + return new pytorch.Container.Zip(entries); + } + if (records.has('.data/version')) { + return new pytorch.Container.Package(entries); + } + const tags = context.tags('flatbuffers'); + if (tags.get('file_identifier') === 'PTMF') { + return new pytorch.Container.Mobile(context); + } + } + return null; + } + + constructor(entries, model) { + super(); + // https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/docs/serialization.md + this._entries = entries; + this._model = model; + } + + async read(metadata) { + const execution = new pytorch.jit.Execution(null, metadata); + for (const event of this._events) { + execution.on(event[0], event[1]); + } + const torch = execution.__import__('torch'); + const reader = new torch.PyTorchFileReader(this._entries); + const torchscript = this._model ? true : reader.has_record('constants.pkl'); + if (this._model) { + this._producer = this._model && this._model.producerName ? this._model.producerName + (this._model.producerVersion ? ` v${this._model.producerVersion}` : '') : ''; + this._format = reader.has_record('attributes.pkl') ? 'TorchScript v1.1' : 'TorchScript v1.0'; + } else { + const name = torchscript ? 'TorchScript' : 'PyTorch'; + const version = reader.version(); + this._format = pytorch.Utility.format(name, version); + } + if (torchscript) { + const module = torch.jit.load(reader); + if (module.data && module.data.forward) { + this._modules = new Map([ [ '', module ] ]); + } else { + this._modules = pytorch.Utility.find(module.data); + } + } else { + const records = reader.get_all_records().map((key) => [ key, reader.get_record(key) ]); + const entries = new Map(records); + const module = torch.load(entries); + this._modules = pytorch.Utility.find(module); + } + delete this._model; + delete this._entries; + } + + get format() { + return this._format; + } + + get modules() { + return this._modules; + } + + get producer() { + return this._producer || ''; + } +}; + +pytorch.Container.Index = class extends pytorch.Container { + + static open(context) { + const obj = context.peek('json'); + if (obj && obj.weight_map) { + const entries = Object.entries(obj.weight_map); + if (entries.length > 0 && entries.every(([, value]) => typeof value === 'string' && value.endsWith('.bin'))) { + return new pytorch.Container.Index(context, entries); + } + } + return null; + } + + constructor(context, entries) { + super(); + this._context = context; + this._entries = entries; + this._format = 'PyTorch'; + } + + async read(metadata) { + const weight_map = new Map(this._entries); + const keys = new Set(weight_map.keys()); + const files = Array.from(new Set(weight_map.values())); + const contexts = await Promise.all(files.map((name) => this._context.fetch(name))); + const execution = new pytorch.jit.Execution(null, metadata); + for (const event of this._events) { + execution.on(event[0], event[1]); + } + const torch = execution.__import__('torch'); + const archives = contexts.map((context) => { + 
return context.peek('zip'); + }); + const formats = new Set(archives.map((entries) => { + const reader = new torch.PyTorchFileReader(entries); + const version = reader.version(); + return pytorch.Utility.format('PyTorch', version); + })); + if (formats.size === 1) { + this._format = formats.values().next().value; + } + const shards = archives.map((entries) => { + return torch.load(entries); + }); + const entries = new Map(); + for (const shard of shards) { + for (const [key, value] of Object.entries(shard)) { + if (keys.has(key)) { + entries.set(key, value); + } + } + } + this._modules = pytorch.Utility.findWeights(entries); + delete this._context; + delete this._entries; + } + + get format() { + return this._format; + } + + get modules() { + return this._modules; + } +}; + +pytorch.Container.ExportedProgram = class extends pytorch.Container { + + static open(context) { + const program = context.peek('json'); + if (program && program.schema_version && program.graph_module) { + return new pytorch.Container.ExportedProgram(context, program); + } + return null; + } + + constructor(context, serialized_exported_program) { + super(); + this._context = context; + this._serialized_exported_program = serialized_exported_program; + } + + async read() { + this._format = 'PyTorch Export'; + const serialized_state_dict = await this._fetch('serialized_state_dict.pt') || await this._fetch('serialized_state_dict.json'); + const serialized_constants = await this._fetch('serialized_constants.pt') || await this._fetch('serialized_constants.json'); + const f = new Map(); + f.set('serialized_exported_program.json', this._serialized_exported_program); + f.set('serialized_state_dict.pt', serialized_state_dict); + f.set('serialized_constants.pt', serialized_constants); + const execution = new pytorch.Execution(); + for (const event of this._events) { + execution.on(event[0], event[1]); + } + const torch = execution.__import__('torch'); + if (this._serialized_exported_program.graph_module.graph.constants) { + const zip = await import('./zip.js'); + const constants = this._serialized_exported_program.graph_module.graph.constants; + for (const key of Object.keys(constants)) { + const value = constants[key]; + const str = atob(value); + const buffer = new Uint8Array(str.length); + for (let i = 0; i < str.length; i++) { + buffer[i] = str.charCodeAt(i); + } + const archive = zip.Archive.open(buffer); + constants[key] = archive.entries; + } + } + /* const exported_program = */ torch._export.load(f); + throw new pytorch.Error(`'torch.export' not supported.`); + } + + get format() { + return this._format; + } + + get modules() { + return this._modules; + } + + async _fetch(name) { + try { + const context = await this._context.fetch(name); + if (context) { + return context.peek('zip'); + } + } catch (error) { + // continue regardless of error + } + return null; + } +}; + +pytorch.Execution = class extends python.Execution { + + constructor(sources) { + super(sources); + const execution = this; + const torch = this.register('torch'); + const pickle = this.register('pickle'); + this.register('torch.jit._script'); + this.register('torch.jit._trace'); + this.registerType('torch.package.PackageImporter', class { + constructor(reader) { + this.zip_reader = reader; + } + load_pickle(module, resource) { + const name = `${module.replace(/\./, '/')}/${resource}`; + const stream = this.zip_reader.get_record(name); + const loaded_reduces = new Map(); + this.storage_context = new torch._C.DeserializationStorageContext(); + const 
unpickler = new pickle.Unpickler(stream); + unpickler.persistent_load = (saved_id) => { + switch (saved_id[0]) { + case 'storage': { + const [, storage_type, key, , size] = saved_id; + if (!this.storage_context.has_storage(key)) { + const storage = new storage_type(size); + const stream = this.zip_reader.get_record(`.data/${key}.storage`); + const buffer = stream.peek(); + storage._set_cdata(buffer); + this.storage_context.add_storage(key, storage); + } + return this.storage_context.get_storage(key); + } + case 'reduce_package': { + if (saved_id.length === 2) { + const [, func, args] = saved_id; + return execution.invoke(func, args); + } + const [, reduce_id, func, args] = saved_id; + if (!loaded_reduces.has(reduce_id)) { + const value = execution.invoke(func, [ this ].concat(args)); + loaded_reduces.set(reduce_id, value); + } + return loaded_reduces.get(reduce_id); + } + default: { + throw new pytorch.Error(`Unknown package typename '${saved_id[0]}'.`); + } + } + }; + const obj = unpickler.load(); + this.storage_context = null; + return obj; + } + import_module(name) { + return execution.import(name); + } + }); + this.registerFunction('torch.jit.load', function(file, map_location, extra_files) { + const cu = new torch.jit.CompilationUnit(); + cu.execution = execution; + const cpp_module = torch._C.import_ir_module(cu, file, map_location, extra_files); + return new torch.jit._script.RecursiveScriptModule(cpp_module); + }); + this.registerFunction('torch._C.import_ir_module', function(cu, reader) { + switch (arguments.length) { + case 4: { + const [, reader, device, extra_files] = arguments; + const deserializer = new pytorch.jit.ScriptModuleDeserializer(cu, reader); + return deserializer.deserialize(device, extra_files); + } + case 5: { + const [, , storage_context, device, ts_id] = arguments; + const deserializer = new pytorch.jit.ScriptModuleDeserializer(cu, reader, `.data/ts_code/${ts_id}/`, '.data/', storage_context); + return deserializer.deserialize(device, null); + } + default: { + throw new pytorch.Error("Invalid 'torch._C.import_ir_module' signature."); + } + } + + }); + this.registerFunction('torch._C._import_ir_module_from_package', function(cu, reader, storage_context, map_location, ts_id) { + return torch._C.import_ir_module(cu, reader, storage_context, null, ts_id); + }); + this.registerFunction('torch._C._jit_pass_inline', function(graph) { + const tryToGraphFunction = (node) => { + if (node.kind() === 'prim::CallFunction') { + // TODO + } + if (node.kind() === 'prim::CallMethod') { + const name = null; // node.s(attr::name); + const class_type = node.input(0).type(); + if (class_type) { + const fn = class_type.getMethod(name); + return tryToGraphFunction(fn); + } + } + return null; + }; + const inlineCallTo = (/* to_replace, callee, use_graph */) => { + // TODO + }; + const inlineCalls = (block) => { + for (const cur of block.nodes()) { + switch (cur.kind()) { + case 'prim::CallFunction': { + throw new pytorch.Error(); + } + case 'prim::CallMethod': { + const graphFunction = tryToGraphFunction(cur); + inlineCallTo(cur, graphFunction, true); + break; + } + default: { + for (const b of block.nodes()) { + inlineCalls(b); + } + } + } + } + }; + inlineCalls(graph.blocks()); + }); + this.registerFunction('torch.jit._script.unpackage_script_module', function(importer, script_module_id) { + const cu = new torch.jit.CompilationUnit(); + cu.execution = execution; + const cpp_module = torch._C._import_ir_module_from_package(cu, importer.zip_reader, importer.storage_context, 
importer.last_map_location, script_module_id); + return new torch.jit._script.RecursiveScriptModule(cpp_module); + }); + this.registerFunction('torch.jit.jit_module_from_flatbuffer', function(f) { + pytorch.mobile = flatbuffers.get('torch').torch.jit.mobile; + const cu = new torch.jit.CompilationUnit(); + cu.execution = execution; + const stream = f; + const reader = flatbuffers.BinaryReader.open(stream); + const module = pytorch.mobile.serialization.Module.create(reader); + const loader = new pytorch.jit.FlatBuffersLoader(cu); + const cpp_module = loader.parseModule(module); + // parse_and_initialize_jit_module + // const mobilem = parse_and_initialize_mobile_module_for_jit(data, jit_files, jit_constants); + // const m = jitModuleFromSourceAndConstants(mobilem._ivalue(), jit_files, jit_constants, mobilem.bytecode_version()); + // throw new pytorch.Error('torch.jit.mobile.serialization.Module not supported.'); + return torch.jit._script.wrap_cpp_module(cpp_module); + }); + this.registerFunction('torch.jit._script.wrap_cpp_module', function(cpp_module) { + const init_fn = (script_module) => { + for (const [name, module] of new torch.ModuleDict(script_module._c).items()) { + script_module.__setattr__(name, torch.jit._script.wrap_cpp_module(module)); + } + }; + return torch.jit._script.RecursiveScriptModule._construct(cpp_module, init_fn); + }); + this.registerType('torch._C.DeserializationStorageContext', class extends Map { + has_storage(name) { + return this.has(name); + } + get_storage(name) { + return this.get(name); + } + add_storage(name, storage) { + return this.set(name, storage); + } + }); + this.registerType('torch.Type', class {}); + this.registerType('torch.ClassType', class extends torch.Type { + constructor(qualified_name, cu, is_module) { + super(); + this._qualified_name = qualified_name; + this._is_module = is_module; + } + qualified_name() { + return this._qualified_name; + } + name() { + return this._qualified_name.split('.').pop(); + } + is_module() { + return this._is_module; + } + addMethod(/* name, fn */) { + // TODO + } + addAttribute(/* name */) { + // TODO + } + hasAttribute(/* name */) { + // TODO + } + hasConstant(/* name */) { + // TODO + } + methods() { + // TODO + } + }); + this.registerType('torch.TupleType', class extends torch.Type { + constructor(/* elements, name, schema */) { + super(); + // TODO + } + }); + this.registerType('torch.ScriptFunction', class { + constructor(name, graph /*, function_creator */) { + this._name = name; + this._graph = graph; + } + }); + this.registerType('torch.ScriptMethod', class { + constructor(owner, value) { + this._owner = owner; + this._function = value; + } + get name() { + return this._function.name(); + } + get owner() { + return this._owner; + } + __call__(/* args, kwargs */) { + throw new pytorch.Error(); + } + get graph() { + return this._function.graph(); + } + get schema() { + // return this.function().getSchema(); + throw new pytorch.Error(); + } + get code() { + throw new pytorch.Error(); + } + get code_with_constants() { + throw new pytorch.Error(); + } + }); + this.registerType('torch.ScriptObject', class { + constructor(type) { + this._type = type; + } + static create(type) { + if (type.is_module()) { + return new torch.ScriptModule(type); + } + return new torch.ScriptObject(type); + } + _type() { + return this._type; + } + _get_method(name) { + for (const method of this._type.methods()) { + if (name == method.name) { + return method; + } + } + return null; + } + _has_method(/* name */) { + throw new 
pytorch.Error(); + } + __setattr__(name, value) { + // TODO if (this._type.hasContant(name)) + this[name] = value; + } + __getattr__(name) { + return this[name]; + } + hasattr(name) { + return this._type.hasAttribute(name) || this._type.hasConstant(name); + } + _properties() { + throw new pytorch.Error(); + } + }); + this.registerType('torch.ScriptModule', class extends torch.ScriptObject { + constructor(type) { + super(type); + } + get qualified_name() { + return this._type.qualified_name(); + } + get code_with_constants() { + const const_map = {}; + const_map.const_mapping = new Map(Object.entries(execution.builtins.CONSTANTS)); + return [ null, const_map ]; + } + get graph() { + if (!this._graph) { + if (!this.data) { + return null; + } + if (!this.data.forward) { + throw new pytorch.Error("Module 'forward' not implemented."); + } + const args = [ this.data ]; // self + if (this.data.forward.__code__ && this.data.forward.__code__.parameters) { + for (const parameter of this.data.forward.__code__.parameters) { + const defaultValue = (type, name) => { + if (type.type === 'type' && type.name.type) { + switch (type.name.value) { + case 'Tensor': { + const tensor = execution.invoke('torch.Tensor', []); + tensor.__variable__ = name; + tensor.__origin__ = 'graph-input'; + const value = execution.variable(tensor, execution.graph.param_node()); + if (value && name) { + value.setDebugName(name); + } + return tensor; + } + case 'Tuple': { + return type.arguments.map((type, index) => defaultValue(type, `${name}[${index}]`)); + } + case 'List': { + return type.arguments.map((type, index) => defaultValue(type, `${name}[${index}]`)); + } + case 'Dict': { + if (type.arguments[1].name.value === 'Tensor') { + const Dict = class extends Map { + get(key) { + if (!super.has(key)) { + super.set(key, defaultValue(type.arguments[1], `${name}:${key}`)); + } + return super.get(key); + } + }; + return new Dict(); + } + return new Map(); + } + case 'int': { + return 0; + } + case 'float': { + return 0.0; + } + case 'bool': { + return false; + } + case 'Optional': { + return undefined; + } + case 'str': + return ''; + default: { + break; + } + } + } + throw new pytorch.Error(`Unsupported parameter type '${JSON.stringify(type)}'.`); + }; + if (parameter.name !== 'self') { + const type = parameter.parameterType; + const value = defaultValue(type, parameter.name); + if (pytorch.Utility.isTensor(value)) { + value.__variable__ = parameter.name; + value.__origin__ = 'graph-input'; + } + args.push(value); + } + } + } + const result = this.data.forward.__call__(args); + if (Array.isArray(result)) { + for (const output of result) { + if (pytorch.Utility.isTensor(output)) { + const value = execution.variable(output); + execution.graph.return_node().addInput(value); + } + } + } else if (pytorch.Utility.isTensor(result)) { + const value = execution.variable(result); + execution.graph.return_node().addInput(value); + } else if (Object(result) === result) { + for (const key of Object.keys(result)) { + const item = result[key]; + if (Array.isArray(item)) { + for (const output of item) { + if (pytorch.Utility.isTensor(output)) { + const value = execution.variable(output); + execution.graph.return_node().addInput(value); + } + } + } else if (pytorch.Utility.isTensor(item)) { + const value = execution.variable(item); + execution.graph.return_node().addInput(value); + } + } + } + this._graph = execution.graph; + } + return this._graph; + } + }); + this.registerType('torch.ModuleDict', class { + constructor(module) { + this._items = 
Object.entries(module).filter(([, value]) => value instanceof torch.ScriptModule); + } + items() { + return this._items; + } + }); + this.registerType('torch.jit.CompilationUnit', class { + constructor() { + this._functions = new Map(); + this._classes = new Map(); + } + register_function(fn) { + this._functions.set(fn.name, fn); + } + define(prefix, properties, propResolvers, definitions /*, defResolvers, self, shouldMangle, operator_set_version */) { + for (const def of definitions) { + const name = def.name; + const qualified_name = prefix ? `${prefix}.${name}` : name; + const graph = new torch.Graph(); + const fn = new torch.ScriptFunction(qualified_name, graph, null); + this.register_function(fn); + } + } + get_class(name) { + return this._classes.get(name); + } + register_type(name, cls) { + this._classes.set(name, cls); + } + }); + this.registerType('torch.jit._script.ScriptModule', class extends torch.nn.modules.module.Module { + constructor(/* obj */) { + super(); + // TODO + } + }); + this.registerType('torch.jit._trace.TracedModule', class extends torch.jit._script.ScriptModule {}); + this.registerType('torch.jit._trace.TopLevelTracedModule', class extends torch.jit._trace.TracedModule {}); + this.registerType('torch.jit._script.RecursiveScriptModule', class extends torch.jit._script.ScriptModule { + constructor(cpp_module) { + super(); + this._initializing = true; + this._c = cpp_module; + } + static _construct(cpp_module, init_fn) { + const script_module = new torch.jit._script.RecursiveScriptModule(cpp_module); + init_fn(script_module); + torch.jit._script.RecursiveScriptModule._finalize_scriptmodule(script_module); + return script_module; + } + static _finalize_scriptmodule() { + this._initializing = false; + } + get data() { + return this._c.data; + } + get graph() { + // return this._c._get_method("forward").graph; + return this._c.graph; + } + get code_with_constants() { + // return this.forward.code_with_constants; + return this._c.code_with_constants; + } + __setattr__(name, value) { + if (this._initializing) { + super.__setattr__(name, value); + } else if (this._modules.has(name)) { + this._modules.set(name, value); + } else if (this._c.hasattr(name)) { + this._c.setattr(name, value); + } else { + // TODO + } + } + __getattr__(name) { + if (this._initializing) { + return super.__getattr__(name); + } + if (this._modules.has(name)) { + return this._modules.get(name); + } + if (this._c.hasattr(name)) { + return this._c.getattr(name); + } + if (this._c._has_method(name)) { + // TODO + } + return super.__getattr__(name); + } + }); + torch.jit.ScriptModule = torch.jit._script.ScriptModule; + torch.jit.RecursiveScriptModule = torch.jit._script.RecursiveScriptModule; + torch.jit.TopLevelTracedModule = torch.jit._trace.TopLevelTracedModule; + torch.CompilationUnit = torch.jit.CompilationUnit; + torch._C.CompilationUnit = torch.jit.CompilationUnit; + torch._C.ScriptModule = torch.ScriptModule; + torch._C.ClassType = torch.ClassType; + } +}; + +pytorch.jit = {}; + +pytorch.jit.Execution = class extends pytorch.Execution { + + constructor(sources, metadata) { + super(sources); + this._metadata = metadata; + const execution = this; + this.registerType('__torch__.torch.classes._nnapi.Compilation', class { + constructor() { + this.__hide__ = true; + } + __init__() { + } + init(serialized_model_tensor, parameter_buffers) { + this.serialized_model_tensor = serialized_model_tensor; + this.parameter_buffers = parameter_buffers; + const buffers = parameter_buffers.map((buffer) => 
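+                    // `parameter_buffers` are understood to be the weight tensors captured
+                    // when the model was prepared for NNAPI; each carries its originating
+                    // tensor in `__source__`, whose storage backs the serialized model
+                    // parsed below (an assumption based on how `run` reuses these buffers).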
buffer.__source__.storage()); + const serialized_model = serialized_model_tensor.storage().data; + this.serialized_model = new pytorch.nnapi.SerializedModel(serialized_model, buffers); + } + run(inputs, outputs) { + execution.variable(this.serialized_model_tensor); + this.serialized_model_tensor.__count__ = (this.serialized_model_tensor.__count__ || 0) + 1; + const type = new pytorch.nnapi.Graph(this.serialized_model); + const node = execution._graph.create(type); + for (const tensor of inputs) { + const value = execution.variable(tensor); + node.addInput(value); + } + for (const tensor of outputs) { + execution.variable(tensor, node); + } + } + }); + this.registerType('__torch__.torch.classes.quantized.Conv2dPackedParamsBase', class { + __setstate__(state) { + if (state[0] !== '2') { + throw new pytorch.Error(`Unsupported pack version '${state[0]}'.`); + } + const [/* pack_version */, tensors, opt_tensors] = state; + const packed_config_tensor = new pytorch.Tensor('', tensors[0], true); + const packed_config = packed_config_tensor.decode(); + /* eslint-disable prefer-destructuring */ + this.weight = tensors[1]; + this.bias = opt_tensors[0]; + this.stride = [ packed_config[1], packed_config[2] ]; + this.padding = [ packed_config[3], packed_config[4] ]; + this.dilation = [ packed_config[5], packed_config[6] ]; + this.output_padding = [ packed_config[7], packed_config[8] ]; + this.groups = packed_config[9]; + /* eslint-enable prefer-destructuring */ + } + }); + this.registerType('__torch__.torch.classes.quantized.Conv3dPackedParamsBase', class { + __setstate__(state) { + if (state[0] !== '2') { + throw new pytorch.Error(`Unsupported pack version '${state[0]}'.`); + } + const [/* pack_version */, tensors, opt_tensors] = state; + const packed_config_tensor = new pytorch.Tensor('', tensors[0], true); + const packed_config = packed_config_tensor.decode(); + /* eslint-disable prefer-destructuring */ + this.weight = tensors[1]; + this.bias = opt_tensors[0]; + this.stride = [ packed_config[1], packed_config[2] ]; + this.padding = [ packed_config[3], packed_config[4] ]; + this.dilation = [ packed_config[5], packed_config[6] ]; + this.output_padding = [ packed_config[7], packed_config[8] ]; + this.groups = packed_config[9]; + /* eslint-enable prefer-destructuring */ + } + }); + this.registerType('__torch__.torch.classes.quantized.LinearPackedParamsBase', class { + __setstate__(state) { + [this.weight, this.bias] = state; + } + }); + this.registerType('__torch__.torch.classes.xnnpack.Conv2dOpContext', class { + __setstate__(state) { + [this.weight, this.bias, this.stride, this.padding, this.dilation, this.groups, this.output_min, this.output_max] = state; + } + }); + this.registerType('__torch__.torch.classes.xnnpack.LinearOpContext', class { + __setstate__(state) { + [this.weight, this.bias, this.output_min, this.output_max] = state; + } + }); + this.registerType('torch.Graph', class { + constructor() { + this._unique = 1; + this._nodes = []; + this._block = execution.invoke('torch.Block', [ this ]); + } + create(kind) { + return execution.invoke('torch.Node', [ this, kind ]); + } + inputs() { + return this._block.inputs(); + } + outputs() { + return this._block.outputs(); + } + nodes() { + return this._nodes; + // return this._block.nodes(); + } + param_node() { + return this._block.param_node(); + } + return_node() { + return this._block.return_node(); + } + }); + this.registerType('torch.Block', class { + constructor(graph) { + this._unique = 1; + this._graph = graph; + this._input = 
graph.create('prim::Param'); + this._output = graph.create('prim::Return'); + } + param_node() { + return this._input; + } + return_node() { + return this._output; + } + inputs() { + return this._input.outputs(); + } + outputs() { + return this._output.inputs(); + } + addInput(name) { + const value = this._input.addOutput(); + value.setDebugName(name || ''); + return value; + } + registerOutput(value) { + this._output.addInput(value); + return this.outputs().length - 1; + } + }); + this.registerType('torch.Node', class { + constructor(graph, kind) { + this._graph = graph; + this._graph._nodes.push(this); + this._kind = kind; + this._inputs = []; + this._outputs = []; + this._blocks = []; + } + kind() { + return this._kind; + } + inputs() { + return this._inputs; + } + outputs() { + return this._outputs; + } + blocks() { + return this._blocks; + } + addInput(value) { + const use = execution.invoke('torch.Use', [ this ]); + value.uses().push(use); + this._inputs.push(value); + return value; + } + addOutput() { + const value = execution.invoke('torch.Value', [ this ]); + this._outputs.push(value); + return value; + } + addBlock() { + const block = execution.invoke('torch.Block', [ this._graph, this ]); + this._blocks.push(block); + return block; + } + }); + this.registerType('torch.Value', class { + constructor(node) { + this._unique = node && node._unique ? node._unique++ : node._graph._unique++; + this._node = node && node._unique ? null : node; + this._uses = []; + } + unique() { + return this._unique; + } + node() { + return this._node; + } + uses() { + return this._uses; + } + setDebugName(name) { + this._unique_name = name; + } + debugName() { + return this._unique_name; + } + }); + this.registerType('torch.Use', class { + constructor(node) { + this._node = node; + } + get user() { + return this._node; + } + }); + this._metadata = metadata; + this._types = new Map(); + for (const [, value] of this._metadata._types) { + const name = value.name; + if (name.indexOf('::') !== -1) { + const index = name.lastIndexOf('.'); + const key = index === -1 ? name : name.substring(0, index); + if (!this._types.has(key)) { + this._types.set(key, []); + } + this._types.get(key).push(value); + } + } + this._graph = this.invoke('torch.Graph', []); + this._values = new Map(); + } + + debug(file) { + const buffer = this.source(`${file}.debug_pkl`); + if (buffer) { + return null; + // const unpickler = this.invoke('pickle.Unpickler', [ buffer ]); + // return unpickler.load(); + } + return null; + } + + get graph() { + return this._graph; + } + + variable(tensor, node) { + if (this._values.has(tensor)) { + return this._values.get(tensor); + } + const value = node ? node.addOutput() : this.invoke('torch.Value', [ node ? node : this._graph ]); + value.value = tensor; + if (typeof tensor !== 'string' && typeof tensor !== 'number') { + this._values.set(tensor, value); + } + if (pytorch.Utility.isTensor(tensor)) { + tensor.__variable__ = value.unique().toString(); + } + return value; + } + + resolve(name) { + const index = name.lastIndexOf('.'); + const memberName = index === -1 ? name : name.substring(index + 1, name.length); + const moduleName = index === -1 ? '' : name.substring(0, index); + const module = this.import(moduleName); + let type = module ? 
module[memberName] : null; + if (!type) { + if (name.startsWith('__torch__.')) { + throw new pytorch.Error(`Unknown type name '${name}'.`); + } + type = super.resolve(name); + } + return type; + } + + target(expression, context) { + if (expression.type === 'id') { + switch (expression.value) { + case 'torch': + case 'ops': + case 'CONSTANTS': + case 'uninitialized': + return this.builtins[expression.value]; + default: + break; + } + } + let current = expression; + let path = []; + for (;;) { + if (current.type === '.' && current.member && current.member.type === 'id') { + path.push(current.member.value); + current = current.target; + } else if (current.type === 'id' && current.value !== 'self' && current.value !== 'CONSTANTS') { + path.push(current.value); + break; + } else { + path = null; + break; + } + } + if (path) { + let target = null; + for (let i = path.length - 1; i >= 0; i--) { + target = target ? target[path[i]] : context.get(path[i]); + if (!target) { + break; + } + } + if (!target) { + path.reverse(); + const name = path.join('.'); + const file = `${path.join('/')}.py`; + if (this.source(file)) { + return this.import(name); + } + return this.resolve(name); + } + } + return super.target(expression, context); + } + + call(target, name, args, context) { + const overload = this._overload(target, name, args, context); + if (overload) { + const [schema, args, evalArgs] = overload; + const copyArgs = Array.prototype.slice.call(args); + const copyEvalArgs = Array.prototype.slice.call(evalArgs); + const node = this._graph.create(schema.name); + node.schema = schema; + const referencedParameters = []; + const parameters = Array.prototype.slice.call(schema.inputs || []).concat(Array.prototype.slice.call(schema.attributes || [])); + while (copyEvalArgs.length > 0) { + if (parameters.length <= 0) { + if (schema.name.startsWith('_caffe2::')) { + break; + } + throw new pytorch.Error(); + } + if (copyArgs.every((arg) => arg.type === '=' && arg.target && arg.target.type === 'id') && + parameters.every((parameter) => parameter.type !== 'Tensor' && parameter.type !== 'Tensor[]')) { + const map = new Map(parameters.map((parameter) => [ parameter.name, parameter ])); + while (copyArgs.length > 0) { + const argument = copyArgs.shift(); + const arg = copyEvalArgs.shift(); + const parameter = map.get(argument.target.value); + if (!parameter) { + throw new pytorch.Error(); + } + if (!pytorch.Utility.isType(arg, parameter.type)) { + if (parameter.optional) { + continue; + } + throw new pytorch.Error(); + } + const value = this.variable(arg); + value.value = arg; + node.addInput(value); + } + continue; + } + const parameter = parameters.shift(); + const [argument] = copyEvalArgs; + if (parameter.type === 'Tensor' || (parameter.type === 'Scalar' && pytorch.Utility.isTensor(argument))) { + if (Array.isArray(argument) || (!pytorch.Utility.isTensor(argument) && argument !== null && argument !== undefined)) { + if (parameter.optional) { + continue; + } + throw new pytorch.Error(); + } else { + copyArgs.shift(); + copyEvalArgs.shift(); + const tensor = (argument === null || argument === undefined) ? 
{} : argument; + const value = this.variable(tensor); + referencedParameters.push(tensor); + node.addInput(value); + } + } else if (parameter.type === 'Tensor[]') { + const [argument] = copyEvalArgs; + if (!Array.isArray(argument) || !argument.every((item) => pytorch.Utility.isTensor(item) || item === null)) { + if (parameter.optional) { + continue; + } + throw new pytorch.Error(); + } else { + copyArgs.shift(); + copyEvalArgs.shift(); + + const list = this._graph.create('prim::ListConstruct'); + for (const arg of argument) { + const tensor = arg; + if (tensor) { + tensor.__count__ = (tensor.__count__ || 0) + 1; + } + const value = this.variable(tensor); + list.addInput(value); + } + + const value = list.addOutput(); + node.addInput(value); + } + } else { + const [arg] = copyArgs; + if (!pytorch.Utility.isType(argument, parameter.type) && argument !== null) { + if (parameter.optional) { + continue; + } + throw new pytorch.Error(); + } else if (arg.type !== '=') { + copyArgs.shift(); + copyEvalArgs.shift(); + switch (parameter.type) { + case '__torch__.torch.classes.quantized.Conv2dPackedParamsBase': + case '__torch__.torch.classes.quantized.Conv3dPackedParamsBase': + case '__torch__.torch.classes.quantized.LinearPackedParamsBase': + case '__torch__.torch.classes.xnnpack.Conv2dOpContext': + case '__torch__.torch.classes.xnnpack.LinearOpContext': { + const value = this.variable(argument); + value.value = argument; + node.addInput(value); + for (const [, value] of Object.entries(argument)) { + if (pytorch.Utility.isTensor(value)) { + const tensor = value; + referencedParameters.push(tensor); + } + } + break; + } + default: { + const value = this.variable(argument); + node.addInput(value); + value.value = argument; + break; + } + } + } else { + throw new pytorch.Error('Expected named argument.'); + } + } + } + const result = []; + for (let i = 0; i < schema.outputs.length; i++) { + const parameter = schema.outputs[i]; + switch (parameter.type) { + case 'Scalar': + case 'Tensor': { + const output = this.invoke('torch.Tensor', []); + output.__origin__ = schema.name; + if (i === 0) { + switch (schema.name) { + case 'aten::conv1d': + case 'aten::embedding': { + output.resize_([ NaN, NaN, NaN ]); + break; + } + case 'aten::cat': + case 'aten::conv2d': + case 'aten::dropout': + case 'aten::flatten': + case 'aten::flatten.named_out_dim': + case 'aten::max_pool2d': + case 'aten::adaptive_avg_pool2d': + case 'aten::avg_pool2d': + case 'aten::quantize_per_tensor': + case 'aten::relu_': + case 'aten::prelu': + case 'aten::hardtanh_': + case 'aten::upsample_bilinear2d': + case 'prepacked::conv2d_clamp_run': { + const [input] = evalArgs; + if (pytorch.Utility.isTensor(input) && input.size() === undefined) { + input.resize_([ NaN, NaN, NaN, NaN ]); + } + output.resize_([ NaN, NaN, NaN, NaN ]); + break; + } + case 'aten::slice': + case 'aten::slice.Tensor': { + const [input] = evalArgs; + if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) { + const size = input.size(); + output.resize_(size); + } + break; + } + case 'aten::to': + case 'aten::to.device': + case 'aten::to.dtype': + case 'aten::to.dtype_layout': { + const [input] = evalArgs; + if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) { + const size = input.size(); + output.resize_(size); + } + break; + } + case 'aten::conv3d': { + output.resize_([ NaN, NaN, NaN, NaN, NaN ]); + break; + } + case 'aten::roll': + case 'aten::detach': + case 'aten::mean': + case 'aten::mul': + case 'aten::mul.Scalar': + case 'aten::div': 
+ case 'aten::div.Scalar': + case 'aten::batch_norm': + case 'aten::gelu': + case 'aten::relu': + case 'aten::clamp': + case 'aten::clamp_': + case 'aten::_add_relu_': + case 'aten::hardswish_': { + const [input] = evalArgs; + if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) { + output.resize_(input.size()); + } + break; + } + case 'aten::add': + case 'aten::add.Scalar': + case 'aten::sub': + case 'aten::sub.Scalar': { + const [input] = evalArgs; + if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) { + output.resize_(input.size()); + } else { + const [, other] = evalArgs; + if (pytorch.Utility.isTensor(other) && Array.isArray(other.size())) { + output.resize_(other.size()); + } + } + break; + } + case 'aten::select': + case 'aten::select.int': { + const [input] = evalArgs; + if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) { + output.resize_(Array(input.size().length - 1).fill(NaN)); + } + break; + } + case 'aten::layer_norm': { + const [input, normalized_shape] = evalArgs; + if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) { + const shape = input.size(); + if (Array.isArray(normalized_shape) && normalized_shape.length === 1) { + const [value] = normalized_shape; + shape[shape.length - 1] = value; + } + output.resize_(shape); + } + break; + } + case 'aten::empty': + case 'aten::ones': + case 'aten::zeros': + case 'aten::zeros_like': { + output.resize_(evalArgs[0]); + break; + } + case 'aten::view': + case 'aten::reshape': + case 'aten::new_full': { + output.resize_(evalArgs[1]); + break; + } + case 'aten::squeeze': + case 'aten::squeeze.dim': { + const [input] = evalArgs; + const size = input.size(); + if (Array.isArray(size)) { + switch (evalArgs.length) { + case 1: { + output.resize_(size.filter((value) => value !== 1)); + break; + } + case 2: { + const [, dim] = evalArgs; + output.resize_(size.filter((value, index) => (value !== 1 && !isNaN(value)) || index !== dim)); + break; + } + default: { + break; + } + } + } + break; + } + case 'aten::unsqueeze': { + const [input, dim] = evalArgs; + const size = input.size(); + if (Array.isArray(size) && dim !== undefined) { + const shape = size.slice(); + shape.splice(dim, 0, 1); + output.resize_(shape); + } else { + output.resize_([ NaN, NaN, NaN, NaN ]); + } + break; + } + case 'aten::transpose': + case 'aten::transpose.int': { + const [input, dim0, dim1] = evalArgs; + if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) { + const size = input.size().slice(); + const d0 = dim0 >= 0 ? dim0 : size.length + dim0; + const d1 = dim1 >= 0 ? 
dim1 : size.length + dim1;
+                                        const value = size[d0];
+                                        /* eslint-disable prefer-destructuring */
+                                        size[d0] = size[d1];
+                                        /* eslint-enable prefer-destructuring */
+                                        size[d1] = value;
+                                        output.resize_(size);
+                                    }
+                                    break;
+                                }
+                                case 'aten::contiguous': {
+                                    const [source] = evalArgs;
+                                    output.__source__ = source;
+                                    break;
+                                }
+                                case 'quantized::cat':
+                                case 'quantized::cat_relu':
+                                case 'quantized::linear':
+                                case 'quantized::conv2d':
+                                case 'quantized::conv2d.new':
+                                case 'quantized::conv2d_relu':
+                                case 'quantized::conv2d_relu.new':
+                                case 'quantized::add':
+                                case 'quantized::add_relu':
+                                    output.resize_([ NaN, NaN, NaN, NaN ]);
+                                    output.__quantized__ = true;
+                                    break;
+                                default:
+                                    break;
+                            }
+                        }
+                        this.variable(output, node);
+                        result.push(output);
+                        break;
+                    }
+                    case 'Tensor[]': {
+                        let count = 1;
+                        switch (schema.name) {
+                            case 'aten::chunk':
+                                count = node.inputs()[1].value;
+                                break;
+                            case 'aten::meshgrid': {
+                                const list = node.inputs()[0].node();
+                                if (list.kind() === 'prim::ListConstruct') {
+                                    count = list.inputs().length;
+                                }
+                                break;
+                            }
+                            case 'aten::unbind':
+                            case 'aten::unbind.int':
+                                count = args[0].__tuple__ || count;
+                                break;
+                            case 'aten::broadcast_tensors':
+                            case 'aten::split':
+                            case 'aten::split.Tensor':
+                            case 'aten::split_with_sizes':
+                                if (context.target.length > 0) {
+                                    count = context.target[context.target.length - 1].length;
+                                }
+                                break;
+                            default:
+                                break;
+                        }
+
+                        const value = node.addOutput();
+                        const list = this._graph.create('prim::ListUnpack');
+                        list.addInput(value);
+
+                        const tensors = [];
+                        for (let i = 0; i < count; i++) {
+                            const tensor = this.invoke('torch.Tensor', []);
+                            tensor.__origin__ = schema.name;
+                            this.variable(tensor, list);
+                            tensors.push(tensor);
+                        }
+                        result.push(tensors);
+                        break;
+                    }
+                    case '__torch__.torch.classes.xnnpack.Conv2dOpContext':
+                    case '__torch__.torch.classes.xnnpack.LinearOpContext': {
+                        const value = this.invoke(parameter.type, []);
+                        this.variable(value, node);
+                        result.push(value);
+                        break;
+                    }
+                    default: {
+                        const output = this.invoke('torch.Tensor', []);
+                        output.resize_([]);
+                        output.__origin__ = schema.name;
+                        this.variable(output, node);
+                        result.push(output);
+                        break;
+                    }
+                }
+            }
+            for (const referencedParameter of referencedParameters) {
+                referencedParameter.__count__ = (referencedParameter.__count__ || 0) + 1;
+            }
+            if (result.length > 1) {
+                return result;
+            }
+            return result[0];
+        }
+        return super.call(target, name, args, context);
+    }
+
+    _overload(target, name, args, context) {
+        let moduleName = pytorch.Utility.target(target);
+        if (moduleName && name) {
+            let outputTypes = null;
+            let type = `${moduleName}.${name}`;
+            if (type === 'ops.prim.NumToTensor' && args.length === 1 && args[0].type === 'call' && args[0].target.member.type === 'id') {
+                const [arg] = args;
+                moduleName = pytorch.Utility.target(arg.target.target);
+                name = arg.target.member.value;
+                args = arg.args;
+                outputTypes = [ 'int64' ];
+                type = `${moduleName}.${name}`;
+            }
+            // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/native_functions.yaml
+            let overloads = null;
+            if (type.startsWith('torch.')) {
+                overloads = this._types.get(`aten::${type.substring(6)}`);
+            } else if (type.startsWith('ops.') && !type.startsWith('ops.prim.')) {
+                const path = type.split('.');
+                if (path.length === 3) {
+                    overloads = this._types.get(`${path[1]}::${path[2]}`);
+                }
+                if (!overloads) {
+                    const module = this.import(moduleName);
+                    if (!module || !module[name]) {
+                        const metadata = {};
+                        metadata.name = type;
+                        metadata.inputs = [];
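+                        // No schema is registered for this custom op, so a synthetic one is
+                        // built from the call site: one input per observed argument (named by
+                        // position or keyword, typed via pytorch.Utility.getType) and one
+                        // output per element of the assignment target. For a hypothetical
+                        //   boxes, scores = ops.detectron2.nms(dets, 0.5)
+                        // this would record roughly:
+                        //   { name: 'ops.detectron2.nms',
+                        //     inputs: [ { name: '0', type: 'Tensor' }, { name: '1', type: 'float32' } ],
+                        //     outputs: [ { name: '', type: '' }, { name: '', type: '' } ] }
+                        // so the node can still be constructed and rendered.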
metadata.outputs = []; + for (let i = 0; i< args.length; i++) { + const input = {}; + let argument = args[i]; + input.name = i.toString(); + if (argument.type === '=' && argument.target && argument.target.type === 'id') { + input.name = this.expression(argument.target, context); + argument = argument.expression; + } + const obj = this.expression(argument, context); + input.type = pytorch.Utility.getType(obj); + metadata.inputs.push(input); + } + const count = context.target.length > 0 ? context.target[context.target.length - 1].length : 0; + for (let i = 0; i < count; i++) { + metadata.outputs.push({ name: '', type: '' }); + } + this._metadata.add(type, metadata); + overloads = [ metadata ]; + } + } + } + if (overloads) { + overloads = !Array.isArray(overloads) ? [ overloads ] : overloads; + const evalArgs = args.map((argument) => { + if (argument.type === '=' && argument.target && argument.target.type === 'id') { + argument = argument.expression; + } + return this.expression(argument, context); + }); + for (const schema of overloads) { + const copyArgs = Array.prototype.slice.call(args); + const copyEvalArgs = Array.prototype.slice.call(evalArgs); + const parameters = Array.prototype.slice.call(schema.inputs || []).concat(Array.prototype.slice.call(schema.attributes || [])); + let next = false; + while (copyEvalArgs.length > 0) { + if (parameters.length <= 0) { + next = !schema.name.startsWith('_caffe2::'); + break; + } + if (copyArgs.every((arg) => arg.type === '=' && arg.target && arg.target.type === 'id') && + parameters.every((parameter) => parameter.type !== 'Tensor' && parameter.type !== 'Tensor[]')) { + const map = new Map(parameters.map((parameter) => [ parameter.name, parameter ])); + while (copyArgs.length > 0) { + const argument = copyArgs.shift(); + const arg = copyEvalArgs.shift(); + const parameter = map.get(argument.target.value); + if (!parameter) { + next = true; + break; + } + if (!pytorch.Utility.isType(arg, parameter.type)) { + if (parameter.optional) { + continue; + } + next = true; + break; + } + } + continue; + } + if (next) { + break; + } + const parameter = parameters.shift(); + const [argument] = copyEvalArgs; + if (parameter.type === 'Tensor' || (parameter.type === 'Scalar' && pytorch.Utility.isTensor(argument))) { + if (Array.isArray(argument) || (!pytorch.Utility.isTensor(argument) && argument !== null && argument !== undefined)) { + if (parameter.optional) { + continue; + } + next = true; + } else { + copyArgs.shift(); + copyEvalArgs.shift(); + } + } else if (parameter.type === 'Tensor[]') { + const [argument] = copyEvalArgs; + if (!Array.isArray(argument) || !argument.every((item) => pytorch.Utility.isTensor(item) || item === null)) { + if (parameter.optional) { + continue; + } + next = true; + } else { + copyArgs.shift(); + copyEvalArgs.shift(); + } + } else { + const [arg] = copyArgs; + if (!pytorch.Utility.isType(argument, parameter.type) && argument !== null) { + if (parameter.optional) { + continue; + } + next = true; + } else if (arg.type !== '=') { + copyArgs.shift(); + copyEvalArgs.shift(); + } else { + throw new pytorch.Error('Expected named argument.'); + } + } + if (next) { + break; + } + } + if (next) { + continue; + } + for (let i = 0; i < schema.outputs.length; i++) { + const parameter = schema.outputs[i]; + switch (parameter.type) { + case 'Scalar': + case 'Tensor': + case 'Tensor[]': + break; + case '__torch__.torch.classes.xnnpack.Conv2dOpContext': + case '__torch__.torch.classes.xnnpack.LinearOpContext': + break; + default: { + if 
(!outputTypes || schema.outputs.length !== 1 || schema.outputs[0].type !== outputTypes[0]) { + next = true; + } + break; + } + } + } + if (next) { + continue; + } + return [ schema, args, evalArgs ]; + } + } + } + return null; + } + + block(statements, context) { + statements = Array.prototype.slice.call(statements); + while (statements.length > 0) { + if (statements.length > 1) { + const [assign, condition] = statements; + // _x = torch.ne(torch.len(torch.size(input)), 5) + // if _x: + // ops.prim.RaiseException(...) + if (assign.type === '=' && + condition.type === 'if' && + pytorch.Utility.isEqual(assign.target, condition.condition) && + pytorch.Utility.isCall(assign.expression, 'torch.ne', 2) && + pytorch.Utility.isCall(assign.expression.args[0], 'torch.len', 1) && + pytorch.Utility.isCall(assign.expression.args[0].args[0], 'torch.size', 1) && + condition.then.statements.length == 1 && + pytorch.Utility.isCall(condition.then.statements[0], 'ops.prim.RaiseException', 1)) { + const tensor = this.expression(assign.expression.args[0].args[0].args[0], context); + if (pytorch.Utility.isTensor(tensor) && tensor.size) { + const number = this.expression(assign.expression.args[1], context); + const size = tensor.size(); + if (number >= 3 && number <= 5) { + if (!Array.isArray(size) || size.length !== number) { + tensor.resize_(Array(number).fill(NaN)); + } + } + } + } + // _x = torch.ne(torch.dim(input), 5) + // if _x: + // ops.prim.RaiseException(...) + if (assign.type === '=' && + condition.type === 'if' && + pytorch.Utility.isEqual(assign.target, condition.condition) && + pytorch.Utility.isCall(assign.expression, 'torch.ne', 2) && + pytorch.Utility.isCall(assign.expression.args[0], 'torch.dim', 1) && + condition.then.statements.length > 0 && + pytorch.Utility.isCall(condition.then.statements[condition.then.statements.length - 1], 'ops.prim.RaiseException', 1)) { + const tensor = this.expression(assign.expression.args[0].args[0], context); + if (pytorch.Utility.isTensor(tensor)) { + const size = this.expression(assign.expression.args[1], context); + tensor.resize_(Array(size).fill(NaN)); + } + } + // _0 = torch.eq(torch.len(torch.size(x)), 2) + // if _0: + // pass + // else: + // ops.prim.RaiseException("AssertionError: ") + if (assign.type === '=' && + condition.type === 'if' && + pytorch.Utility.isEqual(assign.target, condition.condition) && + pytorch.Utility.isCall(assign.expression, 'torch.eq', 2) && + pytorch.Utility.isCall(assign.expression.args[0], 'torch.len', 1) && + pytorch.Utility.isCall(assign.expression.args[0].args[0], 'torch.size', 1) && + condition.else.statements.length == 1 && + pytorch.Utility.isCall(condition.else.statements[0], 'ops.prim.RaiseException', 1)) { + const tensor = this.expression(assign.expression.args[0].args[0].args[0], context); + if (pytorch.Utility.isTensor(tensor) && tensor.shape === undefined) { + const number = this.expression(assign.expression.args[1], context); + tensor.resize_(Array(number).fill(NaN)); + } + } + // val = torch.slice(torch.size(img), -2) + // if torch.eq(torch.len(val), 2): + // pass + // else: + // ops.prim.RaiseException("AssertionError: ") + if (assign.type === '=' && + condition.type === 'if' && + pytorch.Utility.isCall(assign.expression, 'torch.slice', 2) && + pytorch.Utility.isCall(assign.expression.args[0], 'torch.size', 1) && + pytorch.Utility.isCall(condition.condition, 'torch.eq', 2) && + pytorch.Utility.isCall(condition.condition.args[0], 'torch.len', 1) && + pytorch.Utility.isEqual(condition.condition.args[0].args[0], 
assign.target) &&
+                    condition.else.statements.length == 1 &&
+                    pytorch.Utility.isCall(condition.else.statements[0], 'ops.prim.RaiseException', 1)) {
+                    const tensor = this.expression(assign.expression.args[0].args[0], context);
+                    if (pytorch.Utility.isTensor(tensor) && tensor.shape === undefined) {
+                        const start = this.expression(assign.expression.args[1], context);
+                        const value = this.expression(condition.condition.args[1], context);
+                        if (Number.isInteger(start) && start < 0 && Number.isInteger(value) && value > 0) {
+                            tensor.resize_(Array(value - start).fill(NaN));
+                        }
+                    }
+                }
+            }
+            if (statements.length > 1) {
+                // getattr_1 = torch.size(x)
+                // getitem = torch.slice(getattr_1, -2, 9223372036854775807, 1)
+                const [size, statement] = statements;
+                if (size.type === '=' && statement.type === '=' &&
+                    size.target.type === 'id' &&
+                    pytorch.Utility.isCall(size.expression, 'torch.size', 1) &&
+                    pytorch.Utility.isCall(statement.expression, 'torch.slice', 4) &&
+                    statement.expression.args[0].type === 'id' && size.target.value === statement.expression.args[0].value) {
+                    const tensor = this.expression(size.expression.args[0], context);
+                    if (pytorch.Utility.isTensor(tensor) && tensor.__origin__ === 'graph-input' && tensor.shape === undefined) {
+                        tensor.resize_([ 1, 3, 299, 299 ]);
+                    }
+                }
+            }
+            if (statements.length > 1) {
+                // _0 = torch.split_with_sizes(...)
+                // a, a_1, a_2, = _0
+                const [statement, tuple] = statements;
+                if (statement.type === '=' && statement.target.type === 'id' && statement.expression.type == 'call' &&
+                    tuple.type === '=' && tuple.target.type === 'tuple' &&
+                    tuple.target.value.every((item) => item.type === 'id') &&
+                    tuple.expression.value === statement.target.value) {
+                    const containsVariableReference = (queue, value) => {
+                        while (queue.length > 0) {
+                            const obj = queue.shift();
+                            if (obj && obj.type === 'id' && obj.value === value) {
+                                return true;
+                            } else if (Array.isArray(obj)) {
+                                for (const item of obj) {
+                                    if (Array.isArray(item) || (Object(item) === item && item.type)) {
+                                        queue.push(item);
+                                    }
+                                }
+                            } else if (Object(obj) === obj) {
+                                for (const [key, value] of Object.entries(obj)) {
+                                    if (key === 'location') {
+                                        continue;
+                                    }
+                                    if (Array.isArray(value)) {
+                                        for (const item of value) {
+                                            if (Array.isArray(item) || (Object(item) === item && item.type)) {
+                                                queue.push(item);
+                                            }
+                                        }
+                                    } else if (Object(value) === value && value.type) {
+                                        queue.push(value);
+                                    }
+                                }
+                            }
+                        }
+                        return false;
+                    };
+                    if (!containsVariableReference(statements.slice(2, statements.length - 1), statement.target.value)) {
+                        statements[0] = Object.assign({}, statement);
+                        statements[0].target = tuple.target;
+                        statements.splice(1, 1);
+                    }
+                }
+            }
+            const statement = statements.shift();
+            // input_shape = torch.slice(torch.size(x), -2, 9223372036854775807, 1)
+            if (statement.type === '=' &&
+                pytorch.Utility.isCall(statement.expression, 'torch.slice', 4) &&
+                pytorch.Utility.isCall(statement.expression.args[0], 'torch.size', 1)) {
+                const tensor = this.expression(statement.expression.args[0].args[0], context);
+                if (pytorch.Utility.isTensor(tensor) && tensor.shape === undefined) {
+                    tensor.resize_([ 1, 3, 299, 299 ]);
+                }
+            }
+            // torch.slice(ops.prim.shape(input), 0, 2, 1)
+            if (statement.type === '=' &&
+                pytorch.Utility.isCall(statement.expression, 'torch.slice', 4) &&
+                pytorch.Utility.isCall(statement.expression.args[0], 'ops.prim.shape', 1)) {
+                const tensor = this.expression(statement.expression.args[0].args[0], context);
+                if (pytorch.Utility.isTensor(tensor) && 
tensor.__origin__ === 'graph-input' && tensor.shape === undefined) { + tensor.resize_([ NaN, NaN, NaN, NaN ]); + } + } + // _3 = torch.le(xxxx, torch.dim(f0)) + if (statement.type === '=' && + pytorch.Utility.isCall(statement.expression, 'torch.le', 2) && + pytorch.Utility.isCall(statement.expression.args[1], 'torch.dim', 1)) { + const tensor = this.expression(statement.expression.args[1].args[0], context); + if (pytorch.Utility.isTensor(tensor) && tensor.__origin__ === 'graph-input' && tensor.shape === undefined) { + tensor.resize_([ NaN, NaN, NaN, NaN ]); + } + } + // if torch.ne(torch.dim(image), 3): + // xxxx + // ops.prim.RaiseException(_7) + if (statement.type === 'if' && + pytorch.Utility.isCall(statement.condition, 'torch.ne', 2) && + pytorch.Utility.isCall(statement.condition.args[0], 'torch.dim', 1) && + statement.then.statements.length > 0 && + pytorch.Utility.isCall(statement.then.statements.slice(-1).pop(), 'ops.prim.RaiseException', 1)) { + const tensor = this.expression(statement.condition.args[0].args[0], context); + const size = this.expression(statement.condition.args[1], context); + if (pytorch.Utility.isTensor(tensor) && Number.isInteger(size) && size < 10) { + tensor.resize_(Array.isArray(tensor.shape) && tensor.shape.length > size ? tensor.shape.slice(-size) : Array(size).fill(NaN)); + } + } + // if torch.gt(torch.dim(x), 1): + // xxxx + // ops.prim.RaiseException(...) + if (statement.type === 'if' && + pytorch.Utility.isCall(statement.condition, 'torch.gt', 2) && + pytorch.Utility.isCall(statement.condition.args[0], 'torch.dim', 1) && + statement.then.statements.length > 0 && + pytorch.Utility.isCall(statement.then.statements.slice(-1).pop(), 'ops.prim.RaiseException')) { + const tensor = this.expression(statement.condition.args[0].args[0], context); + const size = this.expression(statement.condition.args[1], context); + if (pytorch.Utility.isTensor(tensor) && Number.isInteger(size) && size < 10) { + tensor.resize_(Array.isArray(tensor.shape) && tensor.shape.length > size ? 
tensor.shape.slice(-size) : Array(size).fill(NaN)); + } + } + // if bool(...): + // ops.prim.RaiseException(torch.format(_1, dtype)) + // else: + // pass + if (statement.type === 'if' && + pytorch.Utility.isCall(statement.condition, 'bool', 1) && + statement.then.statements.length > 0 && + pytorch.Utility.isCall(statement.then.statements.slice(-1).pop(), 'ops.prim.RaiseException', 1)) { + statement.condition = { type: 'id', value: 'False' }; + } + // dim = torch.sub(torch.dim(input), 2) + if (statement.type === '=' && + statement.target.type === 'id' && statement.target.value === 'dim' && + pytorch.Utility.isCall(statement.expression, 'torch.sub', 2) && + pytorch.Utility.isCall(statement.expression.args[0], 'torch.dim', 1)) { + const tensor = this.expression(statement.expression.args[0].args[0], context); + if (pytorch.Utility.isTensor(tensor) && tensor.__origin__ === 'graph-input' && tensor.shape === undefined) { + tensor.resize_([ NaN, NaN, NaN, NaN ]); + } + } + // a, b = torch.unbind(size, 0) + if (statement.type === '=' && + statement.target.type === 'tuple' && + (pytorch.Utility.isCall(statement.expression, 'torch.unbind', 1) || + pytorch.Utility.isCall(statement.expression, 'torch.unbind', 2))) { + statement.expression.args[0].__tuple__ = statement.target.value.length; + } + // a, b, c = torch.size(input) + if (statement.type === '=' && + statement.target.type === 'tuple' && + pytorch.Utility.isCall(statement.expression, 'torch.size', 1)) { + const tensor = this.expression(statement.expression.args[0], context); + if (pytorch.Utility.isTensor(tensor) && tensor.__origin__ === 'graph-input' && tensor.shape === undefined) { + const dim = statement.target.value.length; + tensor.resize_(Array(dim).fill(NaN)); + } + } + // x = torch.len(input) + if (statement.type === '=' && + statement.target.type === 'id' && + pytorch.Utility.isCall(statement.expression, 'torch.len', 1)) { + const tensor = this.expression(statement.expression.args[0], context); + if (pytorch.Utility.isTensor(tensor) && tensor.__origin__ === 'graph-input' && tensor.shape === undefined) { + tensor.resize_([ NaN, NaN, NaN, NaN ]); + } + } + // x = _(torch.size(foo ,2)) + if (statement.type === '=' && + statement.expression.type === 'call' && statement.expression.args.length > 0 && + pytorch.Utility.isCall(statement.expression.args[0], 'torch.size', 2)) { + const tensor = this.expression(statement.expression.args[0].args[0], context); + const dim = this.expression(statement.expression.args[0].args[1], context); + if (pytorch.Utility.isTensor(tensor) && Number.isInteger(dim) && dim >= 0) { + if (tensor.shape === undefined) { + tensor.resize_(Array(dim + 1).fill(NaN)); + } else if (Array.isArray(tensor.shape) && tensor.shape.length <= dim) { + tensor.resize_(tensor.shape.concat(Array(dim + 1 - tensor.shape.length).fill(NaN))); + } + } + } + if (statement.type === '=' && statement.target.type === 'tuple' && + statement.expression.type === 'call' && statement.expression.args.length > 0 && + pytorch.Utility.isCall(statement.expression, 'torch.size', 1)) { + const tensor = this.expression(statement.expression.args[0], context); + if (pytorch.Utility.isTensor(tensor) && tensor.__origin__ === 'graph-input') { + if (tensor.shape === undefined) { + tensor.resize_(Array(statement.target.value.length).fill(NaN)); + } + } + } + const value = this.statement(statement, context); + if (value !== undefined) { + return value; + } + } + return undefined; + } +}; + +pytorch.jit.Source = class { + + constructor(text) { + this._text = text; + } 
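+    // Minimal holder for the text of one embedded TorchScript source record;
+    // instances are produced by SourceLoader below from the `.py` entries under
+    // the archive's code prefix and consumed by SourceImporter to resolve types.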
+}; + +pytorch.jit.SourceLoader = class { + + constructor(reader, code_prefix) { + this._reader = reader; + this._code_prefix = code_prefix; + } + + loadSource(qualifier) { + const path = `${this._code_prefix}/${qualifier}.py`; + if (this._reader.has_record(path)) { + const data = this._reader.get_record(path); + return new pytorch.jit.Source(data); + } + return null; + } +}; + +pytorch.jit.SourceImporter = class { + + constructor(cu, constant_table, source_loader, version) { + this._cu = cu; + this._constant_table = constant_table; + this._source_loader = source_loader; + this._version = version; + } + + loadType(/* name */) { + // TODO; + } + + resolveType(name) { + return this.findNamedType(new pytorch.jit.QualifiedName(name)); + } + + findNamedType(name) { + // TODO + this.parseSourceIfNeeded(name.prefix()); + } + + parseSourceIfNeeded(/* qualifier */) { + // TODO + } +}; + +pytorch.jit.ScriptModuleDeserializer = class { + + constructor(cu, reader, pickle_dir_prefix, tensor_dir_prefix, storage_context) { + this._compilation_unit = cu; + this._reader = reader; + this._storage_context = storage_context; + this._code_prefix = !pickle_dir_prefix && !tensor_dir_prefix ? 'code/' : '.data/ts_code/code/'; + this._pickle_dir_prefix = pickle_dir_prefix || ''; + this._tensor_dir_prefix = tensor_dir_prefix || ''; + this._source_importer = new pytorch.jit.SourceImporter( + this._compilation_unit, this._constants_table, + new pytorch.jit.SourceLoader(this._reader, this._code_prefix), reader.version()); + } + + deserialize() { + const execution = this._compilation_unit.execution; + const code_prefix = this._code_prefix; + for (const name of this._reader.get_all_records()) { + if (name.startsWith(code_prefix) && name.endsWith('.py')) { + const file = name.substring(code_prefix.length); + const stream = this._reader.get_record(name); + const buffer = stream.peek(); + execution.add(file, buffer); + } + } + const torch = execution.import('torch'); + execution.builtins.torch = torch; + execution.builtins.Tensor = torch.Tensor; + execution.builtins.ops = torch.ops; + execution.builtins.inf = torch.inf; + execution.builtins.CONSTANTS = {}; + if (this._reader.has_record('model.json')) { + return this.LEGACY_deserialize(); + } + const constants = this.readArchive('constants'); + for (let i = 0; i < constants.length; i++) { + execution.builtins.CONSTANTS[`c${i}`] = constants[i]; + } + const module = this.readArchive('data'); + const result = new torch.ScriptModule(); + result.data = module; + return result; + } + + LEGACY_deserialize() { + const execution = this._compilation_unit.execution; + const torch = execution.import('torch'); + const stream = this._reader.get_record('model.json'); + const buffer = stream.peek(); + const decoder = new TextDecoder('utf-8'); + const content = decoder.decode(buffer); + const model = JSON.parse(content); + const data = model.mainModule || {}; + const queue = [ data ]; + const tensorTypeMap = new Map([ + [ 'FLOAT', 'Float' ], + [ 'FLOAT16', 'Half' ], + [ 'DOUBLE', 'Double' ], + [ 'INT8', 'Char' ], + [ 'INT32', 'Int' ], + [ 'INT64', 'Long' ] + ]); + const constants = (model.tensors || []).map((constant) => { + const key = constant.data.key; + if (!tensorTypeMap.has(constant.dataType)) { + throw new pytorch.Error(`Unsupported tensor data type '${constant.dataType}'.`); + } + const type = tensorTypeMap.get(constant.dataType); + const shape = constant.dims ? constant.dims.map((dim) => parseInt(dim, 10)) : null; + const strides = constant.strides ? 
constant.strides.map((dim) => parseInt(dim, 10)) : null; + const storage_type = execution.resolve(`torch.${type}Storage`); + const size = (shape || []).reduce((a, b) => a * b, 1); + const offset = parseInt(constant.offset, 10) || 0; + const storage = new storage_type(size); + const itemsize = storage.dtype.itemsize(); + const stream = this._reader.get_record(key); + const buffer = stream.peek(); + const length = size * itemsize; + const data = buffer.slice(offset, offset + length); + storage._set_cdata(data); + const tensor = execution.invoke('torch._utils._rebuild_tensor', [ storage, 0, shape, strides ]); + tensor.name = constant.data.key; + return tensor; + }); + execution.builtins.CONSTANTS = {}; + for (let i = 0; i < constants.length; i++) { + execution.builtins.CONSTANTS[`c${i}`] = constants[i]; + } + const attributes = []; + if (this._reader.has_record('attributes.pkl')) { + const stream = this._reader.get_record('attributes.pkl'); + const buffer = stream.peek(); + const unpickler = execution.invoke('pickle.Unpickler', [ buffer ]); + const obj = unpickler.load(); + attributes.push(...obj); + } + while (queue.length > 0) { + const module = queue.shift(); + module.__class__ = module.__class__ || { __module__: 'torch.nn.modules.module', __name__: 'Module' }; + if (module.name) { + module.__name__ = module.name; + } + if (module.submodules) { + for (const submodule of module.submodules) { + module[submodule.name] = submodule; + submodule.__parent__ = module; + queue.push(submodule); + } + delete module.submodules; + } + const parameters = []; + if (module.parameters) { + parameters.push(...module.parameters); + delete module.parameters; + } + if (module.arguments) { + parameters.push(...module.arguments); + delete module.arguments; + } + for (const parameter of parameters) { + const tensor = constants[parameter.tensorId]; + module[parameter.name] = tensor; + parameter.__class__ = parameter.__class__ || { __module__: 'torch', __name__: 'Tensor' }; + } + for (const attribute of module.attributes || []) { + module[attribute.name] = attributes[attribute.id]; + } + delete module.attributes; + } + const arena = data.torchscriptArena; + if (arena && arena.key && arena.key.startsWith('code/')) { + const file = arena.key.substring('code/'.length); + const name = file.replace(/\.py$/, '').split('/').join('.'); + const module = execution.import(name); + if (module.forward.__class__ === execution.builtins.function) { + data.forward = module.forward; + } + } + const result = new torch.ScriptModule(); + result.data = data; + return result; + } + + readArchive(archive_name) { + const type_resolver = null; + const obj_loader = null; + return this.readArchiveAndTensors(archive_name, this._pickle_dir_prefix, this._tensor_dir_prefix, type_resolver, obj_loader, this._device, this._reader, null, this._storage_context); + } + + readArchiveAndTensors(archive_name, pickle_prefix, tensor_prefix, type_resolver, obj_loader, device, stream_reader, type_parser, storage_context) { + const picklename = `${pickle_prefix + archive_name}.pkl`; + const stream = stream_reader.get_record(picklename); + const buffer = stream.peek(); + const tensor_dir_path = tensor_prefix ? tensor_prefix : `${archive_name}/`; + const read_record = (name) => { + const stream = stream_reader.get_record(tensor_dir_path + name); + return stream.length <= 0x40000 ? 
stream.peek() : stream; + }; + const execution = this._compilation_unit.execution; + const pickle = execution.__import__('pickle'); + const Unpickler = class extends pickle.Unpickler { + find_class(module, name) { + return super.find_class(module, name); + } + }; + const unpickler = new Unpickler(buffer); + unpickler.persistent_load = (saved_id) => { + if (saved_id[0] !== 'storage') { + throw new pytorch.Error(`Unsupported persistent load type '${saved_id[0]}'.`); + } + const [, storage_type, key, , size] = saved_id; + if (storage_context && storage_context.has_storage(key)) { + return storage_context.get_storage(key); + } + const storage = new storage_type(size); + const storage_ptr = read_record(key); + storage._set_cdata(storage_ptr); + if (storage_context) { + storage_context.add_storage(key); + } + return storage; + }; + return unpickler.load(); + } +}; + +pytorch.jit.FlatBuffersLoader = class { + + constructor(cu) { + this._cu = cu; + const torch = cu.execution.__import__('torch'); + this._torch = torch; + const dtypes = Array.from(new Set(Object.values(torch).filter((obj) => obj instanceof torch.dtype))); + this._dtypes = new Map(dtypes.map((dtype) => [ dtype.scalar_type(), dtype ])); + this._ivalue_parsers = new Map(); + this._ivalue_parsers.set(pytorch.mobile.serialization.Int, (ivalue) => ivalue.val.int_val); + this._ivalue_parsers.set(pytorch.mobile.serialization.Bool, (ivalue) => ivalue.val.bool_val); + this._ivalue_parsers.set(pytorch.mobile.serialization.Double, (ivalue) => ivalue.val.double_val); + this._ivalue_parsers.set(pytorch.mobile.serialization.TensorMetadata, (ivalue) => this.parseTensor(ivalue)); + this._ivalue_parsers.set(pytorch.mobile.serialization.Object, (ivalue) => this.parseObject(ivalue)); + } + + parseModule(module) { + this._module = module; + this._all_functions = new Map(); + this._all_ivalues = new Array(module.ivalues.length); + this._all_types = new Array(module.object_types.length); + const mobile_ivalue_size = module.mobile_ivalue_size ? 
module.mobile_ivalue_size : module.ivalues.length; + for (let i = 0; i < mobile_ivalue_size; i++) { + this.parseAndPopulate(i, module.ivalues[i]); + } + const m = this._all_ivalues[module.state_obj]; + for (const [name, value] of this._all_functions) { + const class_index = module.ivalues[name].val.class_type; + const class_type = this._all_types[class_index]; + class_type.addMethod(value); + } + m._min_operator_version = module.operator_version; + m._bytecode_version = module.bytecode_version; + return m; + } + + parseAndPopulate(i, ivalue) { + if (ivalue.val instanceof pytorch.mobile.serialization.Function) { + this._all_functions.set(i, this.parseFunction(ivalue.val)); + } else { + this._all_ivalues[i] = this.parseIValue(ivalue); + } + } + + parseFunction(/* val */) { + return null; + } + + parseIValue(ivalue) { + if (ivalue.val) { + const callback = this._ivalue_parsers.get(ivalue.val.constructor); + return callback(ivalue); + } + return null; + } + + parseTensor(ivalue) { + return this.parseTensorFromMetadata(ivalue.val); + } + + parseTensorFromMetadata(metadata) { + if (metadata.quantized_schema) { + throw new pytorch.Error('Quantized schema not implemented.'); + } + const index = metadata.storage_location_index; + const data = this._module.storage_data[index].data; + const dtype = this._dtypes.get(metadata.scalar_type); + const size = data.length / dtype.itemsize(); + const storage = this._cu.execution.invoke('torch.storage._TypedStorage', [ size, dtype ]); + storage._set_cdata(data); + const tensor = this._cu.execution.invoke('torch.Tensor', []); + const shape = Array.from(metadata.sizes); + const stride = Array.from(metadata.strides); + tensor.__setstate__([ storage, metadata.storage_offset, shape, stride ]); + return tensor; + } + + parseObject(ivalue) { + const object = ivalue.val; + const obj_type = this._module.object_types[object.type_index]; + const cls = this.getOrCreateClassTypeForObject(object); + switch (obj_type.type) { + case pytorch.mobile.serialization.TypeType.CLASS_WITH_FIELD: { + const torch = this._torch; + const obj = torch.ScriptObject.create(cls); + for (let i = 0; i < object.attrs.length; i++) { + const attr_name = obj_type.attr_names[i]; + const val = this._all_ivalues[object.attrs[i]]; + obj.__setattr__(attr_name, val); + } + return obj; + } + case pytorch.mobile.serialization.TypeType.CUSTOM_CLASS: + case pytorch.mobile.serialization.TypeType.CLASS_WITH_SETSTATE: + default: { + throw new pytorch.Error(`Unknown object type type '${obj_type.type}'.`); + } + } + } + + getOrCreateClassTypeForObject(object) { + let cls = this._all_types[object.type_index]; + const obj_type = this._module.object_types[object.type_index]; + if (!cls) { + const name = obj_type.type_name; + if (name.startsWith('__torch__') || name.startsWith('torch.jit')) { + cls = this._cu.get_class(name); + if (!cls) { + const torch = this._torch; + cls = new torch.ClassType(name, this._cu, true); + this._cu.register_type(cls); + } + } else { + // TODO cls = c10::parseType(qn_str)->cast(); + } + this._all_types[object.type_index] = cls; + if (obj_type.type === pytorch.mobile.serialization.TypeType.CLASS_WITH_FIELD) { + for (let i = 0; i < object.attrs.length; i++) { + // const val = this._all_ivalues[object.attrs[i]]; + cls.addAttribute(obj_type.attr_names[i] /*, null val.type(c10::DynamicType) */); + } + } + } + return cls; + } +}; + +pytorch.Container.Package = class extends pytorch.Container { + + constructor(entries) { + super(); + this._entries = entries; + } + + async read() { + const 
execution = new pytorch.Execution(); + for (const event of this._events) { + execution.on(event[0], event[1]); + } + const torch = execution.__import__('torch'); + const reader = new torch.PyTorchFileReader(this._entries); + const version = reader.version(); + this._format = pytorch.Utility.format('PyTorch Package', version); + this._modules = new Map(); + const records = reader.get_all_records().filter((name) => { + if (!name.startsWith('.data/') && !name.endsWith('.py')) { + const stream = reader.get_record(name); + if (stream && stream.length > 2) { + const signature = stream.peek(2); + if (signature[0] === 0x80 && signature[1] < 7) { + return true; + } + } + } + return false; + }); + const entries = records.map((name) => { + const parts = name.split('/'); + const resource = parts.pop(); + const module = parts.join('.'); + return [ module, resource ]; + }); + if (entries.length > 0) { + for (const name of reader.get_all_records()) { + if (!name.startsWith('.data/') && name.endsWith('.py')) { + const stream = reader.get_record(name); + const buffer = stream.peek(); + execution.add(name, buffer); + } + } + const importer = new torch.package.PackageImporter(reader); + for (const entry of entries) { + const module = importer.load_pickle(entry[0], entry[1]); + const key = `${entry[0].replace(/\./g, '/')}/${entry[1]}`; + this._modules.set(key, module); + } + } + } + + get format() { + return this._format; + } + + get modules() { + return this._modules; + } +}; + +pytorch.MemoryFormat = { + Contiguous: 0, + Preserve: 1, + ChannelsLast: 2, + ChannelsLast3d: 3 +}; + +pytorch.Layout = { + Strided: 0, + Sparse: 1, + Mkldnn: 2 +}; + +pytorch.Utility = class { + + static target(expression) { + if (expression.type === 'id') { + return expression.value; + } + if (expression.type === '.') { + return `${pytorch.Utility.target(expression.target)}.${pytorch.Utility.target(expression.member)}`; + } + return null; + } + + static isTensor(obj) { + const name = obj && obj.__class__ ? obj.__class__.__module__ : null; + switch (name) { + case 'torch': + case 'torch.cuda': + return obj.__class__.__name__.endsWith('Tensor'); + case 'torch.nn.parameter': + return obj.__class__.__name__ === 'Parameter'; + default: + return false; + } + } + + static toTensor(obj) { + const name = obj && obj.__class__ ? obj.__class__.__module__ : null; + switch (name) { + case 'torch': + case 'torch.cuda': + return obj.__class__.__name__.endsWith('Tensor') ? obj : null; + case 'torch.nn.parameter': + return obj.__class__.__name__ === 'Parameter' ?
obj.data : null; + default: + return null; + } + } + + static getType(value) { + if (value === null || value === undefined) { + return undefined; + } else if (value === true || value === false) { + return 'boolean'; + } else if (pytorch.Utility.isTensor(value)) { + return 'Tensor'; + } else if (typeof value === 'string') { + return 'string'; + } else if (Number(value) === value && value % 1 === 0) { + return 'int64'; + } else if (Number(value) === value) { + return 'float32'; + } else if (Array.isArray(value) && value.every((item) => Number(item) === item && item % 1 === 0)) { + return 'int64[]'; + } else if (Array.isArray(value) && value.every((item) => Number(item) === item)) { + return 'float32[]'; + } + const text = (JSON.stringify(value) || '(undefined)').substring(0, 10); + throw new pytorch.Error(`Unsupported ops argument type '${text}'.`); + } + + static isType(obj, type) { + switch (type) { + case 'Tensor': + return !Array.isArray(obj) && (pytorch.Utility.isTensor(obj) || obj === null); + case 'Tensor[]': + return Array.isArray(obj) && obj.length > 0 && obj.every((tensor) => pytorch.Utility.isTensor(tensor) || tensor === null); + case 'Scalar': + return (obj !== null && obj !== Object(obj)) || (pytorch.Utility.isTensor(obj) && Array.isArray(obj.size()) && obj.size().length === 0); + case 'boolean': + return obj === true || obj === false; + case 'string': + return obj === null || typeof obj === 'string'; + case 'SymInt': + case 'int64': + return Number.isInteger(obj) || obj instanceof base.Int64 || (typeof obj === 'number' && isNaN(obj)); + case 'SymInt[]': + case 'SymInt[2]': + case 'SymInt[3]': + case 'SymInt[4]': + case 'SymInt[5]': + case 'SymInt[6]': + case 'int64[]': + case 'int64[2]': + case 'int64[3]': + return Array.isArray(obj) && obj.every((item) => Number.isInteger(item) || (typeof item === 'number' && isNaN(item)) || item === undefined); + case 'int64[1]': + case 'SymInt[1]': + return pytorch.Utility.isType(obj, 'int64') || pytorch.Utility.isType(obj, 'int64[]'); + case 'float32': + case 'float64': + return obj !== null && obj !== Object(obj); + case 'float32[]': + return Array.isArray(obj) && obj.every((item) => typeof item === 'number' && !isNaN(item)); + case 'string[][]': + return Array.isArray(obj) && obj.every((item) => Array.isArray(item) && item.every((item) => typeof item === 'string')); + case 'Layout': + case 'ScalarType': + case 'MemoryFormat': + return Number.isInteger(obj) || obj === null; + case 'Dimname': + return obj === null || typeof obj === 'string'; + case 'Dimname[]': + return Array.isArray(obj) && obj.every((item) => item === null || typeof item === 'string'); + case 'Device': + return obj === null || obj === Object(obj); + default: + if (type && type.startsWith('__torch__.') && + obj && obj.__class__ && obj.__class__.__module__ && obj.__class__.__name__) { + return type === `${obj.__class__.__module__}.${obj.__class__.__name__}`; + } + return true; + } + } + + static isSubclass(value, name) { + if (value.__module__ && value.__name__) { + if (name === `${value.__module__}.${value.__name__}`) { + return true; + } + } + if (value.__bases__) { + for (const base of value.__bases__) { + if (pytorch.Utility.isSubclass(base, name)) { + return true; + } + } + } + return false; + } + + static isInstance(value, name) { + return value.__class__ ? 
pytorch.Utility.isSubclass(value.__class__, name) : false; + } + + static isCall(expression, name, size) { + if (expression.type === 'call' && + (size === undefined || size === expression.args.length) && + pytorch.Utility.target(expression.target) === name) { + return true; + } + return false; + } + + static isEqual(a, b) { + return (a.type === 'id' && b.type === 'id' && a.value === b.value); + } + + static format(name, value) { + // https://github.com/pytorch/pytorch/blob/master/caffe2/serialize/inline_container.h + // kProducedFileFormatVersion + const versions = new Map([ + [ '1', 'v1.3' ], + [ '2', 'v1.5' ], // 7a2889b014ce36fcc333b2c6de6f29f976652f84 (#28122) + [ '3', 'v1.6' ], // 2ec6a30722b0ef85632a2f3e7ce6f80da403008a (#36085) + [ '4', 'v1.6' ], // 95489b590f00801bdee7f41783f30874883cf6bb (#38620) + [ '5', 'v1.7' ], // cb26661fe4faf26386703180a9045e6ac6d157df (#40364) + [ '6', 'v1.9' ], // 3ee7637ffa50df0d9b231c7b40778ac1c390bf4a (#59714) + [ '7', 'v1.10' ], // 880098a7e34a20628f960daa8eab0eb1ad566c39 (#63651) + [ '8', 'v1.11' ], // b28e696516a7f0c7a6ead6da967590ce6c1d6698 (#71486) + [ '9', 'v1.11' ], // 8757e21c6a4fc00e83539aa7f9c28eb11eff53c1 (#72051) + [ '10', 'v1.12' ] // 4f8b986e28736b59bc46cd0873a0f36fdaa6f5b8 (#61439) + ]); + if (!versions.has(value)) { + throw new pytorch.Error(`Unsupported '${name}' version '${value}'.`); + } + return `${name} ${versions.get(value)}`; + } + + static find(data) { + const root = pytorch.Utility.findModule(data); + if (root) { + return root; + } + const weights = pytorch.Utility.findWeights(data); + if (weights) { + return weights; + } + if (data && Array.isArray(data) && data === Object(data) && Object.entries(data).length === 0) { + return []; + } + throw new pytorch.Error('File does not contain root module or state dictionary.'); + } + + static findModule(root) { + if (root) { + const keys = [ '', 'model', 'net' ]; + for (const key of keys) { + const obj = key === '' ? root : root[key]; + if (obj) { + if (obj instanceof Map && obj.has('engine')) { + // https://github.com/NVIDIA-AI-IOT/torch2trt/blob/master/torch2trt/torch2trt.py + const data = obj.get('engine'); + const signatures = [ + [ 0x70, 0x74, 0x72, 0x74 ], // ptrt + [ 0x66, 0x74, 0x72, 0x74 ] // ftrt + ]; + for (const signature of signatures) { + if (data instanceof Uint8Array && data.length > signature.length && signature.every((value, index) => value === data[index])) { + // const buffer = data.slice(0, 24); + // const content = Array.from(buffer).map((c) => (c < 16 ? '0' : '') + c.toString(16)).join(''); + throw new pytorch.Error('Invalid file content. File contains undocumented PyTorch TensorRT engine data.'); + } + } + } + if (obj._modules) { + return new Map([ ['', obj] ]); + } + const entries = Object.entries(obj).filter(([name, obj]) => name && obj && obj._modules); + if (entries.length > 1) { + return new Map(entries); + } + } + } + } + return null; + } + + static findWeights(obj) { + if (obj) { + if (pytorch.Utility.isTensor(obj)) { + const module = {}; + module.__class__ = { + __module__: obj.__class__.__module__, + __name__: obj.__class__.__name__ + }; + module._parameters = new Map(); + module._parameters.set('value', obj); + return new Map([ [ '', { _modules: new Map([ [ '', module ] ]) } ] ]); + } + if (!Array.isArray(obj) && !(obj instanceof Map) && obj === Object(obj) && Object.keys(obj).length === 0) { + return new Map(); + } + const keys = !Array.isArray(obj) ? 
Object.keys(obj) : []; + if (keys.length > 1) { + keys.splice(0, keys.length); + } + keys.push(...[ + 'state_dict', 'state_dict_stylepredictor', 'state_dict_ghiasi', + 'state', 'model_state', 'model', 'model_state_dict', 'model_dict', 'net_dict', + 'generator', 'discriminator', 'g_state', 'module', 'params', + 'weights', 'network_weights', 'network', 'net', 'netG', 'net_states', + 'EMA_generator', 'runner', '' + ]); + for (const key of keys) { + const value = key === '' ? obj : obj[key]; + let graphs = null; + graphs = graphs || pytorch.Utility._convertObjectList(value); + graphs = graphs || pytorch.Utility._convertStateDict(value); + if (graphs) { + return graphs; + } + } + } + return null; + } + + static _convertObjectList(obj) { + if (obj && Array.isArray(obj)) { + if (obj.every((item) => typeof item === 'number' || typeof item === 'string')) { + return new Map([ ['', obj] ]); + } + if (obj.every((item) => item && Object.values(item).filter((value) => pytorch.Utility.isTensor(value)).length > 0)) { + return new Map([ ['', obj] ]); + } + } + return null; + } + + static _convertStateDict(obj) { + const clean = (obj) => { + if (obj && Array.isArray(obj)) { + return obj; + } + if (obj && obj instanceof Map) { + obj.delete('_ema'); + return obj; + } + if (obj && Object(obj) === obj) { + const target = {}; + const map_count = Object.entries(obj).filter(([, value]) => value instanceof Map).length; + for (const [key, value] of Object.entries(obj)) { + if (key.indexOf('optim') !== -1 || key.indexOf('opt') !== -1) { + if (value === null || (value.state && value.param_groups)) { + continue; + } + } + if (map_count > 2 && key.endsWith('_avg') && pytorch.Utility.isTensor(value)) { + continue; + } + if (typeof value === 'number' || typeof value === 'string' || typeof value === 'boolean') { + continue; + } + if (key === '__class__' && value.__module__ && value.__name__) { + continue; + } + if (Array.isArray(value) && (key.indexOf('loss') !== -1 || value.length === 0)) { + continue; + } + if (value && value.__class__ && value.__class__.__module__ === 'datetime' && value.__class__.__name__ === 'datetime') { + continue; + } + if (value && Number.isInteger(value.epoch) && value.state_dict) { + target[key] = value.state_dict; + continue; + } + if ((key.startsWith('dico_') && Object(value) === value) || + (key.startsWith('best_metrics') && Object(value) === value) || + (key === 'args' && Object(value) === value) || + (key.startsWith('params') && Object(value) === value && (value.id2lang || value.lang2id)) || + (key.startsWith('spk_dict_') && Object(value) === value && Object.keys(value).length === 0) || + (key === 'blk_det') || + (key === 'random_state') || + (key === 'train_cfg' || key === 'test_cfg' || key === '_is_full_backward_hook')) { + continue; + } + target[key] = value; + } + return target; + } + return obj; + }; + const validate = (map) => { + let tensor = false; + if (map && map instanceof Map) { + for (const [key, value] of map) { + const separator = key.indexOf('.') === -1 && key.indexOf('|') !== -1 ? 
'|' : '.'; + const keys = key.split(separator); + if (keys[keys.length - 1] === '_metadata') { + continue; + } else if (keys.length >= 2 && keys[keys.length - 2] === '_packed_params') { + continue; + } else if (pytorch.Utility.isTensor(value)) { + tensor = true; + continue; + } else if (value && Array.isArray(value) && value.every((item) => pytorch.Utility.isTensor(item))) { + tensor = true; + continue; + } else if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') { + continue; + } else if (value === null) { + continue; + } + return false; + } + } + return tensor; + }; + const flatten = (obj) => { + if (!obj || Array.isArray(obj) || ArrayBuffer.isView(obj)) { + return null; + } + if (obj instanceof Map) { + if (validate(obj)) { + return obj; + } + return null; + } + if (Object(obj) !== obj) { + return null; + } + const map = new Map(Object.entries(obj)); + if (validate(map)) { + return map; + } + const target = new Map(); + for (const [name, obj] of map) { + const value = flatten(obj); + if (value && value instanceof Map) { + for (const pair of value) { + target.set(`${name}.${pair[0]}`, pair[1]); + } + continue; + } + return null; + } + return target; + }; + if (!obj) { + return null; + } + obj = clean(obj); + const map = new Map(); + if (Array.isArray(obj) && obj.every((item) => validate(item))) { + for (let i = 0; i < obj.length; i++) { + map.set(i.toString(), flatten(obj[i])); + } + } else if (obj instanceof Map && validate(obj)) { + map.set('', flatten(obj)); + } else if (Object(obj) === obj && Object.entries(obj).every(([, value]) => validate(value))) { + for (const [name, value] of Object.entries(obj)) { + map.set(name, value); + } + } else if (Object(obj) === obj && Object.entries(obj).every(([, value]) => pytorch.Utility.isTensor(value))) { + map.set('', new Map(Object.entries(obj).map(([key, value]) => [ key, value ]))); + } else { + const value = flatten(obj); + if (value) { + map.set('', value); + } + } + if (map.size > 0) { + const modules = new Map(); + for (const [graph_name, layer_map] of map) { + const layers = new Map(); + for (const [key, value] of layer_map) { + let layer_name = ''; + let parameter = ''; + const separator = key.indexOf('.') === -1 && key.indexOf('|') !== -1 ? 
'|' : '.'; + const keys = key.split(separator); + if (keys[keys.length - 1] === '_metadata') { + continue; + } + if (keys.length >= 2 && keys[keys.length - 2] === '_packed_params') { + parameter = keys.slice(-2).join(separator); + keys.pop(); + keys.pop(); + } else { + parameter = keys.pop(); + if (keys.length === 0) { + keys.push(''); + } + } + layer_name = keys.join(separator); + if (!layers.has(layer_name)) { + layers.set(layer_name, {}); + } + const layer = layers.get(layer_name); + if (pytorch.Utility.isTensor(value)) { + layer._parameters = layer._parameters || new Map(); + value.name = key; + layer._parameters.set(parameter, value); + if (layer_name === '' && layer._parameters.size > 12) { + return null; + } + } else if (value && Array.isArray(value) && value.every((item) => pytorch.Utility.isTensor(item))) { + layer._parameters = layer._parameters || new Map(); + layer._parameters.set(parameter, value); + } else if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') { + layer[parameter] = value; + } + } + modules.set(graph_name, { _modules: layers }); + } + return modules; + } + return null; + } +}; + +pytorch.nnapi = {}; + +pytorch.nnapi.SerializedModel = class { + + constructor(serialized_model, buffers) { + const reader = new base.BinaryReader(serialized_model); + this.version = reader.int32(); + if (this.version !== 1) { + throw new pytorch.Error('Invalid NNAPI serialized model version.'); + } + const operands = new Array(reader.int32()); + const values = new Array(reader.int32()); + this.operations = new Array(reader.int32()); + this.inputs = new Array(reader.int32()); + this.outputs = new Array(reader.int32()); + const data_types = new Map([ + [ 0, 'float32' ], + [ 1, 'int32' ], + [ 2, 'uint32' ], + [ 3, 'float32[]' ], + [ 4, 'int32[]' ], + [ 5, 'quant8_asymm[]' ], + [ 6, 'boolean' ], + [ 7, 'quant16_symm[]' ], + [ 8, 'float16[]' ], + [ 9, 'boolean[]' ], + [ 10, 'float16' ], + [ 11, 'quant8_symm_per_channel[]' ], + [ 12, 'quant16_asymm[]' ], + [ 13, 'quant8_symm[]' ], + [ 14, 'quant8_asymm_signed[]' ], + [ 16, 'model' ] + ]); + for (let i = 0; i < operands.length; i++) { + const data_type = reader.int32(); + operands[i] = { + index: i, + data_type: data_types.has(data_type) ? data_types.get(data_type) : data_type, + dimensions: new Array(reader.uint32()), + scale: reader.float32(), + zero_point: reader.int32() + }; + } + for (let i = 0; i < values.length; i++) { + values[i] = { + index: reader.int32(), + source_type: reader.int32(), + source_length: reader.uint32() + }; + } + for (let i = 0; i < this.operations.length; i++) { + this.operations[i] = { + index: reader.int32(), + location: i, + inputs: new Array(reader.uint32()), + outputs: new Array(reader.uint32()) + }; + } + for (const operand of operands) { + for (let i = 0; i < operand.dimensions.length; i++) { + operand.dimensions[i] = reader.uint32(); + } + } + for (const value of values) { + const index = value.index; + const operand = operands[index]; + switch (value.source_type) { + case 0: { // immediate + switch (operand.data_type) { + case 'boolean': + operand.value = reader.byte() ?
true : false; + reader.skip(3); + break; + case 'int32': + operand.value = reader.int32(); + break; + case 'float32': + operand.value = reader.float32(); + break; + case 'int32[]': + operand.data = reader.read(value.source_length); + break; + case 'float32[]': + operand.data = reader.read(value.source_length); + break; + default: + throw new pytorch.Error(`Unsupported NNAPI operand type '${operand.data_type}'.`); + } + break; + } + case 2: { // numbered buffer + if (value.source_length !== 12) { + throw new pytorch.Error('Invalid NNAPI numbered buffer source length.'); + } + const number = reader.uint32(); + const offset = reader.uint32(); + const operand_length = reader.uint32(); + const storage = buffers[number]; + const data = storage.data && storage.data.peek ? storage.data.peek() : storage.data; + operand.data = data.slice(offset, offset + operand_length); + break; + } + case 3: { // numbered memory + throw new pytorch.Error('NNAPI numbered memory buffer not implemented.'); + } + default: { + throw new pytorch.Error('Unsupported NNAPI value source type.'); + } + } + } + for (const operation of this.operations) { + for (let i = 0; i < operation.inputs.length; i++) { + const index = reader.uint32(); + operation.inputs[i] = operands[index]; + } + for (let i = 0; i < operation.outputs.length; i++) { + const index = reader.uint32(); + operation.outputs[i] = operands[index]; + } + } + for (let i = 0; i < this.inputs.length; i++) { + const index = reader.uint32(); + this.inputs[i] = operands[index]; + } + for (let i = 0; i < this.outputs.length; i++) { + const index = reader.uint32(); + this.outputs[i] = operands[index]; + } + + if (reader.position !== reader.length) { + throw new pytorch.Error('Invalid NNAPI serialized model length.'); + } + } +}; + +pytorch.nnapi.Graph = class { + + constructor(model) { + this.name = 'torch.classes._nnapi.Compilation'; + this.nodes = []; + this.inputs = []; + this.outputs = []; + const values = new Map(); + values.map = (operand) => { + if (!values.has(operand.index)) { + const name = operand.index.toString(); + const dimensions = operand.dimensions; + const shape = new pytorch.TensorShape(dimensions); + let dataType = operand.data_type.replace('[]', ''); + let quantization = null; + switch (dataType) { + case 'quant8_asymm': + case 'quant8_symm_per_channel': + case 'quant8_symm': + case 'quant8_asymm_signed': + case 'quant16_asymm': + case 'quant16_symm': + quantization = dataType; + dataType = dataType.indexOf('16') !== -1 ?
'uint16' : 'uint8'; + break; + default: + break; + } + const type = new pytorch.TensorType(dataType, shape); + let initializer = null; + if (operand.data) { + const size = dimensions.reduce((a, b) => a * b, 1); + const tensor = { + size: () => dimensions, + stride: () => null, + storage_offset: () => 0, + storage: () => ({ + dtype: { __reduce__: () => type.dataType }, + data: operand.data, size: () => size + }) + }; + initializer = new pytorch.Tensor(null, tensor); + } + if (quantization || (operand.scale !== undefined && operand.scale !== 0) || (operand.zero_point !== undefined && operand.zero_point !== 0)) { + quantization = { + type: quantization || 'linear', + scale: [ operand.scale ], + offset: [ operand.zero_point ] + }; + } + const value = new pytorch.Value(name, type, quantization, initializer); + values.set(operand.index, value); + } + return values.get(operand.index); + }; + const metadata = new pytorch.nnapi.Metadata(); + for (const operation of model.operations) { + const node = new pytorch.nnapi.Node(metadata, operation, values); + this.nodes.push(node); + } + for (let i = 0; i < model.inputs.length; i++) { + const name = i.toString(); + const operand = model.inputs[i]; + const argument = new pytorch.Argument(name, [ values.map(operand) ]); + this.inputs.push(argument); + } + for (let i = 0; i < model.outputs.length; i++) { + const name = i.toString(); + const operand = model.outputs[i]; + const argument = new pytorch.Argument(name, [ values.map(operand) ]); + this.outputs.push(argument); + } + } +}; + +pytorch.nnapi.Node = class { + + constructor(metadata, operation, values) { + const signature = (operation.inputs || []).map((input) => input.data_type); + this.name = ''; + this.type = metadata.type(operation.index, signature); + this.inputs = []; + this.outputs = []; + this.attributes = []; + this.chain = []; + if (operation.location !== undefined) { + this.location = operation.location.toString(); + } + const inputs = this.type.inputs.concat(this.type.attributes); + if (operation.inputs) { + for (let i = 0; i < operation.inputs.length; i++) { + const name = i < inputs.length ? inputs[i].name : i.toString(); + const operand = operation.inputs[i]; + if (operand.dimensions.length > 0) { + const value = values.map(operand); + const argument = new pytorch.Argument(name, [ value ]); + this.inputs.push(argument); + } else if (name === 'activation') { + const activation = new Map([ [ 1, 19 ], [ 2, 20 ], [ 3, 21 ] ]).get(operand.value) || 0; + if (activation !== 0) { + this.chain.push(new pytorch.nnapi.Node(metadata, { index: activation })); + } + } else { + const attribute = new pytorch.Argument(name, operand.value, operand.data_type, false); + this.attributes.push(attribute); + } + } + } + if (operation.outputs) { + for (let i = 0; i < operation.outputs.length; i++) { + const name = i < inputs.length ? 
inputs[i].name : i.toString(); + const operand = operation.outputs[i]; + const value = values.map(operand); + const argument = new pytorch.Argument(name, [ value ]); + this.outputs.push(argument); + } + } + } +}; + +pytorch.nnapi.Metadata = class { + + constructor() { + this._types = new Map(); + // https://developer.android.com/ndk/reference/group/neural-networks + // https://github.com/pytorch/pytorch/commits/master/torch/backends/_nnapi/serializer.py + this.register(0, 'ADD', '', [ 'A', 'B' ], [ [ 'activation', 'int32'] ], [ 'C' ]); + this.register(1, 'AVERAGE_POOL_2D', 'Pool', [ 'input' ], [ [ 'padding_left', 'int32' ], [ 'padding_right', 'int32' ], [ 'padding_top', 'int32' ], [ 'padding_bottom', 'int32' ], [ 'stride_x', 'int32' ], [ 'stride_y', 'int32' ], [ 'filter_x', 'int32' ], [ 'filter_y', 'int32' ], [ 'activation', 'int32' ], [ 'nchw', 'boolean' ] ], [ 'output' ]); + this.register(1, 'AVERAGE_POOL_2D', 'Pool', [ 'input' ], [ [ 'padding_scheme', 'int32' ], [ 'stride_x', 'int32' ], [ 'stride_y', 'int32' ], [ 'filter_x', 'int32' ], [ 'filter_y', 'int32' ], [ 'activation', 'int32' ], [ 'nchw', 'boolean' ] ], [ 'output' ]); + this.register(2, 'CONCATENATION'); + this.register(3, 'CONV_2D', 'Layer', [ 'input', 'weights', 'bias' ], [ [ 'padding_left', 'int32' ], [ 'padding_right', 'int32' ], [ 'padding_top', 'int32' ], [ 'padding_bottom', 'int32' ], [ 'stride_x', 'int32' ], [ 'stride_y', 'int32' ], [ 'activation', 'int32' ], [ 'nchw', 'boolean' ], [ 'dilation_width', 'int32' ], [ 'dilation_height', 'int32' ] ], [ 'output' ]); + this.register(3, 'CONV_2D', 'Layer', [ 'input', 'weights', 'bias' ], [ [ 'padding_scheme', 'int32' ], [ 'stride_x', 'int32' ], [ 'stride_y', 'int32' ], [ 'activation', 'int32' ], [ 'nchw', 'boolean' ], [ 'dilation_width', 'int32' ], [ 'dilation_height', 'int32' ] ], [ 'output' ]); + this.register(4, 'DEPTHWISE_CONV_2D', 'Layer', [ 'input', 'weights', 'bias' ], [ [ 'padding_left', 'int32' ], [ 'padding_right', 'int32' ], [ 'padding_top', 'int32' ], [ 'padding_bottom', 'int32' ], [ 'stride_x', 'int32' ], [ 'stride_y', 'int32' ], [ 'activation', 'int32' ], [ 'nchw', 'boolean' ], [ 'dilation_width', 'int32' ], [ 'dilation_height', 'int32' ] ], [ 'output' ]); + this.register(4, 'DEPTHWISE_CONV_2D', 'Layer', [ 'input', 'weights', 'bias' ], [ [ 'padding_scheme', 'int32' ], [ 'stride_x', 'int32' ], [ 'stride_y', 'int32' ], [ 'activation', 'int32' ], [ 'nchw', 'boolean' ], [ 'dilation_width', 'int32' ], [ 'dilation_height', 'int32' ] ], [ 'output' ]); + this.register(5, 'DEPTH_TO_SPACE'); + this.register(6, 'DEQUANTIZE'); + this.register(7, 'EMBEDDING_LOOKUP'); + this.register(8, 'FLOOR'); + this.register(9, 'FULLY_CONNECTED', 'Layer', [ 'input', 'weights', 'bias' ], [ [ 'activation', 'int32' ] ], [ 'output' ]); + this.register(10, 'HASHTABLE_LOOKUP'); + this.register(11, 'L2_NORMALIZATION'); + this.register(12, 'L2_POOL_2D', 'Pool'); + this.register(13, 'LOCAL_RESPONSE_NORMALIZATION'); + this.register(14, 'LOGISTIC'); + this.register(15, 'LSH_PROJECTION'); + this.register(16, 'LSTM', 'Layer'); + this.register(17, 'MAX_POOL_2D', 'Pool'); + this.register(18, 'MUL'); + this.register(19, 'RELU', 'Activation', [ 'input' ], [], [ 'output' ]); + this.register(20, 'RELU1', 'Activation'); + this.register(21, 'RELU6', 'Activation'); + this.register(22, 'RESHAPE', 'Shape', [ 'input', 'shape' ], [], [ 'output' ]); + this.register(23, 'RESIZE_BILINEAR'); + this.register(24, 'RNN', 'Layer'); + this.register(25, 'SOFTMAX', 'Activation'); + this.register(26, 'SPACE_TO_DEPTH'); + 
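// Some opcodes above (AVERAGE_POOL_2D, CONV_2D, DEPTHWISE_CONV_2D) carry two registrations: one with explicit per-edge padding attributes and one with an implicit 'padding_scheme'; type() below returns the first registration whose declared inputs and attributes are compatible with the call-site signature and otherwise falls back to the first overload. + 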
this.register(27, 'SVDF'); + this.register(28, 'TANH'); + this.register(29, 'BATCH_TO_SPACE_ND'); + this.register(30, 'DIV'); + this.register(31, 'MEAN'); + this.register(32, 'PAD'); + this.register(33, 'SPACE_TO_BATCH_ND'); + this.register(34, 'SQUEEZE'); + this.register(35, 'STRIDED_SLICE'); + this.register(36, 'SUB'); + this.register(37, 'TRANSPOSE'); + this.register(38, 'ABS'); + this.register(39, 'ARGMAX'); + this.register(40, 'ARGMIN'); + this.register(41, 'AXIS_ALIGNED_BBOX_TRANSFORM'); + this.register(42, 'BIDIRECTIONAL_SEQUENCE_LSTM'); + this.register(43, 'BIDIRECTIONAL_SEQUENCE_RNN'); + this.register(44, 'BOX_WITH_NMS_LIMIT'); + this.register(45, 'CAST'); + this.register(46, 'CHANNEL_SHUFFLE'); + this.register(47, 'DETECTION_POSTPROCESSING'); + this.register(48, 'EQUAL'); + this.register(49, 'EXP'); + this.register(50, 'EXPAND_DIMS'); + this.register(51, 'GATHER'); + this.register(52, 'GENERATE_PROPOSALS'); + this.register(53, 'GREATER'); + this.register(54, 'GREATER_EQUAL'); + this.register(55, 'GROUPED_CONV_2D'); + this.register(56, 'HEATMAP_MAX_KEYPOINT'); + this.register(57, 'INSTANCE_NORMALIZATION'); + this.register(58, 'LESS'); + this.register(59, 'LESS_EQUAL'); + this.register(60, 'LOG'); + this.register(61, 'LOGICAL_AND'); + this.register(62, 'LOGICAL_NOT'); + this.register(63, 'LOGICAL_OR'); + this.register(64, 'LOG_SOFTMAX'); + this.register(65, 'MAXIMUM'); + this.register(66, 'MINIMUM'); + this.register(67, 'NEG'); + this.register(68, 'NOT_EQUAL'); + this.register(69, 'PAD_V2'); + this.register(70, 'POW'); + this.register(71, 'PRELU'); + this.register(72, 'QUANTIZE'); + this.register(73, 'QUANTIZED_16BIT_LSTM'); + this.register(74, 'RANDOM_MULTINOMIAL'); + this.register(75, 'REDUCE_ALL'); + this.register(76, 'REDUCE_ANY'); + this.register(77, 'REDUCE_MAX'); + this.register(78, 'REDUCE_MIN'); + this.register(79, 'REDUCE_PROD'); + this.register(80, 'REDUCE_SUM'); + this.register(81, 'ROI_ALIGN'); + this.register(82, 'ROI_POOLING'); + this.register(83, 'RSQRT'); + this.register(84, 'SELECT'); + this.register(85, 'SIN'); + this.register(86, 'SLICE'); + this.register(87, 'SPLIT'); + this.register(88, 'SQRT'); + this.register(89, 'TILE'); + this.register(90, 'TOPK_V2'); + this.register(91, 'TRANSPOSE_CONV_2D', 'Layer'); + this.register(92, 'UNIDIRECTIONAL_SEQUENCE_LSTM', 'Layer'); + this.register(93, 'UNIDIRECTIONAL_SEQUENCE_RNN', 'Layer'); + this.register(94, 'RESIZE_NEAREST_NEIGHBOR'); + this.register(95, 'QUANTIZED_LSTM', 'Layer'); + this.register(96, 'IF'); + this.register(97, 'WHILE'); + this.register(98, 'ELU', 'Activation'); + this.register(99, 'HARD_SWISH', 'Activation'); + this.register(100, 'FILL'); + this.register(101, 'RANK'); + } + + register(index, name, category, inputs, attributes, outputs) { + const type = {}; + type.name = name; + type.inputs = (inputs || []).map((name) => ({ name: name, type: 'Tensor' })); + type.outputs = (outputs || []).map((name) => ({ name: name, type: 'Tensor' })); + type.attributes = (attributes || []).map(([name, type]) => ({ name: name, type: type })); + if (category) { + type.category = category; + } + if (!this._types.has(index)) { + this._types.set(index, []); + } + this._types.get(index).push(type); + } + + type(index, signature) { + if (!this._types.has(index)) { + this._types.set(index, [ { name: index.toString(), inputs: [], outputs: [], attributes: [] } ]); + } + const types = this._types.get(index); + for (const type of types) { + const inputs = type.inputs.concat(type.attributes); + if (signature.length < inputs.length) { +
if (inputs.every((input, i) => input.type === undefined || input.type === 'Tensor' || input.type === signature[i])) { + return type; + } + } + } + return types[0]; + } +}; + +pytorch.Metadata = class { + + static async open(context) { + if (pytorch.Metadata._metadata) { + return pytorch.Metadata._metadata; + } + try { + const data = await context.request('pytorch-metadata.json'); + pytorch.Metadata._metadata = new pytorch.Metadata(data); + return pytorch.Metadata._metadata; + } catch (error) { + pytorch.Metadata._metadata = new pytorch.Metadata(null); + return pytorch.Metadata._metadata; + } + } + + constructor(data) { + this._types = new Map(); + this._attributes = new Map(); + this._index = new Map(); + if (data) { + const items = JSON.parse(data); + for (const item of items) { + this._types.set(item.name, item); + } + } + } + + add(name, value) { + this._types.set(name, value); + } + + type(name) { + return this._types.get(name); + } + + attribute(type, name) { + const key = `${type}:${name}`; + if (!this._attributes.has(key)) { + this._attributes.set(key, null); + const metadata = this.type(type); + if (metadata) { + if (metadata.inputs) { + for (const input of metadata.inputs) { + this._attributes.set(`${type}:${input.name}`, input); + } + } + if (metadata.attributes) { + for (const attribute of metadata.attributes) { + this._attributes.set(`${type}:${attribute.name}`, attribute); + } + } + } + } + return this._attributes.get(key); + } +}; + +pytorch.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading PyTorch model.'; + } +}; + +export const ModelFactory = pytorch.ModelFactory; diff --git a/rknn-metadata.json b/rknn-metadata.json new file mode 100644 index 00000000000..b130549e59c --- /dev/null +++ b/rknn-metadata.json @@ -0,0 +1,349 @@ +[ + { + "name": "VSI_NN_OP_ADD", + "inputs": [ + { "name": "input" }, + { "name": "other" } + ] + }, + { + "name": "VSI_NN_OP_CONCAT", + "category": "Tensor", + "inputs": [ + { "name": "inputs", "list": true } + ] + }, + { + "name": "VSI_NN_OP_CONV_RELU", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "VSI_NN_OP_CONV_RELU_POOL", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "VSI_NN_OP_CONV2D", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "VSI_NN_OP_DECONVOLUTION", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "VSI_NN_OP_FCL", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "VSI_NN_OP_FCL_RELU", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "VSI_NN_OP_LEAKY_RELU", + "category": "Activation" + }, + { + "name": "VSI_NN_OP_LEAKY_SIGMOID", + "category": "Activation" + }, + { + "name": "VSI_NN_OP_MISH", + "category": "Activation" + }, + { + "name": "VSI_NN_OP_PERMUTE", + "category": "Shape" + }, + { + "name": "VSI_NN_OP_POOL", + "category": "Pool" + }, + { + "name": "VSI_NN_OP_RELU", + "category": "Activation" + }, + { + "name": "VSI_NN_OP_PRELU", + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "slope" } + ] + }, + { + "name": "VSI_NN_OP_RESHAPE", + "category": "Shape" + }, + { + "name": 
"VSI_NN_OP_SIGMOID", + "category": "Activation" + }, + { + "name": "VSI_NN_OP_SOFTMAX", + "category": "Activation" + }, + { + "name": "VSI_NN_OP_RESIZE", + "category": "Shape" + }, + { + "name": "VSI_NN_OP_LRN", + "category": "Normalization" + }, + { + "name": "VSI_NN_OP_BATCH_NORM", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" }, + { "name": "mean" }, + { "name": "variance" } + ] + }, + { + "name": "VSI_NN_OP_LSTM", + "category": "Layer" + }, + { + "name": "VSI_NN_OP_STRIDED_SLICE", + "category": "Tensor" + }, + { + "name": "VSI_NN_OP_SWISH", + "category": "Activation" + }, + { + "name": "VSI_NN_OP_RELUN", + "category": "Activation" + }, + { + "name": "VSI_NN_OP_INSTANCE_NORM", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "scale" }, + { "name": "bias" } + ] + }, + { + "name": "Conv", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "ConvReluAdd", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "ConvLeakyRelu", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "ConvLeakyReluAdd", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "ConvExSwish", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "ConvTranspose", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "ConvRelu", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "ConvClip", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ] + }, + { + "name": "Concat", + "category": "Tensor", + "inputs": [ + { "name": "inputs", "list": true } + ] + }, + { + "name": "BatchNormalization", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" }, + { "name": "running_mean" }, + { "name": "running_var" } + ] + }, + { + "name": "Relu", + "category": "Activation" + }, + { + "name": "Softmax", + "category": "Activation" + }, + { + "name": "MaxPool", + "category": "Pool" + }, + { + "name": "AveragePool", + "category": "Pool" + }, + { + "name": "Reshape", + "category": "Shape", + "inputs": [ + { "name": "input" }, + { "name": "shape" } + ] + }, + { + "name": "Transpose", + "category": "Transform" + }, + { + "name": "Add", + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "Split", + "category": "Tensor", + "outputs": [ + { "name": "output", "list": true } + ] + }, + { + "name": "PoolingLayer2", + "category": "Pool" + }, + { + "name": "Softmax2Layer", + "category": "Activation" + }, + { + "name": "SoftMax2", + "category": "Activation" + }, + { + "name": "TensorTranspose", + "category": "Transform" + }, + { + "name": "ActivationLayer", + "category": "Activation" + }, + { + "name": "LeakyReluLayer", + "category": "Activation" + }, + { + "name": "ConvolutionLayer", + "category": "Layer" + }, + { + "name": "TensorScale", + "category": "Layer" + }, + { + "name": "BatchNormalizationLayer", + "category": "Normalization" + }, + { + "name": "Sigmoid", 
+ "category": "Activation" + }, + { + "name": "LeakyRelu", + "category": "Activation" + }, + { + "name": "HardSigmoid", + "category": "Activation" + }, + { + "name": "exSwish", + "category": "Activation" + }, + { + "name": "Slice", + "category": "Tensor" + } +] \ No newline at end of file diff --git a/rknn-schema.js b/rknn-schema.js new file mode 100644 index 00000000000..64805502915 --- /dev/null +++ b/rknn-schema.js @@ -0,0 +1,123 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const $root = flatbuffers.get('rknn'); + +$root.rknn = $root.rknn || {}; + +$root.rknn.Model = class Model { + + static identifier(reader) { + return reader.identifier === 'RKNN'; + } + + static create(reader) { + return $root.rknn.Model.decode(reader, reader.root); + } + + static decode(reader, position) { + const $ = new $root.rknn.Model(); + $.var1 = reader.int32_(position, 4, 0); + $.format = reader.string_(position, 6, null); + $.graphs = reader.tableArray(position, 8, $root.rknn.Graph.decode); + $.generator = reader.string_(position, 10, null); + $.var2 = reader.tableArray(position, 12, $root.rknn.Type1.decode); + $.var3 = reader.int32_(position, 14, 0); + $.var4 = reader.int32_(position, 16, 0); + $.compiler = reader.string_(position, 18, null); + $.runtime = reader.string_(position, 20, null); + $.source = reader.string_(position, 22, null); + $.var5 = reader.bool_(position, 24, false); + $.var6 = reader.int32_(position, 26, 0); + $.input_json = reader.string_(position, 28, null); + $.output_json = reader.string_(position, 30, null); + return $; + } +}; + +$root.rknn.Graph = class Graph { + + static decode(reader, position) { + const $ = new $root.rknn.Graph(); + $.tensors = reader.tableArray(position, 4, $root.rknn.Tensor.decode); + $.nodes = reader.tableArray(position, 6, $root.rknn.Node.decode); + $.inputs = reader.typedArray(position, 8, Int32Array); + $.outputs = reader.typedArray(position, 10, Int32Array); + $.var1 = reader.tableArray(position, 12, $root.rknn.Type2.decode); + return $; + } +}; + +$root.rknn.Node = class Node { + + static decode(reader, position) { + const $ = new $root.rknn.Node(); + $.var1 = reader.int32_(position, 4, 0); + $.type = reader.string_(position, 6, null); + $.name = reader.string_(position, 8, null); + $.var2 = reader.int8_(position, 10, 0); + $.inputs = reader.typedArray(position, 12, Int32Array); + $.outputs = reader.typedArray(position, 14, Int32Array); + $.var3 = reader.tableArray(position, 16, $root.rknn.Type3.decode); + $.var4 = reader.int8_(position, 18, 0); + $.var5 = reader.int32_(position, 20, 0); + $.var6 = reader.int32_(position, 22, 0); + return $; + } +}; + +$root.rknn.Tensor = class Tensor { + + static decode(reader, position) { + const $ = new $root.rknn.Tensor(); + $.data_type = reader.int8_(position, 4, 0); + $.var02 = reader.int8_(position, 6, 0); + $.kind = reader.int8_(position, 8, 0); + $.var04 = reader.typedArray(position, 10, Int32Array); + $.shape = reader.typedArray(position, 12, Int32Array); + $.name = reader.string_(position, 14, null); + $.var06 = reader.typedArray(position, 16, Int8Array); + $.var07 = reader.string_(position, 18, null); + $.var08 = reader.typedArray(position, 20, Int8Array); + $.var09 = reader.typedArray(position, 22, Int8Array); + $.var10 = reader.typedArray(position, 24, Int8Array); + $.var11 = reader.typedArray(position, 26, Int8Array); + $.size = reader.int32_(position, 28, 0); + $.var13 = reader.int32_(position, 30, 0); + $.var14 = reader.int32_(position, 32, 0); + $.var15 = reader.int32_(position, 34, 0); + 
$.var16 = reader.int32_(position, 36, 0); + $.var17 = reader.int32_(position, 38, 0); + $.index = reader.int32_(position, 40, 0); + return $; + } +}; + +$root.rknn.Type1 = class Type1 { + + static decode(reader, position) { + const $ = new $root.rknn.Type1(); + $.var1 = reader.int32_(position, 4, 0); + return $; + } +}; + +$root.rknn.Type2 = class Type2 { + + static decode(reader, position) { + const $ = new $root.rknn.Type2(); + $.var1 = reader.typedArray(position, 4, Int32Array); + $.var2 = reader.typedArray(position, 6, Int32Array); + $.var3 = reader.typedArray(position, 8, Int32Array); + return $; + } +}; + +$root.rknn.Type3 = class Type3 { + + static decode(reader, position) { + const $ = new $root.rknn.Type3(); + $.var1 = reader.int32_(position, 4, 0); + return $; + } +}; diff --git a/rknn.js b/rknn.js new file mode 100644 index 00000000000..1aebc396094 --- /dev/null +++ b/rknn.js @@ -0,0 +1,681 @@ + +import * as base from './base.js'; +import * as flatbuffers from './flatbuffers.js'; +import * as json from './json.js'; + +const rknn = {}; +const openvx = {}; + +rknn.ModelFactory = class { + + match(context) { + return rknn.Container.open(context); + } + + async open(context, target) { + await context.require('./rknn-schema'); + rknn.schema = flatbuffers.get('rknn').rknn; + const metadata = await context.metadata('rknn-metadata.json'); + target.read(); + if (target.has('json')) { + const buffer = target.get('json'); + const reader = json.TextReader.open(buffer); + const model = reader.read(); + return new rknn.Model(metadata, 'json', model, target); + } + if (target.has('flatbuffers')) { + const buffer = target.get('flatbuffers'); + const reader = flatbuffers.BinaryReader.open(buffer); + const model = rknn.schema.Model.create(reader); + return new rknn.Model(metadata, 'flatbuffers', model, null); + } + if (target.has('openvx')) { + const buffer = target.get('openvx'); + const model = new openvx.Model(buffer); + return new rknn.Model(metadata, 'openvx', model, null); + } + throw new rknn.Error("Unsupported RKNN format."); + } +}; + +rknn.Model = class { + + constructor(metadata, type, model, container) { + switch (type) { + case 'json': { + this._format = `RKNN v${model.version.split('-').shift()}`; + this._name = model.name || ''; + this._producer = model.ori_network_platform || model.network_platform || ''; + this._runtime = model.target_platform ? model.target_platform.join(',') : ''; + this._graphs = [ new rknn.Graph(metadata, type, model.name || '', model, container) ]; + break; + } + case 'flatbuffers': { + const version = model.compiler.split('-').shift(); + this._format = `RKNN Lite${version ? 
` v${version}` : ''}`; + this._runtime = model.runtime; + this._name = model.name || ''; + this._graphs = model.graphs.map((graph) => new rknn.Graph(metadata, type, '', graph, null)); + this._metadata = new Map(); + this._metadata.set('source', model.source); + break; + } + case 'openvx': { + this._format = 'RKNN OpenVX'; + this._name = model.name || ''; + this._graphs = [ new rknn.Graph(metadata, type, '', model, container) ]; + break; + } + default: { + throw new rknn.Error(`Unsupported RKNN model type '${type}'.`); + } + } + } + + get format() { + return this._format; + } + + get name() { + return this._name; + } + + get producer() { + return this._producer; + } + + get runtime() { + return this._runtime; + } + + get metadata() { + return this._metadata; + } + + get graphs() { + return this._graphs; + } +}; + +rknn.Graph = class { + + constructor(metadata, type, name, obj, container) { + this._name = name; + this._inputs = []; + this._outputs = []; + this._nodes = []; + switch (type) { + case 'json': { + const dataType = (value) => { + const type = value.vx_type.startsWith('VSI_NN_TYPE_') ? value.vx_type.split('_').pop().toLowerCase() : value.vx_type; + switch (type) { + case 'uint8': + case 'int8': + case 'int16': + case 'int32': + case 'int64': + case 'float16': + case 'float32': + case 'float64': + case 'vdata': + return type; + default: + if (value.vx_type !== '') { + throw new rknn.Error(`Invalid data type '${JSON.stringify(value.vx_type)}'.`); + } + return '?'; + } + }; + const model = obj; + const values = new Map(); + for (const const_tensor of model.const_tensor) { + const name = `const_tensor:${const_tensor.tensor_id}`; + const shape = new rknn.TensorShape(const_tensor.size); + const type = new rknn.TensorType(dataType(const_tensor.dtype), shape); + const tensor = new rknn.Tensor(type, const_tensor.offset, null); + const value = new rknn.Value(name, type, tensor); + values.set(name, value); + } + for (const virtual_tensor of model.virtual_tensor) { + const name = `${virtual_tensor.node_id}:${virtual_tensor.output_port}`; + const value = new rknn.Value(name, null, null); + values.set(name, value); + } + for (const norm_tensor of model.norm_tensor) { + const name = `norm_tensor:${norm_tensor.tensor_id}`; + const shape = new rknn.TensorShape(norm_tensor.size); + const type = new rknn.TensorType(dataType(norm_tensor.dtype), shape); + const value = new rknn.Value(name, type, null); + values.set(name, value); + } + const value = (name) => { + if (!values.has(name)) { + values.set(name, new rknn.Value(name, null, null)); + } + return values.get(name); + }; + for (const node of model.nodes) { + node.input = []; + node.output = []; + } + for (const connection of model.connection) { + switch (connection.left) { + case 'input': + model.nodes[connection.node_id].input.push(connection); + if (connection.right_node) { + model.nodes[connection.right_node.node_id].output[connection.right_node.tensor_id] = connection; + } + break; + case 'output': + model.nodes[connection.node_id].output.push(connection); + break; + default: + throw new rknn.Error(`Unsupported left connection '${connection.left}'.`); + } + } + for (const graph of model.graph) { + const key = `${graph.right}:${graph.right_tensor_id}`; + const name = graph.left + (graph.left_tensor_id === 0 ?
'' : graph.left_tensor_id.toString()); + const argument = new rknn.Argument(name, [ value(key) ]); + switch (graph.left) { + case 'input': + this._inputs.push(argument); + break; + case 'output': + this._outputs.push(argument); + break; + default: + throw new rknn.Error(`Unsupported left graph connection '${graph.left}'.`); + } + } + this._nodes = model.nodes.map((node) => new rknn.Node(metadata, type, node, value, container)); + break; + } + case 'flatbuffers': { + const graph = obj; + const dataTypes = [ 'unk0', 'int32', '?', 'int8', '?', 'int16', 'float32', 'int64', '?', '?', 'float16', '?', '?', 'unk13' ]; + const args = graph.tensors.map((tensor) => { + const shape = new rknn.TensorShape(Array.from(tensor.shape)); + const dataType = tensor.data_type < dataTypes.length ? dataTypes[tensor.data_type] : '?'; + if (dataType === '?') { + throw new rknn.Error(`Unsupported tensor data type '${tensor.data_type}'.`); + } + const type = new rknn.TensorType(dataType, shape); + const initializer = tensor.kind !== 4 && tensor.kind !== 5 ? null : new rknn.Tensor(type, 0, null); + return new rknn.Value(tensor.name, type, initializer); + }); + const arg = (index) => { + if (index >= args.length) { + throw new rknn.Error(`Invalid tensor index '${index}'.`); + } + return args[index]; + }; + this._nodes = graph.nodes.map((node) => new rknn.Node(metadata, type, node, arg, container)); + break; + } + case 'openvx': { + const model = obj; + this._nodes = model.nodes.map((node) => new rknn.Node(metadata, type, node, null, container)); + break; + } + default: { + throw new rknn.Error(`Unsupported RKNN graph type '${type}'.`); + } + } + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +rknn.Argument = class { + + constructor(name, value) { + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +rknn.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new rknn.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +rknn.Node = class { + + constructor(metadata, type, node, value, container) { + this._inputs = []; + this._outputs = []; + this._attributes = []; + switch (type) { + case 'json': { + this._name = node.name || ''; + if (node.op === 'VSI_NN_OP_NBG' && container && container.has('openvx')) { + const buffer = container.get('openvx'); + const model = new openvx.Model(buffer); + this._type = new rknn.Graph(metadata, 'openvx', 'NBG', model, null); + } else if (node.op === 'RKNN_OP_NNBG' && container && container.has('flatbuffers')) { + const buffer = container.get('flatbuffers'); + const reader = flatbuffers.BinaryReader.open(buffer); + const model = rknn.schema.Model.create(reader); + this._type = new rknn.Graph(metadata, 'flatbuffers', 'NNBG', model.graphs[0], null); + } else { + this._type = Object.assign({}, metadata.type(node.op) || { name: node.op }); + for (const prefix of [ 'VSI_NN_OP_', 'RKNN_OP_' ]) { + this._type.name = this._type.name.startsWith(prefix) ?
this._type.name.substring(prefix.length) : this._type.name; + } + } + node.input = node.input || []; + for (let i = 0; i < node.input.length;) { + const input = this._type && this._type.inputs && i < this._type.inputs.length ? this._type.inputs[i] : { name: i === 0 ? 'input' : i.toString() }; + const count = input.list ? node.input.length - i : 1; + const list = node.input.slice(i, i + count).map((input) => { + if (input.right_tensor) { + return value(`${input.right_tensor.type}:${input.right_tensor.tensor_id}`); + } + if (input.right_node) { + return value(`${input.right_node.node_id}:${input.right_node.tensor_id}`); + } + throw new rknn.Error('Invalid input argument.'); + }); + this._inputs.push(new rknn.Argument(input.name, list)); + i += count; + } + node.output = node.output || []; + for (let i = 0; i < node.output.length;) { + const output = this._type && this._type.outputs && i < this._type.outputs.length ? this._type.outputs[i] : { name: i === 0 ? 'output' : i.toString() }; + const count = output.list ? node.output.length - i : 1; + const list = node.output.slice(i, i + count).map((output) => { + if (output.right_tensor) { + return value(`${output.right_tensor.type}:${output.right_tensor.tensor_id}`); + } + if (output.right_node) { + return value(`${output.right_node.node_id}:${output.right_node.tensor_id}`); + } + throw new rknn.Error('Invalid output argument.'); + }); + this._outputs.push(new rknn.Argument(output.name, list)); + i += count; + } + if (node.nn) { + for (const params of Object.values(node.nn)) { + for (const [name, value] of Object.entries(params)) { + const attribute = new rknn.Attribute(name, value); + this._attributes.push(attribute); + } + } + } + break; + } + case 'flatbuffers': { + this._name = node.name; + this._type = metadata.type(node.type); + if (node.inputs.length > 0) { + const inputs = this._type.inputs || (node.inputs.length === 1 ? [ { name: "input" } ] : [ { name: "inputs", list: true } ]); + if (Array.isArray(inputs) && inputs.length > 0 && inputs[0].list === true) { + this._inputs = [ new rknn.Argument(inputs[0].name, Array.from(node.inputs).map((input) => value(input))) ]; + } else { + this._inputs = Array.from(node.inputs).map((input, index) => { + return new rknn.Argument(index < inputs.length ? inputs[index].name : index.toString(), [ value(input) ]); + }); + } + } + if (node.outputs.length > 0) { + const outputs = this._type.outputs || (node.outputs.length === 1 ? [ { name: "output" } ] : [ { name: "outputs", list: true } ]); + if (Array.isArray(outputs) && outputs.length > 0 && outputs[0].list === true) { + const values = Array.from(node.outputs).map((output) => value(output)); + const argument = new rknn.Argument(outputs[0].name, values); + this._outputs = [ argument ]; + } else { + this._outputs = Array.from(node.outputs).map((output, index) => { + return new rknn.Argument(index < outputs.length ?
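+ /*
+  * Sketch of the argument grouping used by both branches of this class: when
+  * operator metadata marks a parameter with list === true, all remaining
+  * connections fold into a single argument. For a concat-style node with four
+  * incoming tensors and metadata [ { name: 'inputs', list: true } ] this
+  * yields one rknn.Argument('inputs', [ v0, v1, v2, v3 ]) rather than four
+  * positional arguments named '0' through '3'.
+  */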
outputs[index].name : index.toString(), [ value(output) ]); + }); + } + } + break; + } + case 'openvx': { + this._name = ''; + this._type = metadata.type(node.type); + break; + } + default: { + throw new rknn.Error(`Unsupported RKNN node type '${type}'.`); + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } +}; + +rknn.Attribute = class { + + constructor(name, value) { + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +rknn.Tensor = class { + + constructor(type, offset, weights) { + this._type = type; + this._data = null; + let itemsize = 0; + switch (this._type.dataType) { + case 'uint8': itemsize = 1; break; + case 'int8': itemsize = 1; break; + case 'int16': itemsize = 2; break; + case 'int32': itemsize = 4; break; + case 'int64': itemsize = 8; break; + case 'float16': itemsize = 2; break; + case 'float32': itemsize = 4; break; + case 'float64': itemsize = 8; break; + case 'vdata': itemsize = 1; break; + default: throw new rknn.Error(`Unsupported tensor data type '${this._type.dataType}'.`); + } + if (weights) { + const shape = type.shape.dimensions; + const size = itemsize * shape.reduce((a, b) => a * b, 1); + if (size > 0) { + this._data = weights.slice(offset, offset + size); + } + } + } + + get type() { + return this._type; + } + + get values() { + return this._data; + } +}; + +rknn.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +rknn.TensorShape = class { + + constructor(shape) { + this._dimensions = shape; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (!this._dimensions || this._dimensions.length == 0) { + return ''; + } + return `[${this._dimensions.join(',')}]`; + } +}; + +rknn.Container = class extends Map { + + static open(context) { + const stream = context.stream; + if (stream) { + const signature = rknn.Container.signature(stream); + switch (signature) { + case 'rknn': + case 'openvx': + case 'flatbuffers': + return new rknn.Container(stream); + default: + break; + } + const obj = context.peek('json'); + if (obj && obj.version && Array.isArray(obj.nodes) && obj.network_platform) { + const entries = new Map(); + entries.set('json', stream); + return new rknn.Container(null, entries); + } + } + return null; + } + + constructor(stream, entries) { + super(entries); + this._stream = stream; + } + + read() { + if (this._stream) { + const stream = this._stream; + delete this._stream; + const signature = rknn.Container.signature(stream); + switch (signature) { + case 'rknn': { + const uint64 = () => { + const buffer = stream.read(8); + const reader = new base.BinaryReader(buffer); + return reader.uint64(); + }; + stream.skip(8); + const version = uint64(); + const data_size = uint64(); + switch (version) { + case 0x0001: + case 0x1001: + break; + case 0x0002: + case 0x1002: + case 0x0003: + case 0x1003: + case 0x0004: + case 0x1004: + case 0x0005: + case 0x0006: + if (data_size > 0) { + stream.skip(40); + } + break; + default: + throw new rknn.Error(`Unsupported RKNN container version '${version}'.`); + } + const signature = 
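+ /*
+  * Container layout consumed by this 'rknn' case, as reconstructed by this
+  * reader (a sketch; the purpose of the skipped regions is an assumption
+  * about undocumented header revisions):
+  *
+  *   bytes 0..7    'RKNN' magic (4 bytes) plus 4 unidentified bytes
+  *   bytes 8..15   uint64 version (one of the cases listed above)
+  *   bytes 16..23  uint64 data_size
+  *   [40 bytes]    reserved, only for the later versions when data_size > 0
+  *   data_size     embedded payload ('VPMN' OpenVX or flatbuffers blob)
+  *   uint64        json_size, followed by json_size bytes of JSON graph text
+  */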
rknn.Container.signature(stream, data_size); + const data = stream.read(data_size); + const json_size = uint64(); + const json = stream.read(json_size); + this.set('json', json); + if (signature) { + this.set(signature, data); + } + break; + } + case 'openvx': + case 'flatbuffers': { + this.set(signature, stream.peek()); + break; + } + case 'cyptrknn': { + throw new rknn.Error('Invalid file content. File contains undocumented encrypted RKNN data.'); + } + default: { + break; + } + } + } + } + + static signature(stream, length) { + length = length || stream.length; + if (stream && (stream.position + 16) <= length) { + const signature = [ 0x52, 0x4B, 0x4E, 0x4E ]; // RKNN + if (stream.peek(signature.length).every((value, index) => value === signature[index])) { + return 'rknn'; + } + } + if (stream && (stream.position + 16) <= length) { + const signature = [ 0x43, 0x59, 0x50, 0x54, 0x52, 0x4B, 0x4E, 0x4E ]; // CYPTRKNN + if (stream.peek(signature.length).every((value, index) => value === signature[index])) { + return 'cyptrknn'; + } + } + if (stream && (stream.position + 8) <= length) { + const signature = [ 0x52, 0x4B, 0x4E, 0x4E ]; // RKNN + if (stream.peek(8).subarray(4, 8).every((value, index) => value === signature[index])) { + return 'flatbuffers'; + } + } + if (stream && (stream.position + 8) <= length) { + const signature = [ 0x56, 0x50, 0x4D, 0x4E ]; // VPMN + if (stream.peek(signature.length).every((value, index) => value === signature[index])) { + return 'openvx'; + } + } + return undefined; + } +}; + +openvx.BinaryReader = class extends base.BinaryReader { + + string(length) { + const buffer = this.read(length); + const index = buffer.indexOf(0); + const data = index === -1 ? buffer : buffer.subarray(0, index); + this._decoder = this._decoder || new TextDecoder('ascii'); + return this._decoder.decode(data); + } +}; + +openvx.Model = class { + + constructor(buffer) { + const reader = new openvx.BinaryReader(buffer); + reader.skip(4); // signature + const major = reader.uint16(); + /* const minor = */ reader.uint16(); + reader.skip(4); + this._name = reader.string(64); + this._nodes = new Array(reader.uint32()); + if (major > 3) { + reader.skip(296); + } else if (major > 1) { + reader.skip(288); + } else { + reader.skip(32); + } + /* const inputOffset = */ reader.uint32(); + /* const inputSize = */ reader.uint32(); + /* const outputOffset = */ reader.uint32(); + /* const outputSize = */ reader.uint32(); + const nodeOffset = reader.uint32(); + /* const nodeSize = */ reader.uint32(); + reader.seek(nodeOffset); + for (let i = 0; i < this._nodes.length; i++) { + const type = reader.string(64); + const node = { type: type }; + node.index = reader.uint32(); + node.c = reader.uint32(); + if (major > 3) { + node.d = reader.uint32(); + } + this._nodes[i] = node; + } + } + + get name() { + return this._name; + } + + get nodes() { + return this._nodes; + } +}; + +rknn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading RKNN model.'; + } +}; + +export const ModelFactory = rknn.ModelFactory; diff --git a/safetensors.js b/safetensors.js new file mode 100644 index 00000000000..e49787917ba --- /dev/null +++ b/safetensors.js @@ -0,0 +1,237 @@ + +import * as json from './json.js'; + +const safetensors = {}; + +safetensors.ModelFactory = class { + + match(context) { + const container = safetensors.Container.open(context); + if (container) { + return { name: 'safetensors', value: container }; + } + const obj = context.peek('json'); + if (obj && 
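+ /*
+  * Shape of the sharded-checkpoint index recognized by this branch (a typical
+  * 'model.safetensors.index.json'; names and sizes are illustrative):
+  *
+  *   {
+  *     "metadata":   { "total_size": 4971465728 },
+  *     "weight_map": {
+  *       "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+  *       "lm_head.weight":            "model-00002-of-00002.safetensors"
+  *     }
+  *   }
+  *
+  * open() below fetches each referenced shard once and merges the tensors
+  * named in weight_map into a single entry map.
+  */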
obj.weight_map) { + const entries = Object.entries(obj.weight_map); + if (entries.length > 0 && entries.every(([, value]) => typeof value === 'string' && value.endsWith('.safetensors'))) { + return { name: 'safetensors.json', value: entries }; + } + } + return ''; + } + + async open(context, target) { + switch (target.name) { + case 'safetensors': { + const container = target.value; + await container.read(); + return new safetensors.Model(container.entries); + } + case 'safetensors.json': { + const weight_map = new Map(target.value); + const keys = new Set(weight_map.keys()); + const files = Array.from(new Set(weight_map.values())); + const contexts = await Promise.all(files.map((name) => context.fetch(name))); + const containers = contexts.map((context) => safetensors.Container.open(context)); + await Promise.all(containers.map((container) => container.read())); + const entries = new Map(); + for (const container of containers) { + for (const [key, value] of Array.from(container.entries)) { + if (keys.has(key)) { + entries.set(key, value); + } + } + } + return new safetensors.Model(entries); + } + default: { + throw new safetensors.Error(`Unsupported Safetensors format '${target.name}'.`); + } + } + } +}; + +safetensors.Model = class { + + constructor(entries) { + this.format = 'Safetensors'; + this.graphs = [ new safetensors.Graph(entries) ]; + } +}; + +safetensors.Graph = class { + + constructor(entries) { + this.inputs = []; + this.outputs = []; + this.nodes = []; + const layers = new Map(); + for (const [key, value] of Array.from(entries)) { + if (key === '__metadata__') { + continue; + } + const parts = key.split('.'); + const name = parts.pop(); + const layer = parts.join('.'); + if (!layers.has(layer)) { + layers.set(layer, []); + } + layers.get(layer).push([ name, key, value]); + } + for (const [name, values] of layers) { + const node = new safetensors.Node(name, values); + this.nodes.push(node); + } + } +}; + +safetensors.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +safetensors.Value = class { + + constructor(name, value) { + this.name = name; + this.initializer = value; + } + + get type() { + return this.initializer.type; + } +}; + +safetensors.Node = class { + + constructor(name, values) { + this.name = name; + this.type = { name: 'Module' }; + this.inputs = []; + this.outputs = []; + this.attributes = []; + for (const [name, identifier, obj] of values) { + const tensor = new safetensors.Tensor(obj); + const value = new safetensors.Value(identifier, tensor); + const argument = new safetensors.Argument(name, [ value ]); + this.inputs.push(argument); + } + } +}; + +safetensors.TensorType = class { + + constructor(dtype, shape) { + switch (dtype) { + case 'I8': this.dataType = 'int8'; break; + case 'I16': this.dataType = 'int16'; break; + case 'I32': this.dataType = 'int32'; break; + case 'I64': this.dataType = 'int64'; break; + case 'U8': this.dataType = 'uint8'; break; + case 'U16': this.dataType = 'uint16'; break; + case 'U32': this.dataType = 'uint32'; break; + case 'U64': this.dataType = 'uint64'; break; + case 'BF16': this.dataType = 'bfloat16'; break; + case 'F16': this.dataType = 'float16'; break; + case 'F32': this.dataType = 'float32'; break; + case 'F64': this.dataType = 'float64'; break; + default: throw new safetensors.Error(`Unsupported data type '${dtype}'.`); + } + this.shape = shape; + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +safetensors.TensorShape = class { + + 
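+ /*
+  * TensorType and TensorShape combine into the usual compact signature, e.g.
+  * an entry { "dtype": "F32", "shape": [ 1024, 768 ] } renders as
+  * 'float32[1024,768]' (illustrative values).
+  */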
constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } +}; + +safetensors.Tensor = class { + + constructor(obj) { + const shape = new safetensors.TensorShape(obj.shape); + this.type = new safetensors.TensorType(obj.dtype, shape); + this.encoding = '<'; + this.data = obj.__data__; + } + + get values() { + if (this.data instanceof Uint8Array) { + return this.data; + } + if (this.data && this.data.peek) { + return this.data.peek(); + } + return null; + } +}; + +safetensors.Container = class { + + static open(context) { + const identifier = context.identifier; + const stream = context.stream; + if (stream.length > 9) { + const buffer = stream.peek(9); + if (buffer[6] === 0 && buffer[7] === 0 && buffer[8] === 0x7b) { + const size = buffer[0] | buffer[1] << 8 | buffer[2] << 16 | buffer [3] << 24 | buffer [3] << 32 | buffer [3] << 40; + if (size < stream.length) { + return new safetensors.Container(identifier, stream, size); + } + } + } + return null; + } + + constructor(identifier, stream, size) { + this.identifier = identifier; + this.size = size; + this.stream = stream; + this.entries = new Map(); + } + + async read() { + const stream = this.stream; + const position = stream.position; + stream.seek(8); + const buffer = stream.read(this.size); + const reader = json.TextReader.open(buffer); + const obj = reader.read(); + const offset = stream.position; + for (const [key, value] of Object.entries(obj)) { + if (key === '__metadata__') { + continue; + } + const [start, end] = value.data_offsets; + stream.seek(offset + start); + value.__data__ = stream.stream(end - start); + this.entries.set(key, value); + } + stream.seek(position); + delete this.size; + delete this.stream; + } +}; + +safetensors.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Safetensors model.'; + } +}; + +export const ModelFactory = safetensors.ModelFactory; diff --git a/sentencepiece-proto.js b/sentencepiece-proto.js new file mode 100644 index 00000000000..100c6a645c2 --- /dev/null +++ b/sentencepiece-proto.js @@ -0,0 +1,665 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('sentencepiece'); + +$root.sentencepiece = {}; + +$root.sentencepiece.TrainerSpec = class TrainerSpec { + + constructor() { + this.input = []; + this.accept_language = []; + this.control_symbols = []; + this.user_defined_symbols = []; + } + + static decode(reader, length) { + const message = new $root.sentencepiece.TrainerSpec(); + const end = length !== undefined ? 
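+ /*
+  * Decoding convention shared by every message in this file (sketch): decode()
+  * is called either with no length, for a top-level message read to the end of
+  * the buffer, or with the length prefix of an embedded field, as in
+  *
+  *   message.trainer_spec =
+  *       $root.sentencepiece.TrainerSpec.decode(reader, reader.uint32());
+  *
+  * Unknown fields fall through to reader.skipType(tag & 7), so files written
+  * by newer sentencepiece releases still load.
+  */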
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.input.push(reader.string()); + break; + case 7: + message.input_format = reader.string(); + break; + case 2: + message.model_prefix = reader.string(); + break; + case 3: + message.model_type = reader.int32(); + break; + case 4: + message.vocab_size = reader.int32(); + break; + case 5: + message.accept_language.push(reader.string()); + break; + case 6: + message.self_test_sample_size = reader.int32(); + break; + case 50: + message.enable_differential_privacy = reader.bool(); + break; + case 51: + message.differential_privacy_noise_level = reader.float(); + break; + case 52: + message.differential_privacy_clipping_threshold = reader.uint64(); + break; + case 10: + message.character_coverage = reader.float(); + break; + case 11: + message.input_sentence_size = reader.uint64(); + break; + case 19: + message.shuffle_input_sentence = reader.bool(); + break; + case 12: + message.mining_sentence_size = reader.int32(); + break; + case 13: + message.training_sentence_size = reader.int32(); + break; + case 14: + message.seed_sentencepiece_size = reader.int32(); + break; + case 15: + message.shrinking_factor = reader.float(); + break; + case 18: + message.max_sentence_length = reader.int32(); + break; + case 16: + message.num_threads = reader.int32(); + break; + case 17: + message.num_sub_iterations = reader.int32(); + break; + case 20: + message.max_sentencepiece_length = reader.int32(); + break; + case 21: + message.split_by_unicode_script = reader.bool(); + break; + case 23: + message.split_by_number = reader.bool(); + break; + case 22: + message.split_by_whitespace = reader.bool(); + break; + case 24: + message.treat_whitespace_as_suffix = reader.bool(); + break; + case 26: + message.allow_whitespace_only_pieces = reader.bool(); + break; + case 25: + message.split_digits = reader.bool(); + break; + case 53: + message.pretokenization_delimiter = reader.string(); + break; + case 30: + message.control_symbols.push(reader.string()); + break; + case 31: + message.user_defined_symbols.push(reader.string()); + break; + case 36: + message.required_chars = reader.string(); + break; + case 35: + message.byte_fallback = reader.bool(); + break; + case 32: + message.vocabulary_output_piece_score = reader.bool(); + break; + case 33: + message.hard_vocab_limit = reader.bool(); + break; + case 34: + message.use_all_vocab = reader.bool(); + break; + case 40: + message.unk_id = reader.int32(); + break; + case 41: + message.bos_id = reader.int32(); + break; + case 42: + message.eos_id = reader.int32(); + break; + case 43: + message.pad_id = reader.int32(); + break; + case 45: + message.unk_piece = reader.string(); + break; + case 46: + message.bos_piece = reader.string(); + break; + case 47: + message.eos_piece = reader.string(); + break; + case 48: + message.pad_piece = reader.string(); + break; + case 44: + message.unk_surface = reader.string(); + break; + case 49: + message.train_extremely_large_corpus = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.sentencepiece.TrainerSpec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "input": + reader.array(message.input, () => reader.string()); + break; + case "input_format": + message.input_format = reader.string(); + break; + case "model_prefix": + 
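+ /*
+  * decodeText() parses the protobuf text format emitted by the sentencepiece
+  * trainer, for example (illustrative):
+  *
+  *   model_prefix: "m"
+  *   model_type: BPE
+  *   vocab_size: 8000
+  *
+  * Field names arrive as strings here, whereas decode() above dispatches on
+  * numeric field ids (tag >>> 3) and wire types (tag & 7).
+  */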
message.model_prefix = reader.string(); + break; + case "model_type": + message.model_type = reader.enum($root.sentencepiece.TrainerSpec.ModelType); + break; + case "vocab_size": + message.vocab_size = reader.int32(); + break; + case "accept_language": + reader.array(message.accept_language, () => reader.string()); + break; + case "self_test_sample_size": + message.self_test_sample_size = reader.int32(); + break; + case "enable_differential_privacy": + message.enable_differential_privacy = reader.bool(); + break; + case "differential_privacy_noise_level": + message.differential_privacy_noise_level = reader.float(); + break; + case "differential_privacy_clipping_threshold": + message.differential_privacy_clipping_threshold = reader.uint64(); + break; + case "character_coverage": + message.character_coverage = reader.float(); + break; + case "input_sentence_size": + message.input_sentence_size = reader.uint64(); + break; + case "shuffle_input_sentence": + message.shuffle_input_sentence = reader.bool(); + break; + case "mining_sentence_size": + message.mining_sentence_size = reader.int32(); + break; + case "training_sentence_size": + message.training_sentence_size = reader.int32(); + break; + case "seed_sentencepiece_size": + message.seed_sentencepiece_size = reader.int32(); + break; + case "shrinking_factor": + message.shrinking_factor = reader.float(); + break; + case "max_sentence_length": + message.max_sentence_length = reader.int32(); + break; + case "num_threads": + message.num_threads = reader.int32(); + break; + case "num_sub_iterations": + message.num_sub_iterations = reader.int32(); + break; + case "max_sentencepiece_length": + message.max_sentencepiece_length = reader.int32(); + break; + case "split_by_unicode_script": + message.split_by_unicode_script = reader.bool(); + break; + case "split_by_number": + message.split_by_number = reader.bool(); + break; + case "split_by_whitespace": + message.split_by_whitespace = reader.bool(); + break; + case "treat_whitespace_as_suffix": + message.treat_whitespace_as_suffix = reader.bool(); + break; + case "allow_whitespace_only_pieces": + message.allow_whitespace_only_pieces = reader.bool(); + break; + case "split_digits": + message.split_digits = reader.bool(); + break; + case "pretokenization_delimiter": + message.pretokenization_delimiter = reader.string(); + break; + case "control_symbols": + reader.array(message.control_symbols, () => reader.string()); + break; + case "user_defined_symbols": + reader.array(message.user_defined_symbols, () => reader.string()); + break; + case "required_chars": + message.required_chars = reader.string(); + break; + case "byte_fallback": + message.byte_fallback = reader.bool(); + break; + case "vocabulary_output_piece_score": + message.vocabulary_output_piece_score = reader.bool(); + break; + case "hard_vocab_limit": + message.hard_vocab_limit = reader.bool(); + break; + case "use_all_vocab": + message.use_all_vocab = reader.bool(); + break; + case "unk_id": + message.unk_id = reader.int32(); + break; + case "bos_id": + message.bos_id = reader.int32(); + break; + case "eos_id": + message.eos_id = reader.int32(); + break; + case "pad_id": + message.pad_id = reader.int32(); + break; + case "unk_piece": + message.unk_piece = reader.string(); + break; + case "bos_piece": + message.bos_piece = reader.string(); + break; + case "eos_piece": + message.eos_piece = reader.string(); + break; + case "pad_piece": + message.pad_piece = reader.string(); + break; + case "unk_surface": + message.unk_surface = 
reader.string(); + break; + case "train_extremely_large_corpus": + message.train_extremely_large_corpus = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.sentencepiece.TrainerSpec.prototype.input_format = ""; +$root.sentencepiece.TrainerSpec.prototype.model_prefix = ""; +$root.sentencepiece.TrainerSpec.prototype.model_type = 1; +$root.sentencepiece.TrainerSpec.prototype.vocab_size = 8000; +$root.sentencepiece.TrainerSpec.prototype.self_test_sample_size = 0; +$root.sentencepiece.TrainerSpec.prototype.enable_differential_privacy = false; +$root.sentencepiece.TrainerSpec.prototype.differential_privacy_noise_level = 0; +$root.sentencepiece.TrainerSpec.prototype.differential_privacy_clipping_threshold = protobuf.Uint64.create(0); +$root.sentencepiece.TrainerSpec.prototype.character_coverage = 0.9995; +$root.sentencepiece.TrainerSpec.prototype.input_sentence_size = protobuf.Uint64.create(0); +$root.sentencepiece.TrainerSpec.prototype.shuffle_input_sentence = true; +$root.sentencepiece.TrainerSpec.prototype.mining_sentence_size = 0; +$root.sentencepiece.TrainerSpec.prototype.training_sentence_size = 0; +$root.sentencepiece.TrainerSpec.prototype.seed_sentencepiece_size = 1000000; +$root.sentencepiece.TrainerSpec.prototype.shrinking_factor = 0.75; +$root.sentencepiece.TrainerSpec.prototype.max_sentence_length = 4192; +$root.sentencepiece.TrainerSpec.prototype.num_threads = 16; +$root.sentencepiece.TrainerSpec.prototype.num_sub_iterations = 2; +$root.sentencepiece.TrainerSpec.prototype.max_sentencepiece_length = 16; +$root.sentencepiece.TrainerSpec.prototype.split_by_unicode_script = true; +$root.sentencepiece.TrainerSpec.prototype.split_by_number = true; +$root.sentencepiece.TrainerSpec.prototype.split_by_whitespace = true; +$root.sentencepiece.TrainerSpec.prototype.treat_whitespace_as_suffix = false; +$root.sentencepiece.TrainerSpec.prototype.allow_whitespace_only_pieces = false; +$root.sentencepiece.TrainerSpec.prototype.split_digits = false; +$root.sentencepiece.TrainerSpec.prototype.pretokenization_delimiter = ""; +$root.sentencepiece.TrainerSpec.prototype.required_chars = ""; +$root.sentencepiece.TrainerSpec.prototype.byte_fallback = false; +$root.sentencepiece.TrainerSpec.prototype.vocabulary_output_piece_score = true; +$root.sentencepiece.TrainerSpec.prototype.hard_vocab_limit = true; +$root.sentencepiece.TrainerSpec.prototype.use_all_vocab = false; +$root.sentencepiece.TrainerSpec.prototype.unk_id = 0; +$root.sentencepiece.TrainerSpec.prototype.bos_id = 1; +$root.sentencepiece.TrainerSpec.prototype.eos_id = 2; +$root.sentencepiece.TrainerSpec.prototype.pad_id = -1; +$root.sentencepiece.TrainerSpec.prototype.unk_piece = ""; +$root.sentencepiece.TrainerSpec.prototype.bos_piece = ""; +$root.sentencepiece.TrainerSpec.prototype.eos_piece = ""; +$root.sentencepiece.TrainerSpec.prototype.pad_piece = ""; +$root.sentencepiece.TrainerSpec.prototype.unk_surface = " E28187 "; +$root.sentencepiece.TrainerSpec.prototype.train_extremely_large_corpus = false; + +$root.sentencepiece.TrainerSpec.ModelType = { + "UNIGRAM": 1, + "BPE": 2, + "WORD": 3, + "CHAR": 4 +}; + +$root.sentencepiece.NormalizerSpec = class NormalizerSpec { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.sentencepiece.NormalizerSpec(); + const end = length !== undefined ? 
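+ /*
+  * The prototype assignments above mirror proto2 default semantics: fields
+  * absent from the wire fall through to the prototype, so a TrainerSpec with
+  * no vocab_size on the wire still reports vocab_size === 8000 and
+  * character_coverage === 0.9995 without any per-instance bookkeeping.
+  */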
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.precompiled_charsmap = reader.bytes(); + break; + case 3: + message.add_dummy_prefix = reader.bool(); + break; + case 4: + message.remove_extra_whitespaces = reader.bool(); + break; + case 5: + message.escape_whitespaces = reader.bool(); + break; + case 6: + message.normalization_rule_tsv = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.sentencepiece.NormalizerSpec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "precompiled_charsmap": + message.precompiled_charsmap = reader.bytes(); + break; + case "add_dummy_prefix": + message.add_dummy_prefix = reader.bool(); + break; + case "remove_extra_whitespaces": + message.remove_extra_whitespaces = reader.bool(); + break; + case "escape_whitespaces": + message.escape_whitespaces = reader.bool(); + break; + case "normalization_rule_tsv": + message.normalization_rule_tsv = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.sentencepiece.NormalizerSpec.prototype.name = ""; +$root.sentencepiece.NormalizerSpec.prototype.precompiled_charsmap = new Uint8Array([]); +$root.sentencepiece.NormalizerSpec.prototype.add_dummy_prefix = true; +$root.sentencepiece.NormalizerSpec.prototype.remove_extra_whitespaces = true; +$root.sentencepiece.NormalizerSpec.prototype.escape_whitespaces = true; +$root.sentencepiece.NormalizerSpec.prototype.normalization_rule_tsv = ""; + +$root.sentencepiece.SelfTestData = class SelfTestData { + + constructor() { + this.samples = []; + } + + static decode(reader, length) { + const message = new $root.sentencepiece.SelfTestData(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.samples.push($root.sentencepiece.SelfTestData.Sample.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.sentencepiece.SelfTestData(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "samples": + message.samples.push($root.sentencepiece.SelfTestData.Sample.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.sentencepiece.SelfTestData.Sample = class Sample { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.sentencepiece.SelfTestData.Sample(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.input = reader.string(); + break; + case 2: + message.expected = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.sentencepiece.SelfTestData.Sample(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "input": + message.input = reader.string(); + break; + case "expected": + message.expected = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.sentencepiece.SelfTestData.Sample.prototype.input = ""; +$root.sentencepiece.SelfTestData.Sample.prototype.expected = ""; + +$root.sentencepiece.ModelProto = class ModelProto { + + constructor() { + this.pieces = []; + } + + static decode(reader, length) { + const message = new $root.sentencepiece.ModelProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pieces.push($root.sentencepiece.ModelProto.SentencePiece.decode(reader, reader.uint32())); + break; + case 2: + message.trainer_spec = $root.sentencepiece.TrainerSpec.decode(reader, reader.uint32()); + break; + case 3: + message.normalizer_spec = $root.sentencepiece.NormalizerSpec.decode(reader, reader.uint32()); + break; + case 4: + message.self_test_data = $root.sentencepiece.SelfTestData.decode(reader, reader.uint32()); + break; + case 5: + message.denormalizer_spec = $root.sentencepiece.NormalizerSpec.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.sentencepiece.ModelProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "pieces": + message.pieces.push($root.sentencepiece.ModelProto.SentencePiece.decodeText(reader)); + break; + case "trainer_spec": + message.trainer_spec = $root.sentencepiece.TrainerSpec.decodeText(reader); + break; + case "normalizer_spec": + message.normalizer_spec = $root.sentencepiece.NormalizerSpec.decodeText(reader); + break; + case "self_test_data": + message.self_test_data = $root.sentencepiece.SelfTestData.decodeText(reader); + break; + case "denormalizer_spec": + message.denormalizer_spec = $root.sentencepiece.NormalizerSpec.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.sentencepiece.ModelProto.prototype.trainer_spec = null; +$root.sentencepiece.ModelProto.prototype.normalizer_spec = null; +$root.sentencepiece.ModelProto.prototype.self_test_data = null; +$root.sentencepiece.ModelProto.prototype.denormalizer_spec = null; + +$root.sentencepiece.ModelProto.SentencePiece = class SentencePiece { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.sentencepiece.ModelProto.SentencePiece(); + const end = length !== undefined ? 
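+ /*
+  * Each vocabulary entry decoded here carries three fields; sketch of a
+  * single piece (values illustrative):
+  *
+  *   { piece: '\u2581the', score: -3.2, type: 1 }   // type 1 = NORMAL
+  *
+  * ModelProto.pieces above typically holds vocab_size such entries.
+  */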
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.piece = reader.string(); + break; + case 2: + message.score = reader.float(); + break; + case 3: + message.type = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.sentencepiece.ModelProto.SentencePiece(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "piece": + message.piece = reader.string(); + break; + case "score": + message.score = reader.float(); + break; + case "type": + message.type = reader.enum($root.sentencepiece.ModelProto.SentencePiece.Type); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.sentencepiece.ModelProto.SentencePiece.prototype.piece = ""; +$root.sentencepiece.ModelProto.SentencePiece.prototype.score = 0; +$root.sentencepiece.ModelProto.SentencePiece.prototype.type = 1; + +$root.sentencepiece.ModelProto.SentencePiece.Type = { + "NORMAL": 1, + "UNKNOWN": 2, + "CONTROL": 3, + "USER_DEFINED": 4, + "BYTE": 6, + "UNUSED": 5 +}; diff --git a/sentencepiece.js b/sentencepiece.js new file mode 100644 index 00000000000..f0e3957de57 --- /dev/null +++ b/sentencepiece.js @@ -0,0 +1,102 @@ + +import * as protobuf from './protobuf.js'; + +const sentencepiece = {}; + +sentencepiece.ModelFactory = class { + + match(context) { + const tags = context.tags('pb'); + if ((tags.size >= 3 && tags.size <= 5 && + tags.get(1) === 2 && tags.get(2) === 2 & tags.get(3) === 2) && + Array.from(tags).every(([key, value]) => key <= 5 && value === 2)) { + const model = context.tags('pb+'); + if (model && + model['1'] && model['1']['1'] === 2 && model['1']['2'] === 5 && model['1']['3'] === 0 && + model['2'] && model['2']['1'] === 2 && model['2']['2'] === 2 && model['2']['3'] === 0 && + model['2']['4'] === 0 && model['2']['10'] === 5 && model['2']['16'] === 0 && + model['2']['40'] === 0 && model['2']['41'] === 0 && model['2']['42'] === 0 && model['2']['43'] === 0) { + return 'sentencepiece'; + } + } + return undefined; + } + + async open(context) { + await context.require('./sentencepiece-proto'); + let model = null; + try { + sentencepiece.proto = protobuf.get('sentencepiece').sentencepiece; + const stream = context.stream; + const reader = protobuf.BinaryReader.open(stream); + model = sentencepiece.proto.ModelProto.decode(reader); + } catch (error) { + const message = error && error.message ? 
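+ /*
+  * The match() heuristic above exploits the shape of ModelProto: its top-level
+  * fields 1..5 (pieces, trainer_spec, normalizer_spec, self_test_data,
+  * denormalizer_spec) are all length-delimited (wire type 2), and the nested
+  * 'pb+' probe spot-checks well-known field types inside pieces and
+  * trainer_spec before this reader claims the file.
+  */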
error.message : error.toString(); + throw new sentencepiece.Error(`File format is not sentencepiece.ModelProto (${message.replace(/\.$/, '')}).`); + } + return new sentencepiece.Model(model); + } +}; + +sentencepiece.Model = class { + + constructor(model) { + this.format = 'SentencePiece'; + this.graphs = [ new sentencepiece.Graph(model) ]; + } +}; + +sentencepiece.Graph = class { + + constructor(model) { + this.inputs = []; + this.outputs = []; + this.nodes = []; + for (const [name, value] of Object.entries(model)) { + const node = new sentencepiece.Node(name, value); + this.nodes.push(node); + } + } +}; + +sentencepiece.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +sentencepiece.Node = class { + + constructor(name, obj) { + this.name = name; + this.inputs = []; + this.outputs = []; + this.attributes = []; + if (Array.isArray(obj)) { + const type = new Set(obj.map((value) => value.constructor.name)); + this.type = { name: `${Array.from(type)[0]}[]` }; + const attribute = new sentencepiece.Argument(name, obj); + this.attributes.push(attribute); + } else { + this.type = { name: obj.constructor.name }; + for (const [name, value] of Object.entries(obj)) { + const data = ArrayBuffer.isView(value) ? Array.from(value) : value; + const attribute = new sentencepiece.Argument(name, data); + this.attributes.push(attribute); + } + } + } +}; + +sentencepiece.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading SentencePiece model.'; + } +}; + +export const ModelFactory = sentencepiece.ModelFactory; + diff --git a/server.js b/server.js new file mode 100644 index 00000000000..cf8cef559a8 --- /dev/null +++ b/server.js @@ -0,0 +1,250 @@ + +// Experimental + +const message = {}; + +message.ModelFactory = class { + + match(context) { + const stream = context.stream; + if (stream) { + const buffer = stream.peek(Math.min(64, stream.length)); + const content = String.fromCharCode.apply(null, buffer); + const match = content.match(/^{\s*"signature":\s*"(.*)"\s*,\s*/); + if (match && match[1].startsWith('netron:')) { + const obj = context.peek('json'); + if (obj && obj.signature && obj.signature.startsWith('netron:')) { + return obj; + } + } + } + return null; + } + + async open(context, target) { + return new message.Model(target); + } +}; + +message.Model = class { + + constructor(data) { + this._format = data.format || ''; + this._producer = data.producer || ''; + this._version = data.version || ''; + this._description = data.description || ''; + this._metadata = (data.metadata || []).map((entry) => { + return { name: entry.name, value: entry.value }; + }); + this._graphs = (data.graphs || []).map((graph) => new message.Graph(graph)); + } + + get format() { + return this._format; + } + + get producer() { + return this._producer; + } + + get version() { + return this._version; + } + + get description() { + return this._description; + } + + get metadata() { + return this._metadata; + } + + get graphs() { + return this._graphs; + } +}; + +message.Graph = class { + + constructor(data) { + this._inputs = []; + this._outputs = []; + this._nodes = []; + const args = data.arguments ? 
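+ /*
+  * Sketch of the experimental JSON interchange accepted by this factory (field
+  * names follow the code below; values illustrative). Nodes and graph
+  * inputs/outputs reference tensors by index into a shared arguments pool:
+  *
+  *   {
+  *     "signature": "netron:example",
+  *     "format": "Example Format",
+  *     "graphs": [ {
+  *       "arguments": [ { "name": "data", "type": { "dataType": "float32",
+  *                        "shape": { "dimensions": [ 1, 3, 224, 224 ] } } } ],
+  *       "inputs":  [ { "name": "data", "arguments": [ 0 ] } ],
+  *       "outputs": [],
+  *       "nodes":   [ { "type": { "name": "Conv" },
+  *                      "inputs": [ { "name": "X", "arguments": [ 0 ] } ] } ]
+  *     } ]
+  *   }
+  */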
data.arguments.map((argument) => new message.Value(argument)) : []; + for (const parameter of data.inputs || []) { + parameter.arguments = parameter.arguments.map((index) => args[index]).filter((argument) => !argument.initializer); + if (parameter.arguments.filter((argument) => !argument.initializer).length > 0) { + this._inputs.push(new message.Argument(parameter)); + } + } + for (const parameter of data.outputs || []) { + parameter.arguments = parameter.arguments.map((index) => args[index]); + if (parameter.arguments.filter((argument) => !argument.initializer).length > 0) { + this._outputs.push(new message.Argument(parameter)); + } + } + for (const node of data.nodes || []) { + for (const parameter of node.inputs || []) { + parameter.arguments = parameter.arguments.map((index) => args[index]); + } + for (const parameter of node.outputs || []) { + parameter.arguments = parameter.arguments.map((index) => args[index]); + } + this._nodes.push(new message.Node(node)); + } + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +message.Argument = class { + + constructor(data) { + this._name = data.name || ''; + this._value = (data.arguments || []); + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +message.Value = class { + + constructor(data) { + this._name= data.name || ''; + this._type = data.type ? new message.TensorType(data.type) : null; + this._initializer = data.initializer ? new message.Tensor(data.initializer) : null; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer && this._initializer.type) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +message.Node = class { + + constructor(data) { + this._type = { name: data.type.name, category: data.type.category }; + this._name = data.name; + this._inputs = (data.inputs || []).map((input) => new message.Argument(input)); + this._outputs = (data.outputs || []).map((output) => new message.Argument(output)); + this._attributes = (data.attributes || []).map((attribute) => new message.Attribute(attribute)); + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } +}; + +message.Attribute = class { + + constructor(data) { + this._type = data.type || ''; + this._name = data.name; + this._value = data.value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get type() { + return this._type; + } +}; + +message.TensorType = class { + + constructor(data) { + this._dataType = data.dataType; + this._shape = new message.TensorShape(data.shape); + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this._dataType + this._shape.toString(); + } +}; + +message.TensorShape = class { + + constructor(data) { + this._dimensions = data.dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + return `[${this._dimensions}]`; + } +}; + +message.Tensor = class { + + constructor() { + } +}; + +message.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'Message Error'; + } +}; + +export const ModelFactory = message.ModelFactory; diff --git a/sklearn-metadata.json 
b/sklearn-metadata.json new file mode 100644 index 00000000000..777d5755bf6 --- /dev/null +++ b/sklearn-metadata.json @@ -0,0 +1,2317 @@ +[ + { + "name": "lightgbm.basic.Booster", + "attributes": [ + { + "default": -1, + "name": "best_iteration" + }, + { + "default": false, + "name": "network" + }, + { + "default": null, + "name": "train_set" + }, + { + "default": false, + "name": "stride" + }, + { + "default": null, + "name": "model_file" + }, + { + "default": null, + "name": "params" + }, + { + "default": null, + "name": "pandas_categorical" + } + ] + }, + { + "name": "lightgbm.sklearn.LGBMClassifier", + "attributes": [ + { + "default": "gbdt", + "name": "boosting_type", + "type": "string" + }, + { + "default": null, + "name": "class_weight" + }, + { + "default": 1, + "name": "colsample_bytree" + }, + { + "default": 0.05, + "name": "learning_rate" + }, + { + "default": -1, + "name": "max_depth" + }, + { + "default": 20, + "name": "min_child_samples" + }, + { + "default": 0.001, + "name": "min_child_weight" + }, + { + "default": 0, + "name": "min_split_gain" + }, + { + "default": 100, + "name": "n_estimators" + }, + { + "default": -1, + "name": "n_jobs" + }, + { + "default": 31, + "name": "num_leaves" + }, + { + "default": null, + "name": "random_state" + }, + { + "default": 0, + "name": "reg_alpha" + }, + { + "default": 0, + "name": "reg_lambda" + }, + { + "default": true, + "name": "silent", + "type": "boolean" + }, + { + "default": 200000, + "name": "subsample_for_bin" + }, + { + "default": 0, + "name": "subsample_freq" + }, + { + "default": 1, + "name": "subsample" + } + ] + }, + { + "name": "lightgbm.sklearn.LGBMRegressor", + "attributes": [ + { + "default": "gbdt", + "name": "boosting_type", + "type": "string" + }, + { + "default": null, + "name": "class_weight" + }, + { + "default": 1, + "name": "colsample_bytree" + }, + { + "default": 0.05, + "name": "learning_rate" + }, + { + "default": -1, + "name": "max_depth" + }, + { + "default": 20, + "name": "min_child_samples" + }, + { + "default": 0.001, + "name": "min_child_weight" + }, + { + "default": 0, + "name": "min_split_gain" + }, + { + "default": 100, + "name": "n_estimators" + }, + { + "default": -1, + "name": "n_jobs" + }, + { + "default": 31, + "name": "num_leaves" + }, + { + "default": null, + "name": "random_state" + }, + { + "default": 0, + "name": "reg_alpha" + }, + { + "default": 0, + "name": "reg_lambda" + }, + { + "default": true, + "name": "silent", + "type": "boolean" + }, + { + "default": 200000, + "name": "subsample_for_bin" + }, + { + "default": 0, + "name": "subsample_freq" + }, + { + "default": 1, + "name": "subsample" + } + ] + }, + { + "name": "sklearn.calibration.CalibratedClassifierCV", + "description": "Probability calibration with isotonic regression or logistic regression.\n\nThis class uses cross-validation to both estimate the parameters of a\nclassifier and subsequently calibrate a classifier. With default\n`ensemble=True`, for each cv split it\nfits a copy of the base estimator to the training subset, and calibrates it\nusing the testing subset. For prediction, predicted probabilities are\naveraged across these individual calibrated classifiers. When\n`ensemble=False`, cross-validation is used to obtain unbiased predictions,\nvia :func:`~sklearn.model_selection.cross_val_predict`, which are then\nused for calibration. For prediction, the base estimator, trained using all\nthe data, is used. 
This is the prediction method implemented when\n`probabilities=True` for :class:`~sklearn.svm.SVC` and :class:`~sklearn.svm.NuSVC`\nestimators (see :ref:`User Guide ` for details).\n\nAlready fitted classifiers can be calibrated via the parameter\n`cv=\"prefit\"`. In this case, no cross-validation is used and all provided\ndata is used for calibration. The user has to take care manually that data\nfor model fitting and calibration are disjoint.\n\nThe calibration is based on the :term:`decision_function` method of the\n`estimator` if it exists, else on :term:`predict_proba`.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": null, + "description": "This parameter is deprecated. Use `estimator` instead.\n\n.. deprecated:: 1.2\nThe parameter `base_estimator` is deprecated in 1.2 and will be\nremoved in 1.4. Use `estimator` instead.\n", + "name": "base_estimator" + }, + { + "default": "sigmoid", + "description": "The method to use for calibration. Can be 'sigmoid' which\ncorresponds to Platt's method (i.e. a logistic regression model) or\n'isotonic' which is a non-parametric approach. It is not advised to\nuse isotonic calibration with too few calibration samples\n``(<<1000)`` since it tends to overfit.\n", + "name": "method" + }, + { + "default": null, + "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross-validation,\n- integer, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, if ``y`` is binary or multiclass,\n:class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is\nneither binary nor multiclass, :class:`~sklearn.model_selection.KFold`\nis used.\n\nRefer to the :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\nIf \"prefit\" is passed, it is assumed that `estimator` has been\nfitted already and all data is used for calibration.\n\n.. versionchanged:: 0.22\n``cv`` default value if None changed from 3-fold to 5-fold.\n", + "name": "cv", + "optional": true, + "type": "int32" + }, + { + "default": null, + "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors.\n\nBase estimator clones are fitted in parallel across cross-validation\niterations. Therefore parallelism happens only when `cv != \"prefit\"`.\n\nSee :term:`Glossary ` for more details.\n\n.. versionadded:: 0.24\n", + "name": "n_jobs", + "type": "int32" + }, + { + "default": true, + "description": "Determines how the calibrator is fitted when `cv` is not `'prefit'`.\nIgnored if `cv='prefit'`.\n\nIf `True`, the `estimator` is fitted using training data, and\ncalibrated using testing data, for each `cv` fold. The final estimator\nis an ensemble of `n_cv` fitted classifier and calibrator pairs, where\n`n_cv` is the number of cross-validation folds. The output is the\naverage predicted probabilities of all pairs.\n\nIf `False`, `cv` is used to compute unbiased predictions, via\n:func:`~sklearn.model_selection.cross_val_predict`, which are then\nused for calibration. At prediction time, the classifier used is the\n`estimator` trained on all the data.\nNote that this method is also internally implemented in\n:mod:`sklearn.svm` estimators with the `probabilities=True` parameter.\n\n.. 
versionadded:: 0.24\n", + "name": "ensemble", + "type": "boolean" + }, + { + "name": "estimator", + "description": "The classifier whose output need to be calibrated to provide more\naccurate `predict_proba` outputs. The default classifier is\na :class:`~sklearn.svm.LinearSVC`.\n\n.. versionadded:: 1.2\n", + "default": null + } + ] + }, + { + "name": "sklearn.compose._column_transformer.ColumnTransformer", + "description": "Applies transformers to columns of an array or pandas DataFrame.\n\nThis estimator allows different columns or column subsets of the input\nto be transformed separately and the features generated by each transformer\nwill be concatenated to form a single feature space.\nThis is useful for heterogeneous or columnar data, to combine several\nfeature extraction mechanisms or transformations into a single transformer.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.20\n", + "attributes": [ + { + "description": "List of (name, transformer, columns) tuples specifying the\ntransformer objects to be applied to subsets of the data.\n\nname : str\nLike in Pipeline and FeatureUnion, this allows the transformer and\nits parameters to be set using ``set_params`` and searched in grid\nsearch.\ntransformer : {'drop', 'passthrough'} or estimator\nEstimator must support :term:`fit` and :term:`transform`.\nSpecial-cased strings 'drop' and 'passthrough' are accepted as\nwell, to indicate to drop the columns or to pass them through\nuntransformed, respectively.\ncolumns : str, array-like of str, int, array-like of int, array-like of bool, slice or callable\nIndexes the data on its second axis. Integers are interpreted as\npositional columns, while strings can reference DataFrame columns\nby name. A scalar string or int should be used where\n``transformer`` expects X to be a 1d array-like (vector),\notherwise a 2d array will be passed to the transformer.\nA callable is passed the input data `X` and can return any of the\nabove. To select multiple columns by name or dtype, you can use\n:obj:`make_column_selector`.\n", + "name": "transformers" + }, + { + "description": "By default, only the specified columns in `transformers` are\ntransformed and combined in the output, and the non-specified\ncolumns are dropped. (default of ``'drop'``).\nBy specifying ``remainder='passthrough'``, all remaining columns that\nwere not specified in `transformers`, but present in the data passed\nto `fit` will be automatically passed through. This subset of columns\nis concatenated with the output of the transformers. For dataframes,\nextra columns not seen during `fit` will be excluded from the output\nof `transform`.\nBy setting ``remainder`` to be an estimator, the remaining\nnon-specified columns will use the ``remainder`` estimator. The\nestimator must support :term:`fit` and :term:`transform`.\nNote that using this feature requires that the DataFrame columns\ninput at :term:`fit` and :term:`transform` have identical order.\n", + "name": "remainder", + "default": "drop" + }, + { + "default": 0.3, + "description": "If the output of the different transformers contains sparse matrices,\nthese will be stacked as a sparse matrix if the overall density is\nlower than this value. Use ``sparse_threshold=0`` to always return\ndense. 
When the transformed output consists of all dense data, the\nstacked result will be dense, and this keyword will be ignored.\n", + "name": "sparse_threshold", + "type": "float32" + }, + { + "default": null, + "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n", + "name": "n_jobs", + "type": "int32" + }, + { + "default": null, + "description": "Multiplicative weights for features per transformer. The output of the\ntransformer is multiplied by these weights. Keys are transformer names,\nvalues the weights.\n", + "name": "transformer_weights" + }, + { + "default": false, + "description": "If True, the time elapsed while fitting each transformer will be\nprinted as it is completed.\n", + "name": "verbose", + "type": "boolean" + }, + { + "name": "prefix_feature_names_out", + "description": "If True, :meth:`get_feature_names_out` will prefix all feature names\nwith the name of the transformer that generated that feature.\nIf False, :meth:`get_feature_names_out` will not prefix any feature\nnames and will error if feature names are not unique.\n\n.. versionadded:: 1.0\n", + "type": "boolean", + "default": true + }, + { + "name": "verbose_feature_names_out", + "description": "If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix\nall feature names with the name of the transformer that generated that\nfeature.\nIf False, :meth:`ColumnTransformer.get_feature_names_out` will not\nprefix any feature names and will error if feature names are not\nunique.\n\n.. versionadded:: 1.0\n", + "type": "boolean", + "default": true + } + ] + }, + { + "name": "sklearn.decomposition._pca.PCA", + "description": "Principal component analysis (PCA).\n\nLinear dimensionality reduction using Singular Value Decomposition of the\ndata to project it to a lower dimensional space. The input data is centered\nbut not scaled for each feature before applying the SVD.\n\nIt uses the LAPACK implementation of the full SVD or a randomized truncated\nSVD by the method of Halko et al. 2009, depending on the shape of the input\ndata and the number of components to extract.\n\nIt can also use the scipy.sparse.linalg ARPACK implementation of the\ntruncated SVD.\n\nNotice that this class does not support sparse input. See\n:class:`TruncatedSVD` for an alternative with sparse data.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": null, + "description": "Number of components to keep.\nif n_components is not set all components are kept::\n\nn_components == min(n_samples, n_features)\n\nIf ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's\nMLE is used to guess the dimension. 
Use of ``n_components == 'mle'``\nwill interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.\n\nIf ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the\nnumber of components such that the amount of variance that needs to be\nexplained is greater than the percentage specified by n_components.\n\nIf ``svd_solver == 'arpack'``, the number of components must be\nstrictly less than the minimum of n_features and n_samples.\n\nHence, the None case results in::\n\nn_components == min(n_samples, n_features) - 1\n", + "name": "n_components", + "type": "int32" + }, + { + "default": true, + "description": "If False, data passed to fit are overwritten and running\nfit(X).transform(X) will not yield the expected results,\nuse fit_transform(X) instead.\n", + "name": "copy", + "type": "boolean" + }, + { + "default": false, + "description": "When True (False by default) the `components_` vectors are multiplied\nby the square root of n_samples and then divided by the singular values\nto ensure uncorrelated outputs with unit component-wise variances.\n\nWhitening will remove some information from the transformed signal\n(the relative variance scales of the components) but can sometime\nimprove the predictive accuracy of the downstream estimators by\nmaking their data respect some hard-wired assumptions.\n", + "name": "whiten", + "optional": true, + "type": "boolean" + }, + { + "default": "auto", + "description": "If auto :\nThe solver is selected by a default policy based on `X.shape` and\n`n_components`: if the input data is larger than 500x500 and the\nnumber of components to extract is lower than 80% of the smallest\ndimension of the data, then the more efficient 'randomized'\nmethod is enabled. Otherwise the exact full SVD is computed and\noptionally truncated afterwards.\nIf full :\nrun exact full SVD calling the standard LAPACK solver via\n`scipy.linalg.svd` and select the components by postprocessing\nIf arpack :\nrun SVD truncated to n_components calling ARPACK solver via\n`scipy.sparse.linalg.svds`. It requires strictly\n0 < n_components < min(X.shape)\nIf randomized :\nrun randomized SVD by the method of Halko et al.\n\n.. versionadded:: 0.18.0\n", + "name": "svd_solver" + }, + { + "default": 0.0, + "description": "Tolerance for singular values computed by svd_solver == 'arpack'.\nMust be of range [0.0, infinity).\n\n.. versionadded:: 0.18.0\n", + "name": "tol", + "optional": true, + "type": "float32" + }, + { + "default": "auto", + "description": "Number of iterations for the power method computed by\nsvd_solver == 'randomized'.\nMust be of range [0, infinity).\n\n.. versionadded:: 0.18.0\n", + "name": "iterated_power" + }, + { + "default": null, + "description": "Used when the 'arpack' or 'randomized' solvers are used. Pass an int\nfor reproducible results across multiple function calls.\nSee :term:`Glossary `.\n\n.. versionadded:: 0.18.0\n", + "name": "random_state", + "type": "int32" + }, + { + "name": "n_oversamples", + "description": "This parameter is only relevant when `svd_solver=\"randomized\"`.\nIt corresponds to the additional number of random vectors to sample the\nrange of `X` so as to ensure proper conditioning. See\n:func:`~sklearn.utils.extmath.randomized_svd` for more details.\n\n.. versionadded:: 1.1\n", + "type": "int32", + "default": 10 + }, + { + "name": "power_iteration_normalizer", + "description": "Power iteration normalizer for randomized SVD solver.\nNot used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`\nfor more details.\n\n.. 
versionadded:: 1.1\n", + "default": "auto" + } + ] + }, + { + "name": "sklearn.decomposition._truncated_svd.TruncatedSVD", + "description": "Dimensionality reduction using truncated SVD (aka LSA).\n\nThis transformer performs linear dimensionality reduction by means of\ntruncated singular value decomposition (SVD). Contrary to PCA, this\nestimator does not center the data before computing the singular value\ndecomposition. This means it can work with sparse matrices\nefficiently.\n\nIn particular, truncated SVD works on term count/tf-idf matrices as\nreturned by the vectorizers in :mod:`sklearn.feature_extraction.text`. In\nthat context, it is known as latent semantic analysis (LSA).\n\nThis estimator supports two algorithms: a fast randomized SVD solver, and\na \"naive\" algorithm that uses ARPACK as an eigensolver on `X * X.T` or\n`X.T * X`, whichever is more efficient.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": 2, + "description": "Desired dimensionality of output data.\nIf algorithm='arpack', must be strictly less than the number of features.\nIf algorithm='randomized', must be less than or equal to the number of features.\nThe default value is useful for visualisation. For LSA, a value of\n100 is recommended.\n", + "name": "n_components", + "type": "int32" + }, + { + "default": "randomized", + "description": "SVD solver to use. Either \"arpack\" for the ARPACK wrapper in SciPy\n(scipy.sparse.linalg.svds), or \"randomized\" for the randomized\nalgorithm due to Halko (2009).\n", + "name": "algorithm", + "type": "string" + }, + { + "default": 5, + "description": "Number of iterations for randomized SVD solver. Not used by ARPACK. The\ndefault is larger than the default in\n:func:`~sklearn.utils.extmath.randomized_svd` to handle sparse\nmatrices that may have large slowly decaying spectrum.\n", + "name": "n_iter", + "optional": true, + "type": "int32" + }, + { + "default": null, + "description": "Used during randomized svd. Pass an int for reproducible results across\nmultiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state", + "type": "int32" + }, + { + "default": 0.0, + "description": "Tolerance for ARPACK. 0 means machine precision. Ignored by randomized\nSVD solver.\n", + "name": "tol", + "optional": true, + "type": "float32" + }, + { + "name": "n_oversamples", + "description": "Number of oversamples for randomized SVD solver. Not used by ARPACK.\nSee :func:`~sklearn.utils.extmath.randomized_svd` for a complete\ndescription.\n\n.. versionadded:: 1.1\n", + "type": "int32", + "default": 10 + }, + { + "name": "power_iteration_normalizer", + "description": "Power iteration normalizer for randomized SVD solver.\nNot used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`\nfor more details.\n\n.. versionadded:: 1.1\n", + "default": "auto" + } + ] + }, + { + "name": "sklearn.decomposition.PCA", + "description": "Principal component analysis (PCA).\n\nLinear dimensionality reduction using Singular Value Decomposition of the\ndata to project it to a lower dimensional space. The input data is centered\nbut not scaled for each feature before applying the SVD.\n\nIt uses the LAPACK implementation of the full SVD or a randomized truncated\nSVD by the method of Halko et al. 2009, depending on the shape of the input\ndata and the number of components to extract.\n\nIt can also use the scipy.sparse.linalg ARPACK implementation of the\ntruncated SVD.\n\nNotice that this class does not support sparse input. 
See\n:class:`TruncatedSVD` for an alternative with sparse data.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": null, + "description": "Number of components to keep.\nIf n_components is not set, all components are kept::\n\nn_components == min(n_samples, n_features)\n\nIf ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's\nMLE is used to guess the dimension. Use of ``n_components == 'mle'``\nwill interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.\n\nIf ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the\nnumber of components such that the amount of variance that needs to be\nexplained is greater than the percentage specified by n_components.\n\nIf ``svd_solver == 'arpack'``, the number of components must be\nstrictly less than the minimum of n_features and n_samples.\n\nHence, the None case results in::\n\nn_components == min(n_samples, n_features) - 1\n", + "name": "n_components", + "type": "int32" + }, + { + "default": true, + "description": "If False, data passed to fit are overwritten and running\nfit(X).transform(X) will not yield the expected results,\nuse fit_transform(X) instead.\n", + "name": "copy", + "type": "boolean" + }, + { + "default": false, + "description": "When True (False by default) the `components_` vectors are multiplied\nby the square root of n_samples and then divided by the singular values\nto ensure uncorrelated outputs with unit component-wise variances.\n\nWhitening will remove some information from the transformed signal\n(the relative variance scales of the components) but can sometimes\nimprove the predictive accuracy of the downstream estimators by\nmaking their data respect some hard-wired assumptions.\n", + "name": "whiten", + "optional": true, + "type": "boolean" + }, + { + "default": "auto", + "description": "If auto :\nThe solver is selected by a default policy based on `X.shape` and\n`n_components`: if the input data is larger than 500x500 and the\nnumber of components to extract is lower than 80% of the smallest\ndimension of the data, then the more efficient 'randomized'\nmethod is enabled. Otherwise the exact full SVD is computed and\noptionally truncated afterwards.\nIf full :\nrun exact full SVD calling the standard LAPACK solver via\n`scipy.linalg.svd` and select the components by postprocessing\nIf arpack :\nrun SVD truncated to n_components calling ARPACK solver via\n`scipy.sparse.linalg.svds`. It requires strictly\n0 < n_components < min(X.shape)\nIf randomized :\nrun randomized SVD by the method of Halko et al.\n\n.. versionadded:: 0.18.0\n", + "name": "svd_solver", + "type": "string" + }, + { + "default": 0.0, + "description": "Tolerance for singular values computed by svd_solver == 'arpack'.\nMust be of range [0.0, infinity).\n\n.. versionadded:: 0.18.0\n", + "name": "tol", + "optional": true, + "type": "float32" + }, + { + "default": "auto", + "description": "Number of iterations for the power method computed by\nsvd_solver == 'randomized'.\nMust be of range [0, infinity).\n\n.. versionadded:: 0.18.0\n", + "name": "iterated_power" + }, + { + "default": null, + "description": "Used when the 'arpack' or 'randomized' solvers are used. Pass an int\nfor reproducible results across multiple function calls.\nSee :term:`Glossary `.\n\n.. 
versionadded:: 0.18.0\n", + "name": "random_state", + "optional": true, + "type": "int32" + }, + { + "name": "n_oversamples", + "description": "This parameter is only relevant when `svd_solver=\"randomized\"`.\nIt corresponds to the additional number of random vectors to sample the\nrange of `X` so as to ensure proper conditioning. See\n:func:`~sklearn.utils.extmath.randomized_svd` for more details.\n\n.. versionadded:: 1.1\n", + "type": "int32", + "default": 10 + }, + { + "name": "power_iteration_normalizer", + "description": "Power iteration normalizer for randomized SVD solver.\nNot used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`\nfor more details.\n\n.. versionadded:: 1.1\n", + "default": "auto" + } + ] + }, + { + "name": "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", + "description": "Linear Discriminant Analysis.\n\nA classifier with a linear decision boundary, generated by fitting class\nconditional densities to the data and using Bayes' rule.\n\nThe model fits a Gaussian density to each class, assuming that all classes\nshare the same covariance matrix.\n\nThe fitted model can also be used to reduce the dimensionality of the input\nby projecting it to the most discriminative directions, using the\n`transform` method.\n\n.. versionadded:: 0.17\n*LinearDiscriminantAnalysis*.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": "svd", + "description": "Solver to use, possible values:\n- 'svd': Singular value decomposition (default).\nDoes not compute the covariance matrix, therefore this solver is\nrecommended for data with a large number of features.\n- 'lsqr': Least squares solution.\nCan be combined with shrinkage or custom covariance estimator.\n- 'eigen': Eigenvalue decomposition.\nCan be combined with shrinkage or custom covariance estimator.\n\n.. versionchanged:: 1.2\n`solver=\"svd\"` now has experimental Array API support. See the\n:ref:`Array API User Guide ` for more details.\n", + "name": "solver" + }, + { + "description": "Shrinkage parameter, possible values:\n- None: no shrinkage (default).\n- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n- float between 0 and 1: fixed shrinkage parameter.\n\nThis should be left to None if `covariance_estimator` is used.\nNote that shrinkage works only with 'lsqr' and 'eigen' solvers.\n", + "name": "shrinkage", + "default": null + }, + { + "default": null, + "description": "The class prior probabilities. By default, the class proportions are\ninferred from the training data.\n", + "name": "priors" + }, + { + "default": null, + "description": "Number of components (<= min(n_classes - 1, n_features)) for\ndimensionality reduction. If None, will be set to\nmin(n_classes - 1, n_features). This parameter only affects the\n`transform` method.\n", + "name": "n_components", + "type": "int32" + }, + { + "default": false, + "description": "If True, explicitly compute the weighted within-class covariance\nmatrix when solver is 'svd'. The matrix is always computed\nand stored for the other solvers.\n\n.. versionadded:: 0.17\n", + "name": "store_covariance", + "type": "boolean" + }, + { + "default": 0.0001, + "description": "Absolute threshold for a singular value of X to be considered\nsignificant, used to estimate the rank of X. Dimensions whose\nsingular values are non-significant are discarded. Only used if\nsolver is 'svd'.\n\n.. 
versionadded:: 0.17\n", + "name": "tol", + "type": "float32" + }, + { + "default": null, + "description": "If not None, `covariance_estimator` is used to estimate\nthe covariance matrices instead of relying on the empirical\ncovariance estimator (with potential shrinkage).\nThe object should have a fit method and a ``covariance_`` attribute\nlike the estimators in :mod:`sklearn.covariance`.\nif None the shrinkage parameter drives the estimate.\n\nThis should be left to None if `shrinkage` is used.\nNote that `covariance_estimator` works only with 'lsqr' and 'eigen'\nsolvers.\n\n.. versionadded:: 0.24\n", + "name": "covariance_estimator" + } + ] + }, + { + "name": "sklearn.ensemble.forest.ExtraTreesClassifier", + "description": "\nAn extra-trees classifier.\n\nThis class implements a meta estimator that fits a number of\nrandomized decision trees (a.k.a. extra-trees) on various sub-samples\nof the dataset and uses averaging to improve the predictive accuracy\nand control over-fitting.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": 100, + "description": "The number of trees in the forest.\n\n.. versionchanged:: 0.22\nThe default value of ``n_estimators`` changed from 10 to 100\nin 0.22.\n", + "name": "n_estimators", + "type": "int32" + }, + { + "default": "\"gini\"", + "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"entropy\" for the information gain.\n", + "name": "criterion" + }, + { + "default": null, + "description": "The maximum depth of the tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples.\n", + "name": "max_depth", + "type": "int32" + }, + { + "default": "2", + "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n`ceil(min_samples_split * n_samples)` are the minimum\nnumber of samples for each split.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_split" + }, + { + "default": "1", + "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n`ceil(min_samples_leaf * n_samples)` are the minimum\nnumber of samples for each node.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_leaf" + }, + { + "default": 0, + "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. 
Samples have\nequal weight when sample_weight is not provided.\n", + "name": "min_weight_fraction_leaf", + "type": "float32" + }, + { + "default": "\"auto\"", + "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n`int(max_features * n_features)` features are considered at each\nsplit.\n- If \"auto\", then `max_features=sqrt(n_features)`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features.\n", + "name": "max_features" + }, + { + "default": null, + "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes.\n", + "name": "max_leaf_nodes", + "type": "int32" + }, + { + "default": 0, + "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\nN_t / N * (impurity - N_t_R / N_t * right_impurity\n- N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19\n", + "name": "min_impurity_decrease", + "type": "float32" + }, + { + "default": null, + "description": "Threshold for early stopping in tree growth. A node will split\nif its impurity is above the threshold, otherwise it is a leaf.\n\n.. deprecated:: 0.19\n``min_impurity_split`` has been deprecated in favor of\n``min_impurity_decrease`` in 0.19. The default value of\n``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it\nwill be removed in 0.25. Use ``min_impurity_decrease`` instead.\n", + "name": "min_impurity_split", + "type": "float32" + }, + { + "default": false, + "description": "Whether bootstrap samples are used when building trees. If False, the\nwhole dataset is used to build each tree.\n", + "name": "bootstrap", + "type": "boolean" + }, + { + "default": false, + "description": "Whether to use out-of-bag samples to estimate\nthe generalization accuracy.\n", + "name": "oob_score", + "type": "boolean" + }, + { + "default": null, + "description": "The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,\n:meth:`decision_path` and :meth:`apply` are all parallelized over the\ntrees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors. 
See :term:`Glossary\n` for more details.\n", + "name": "n_jobs", + "type": "int32" + }, + { + "default": null, + "description": "Controls 3 sources of randomness:\n\n- the bootstrapping of the samples used when building trees\n(if ``bootstrap=True``)\n- the sampling of the features to consider when looking for the best\nsplit at each node (if ``max_features < n_features``)\n- the draw of the splits for each of the `max_features`\n\nSee :term:`Glossary ` for details.\n", + "name": "random_state", + "type": "int32" + }, + { + "default": 0, + "description": "Controls the verbosity when fitting and predicting.\n", + "name": "verbose", + "type": "int32" + }, + { + "default": false, + "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit a whole\nnew forest. See :term:`the Glossary `.\n", + "name": "warm_start", + "type": "boolean" + }, + { + "default": null, + "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nThe \"balanced_subsample\" mode is the same as \"balanced\" except that\nweights are computed based on the bootstrap sample for every tree\ngrown.\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n", + "name": "class_weight" + }, + { + "default": "0.0", + "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22\n", + "name": "ccp_alpha" + }, + { + "default": null, + "description": "If bootstrap is True, the number of samples to draw from X\nto train each base estimator.\n\n- If None (default), then draw `X.shape[0]` samples.\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples. Thus,\n`max_samples` should be in the interval `(0, 1)`.\n\n.. versionadded:: 0.22\n", + "name": "max_samples" + } + ] + }, + { + "name": "sklearn.ensemble.forest.RandomForestClassifier", + "description": "\nA random forest classifier.\n\nA random forest is a meta estimator that fits a number of decision tree\nclassifiers on various sub-samples of the dataset and uses averaging to\nimprove the predictive accuracy and control over-fitting.\nThe sub-sample size is controlled with the `max_samples` parameter if\n`bootstrap=True` (default), otherwise the whole dataset is used to build\neach tree.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": 100, + "description": "The number of trees in the forest.\n\n.. 
versionchanged:: 0.22\nThe default value of ``n_estimators`` changed from 10 to 100\nin 0.22.\n", + "name": "n_estimators", + "type": "int32" + }, + { + "default": "\"gini\"", + "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"entropy\" for the information gain.\nNote: this parameter is tree-specific.\n", + "name": "criterion" + }, + { + "default": null, + "description": "The maximum depth of the tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples.\n", + "name": "max_depth", + "type": "int32" + }, + { + "default": "2", + "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n`ceil(min_samples_split * n_samples)` are the minimum\nnumber of samples for each split.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_split" + }, + { + "default": "1", + "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n`ceil(min_samples_leaf * n_samples)` are the minimum\nnumber of samples for each node.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_leaf" + }, + { + "default": 0, + "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. 
Samples have\nequal weight when sample_weight is not provided.\n", + "name": "min_weight_fraction_leaf", + "type": "float32" + }, + { + "default": "\"auto\"", + "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n`int(max_features * n_features)` features are considered at each\nsplit.\n- If \"auto\", then `max_features=sqrt(n_features)`.\n- If \"sqrt\", then `max_features=sqrt(n_features)` (same as \"auto\").\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features.\n", + "name": "max_features" + }, + { + "default": null, + "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes.\n", + "name": "max_leaf_nodes", + "type": "int32" + }, + { + "default": 0, + "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\nN_t / N * (impurity - N_t_R / N_t * right_impurity\n- N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19\n", + "name": "min_impurity_decrease", + "type": "float32" + }, + { + "default": null, + "description": "Threshold for early stopping in tree growth. A node will split\nif its impurity is above the threshold, otherwise it is a leaf.\n\n.. deprecated:: 0.19\n``min_impurity_split`` has been deprecated in favor of\n``min_impurity_decrease`` in 0.19. The default value of\n``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it\nwill be removed in 0.25. Use ``min_impurity_decrease`` instead.\n\n", + "name": "min_impurity_split", + "type": "float32" + }, + { + "default": true, + "description": "Whether bootstrap samples are used when building trees. If False, the\nwhole dataset is used to build each tree.\n", + "name": "bootstrap", + "type": "boolean" + }, + { + "default": false, + "description": "Whether to use out-of-bag samples to estimate\nthe generalization accuracy.\n", + "name": "oob_score", + "type": "boolean" + }, + { + "default": null, + "description": "The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,\n:meth:`decision_path` and :meth:`apply` are all parallelized over the\ntrees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors. 
See :term:`Glossary\n` for more details.\n", + "name": "n_jobs", + "type": "int32" + }, + { + "default": null, + "description": "Controls both the randomness of the bootstrapping of the samples used\nwhen building trees (if ``bootstrap=True``) and the sampling of the\nfeatures to consider when looking for the best split at each node\n(if ``max_features < n_features``).\nSee :term:`Glossary ` for details.\n", + "name": "random_state" + }, + { + "default": 0, + "description": "Controls the verbosity when fitting and predicting.\n", + "name": "verbose", + "type": "int32" + }, + { + "default": false, + "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit a whole\nnew forest. See :term:`the Glossary `.\n", + "name": "warm_start", + "type": "boolean" + }, + { + "default": null, + "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nThe \"balanced_subsample\" mode is the same as \"balanced\" except that\nweights are computed based on the bootstrap sample for every tree\ngrown.\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n", + "name": "class_weight" + }, + { + "default": "0.0", + "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22\n", + "name": "ccp_alpha" + }, + { + "default": null, + "description": "If bootstrap is True, the number of samples to draw from X\nto train each base estimator.\n\n- If None (default), then draw `X.shape[0]` samples.\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples. Thus,\n`max_samples` should be in the interval `(0, 1)`.\n\n.. versionadded:: 0.22\n", + "name": "max_samples" + } + ] + }, + { + "name": "sklearn.ensemble.weight_boosting.AdaBoostClassifier", + "description": "An AdaBoost classifier.\n\nAn AdaBoost [1] classifier is a meta-estimator that begins by fitting a\nclassifier on the original dataset and then fits additional copies of the\nclassifier on the same dataset but where the weights of incorrectly\nclassified instances are adjusted such that subsequent classifiers focus\nmore on difficult cases.\n\nThis class implements the algorithm known as AdaBoost-SAMME [2].\n\nRead more in the :ref:`User Guide `.\n\n.. 
versionadded:: 0.14\n", + "attributes": [ + { + "default": null, + "description": "The base estimator from which the boosted ensemble is built.\nSupport for sample weighting is required, as well as proper\n``classes_`` and ``n_classes_`` attributes. If ``None``, then\nthe base estimator is ``DecisionTreeClassifier(max_depth=1)``.\n", + "name": "base_estimator" + }, + { + "default": 50, + "description": "The maximum number of estimators at which boosting is terminated.\nIn case of perfect fit, the learning procedure is stopped early.\n", + "name": "n_estimators", + "type": "int32" + }, + { + "default": 1, + "description": "Learning rate shrinks the contribution of each classifier by\n``learning_rate``. There is a trade-off between ``learning_rate`` and\n``n_estimators``.\n", + "name": "learning_rate", + "type": "float32" + }, + { + "default": "SAMME.R", + "description": "If 'SAMME.R' then use the SAMME.R real boosting algorithm.\n``base_estimator`` must support calculation of class probabilities.\nIf 'SAMME' then use the SAMME discrete boosting algorithm.\nThe SAMME.R algorithm typically converges faster than SAMME,\nachieving a lower test error with fewer boosting iterations.\n", + "name": "algorithm" + }, + { + "default": null, + "description": "Controls the random seed given at each `base_estimator` at each\nboosting iteration.\nThus, it is only used when `base_estimator` exposes a `random_state`.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state" + } + ] + }, + { + "name": "sklearn.feature_extraction.text.CountVectorizer", + "description": "Convert a collection of text documents to a matrix of token counts.\n\nThis implementation produces a sparse representation of the counts using\nscipy.sparse.csr_matrix.\n\nIf you do not provide an a-priori dictionary and you do not use an analyzer\nthat does some kind of feature selection then the number of features will\nbe equal to the vocabulary size found by analyzing the data.\n\nFor an efficiency comparison of the different feature extractors, see\n:ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": "content", + "description": "- If `'filename'`, the sequence passed as an argument to fit is\nexpected to be a list of filenames that need reading to fetch\nthe raw content to analyze.\n\n- If `'file'`, the sequence items must have a 'read' method (file-like\nobject) that is called to fetch the bytes in memory.\n\n- If `'content'`, the input is expected to be a sequence of items that\ncan be of type string or byte.\n", + "name": "input", + "type": "string" + }, + { + "default": "utf-8", + "description": "If bytes or files are given to analyze, this encoding is used to\ndecode.\n", + "name": "encoding", + "type": "string" + }, + { + "default": "strict", + "description": "Instruction on what to do if a byte sequence is given to analyze that\ncontains characters not of the given `encoding`. By default, it is\n'strict', meaning that a UnicodeDecodeError will be raised. 
Other\nvalues are 'ignore' and 'replace'.\n", + "name": "decode_error" + }, + { + "default": null, + "description": "Remove accents and perform other character normalization\nduring the preprocessing step.\n'ascii' is a fast method that only works on characters that have\na direct ASCII mapping.\n'unicode' is a slightly slower method that works on any characters.\nNone (default) means no character normalization is performed.\n\nBoth 'ascii' and 'unicode' use NFKD normalization from\n:func:`unicodedata.normalize`.\n", + "name": "strip_accents" + }, + { + "default": true, + "description": "Convert all characters to lowercase before tokenizing.\n", + "name": "lowercase", + "type": "boolean" + }, + { + "default": null, + "description": "Override the preprocessing (strip_accents and lowercase) stage while\npreserving the tokenizing and n-grams generation steps.\nOnly applies if ``analyzer`` is not callable.\n", + "name": "preprocessor" + }, + { + "default": null, + "description": "Override the string tokenization step while preserving the\npreprocessing and n-grams generation steps.\nOnly applies if ``analyzer == 'word'``.\n", + "name": "tokenizer" + }, + { + "default": null, + "description": "If 'english', a built-in stop word list for English is used.\nThere are several known issues with 'english' and you should\nconsider an alternative (see :ref:`stop_words`).\n\nIf a list, that list is assumed to contain stop words, all of which\nwill be removed from the resulting tokens.\nOnly applies if ``analyzer == 'word'``.\n\nIf None, no stop words will be used. In this case, setting `max_df`\nto a higher value, such as in the range (0.7, 1.0), can automatically detect\nand filter stop words based on intra corpus document frequency of terms.\n", + "name": "stop_words", + "type": "string" + }, + { + "default": "r\"(?u)\\\\b\\\\w\\\\w+\\\\b\"", + "description": "Regular expression denoting what constitutes a \"token\", only used\nif ``analyzer == 'word'``. The default regexp selects tokens of 2\nor more alphanumeric characters (punctuation is completely ignored\nand always treated as a token separator).\n\nIf there is a capturing group in token_pattern then the\ncaptured group content, not the entire match, becomes the token.\nAt most one capturing group is permitted.\n", + "name": "token_pattern", + "type": "string" + }, + { + "default": "(1, 1)", + "description": "The lower and upper boundary of the range of n-values for different\nword n-grams or char n-grams to be extracted. All values of n such\nthat min_n <= n <= max_n will be used. For example an\n``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means\nunigrams and bigrams, and ``(2, 2)`` means only bigrams.\nOnly applies if ``analyzer`` is not callable.\n", + "name": "ngram_range" + }, + { + "default": "word", + "description": "Whether the feature should be made of word n-grams or character\nn-grams.\nOption 'char_wb' creates character n-grams only from text inside\nword boundaries; n-grams at the edges of words are padded with space.\n\nIf a callable is passed it is used to extract the sequence of features\nout of the raw, unprocessed input.\n\n.. 
versionchanged:: 0.21\n\nSince v0.21, if ``input`` is ``filename`` or ``file``, the data is\nfirst read from the file and then passed to the given callable\nanalyzer.\n", + "name": "analyzer", + "type": "string" + }, + { + "default": "1.0", + "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly higher than the given threshold (corpus-specific\nstop words).\nIf float, the parameter represents a proportion of documents, integer\nabsolute counts.\nThis parameter is ignored if vocabulary is not None.\n", + "name": "max_df" + }, + { + "default": "1", + "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly lower than the given threshold. This value is also\ncalled cut-off in the literature.\nIf float, the parameter represents a proportion of documents, integer\nabsolute counts.\nThis parameter is ignored if vocabulary is not None.\n", + "name": "min_df" + }, + { + "default": null, + "description": "If not None, build a vocabulary that only consider the top\n`max_features` ordered by term frequency across the corpus.\nOtherwise, all features are used.\n\nThis parameter is ignored if vocabulary is not None.\n", + "name": "max_features", + "type": "int32" + }, + { + "default": null, + "description": "Either a Mapping (e.g., a dict) where keys are terms and values are\nindices in the feature matrix, or an iterable over terms. If not\ngiven, a vocabulary is determined from the input documents. Indices\nin the mapping should not be repeated and should not have any gap\nbetween 0 and the largest index.\n", + "name": "vocabulary", + "optional": true + }, + { + "default": false, + "description": "If True, all non zero counts are set to 1. This is useful for discrete\nprobabilistic models that model binary events rather than integer\ncounts.\n", + "name": "binary", + "type": "boolean" + }, + { + "default": "np.int64", + "description": "Type of the matrix returned by fit_transform() or transform().\n", + "name": "dtype", + "optional": true + } + ] + }, + { + "name": "sklearn.feature_extraction.text.TfidfVectorizer", + "description": "Convert a collection of raw documents to a matrix of TF-IDF features.\n\nEquivalent to :class:`CountVectorizer` followed by\n:class:`TfidfTransformer`.\n\nFor an example of usage, see\n:ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`.\n\nFor an efficiency comparison of the different feature extractors, see\n:ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": "content", + "description": "- If `'filename'`, the sequence passed as an argument to fit is\nexpected to be a list of filenames that need reading to fetch\nthe raw content to analyze.\n\n- If `'file'`, the sequence items must have a 'read' method (file-like\nobject) that is called to fetch the bytes in memory.\n\n- If `'content'`, the input is expected to be a sequence of items that\ncan be of type string or byte.\n", + "name": "input", + "type": "string" + }, + { + "default": "utf-8", + "description": "If bytes or files are given to analyze, this encoding is used to\ndecode.\n", + "name": "encoding", + "type": "string" + }, + { + "default": "strict", + "description": "Instruction on what to do if a byte sequence is given to analyze that\ncontains characters not of the given `encoding`. By default, it is\n'strict', meaning that a UnicodeDecodeError will be raised. 
Other\nvalues are 'ignore' and 'replace'.\n", + "name": "decode_error" + }, + { + "default": null, + "description": "Remove accents and perform other character normalization\nduring the preprocessing step.\n'ascii' is a fast method that only works on characters that have\na direct ASCII mapping.\n'unicode' is a slightly slower method that works on any characters.\nNone (default) means no character normalization is performed.\n\nBoth 'ascii' and 'unicode' use NFKD normalization from\n:func:`unicodedata.normalize`.\n", + "name": "strip_accents" + }, + { + "default": true, + "description": "Convert all characters to lowercase before tokenizing.\n", + "name": "lowercase", + "type": "boolean" + }, + { + "default": null, + "description": "Override the preprocessing (string transformation) stage while\npreserving the tokenizing and n-grams generation steps.\nOnly applies if ``analyzer`` is not callable.\n", + "name": "preprocessor" + }, + { + "default": null, + "description": "Override the string tokenization step while preserving the\npreprocessing and n-grams generation steps.\nOnly applies if ``analyzer == 'word'``.\n", + "name": "tokenizer" + }, + { + "description": "Whether the feature should be made of word or character n-grams.\nOption 'char_wb' creates character n-grams only from text inside\nword boundaries; n-grams at the edges of words are padded with space.\n\nIf a callable is passed it is used to extract the sequence of features\nout of the raw, unprocessed input.\n\n.. versionchanged:: 0.21\nSince v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data\nis first read from the file and then passed to the given callable\nanalyzer.\n", + "name": "analyzer", + "default": "word" + }, + { + "default": null, + "description": "If a string, it is passed to _check_stop_list and the appropriate stop\nlist is returned. 'english' is currently the only supported string\nvalue.\nThere are several known issues with 'english' and you should\nconsider an alternative (see :ref:`stop_words`).\n\nIf a list, that list is assumed to contain stop words, all of which\nwill be removed from the resulting tokens.\nOnly applies if ``analyzer == 'word'``.\n\nIf None, no stop words will be used. In this case, setting `max_df`\nto a higher value, such as in the range (0.7, 1.0), can automatically detect\nand filter stop words based on intra corpus document frequency of terms.\n", + "name": "stop_words" + }, + { + "default": "r\"(?u)\\\\b\\\\w\\\\w+\\\\b\"", + "description": "Regular expression denoting what constitutes a \"token\", only used\nif ``analyzer == 'word'``. The default regexp selects tokens of 2\nor more alphanumeric characters (punctuation is completely ignored\nand always treated as a token separator).\n\nIf there is a capturing group in token_pattern then the\ncaptured group content, not the entire match, becomes the token.\nAt most one capturing group is permitted.\n", + "name": "token_pattern", + "type": "string" + }, + { + "default": "(1, 1)", + "description": "The lower and upper boundary of the range of n-values for different\nn-grams to be extracted. All values of n such that min_n <= n <= max_n\nwill be used. 
For example an ``ngram_range`` of ``(1, 1)`` means only\nunigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means\nonly bigrams.\nOnly applies if ``analyzer`` is not callable.\n", + "name": "ngram_range" + }, + { + "default": "1.0", + "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly higher than the given threshold (corpus-specific\nstop words).\nIf float in range [0.0, 1.0], the parameter represents a proportion of\ndocuments, integer absolute counts.\nThis parameter is ignored if vocabulary is not None.\n", + "name": "max_df" + }, + { + "default": "1", + "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly lower than the given threshold. This value is also\ncalled cut-off in the literature.\nIf float in range of [0.0, 1.0], the parameter represents a proportion\nof documents, integer absolute counts.\nThis parameter is ignored if vocabulary is not None.\n", + "name": "min_df" + }, + { + "default": null, + "description": "If not None, build a vocabulary that only consider the top\n`max_features` ordered by term frequency across the corpus.\nOtherwise, all features are used.\n\nThis parameter is ignored if vocabulary is not None.\n", + "name": "max_features", + "type": "int32" + }, + { + "default": null, + "description": "Either a Mapping (e.g., a dict) where keys are terms and values are\nindices in the feature matrix, or an iterable over terms. If not\ngiven, a vocabulary is determined from the input documents.\n", + "name": "vocabulary", + "optional": true + }, + { + "default": false, + "description": "If True, all non-zero term counts are set to 1. This does not mean\noutputs will have only 0/1 values, only that the tf term in tf-idf\nis binary. (Set `binary` to True, `use_idf` to False and\n`norm` to None to get 0/1 outputs).\n", + "name": "binary", + "type": "boolean" + }, + { + "default": "float64", + "description": "Type of the matrix returned by fit_transform() or transform().\n", + "name": "dtype", + "optional": true + }, + { + "default": "l2", + "description": "Each output row will have unit norm, either:\n\n- 'l2': Sum of squares of vector elements is 1. The cosine\nsimilarity between two vectors is their dot product when l2 norm has\nbeen applied.\n- 'l1': Sum of absolute values of vector elements is 1.\nSee :func:`~sklearn.preprocessing.normalize`.\n- None: No normalization.\n", + "name": "norm" + }, + { + "default": true, + "description": "Enable inverse-document-frequency reweighting. If False, idf(t) = 1.\n", + "name": "use_idf", + "type": "boolean" + }, + { + "default": true, + "description": "Smooth idf weights by adding one to document frequencies, as if an\nextra document was seen containing every term in the collection\nexactly once. Prevents zero divisions.\n", + "name": "smooth_idf", + "type": "boolean" + }, + { + "default": false, + "description": "Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).\n", + "name": "sublinear_tf", + "type": "boolean" + } + ] + }, + { + "name": "sklearn.feature_selection._univariate_selection.SelectKBest", + "description": "Select features according to the k highest scores.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": "f_classif", + "description": "Function taking two arrays X and y, and returning a pair of arrays\n(scores, pvalues) or a single array with scores.\nDefault is f_classif (see below \"See Also\"). The default function only\nworks with classification tasks.\n\n.. 
versionadded:: 0.18\n", + "name": "score_func" + }, + { + "default": "10", + "description": "Number of top features to select.\nThe \"all\" option bypasses selection, for use in a parameter search.\n", + "name": "k", + "optional": true + } + ] + }, + { + "name": "sklearn.impute._base.SimpleImputer", + "description": "Univariate imputer for completing missing values with simple strategies.\n\nReplace missing values using a descriptive statistic (e.g. mean, median, or\nmost frequent) along each column, or using a constant value.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.20\n`SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`\nestimator which is now removed.\n", + "attributes": [ + { + "description": "The placeholder for the missing values. All occurrences of\n`missing_values` will be imputed. For pandas' dataframes with\nnullable integer dtypes with missing values, `missing_values`\ncan be set to either `np.nan` or `pd.NA`.\n", + "name": "missing_values", + "default": "np.nan" + }, + { + "default": "mean", + "description": "The imputation strategy.\n\n- If \"mean\", then replace missing values using the mean along\neach column. Can only be used with numeric data.\n- If \"median\", then replace missing values using the median along\neach column. Can only be used with numeric data.\n- If \"most_frequent\", then replace missing using the most frequent\nvalue along each column. Can be used with strings or numeric data.\nIf there is more than one such value, only the smallest is returned.\n- If \"constant\", then replace missing values with fill_value. Can be\nused with strings or numeric data.\n\n.. versionadded:: 0.20\nstrategy=\"constant\" for fixed value imputation.\n", + "name": "strategy", + "type": "string" + }, + { + "default": null, + "description": "When strategy == \"constant\", `fill_value` is used to replace all\noccurrences of missing_values. For string or object data types,\n`fill_value` must be a string.\nIf `None`, `fill_value` will be 0 when imputing numerical\ndata and \"missing_value\" for strings or object data types.\n", + "name": "fill_value" + }, + { + "default": 0, + "description": "Controls the verbosity of the imputer.\n\n.. deprecated:: 1.1\nThe 'verbose' parameter was deprecated in version 1.1 and will be\nremoved in 1.3. A warning will always be raised upon the removal of\nempty columns in the future version.\n", + "name": "verbose", + "type": "int32" + }, + { + "default": true, + "description": "If True, a copy of X will be created. If False, imputation will\nbe done in-place whenever possible. Note that, in the following cases,\na new copy will always be made, even if `copy=False`:\n\n- If `X` is not an array of floating values;\n- If `X` is encoded as a CSR matrix;\n- If `add_indicator=True`.\n", + "name": "copy", + "type": "boolean" + }, + { + "default": false, + "description": "If True, a :class:`MissingIndicator` transform will stack onto output\nof the imputer's transform. This allows a predictive estimator\nto account for missingness despite imputation. 
If a feature has no\nmissing values at fit/train time, the feature won't appear on\nthe missing indicator even if there are missing values at\ntransform/test time.\n", + "name": "add_indicator", + "type": "boolean" + }, + { + "name": "keep_empty_features", + "default": false, + "description": "If True, features that consist exclusively of missing values when\n`fit` is called are returned in results when `transform` is called.\nThe imputed value is always `0` except when `strategy=\"constant\"`\nin which case `fill_value` will be used instead.\n\n.. versionadded:: 1.2\n" + } + ] + }, + { + "name": "sklearn.linear_model._logistic.LogisticRegression", + "description": "\nLogistic Regression (aka logit, MaxEnt) classifier.\n\nIn the multiclass case, the training algorithm uses the one-vs-rest (OvR)\nscheme if the 'multi_class' option is set to 'ovr', and uses the\ncross-entropy loss if the 'multi_class' option is set to 'multinomial'.\n(Currently the 'multinomial' option is supported only by the 'lbfgs',\n'sag', 'saga' and 'newton-cg' solvers.)\n\nThis class implements regularized logistic regression using the\n'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note\nthat regularization is applied by default**. It can handle both dense\nand sparse input. Use C-ordered arrays or CSR matrices containing 64-bit\nfloats for optimal performance; any other input format will be converted\n(and copied).\n\nThe 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization\nwith primal formulation, or no regularization. The 'liblinear' solver\nsupports both L1 and L2 regularization, with a dual formulation only for\nthe L2 penalty. The Elastic-Net regularization is only supported by the\n'saga' solver.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": "l2", + "description": "Specify the norm of the penalty:\n\n- `None`: no penalty is added;\n- `'l2'`: add a L2 penalty term and it is the default choice;\n- `'l1'`: add a L1 penalty term;\n- `'elasticnet'`: both L1 and L2 penalty terms are added.\n\n.. warning::\nSome penalties may not work with some solvers. See the parameter\n`solver` below, to know the compatibility between the penalty and\nsolver.\n\n.. versionadded:: 0.19\nl1 penalty with SAGA solver (allowing 'multinomial' + L1)\n", + "name": "penalty" + }, + { + "default": false, + "description": "Dual (constrained) or primal (regularized, see also\n:ref:`this equation `) formulation. Dual formulation\nis only implemented for l2 penalty with liblinear solver. Prefer dual=False when\nn_samples > n_features.\n", + "name": "dual", + "type": "boolean" + }, + { + "default": 0.0001, + "description": "Tolerance for stopping criteria.\n", + "name": "tol", + "type": "float32" + }, + { + "default": 1.0, + "description": "Inverse of regularization strength; must be a positive float.\nLike in support vector machines, smaller values specify stronger\nregularization.\n", + "name": "C", + "type": "float32" + }, + { + "default": true, + "description": "Specifies if a constant (a.k.a. bias or intercept) should be\nadded to the decision function.\n", + "name": "fit_intercept", + "type": "boolean" + }, + { + "default": 1.0, + "description": "Useful only when the solver 'liblinear' is used\nand self.fit_intercept is set to True. In this case, x becomes\n[x, self.intercept_scaling],\ni.e. 
a \"synthetic\" feature with constant value equal to\nintercept_scaling is appended to the instance vector.\nThe intercept becomes ``intercept_scaling * synthetic_feature_weight``.\n\nNote! the synthetic feature weight is subject to l1/l2 regularization\nas all other features.\nTo lessen the effect of regularization on synthetic feature weight\n(and therefore on the intercept) intercept_scaling has to be increased.\n", + "name": "intercept_scaling", + "type": "float32" + }, + { + "default": null, + "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n\n.. versionadded:: 0.17\n*class_weight='balanced'*\n", + "name": "class_weight" + }, + { + "default": null, + "description": "Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the\ndata. See :term:`Glossary ` for details.\n", + "name": "random_state", + "type": "int32" + }, + { + "default": "lbfgs", + "description": "\nAlgorithm to use in the optimization problem. Default is 'lbfgs'.\nTo choose a solver, you might want to consider the following aspects:\n\n- For small datasets, 'liblinear' is a good choice, whereas 'sag'\nand 'saga' are faster for large ones;\n- For multiclass problems, only 'newton-cg', 'sag', 'saga' and\n'lbfgs' handle multinomial loss;\n- 'liblinear' is limited to one-versus-rest schemes.\n- 'newton-cholesky' is a good choice for `n_samples` >> `n_features`,\nespecially with one-hot encoded categorical features with rare\ncategories. Note that it is limited to binary classification and the\none-versus-rest reduction for multiclass classification. Be aware that\nthe memory usage of this solver has a quadratic dependency on\n`n_features` because it explicitly computes the Hessian matrix.\n\n.. warning::\nThe choice of the algorithm depends on the penalty chosen.\nSupported penalties by solver:\n\n- 'lbfgs' - ['l2', None]\n- 'liblinear' - ['l1', 'l2']\n- 'newton-cg' - ['l2', None]\n- 'newton-cholesky' - ['l2', None]\n- 'sag' - ['l2', None]\n- 'saga' - ['elasticnet', 'l1', 'l2', None]\n\n.. note::\n'sag' and 'saga' fast convergence is only guaranteed on features\nwith approximately the same scale. You can preprocess the data with\na scaler from :mod:`sklearn.preprocessing`.\n\n.. seealso::\nRefer to the User Guide for more information regarding\n:class:`LogisticRegression` and more specifically the\n:ref:`Table `\nsummarizing solver/penalty supports.\n\n.. versionadded:: 0.17\nStochastic Average Gradient descent solver.\n.. versionadded:: 0.19\nSAGA solver.\n.. versionchanged:: 0.22\nThe default solver changed from 'liblinear' to 'lbfgs' in 0.22.\n.. versionadded:: 1.2\nnewton-cholesky solver.\n", + "name": "solver" + }, + { + "default": 100, + "description": "Maximum number of iterations taken for the solvers to converge.\n", + "name": "max_iter", + "type": "int32" + }, + { + "default": "auto", + "description": "If the option chosen is 'ovr', then a binary problem is fit for each\nlabel. For 'multinomial' the loss minimised is the multinomial loss fit\nacross the entire probability distribution, *even when the data is\nbinary*. 
'multinomial' is unavailable when solver='liblinear'.\n'auto' selects 'ovr' if the data is binary, or if solver='liblinear',\nand otherwise selects 'multinomial'.\n\n.. versionadded:: 0.18\nStochastic Average Gradient descent solver for 'multinomial' case.\n.. versionchanged:: 0.22\nDefault changed from 'ovr' to 'auto' in 0.22.\n", + "name": "multi_class" + }, + { + "default": 0, + "description": "For the liblinear and lbfgs solvers set verbose to any positive\nnumber for verbosity.\n", + "name": "verbose", + "type": "int32" + }, + { + "default": false, + "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nUseless for liblinear solver. See :term:`the Glossary `.\n\n.. versionadded:: 0.17\n*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.\n", + "name": "warm_start", + "type": "boolean" + }, + { + "default": null, + "description": "Number of CPU cores used when parallelizing over classes if\nmulti_class='ovr'. This parameter is ignored when the ``solver`` is\nset to 'liblinear' regardless of whether 'multi_class' is specified or\nnot. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors.\nSee :term:`Glossary ` for more details.\n", + "name": "n_jobs", + "type": "int32" + }, + { + "default": null, + "description": "The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only\nused if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent\nto using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent\nto using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a\ncombination of L1 and L2.\n", + "name": "l1_ratio", + "type": "float32" + } + ] + }, + { + "name": "sklearn.linear_model.LassoLars", + "description": "Lasso model fit with Least Angle Regression a.k.a. Lars.\n\nIt is a Linear Model trained with an L1 prior as regularizer.\n\nThe optimization objective for Lasso is::\n\n(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": 1.0, + "description": "Constant that multiplies the penalty term. Defaults to 1.0.\n``alpha = 0`` is equivalent to an ordinary least square, solved\nby :class:`LinearRegression`. For numerical reasons, using\n``alpha = 0`` with the LassoLars object is not advised and you\nshould prefer the LinearRegression object.\n", + "name": "alpha", + "type": "float32" + }, + { + "default": true, + "description": "Whether to calculate the intercept for this model. If set\nto False, no intercept will be used in calculations\n(i.e. data is expected to be centered).\n", + "name": "fit_intercept", + "type": "boolean" + }, + { + "default": "False", + "description": "Sets the verbosity amount.\n", + "name": "verbose", + "optional": true + }, + { + "default": false, + "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. versionchanged:: 1.2\ndefault changed from True to False in 1.2.\n\n.. 
deprecated:: 1.2\n``normalize`` was deprecated in version 1.2 and will be removed in 1.4.\n", + "name": "normalize", + "optional": true, + "type": "boolean" + }, + { + "default": "auto", + "description": "Whether to use a precomputed Gram matrix to speed up\ncalculations. If set to ``'auto'`` let us decide. The Gram\nmatrix can also be passed as argument.\n", + "name": "precompute", + "type": "boolean" + }, + { + "default": 500, + "description": "Maximum number of iterations to perform.\n", + "name": "max_iter", + "optional": true, + "type": "int32" + }, + { + "description": "The machine-precision regularization in the computation of the\nCholesky diagonal factors. Increase this for very ill-conditioned\nsystems. Unlike the ``tol`` parameter in some iterative\noptimization-based algorithms, this parameter does not control\nthe tolerance of the optimization.\n", + "name": "eps", + "optional": true, + "type": "float32", + "default": null + }, + { + "default": true, + "description": "If True, X will be copied; else, it may be overwritten.\n", + "name": "copy_X", + "optional": true, + "type": "boolean" + }, + { + "default": true, + "description": "If ``True`` the full path is stored in the ``coef_path_`` attribute.\nIf you compute the solution for a large problem or many targets,\nsetting ``fit_path`` to ``False`` will lead to a speedup, especially\nwith a small alpha.\n", + "name": "fit_path", + "type": "boolean" + }, + { + "default": false, + "description": "Restrict coefficients to be >= 0. Be aware that you might want to\nremove fit_intercept which is set True by default.\nUnder the positive restriction the model coefficients will not converge\nto the ordinary-least-squares solution for small values of alpha.\nOnly coefficients up to the smallest alpha value (``alphas_[alphas_ >\n0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso\nalgorithm are typically in congruence with the solution of the\ncoordinate descent Lasso estimator.\n", + "name": "positive", + "type": "boolean" + }, + { + "default": null, + "description": "Upper bound on a uniform noise parameter to be added to the\n`y` values, to satisfy the model's assumption of\none-at-a-time computations. Might help with stability.\n\n.. versionadded:: 0.23\n", + "name": "jitter", + "type": "float32" + }, + { + "default": null, + "description": "Determines random number generation for jittering. Pass an int\nfor reproducible output across multiple function calls.\nSee :term:`Glossary `. Ignored if `jitter` is None.\n\n.. versionadded:: 0.23\n", + "name": "random_state", + "type": "int32" + } + ] + }, + { + "name": "sklearn.linear_model.LinearRegression", + "description": "\nOrdinary least squares Linear Regression.\n\nLinearRegression fits a linear model with coefficients w = (w1, ..., wp)\nto minimize the residual sum of squares between the observed targets in\nthe dataset, and the targets predicted by the linear approximation.\n", + "attributes": [ + { + "default": true, + "description": "Whether to calculate the intercept for this model. If set\nto False, no intercept will be used in calculations\n(i.e. 
data is expected to be centered).\n", + "name": "fit_intercept", + "optional": true, + "type": "boolean" + }, + { + "default": false, + "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n`normalize` was deprecated in version 1.0 and will be\nremoved in 1.2.\n", + "name": "normalize", + "optional": true, + "type": "boolean" + }, + { + "default": true, + "description": "If True, X will be copied; else, it may be overwritten.\n", + "name": "copy_X", + "optional": true, + "type": "boolean" + }, + { + "default": null, + "description": "The number of jobs to use for the computation. This will only provide\nspeedup in case of sufficiently large problems, that is if firstly\n`n_targets > 1` and secondly `X` is sparse or if `positive` is set\nto `True`. ``None`` means 1 unless in a\n:obj:`joblib.parallel_backend` context. ``-1`` means using all\nprocessors. See :term:`Glossary ` for more details.\n", + "name": "n_jobs", + "optional": true, + "type": "int32" + }, + { + "default": false, + "description": "When set to ``True``, forces the coefficients to be positive. This\noption is only supported for dense arrays.\n\n.. versionadded:: 0.24\n", + "name": "positive", + "type": "boolean" + } + ] + }, + { + "name": "sklearn.linear_model.LogisticRegression", + "description": "\nLogistic Regression (aka logit, MaxEnt) classifier.\n\nIn the multiclass case, the training algorithm uses the one-vs-rest (OvR)\nscheme if the 'multi_class' option is set to 'ovr', and uses the\ncross-entropy loss if the 'multi_class' option is set to 'multinomial'.\n(Currently the 'multinomial' option is supported only by the 'lbfgs',\n'sag', 'saga' and 'newton-cg' solvers.)\n\nThis class implements regularized logistic regression using the\n'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note\nthat regularization is applied by default**. It can handle both dense\nand sparse input. Use C-ordered arrays or CSR matrices containing 64-bit\nfloats for optimal performance; any other input format will be converted\n(and copied).\n\nThe 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization\nwith primal formulation, or no regularization. The 'liblinear' solver\nsupports both L1 and L2 regularization, with a dual formulation only for\nthe L2 penalty. The Elastic-Net regularization is only supported by the\n'saga' solver.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": "l2", + "description": "Specify the norm of the penalty:\n\n- `None`: no penalty is added;\n- `'l2'`: add a L2 penalty term and it is the default choice;\n- `'l1'`: add a L1 penalty term;\n- `'elasticnet'`: both L1 and L2 penalty terms are added.\n\n.. warning::\nSome penalties may not work with some solvers. See the parameter\n`solver` below, to know the compatibility between the penalty and\nsolver.\n\n.. versionadded:: 0.19\nl1 penalty with SAGA solver (allowing 'multinomial' + L1)\n", + "name": "penalty", + "optional": true + }, + { + "default": false, + "description": "Dual (constrained) or primal (regularized, see also\n:ref:`this equation `) formulation. Dual formulation\nis only implemented for l2 penalty with liblinear solver. 
Prefer dual=False when\nn_samples > n_features.\n", + "name": "dual", + "optional": true, + "type": "boolean" + }, + { + "default": 0.0001, + "description": "Tolerance for stopping criteria.\n", + "name": "tol", + "optional": true, + "type": "float32" + }, + { + "default": 1.0, + "description": "Inverse of regularization strength; must be a positive float.\nLike in support vector machines, smaller values specify stronger\nregularization.\n", + "name": "C", + "optional": true, + "type": "float32" + }, + { + "default": true, + "description": "Specifies if a constant (a.k.a. bias or intercept) should be\nadded to the decision function.\n", + "name": "fit_intercept", + "optional": true, + "type": "boolean" + }, + { + "default": 1.0, + "description": "Useful only when the solver 'liblinear' is used\nand self.fit_intercept is set to True. In this case, x becomes\n[x, self.intercept_scaling],\ni.e. a \"synthetic\" feature with constant value equal to\nintercept_scaling is appended to the instance vector.\nThe intercept becomes ``intercept_scaling * synthetic_feature_weight``.\n\nNote: the synthetic feature weight is subject to l1/l2 regularization\nas are all other features.\nTo lessen the effect of regularization on the synthetic feature weight\n(and therefore on the intercept), intercept_scaling has to be increased.\n", + "name": "intercept_scaling", + "optional": true, + "type": "float32" + }, + { + "default": null, + "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n\n.. versionadded:: 0.17\n*class_weight='balanced'*\n", + "name": "class_weight", + "optional": true + }, + { + "default": null, + "description": "Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the\ndata. See :term:`Glossary <random_state>` for details.\n", + "name": "random_state", + "optional": true, + "type": "int32" + }, + { + "default": "lbfgs", + "description": "\nAlgorithm to use in the optimization problem. Default is 'lbfgs'.\nTo choose a solver, you might want to consider the following aspects:\n\n- For small datasets, 'liblinear' is a good choice, whereas 'sag'\nand 'saga' are faster for large ones;\n- For multiclass problems, only 'newton-cg', 'sag', 'saga' and\n'lbfgs' handle multinomial loss;\n- 'liblinear' is limited to one-versus-rest schemes.\n- 'newton-cholesky' is a good choice for `n_samples` >> `n_features`,\nespecially with one-hot encoded categorical features with rare\ncategories. Note that it is limited to binary classification and the\none-versus-rest reduction for multiclass classification. Be aware that\nthe memory usage of this solver has a quadratic dependency on\n`n_features` because it explicitly computes the Hessian matrix.\n\n.. warning::\nThe choice of the algorithm depends on the penalty chosen.\nSupported penalties by solver:\n\n- 'lbfgs' - ['l2', None]\n- 'liblinear' - ['l1', 'l2']\n- 'newton-cg' - ['l2', None]\n- 'newton-cholesky' - ['l2', None]\n- 'sag' - ['l2', None]\n- 'saga' - ['elasticnet', 'l1', 'l2', None]\n\n.. note::\n'sag' and 'saga' fast convergence is only guaranteed on features\nwith approximately the same scale. 
You can preprocess the data with\na scaler from :mod:`sklearn.preprocessing`.\n\n.. seealso::\nRefer to the User Guide for more information regarding\n:class:`LogisticRegression` and more specifically the\n:ref:`Table `\nsummarizing solver/penalty supports.\n\n.. versionadded:: 0.17\nStochastic Average Gradient descent solver.\n.. versionadded:: 0.19\nSAGA solver.\n.. versionchanged:: 0.22\nThe default solver changed from 'liblinear' to 'lbfgs' in 0.22.\n.. versionadded:: 1.2\nnewton-cholesky solver.\n", + "name": "solver", + "optional": true + }, + { + "default": 100, + "description": "Maximum number of iterations taken for the solvers to converge.\n", + "name": "max_iter", + "optional": true, + "type": "int32" + }, + { + "default": "auto", + "description": "If the option chosen is 'ovr', then a binary problem is fit for each\nlabel. For 'multinomial' the loss minimised is the multinomial loss fit\nacross the entire probability distribution, *even when the data is\nbinary*. 'multinomial' is unavailable when solver='liblinear'.\n'auto' selects 'ovr' if the data is binary, or if solver='liblinear',\nand otherwise selects 'multinomial'.\n\n.. versionadded:: 0.18\nStochastic Average Gradient descent solver for 'multinomial' case.\n.. versionchanged:: 0.22\nDefault changed from 'ovr' to 'auto' in 0.22.\n", + "name": "multi_class", + "optional": true + }, + { + "default": 0, + "description": "For the liblinear and lbfgs solvers set verbose to any positive\nnumber for verbosity.\n", + "name": "verbose", + "optional": true, + "type": "int32" + }, + { + "default": false, + "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nUseless for liblinear solver. See :term:`the Glossary <warm_start>`.\n\n.. versionadded:: 0.17\n*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.\n", + "name": "warm_start", + "optional": true, + "type": "boolean" + }, + { + "default": null, + "description": "Number of CPU cores used when parallelizing over classes if\nmulti_class='ovr'. This parameter is ignored when the ``solver`` is\nset to 'liblinear' regardless of whether 'multi_class' is specified or\nnot. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors.\nSee :term:`Glossary <n_jobs>` for more details.\n", + "name": "n_jobs", + "optional": true, + "type": "int32" + }, + { + "default": null, + "description": "The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only\nused if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent\nto using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent\nto using ``penalty='l1'``. 
For ``0 < l1_ratio < 1``, the penalty is a\ncombination of L1 and L2.\n", + "name": "l1_ratio", + "optional": true, + "type": "float32" + } + ] + }, + { + "name": "sklearn.model_selection._search.GridSearchCV", + "description": "Exhaustive search over specified parameter values for an estimator.\n\nImportant members are fit, predict.\n\nGridSearchCV implements a \"fit\" and a \"score\" method.\nIt also implements \"score_samples\", \"predict\", \"predict_proba\",\n\"decision_function\", \"transform\" and \"inverse_transform\" if they are\nimplemented in the estimator used.\n\nThe parameters of the estimator used to apply these methods are optimized\nby cross-validated grid-search over a parameter grid.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "description": "This is assumed to implement the scikit-learn estimator interface.\nEither estimator needs to provide a ``score`` function,\nor ``scoring`` must be passed.\n", + "name": "estimator" + }, + { + "description": "Dictionary with parameters names (`str`) as keys and lists of\nparameter settings to try as values, or a list of such\ndictionaries, in which case the grids spanned by each dictionary\nin the list are explored. This enables searching over any sequence\nof parameter settings.\n", + "name": "param_grid" + }, + { + "default": null, + "description": "Strategy to evaluate the performance of the cross-validated model on\nthe test set.\n\nIf `scoring` represents a single score, one can use:\n\n- a single string (see :ref:`scoring_parameter`);\n- a callable (see :ref:`scoring`) that returns a single value.\n\nIf `scoring` represents multiple scores, one can use:\n\n- a list or tuple of unique strings;\n- a callable returning a dictionary where the keys are the metric\nnames and the values are the metric scores;\n- a dictionary with metric names as keys and callables as values.\n\nSee :ref:`multimetric_grid_search` for an example.\n", + "name": "scoring" + }, + { + "default": null, + "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary <n_jobs>`\nfor more details.\n\n.. versionchanged:: v0.20\n`n_jobs` default changed from 1 to None\n", + "name": "n_jobs", + "type": "int32" + }, + { + "description": "Controls the number of jobs that get dispatched during parallel\nexecution. Reducing this number can be useful to avoid an\nexplosion of memory consumption when more jobs get dispatched\nthan CPUs can process. This parameter can be:\n\n- None, in which case all the jobs are immediately\ncreated and spawned. Use this for lightweight and\nfast-running jobs, to avoid delays due to on-demand\nspawning of the jobs\n\n- An int, giving the exact number of total jobs that are\nspawned\n\n- A str, giving an expression as a function of n_jobs,\nas in '2*n_jobs'\n", + "name": "pre_dispatch", + "default": "2*n_jobs" + }, + { + "default": false, + "description": "If True, return the average score across folds, weighted by the number\nof samples in each test set. In this case, the data is assumed to be\nidentically distributed across the folds, and the loss minimized is\nthe total loss per sample, and not the mean loss across the folds.\n\n.. 
deprecated:: 0.22\nParameter ``iid`` is deprecated in 0.22 and will be removed in 0.24\n", + "name": "iid", + "type": "boolean" + }, + { + "default": null, + "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross validation,\n- integer, to specify the number of folds in a `(Stratified)KFold`,\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, if the estimator is a classifier and ``y`` is\neither binary or multiclass, :class:`StratifiedKFold` is used. In all\nother cases, :class:`KFold` is used. These splitters are instantiated\nwith `shuffle=False` so the splits will be the same across calls.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.22\n``cv`` default value if None changed from 3-fold to 5-fold.\n", + "name": "cv", + "type": "int32" + }, + { + "default": "True", + "description": "Refit an estimator using the best found parameters on the whole\ndataset.\n\nFor multiple metric evaluation, this needs to be a `str` denoting the\nscorer that would be used to find the best parameters for refitting\nthe estimator at the end.\n\nWhere there are considerations other than maximum score in\nchoosing a best estimator, ``refit`` can be set to a function which\nreturns the selected ``best_index_`` given ``cv_results_``. In that\ncase, the ``best_estimator_`` and ``best_params_`` will be set\naccording to the returned ``best_index_`` while the ``best_score_``\nattribute will not be available.\n\nThe refitted estimator is made available at the ``best_estimator_``\nattribute and permits using ``predict`` directly on this\n``GridSearchCV`` instance.\n\nAlso for multiple metric evaluation, the attributes ``best_index_``,\n``best_score_`` and ``best_params_`` will only be available if\n``refit`` is set and all of them will be determined w.r.t this specific\nscorer.\n\nSee ``scoring`` parameter to know more about multiple metric\nevaluation.\n\nSee :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_digits.py`\nto see how to design a custom selection strategy using a callable\nvia `refit`.\n\n.. versionchanged:: 0.20\nSupport for callable added.\n", + "name": "refit", + "type": "boolean" + }, + { + "description": "Controls the verbosity: the higher, the more messages.\n\n- >1 : the computation time for each fold and parameter candidate is\ndisplayed;\n- >2 : the score is also displayed;\n- >3 : the fold and candidate parameter indexes are also displayed\ntogether with the starting time of the computation.\n", + "name": "verbose", + "type": "int32" + }, + { + "description": "Value to assign to the score if an error occurs in estimator fitting.\nIf set to 'raise', the error is raised. If a numeric value is given,\nFitFailedWarning is raised. This parameter does not affect the refit\nstep, which will always raise the error.\n", + "name": "error_score", + "default": "np.nan" + }, + { + "default": false, + "description": "If ``False``, the ``cv_results_`` attribute will not include training\nscores.\nComputing training scores is used to get insights on how different\nparameter settings impact the overfitting/underfitting trade-off.\nHowever computing the scores on the training set can be computationally\nexpensive and is not strictly required to select the parameters that\nyield the best generalization performance.\n\n.. versionadded:: 0.19\n\n.. 
versionchanged:: 0.21\nDefault value was changed from ``True`` to ``False``\n", + "name": "return_train_score", + "type": "boolean" + } + ] + }, + { + "name": "sklearn.naive_bayes.BernoulliNB", + "description": "Naive Bayes classifier for multivariate Bernoulli models.\n\nLike MultinomialNB, this classifier is suitable for discrete data. The\ndifference is that while MultinomialNB works with occurrence counts,\nBernoulliNB is designed for binary/boolean features.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": "1.0", + "description": "Additive (Laplace/Lidstone) smoothing parameter\n(set alpha=0 and force_alpha=True, for no smoothing).\n", + "name": "alpha", + "optional": true, + "type": "float32" + }, + { + "default": "0.0", + "description": "Threshold for binarizing (mapping to booleans) of sample features.\nIf None, input is presumed to already consist of binary vectors.\n", + "name": "binarize", + "optional": true + }, + { + "default": true, + "description": "Whether to learn class prior probabilities or not.\nIf false, a uniform prior will be used.\n", + "name": "fit_prior", + "optional": true, + "type": "boolean" + }, + { + "default": null, + "description": "Prior probabilities of the classes. If specified, the priors are not\nadjusted according to the data.\n", + "name": "class_prior", + "optional": true + }, + { + "name": "force_alpha", + "description": "If False and alpha is less than 1e-10, it will set alpha to\n1e-10. If True, alpha will remain unchanged. This may cause\nnumerical errors if alpha is too close to 0.\n\n.. versionadded:: 1.2\n.. versionchanged:: 1.4\nThe default value of `force_alpha` changed to `True`.\n", + "type": "boolean", + "default": true + } + ] + }, + { + "name": "sklearn.naive_bayes.ComplementNB", + "description": "The Complement Naive Bayes classifier described in Rennie et al. (2003).\n\nThe Complement Naive Bayes classifier was designed to correct the \"severe\nassumptions\" made by the standard Multinomial Naive Bayes classifier. It is\nparticularly suited for imbalanced data sets.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.20\n", + "attributes": [ + { + "default": "1.0", + "description": "Additive (Laplace/Lidstone) smoothing parameter\n(set alpha=0 and force_alpha=True, for no smoothing).\n", + "name": "alpha", + "optional": true, + "type": "float32" + }, + { + "default": true, + "description": "Only used in edge case with a single class in the training set.\n", + "name": "fit_prior", + "optional": true, + "type": "boolean" + }, + { + "default": null, + "description": "Prior probabilities of the classes. Not used.\n", + "name": "class_prior", + "optional": true + }, + { + "default": false, + "description": "Whether or not a second normalization of the weights is performed. The\ndefault behavior mirrors the implementations found in Mahout and Weka,\nwhich do not follow the full algorithm described in Table 9 of the\npaper.\n", + "name": "norm", + "optional": true, + "type": "boolean" + }, + { + "name": "force_alpha", + "description": "If False and alpha is less than 1e-10, it will set alpha to\n1e-10. If True, alpha will remain unchanged. This may cause\nnumerical errors if alpha is too close to 0.\n\n.. versionadded:: 1.2\n.. 
versionchanged:: 1.4\nThe default value of `force_alpha` changed to `True`.\n", + "type": "boolean", + "default": true + } + ] + }, + { + "name": "sklearn.naive_bayes.MultinomialNB", + "description": "\nNaive Bayes classifier for multinomial models.\n\nThe multinomial Naive Bayes classifier is suitable for classification with\ndiscrete features (e.g., word counts for text classification). The\nmultinomial distribution normally requires integer feature counts. However,\nin practice, fractional counts such as tf-idf may also work.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": "1.0", + "description": "Additive (Laplace/Lidstone) smoothing parameter\n(set alpha=0 and force_alpha=True, for no smoothing).\n", + "name": "alpha", + "optional": true, + "type": "float32" + }, + { + "default": true, + "description": "Whether to learn class prior probabilities or not.\nIf false, a uniform prior will be used.\n", + "name": "fit_prior", + "optional": true, + "type": "boolean" + }, + { + "default": null, + "description": "Prior probabilities of the classes. If specified, the priors are not\nadjusted according to the data.\n", + "name": "class_prior", + "optional": true + }, + { + "name": "force_alpha", + "description": "If False and alpha is less than 1e-10, it will set alpha to\n1e-10. If True, alpha will remain unchanged. This may cause\nnumerical errors if alpha is too close to 0.\n\n.. versionadded:: 1.2\n.. versionchanged:: 1.4\nThe default value of `force_alpha` changed to `True`.\n", + "type": "boolean", + "default": true + } + ] + }, + { + "name": "sklearn.neighbors.KNeighborsClassifier", + "description": "Classifier implementing the k-nearest neighbors vote.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": 5, + "description": "Number of neighbors to use by default for :meth:`kneighbors` queries.\n", + "name": "n_neighbors", + "optional": true, + "type": "int32" + }, + { + "default": "uniform", + "description": "Weight function used in prediction. Possible values:\n\n- 'uniform' : uniform weights. All points in each neighborhood\nare weighted equally.\n- 'distance' : weight points by the inverse of their distance.\nIn this case, closer neighbors of a query point will have a\ngreater influence than neighbors which are further away.\n- [callable] : a user-defined function which accepts an\narray of distances, and returns an array of the same shape\ncontaining the weights.\n\nRefer to the example entitled\n:ref:`sphx_glr_auto_examples_neighbors_plot_classification.py`\nshowing the impact of the `weights` parameter on the decision\nboundary.\n", + "name": "weights", + "optional": true + }, + { + "default": "auto", + "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\nbased on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force.\n", + "name": "algorithm", + "optional": true + }, + { + "default": 30, + "description": "Leaf size passed to BallTree or KDTree. This can affect the\nspeed of the construction and query, as well as the memory\nrequired to store the tree. 
The optimal value depends on the\nnature of the problem.\n", + "name": "leaf_size", + "optional": true, + "type": "int32" + }, + { + "default": 2.0, + "description": "Power parameter for the Minkowski metric. When p = 1, this is equivalent\nto using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.\nFor arbitrary p, minkowski_distance (l_p) is used. This parameter is expected\nto be positive.\n", + "name": "p", + "optional": true, + "type": "int32" + }, + { + "default": "minkowski", + "description": "Metric to use for distance computation. Default is \"minkowski\", which\nresults in the standard Euclidean distance when p = 2. See the\ndocumentation of `scipy.spatial.distance\n`_ and\nthe metrics listed in\n:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric\nvalues.\n\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square during fit. X may be a :term:`sparse graph`, in which\ncase only \"nonzero\" elements may be considered neighbors.\n\nIf metric is a callable function, it takes two arrays representing 1D\nvectors as inputs and must return one value indicating the distance\nbetween those vectors. This works for Scipy's metrics, but is less\nefficient than passing the metric name as a string.\n", + "name": "metric" + }, + { + "default": null, + "description": "Additional keyword arguments for the metric function.\n", + "name": "metric_params", + "optional": true + }, + { + "default": null, + "description": "The number of parallel jobs to run for neighbors search.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary <n_jobs>`\nfor more details.\nDoesn't affect :meth:`fit` method.\n", + "name": "n_jobs", + "optional": true, + "type": "int32" + } + ] + }, + { + "name": "sklearn.neighbors.KNeighborsRegressor", + "description": "Regression based on k-nearest neighbors.\n\nThe target is predicted by local interpolation of the targets\nassociated with the nearest neighbors in the training set.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.9\n", + "attributes": [ + { + "default": 5, + "description": "Number of neighbors to use by default for :meth:`kneighbors` queries.\n", + "name": "n_neighbors", + "optional": true, + "type": "int32" + }, + { + "description": "Weight function used in prediction. Possible values:\n\n- 'uniform' : uniform weights. All points in each neighborhood\nare weighted equally.\n- 'distance' : weight points by the inverse of their distance.\nIn this case, closer neighbors of a query point will have a\ngreater influence than neighbors which are further away.\n- [callable] : a user-defined function which accepts an\narray of distances, and returns an array of the same shape\ncontaining the weights.\n\nUniform weights are used by default.\n", + "name": "weights", + "default": "uniform" + }, + { + "default": "auto", + "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\nbased on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force.\n", + "name": "algorithm", + "optional": true + }, + { + "default": 30, + "description": "Leaf size passed to BallTree or KDTree. 
This can affect the\nspeed of the construction and query, as well as the memory\nrequired to store the tree. The optimal value depends on the\nnature of the problem.\n", + "name": "leaf_size", + "optional": true, + "type": "int32" + }, + { + "default": 2.0, + "description": "Power parameter for the Minkowski metric. When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n", + "name": "p", + "optional": true, + "type": "int32" + }, + { + "default": "minkowski", + "description": "Metric to use for distance computation. Default is \"minkowski\", which\nresults in the standard Euclidean distance when p = 2. See the\ndocumentation of `scipy.spatial.distance\n`_ and\nthe metrics listed in\n:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric\nvalues.\n\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square during fit. X may be a :term:`sparse graph`, in which\ncase only \"nonzero\" elements may be considered neighbors.\n\nIf metric is a callable function, it takes two arrays representing 1D\nvectors as inputs and must return one value indicating the distance\nbetween those vectors. This works for Scipy's metrics, but is less\nefficient than passing the metric name as a string.\n\nIf metric is a DistanceMetric object, it will be passed directly to\nthe underlying computation routines.\n", + "name": "metric" + }, + { + "default": null, + "description": "Additional keyword arguments for the metric function.\n", + "name": "metric_params", + "optional": true + }, + { + "default": null, + "description": "The number of parallel jobs to run for neighbors search.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\nDoesn't affect :meth:`fit` method.\n", + "name": "n_jobs", + "optional": true, + "type": "int32" + } + ] + }, + { + "name": "sklearn.neural_network.multilayer_perceptron.MLPRegressor", + "description": "Multi-layer Perceptron regressor.\n\nThis model optimizes the squared-loss using LBFGS or stochastic gradient\ndescent.\n\n.. 
versionadded:: 0.18\n", + "attributes": [ + { + "default": "(100,)", + "description": "The ith element represents the number of neurons in the ith\nhidden layer.\n", + "name": "hidden_layer_sizes" + }, + { + "default": "relu", + "description": "Activation function for the hidden layer.\n\n- 'identity', no-op activation, useful to implement linear bottleneck,\nreturns f(x) = x\n\n- 'logistic', the logistic sigmoid function,\nreturns f(x) = 1 / (1 + exp(-x)).\n\n- 'tanh', the hyperbolic tan function,\nreturns f(x) = tanh(x).\n\n- 'relu', the rectified linear unit function,\nreturns f(x) = max(0, x)\n", + "name": "activation" + }, + { + "default": "adam", + "description": "The solver for weight optimization.\n\n- 'lbfgs' is an optimizer in the family of quasi-Newton methods.\n\n- 'sgd' refers to stochastic gradient descent.\n\n- 'adam' refers to a stochastic gradient-based optimizer proposed by\nKingma, Diederik, and Jimmy Ba\n\nNote: The default solver 'adam' works pretty well on relatively\nlarge datasets (with thousands of training samples or more) in terms of\nboth training time and validation score.\nFor small datasets, however, 'lbfgs' can converge faster and perform\nbetter.\n", + "name": "solver" + }, + { + "default": 0.0001, + "description": "L2 penalty (regularization term) parameter.\n", + "name": "alpha", + "type": "float32" + }, + { + "default": "auto", + "description": "Size of minibatches for stochastic optimizers.\nIf the solver is 'lbfgs', the classifier will not use minibatch.\nWhen set to \"auto\", `batch_size=min(200, n_samples)`\n", + "name": "batch_size", + "type": "int32" + }, + { + "default": "constant", + "description": "Learning rate schedule for weight updates.\n\n- 'constant' is a constant learning rate given by\n'learning_rate_init'.\n\n- 'invscaling' gradually decreases the learning rate ``learning_rate_``\nat each time step 't' using an inverse scaling exponent of 'power_t'.\neffective_learning_rate = learning_rate_init / pow(t, power_t)\n\n- 'adaptive' keeps the learning rate constant to\n'learning_rate_init' as long as training loss keeps decreasing.\nEach time two consecutive epochs fail to decrease training loss by at\nleast tol, or fail to increase validation score by at least tol if\n'early_stopping' is on, the current learning rate is divided by 5.\n\nOnly used when solver='sgd'.\n", + "name": "learning_rate" + }, + { + "default": "0.001", + "description": "The initial learning rate used. It controls the step-size\nin updating the weights. Only used when solver='sgd' or 'adam'.\n", + "name": "learning_rate_init" + }, + { + "default": "0.5", + "description": "The exponent for inverse scaling learning rate.\nIt is used in updating effective learning rate when the learning_rate\nis set to 'invscaling'. Only used when solver='sgd'.\n", + "name": "power_t" + }, + { + "default": 200, + "description": "Maximum number of iterations. The solver iterates until convergence\n(determined by 'tol') or this number of iterations. For stochastic\nsolvers ('sgd', 'adam'), note that this determines the number of epochs\n(how many times each data point will be used), not the number of\ngradient steps.\n", + "name": "max_iter", + "type": "int32" + }, + { + "default": true, + "description": "Whether to shuffle samples in each iteration. 
Only used when\nsolver='sgd' or 'adam'.\n", + "name": "shuffle", + "type": "boolean" + }, + { + "default": null, + "description": "Determines random number generation for weights and bias\ninitialization, train-test split if early stopping is used, and batch\nsampling when solver='sgd' or 'adam'.\nPass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state", + "type": "int32" + }, + { + "default": 0.0001, + "description": "Tolerance for the optimization. When the loss or score is not improving\nby at least ``tol`` for ``n_iter_no_change`` consecutive iterations,\nunless ``learning_rate`` is set to 'adaptive', convergence is\nconsidered to be reached and training stops.\n", + "name": "tol", + "type": "float32" + }, + { + "default": false, + "description": "Whether to print progress messages to stdout.\n", + "name": "verbose", + "type": "boolean" + }, + { + "default": false, + "description": "When set to True, reuse the solution of the previous\ncall to fit as initialization, otherwise, just erase the\nprevious solution. See :term:`the Glossary `.\n", + "name": "warm_start", + "type": "boolean" + }, + { + "default": 0.9, + "description": "Momentum for gradient descent update. Should be between 0 and 1. Only\nused when solver='sgd'.\n", + "name": "momentum", + "type": "float32" + }, + { + "default": true, + "description": "Whether to use Nesterov's momentum. Only used when solver='sgd' and\nmomentum > 0.\n", + "name": "nesterovs_momentum", + "type": "boolean" + }, + { + "default": false, + "description": "Whether to use early stopping to terminate training when validation\nscore is not improving. If set to true, it will automatically set\naside 10% of training data as validation and terminate training when\nvalidation score is not improving by at least ``tol`` for\n``n_iter_no_change`` consecutive epochs.\nOnly effective when solver='sgd' or 'adam'\n", + "name": "early_stopping", + "type": "boolean" + }, + { + "default": 0.1, + "description": "The proportion of training data to set aside as validation set for\nearly stopping. Must be between 0 and 1.\nOnly used if early_stopping is True\n", + "name": "validation_fraction", + "type": "float32" + }, + { + "default": 0.9, + "description": "Exponential decay rate for estimates of first moment vector in adam,\nshould be in [0, 1). Only used when solver='adam'\n", + "name": "beta_1", + "type": "float32" + }, + { + "default": 0.999, + "description": "Exponential decay rate for estimates of second moment vector in adam,\nshould be in [0, 1). Only used when solver='adam'\n", + "name": "beta_2", + "type": "float32" + }, + { + "default": 1e-08, + "description": "Value for numerical stability in adam. Only used when solver='adam'\n", + "name": "epsilon", + "type": "float32" + }, + { + "default": 10, + "description": "Maximum number of epochs to not meet ``tol`` improvement.\nOnly effective when solver='sgd' or 'adam'\n\n.. versionadded:: 0.20\n", + "name": "n_iter_no_change", + "type": "int32" + }, + { + "default": 15000, + "description": "Only used when solver='lbfgs'. Maximum number of function calls.\nThe solver iterates until convergence (determined by 'tol'), number\nof iterations reaches max_iter, or this number of function calls.\nNote that number of function calls will be greater than or equal to\nthe number of iterations for the MLPRegressor.\n\n.. 
versionadded:: 0.22\n", + "name": "max_fun", + "type": "int32" + } + ] + }, + { + "name": "sklearn.pipeline.FeatureUnion", + "description": "Concatenates results of multiple transformer objects.\n\nThis estimator applies a list of transformer objects in parallel to the\ninput data, then concatenates the results. This is useful to combine\nseveral feature extraction mechanisms into a single transformer.\n\nParameters of the transformers may be set using its name and the parameter\nname separated by a '__'. A transformer may be replaced entirely by\nsetting the parameter with its name to another transformer, removed by\nsetting to 'drop' or disabled by setting to 'passthrough' (features are\npassed without transformation).\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.13\n", + "attributes": [ + { + "description": "List of transformer objects to be applied to the data. The first\nhalf of each tuple is the name of the transformer. The transformer can\nbe 'drop' for it to be ignored or can be 'passthrough' for features to\nbe passed unchanged.\n\n.. versionadded:: 1.1\nAdded the option `\"passthrough\"`.\n\n.. versionchanged:: 0.22\nDeprecated `None` as a transformer in favor of 'drop'.\n", + "name": "transformer_list" + }, + { + "default": null, + "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n\n.. versionchanged:: v0.20\n`n_jobs` default changed from 1 to None\n", + "name": "n_jobs", + "type": "int32" + }, + { + "default": null, + "description": "Multiplicative weights for features per transformer.\nKeys are transformer names, values the weights.\nRaises ValueError if key not present in ``transformer_list``.\n", + "name": "transformer_weights" + }, + { + "default": false, + "description": "If True, the time elapsed while fitting each transformer will be\nprinted as it is completed.\n", + "name": "verbose", + "type": "boolean" + } + ] + }, + { + "name": "sklearn.preprocessing._data.StandardScaler", + "description": "Standardize features by removing the mean and scaling to unit variance.\n\nThe standard score of a sample `x` is calculated as:\n\nz = (x - u) / s\n\nwhere `u` is the mean of the training samples or zero if `with_mean=False`,\nand `s` is the standard deviation of the training samples or one if\n`with_std=False`.\n\nCentering and scaling happen independently on each feature by computing\nthe relevant statistics on the samples in the training set. Mean and\nstandard deviation are then stored to be used on later data using\n:meth:`transform`.\n\nStandardization of a dataset is a common requirement for many\nmachine learning estimators: they might behave badly if the\nindividual features do not more or less look like standard normally\ndistributed data (e.g. Gaussian with 0 mean and unit variance).\n\nFor instance many elements used in the objective function of\na learning algorithm (such as the RBF kernel of Support Vector\nMachines or the L1 and L2 regularizers of linear models) assume that\nall features are centered around 0 and have variance in the same\norder. If a feature has a variance that is orders of magnitude larger\nthan others, it might dominate the objective function and make the\nestimator unable to learn from other features correctly as expected.\n\n`StandardScaler` is sensitive to outliers, and the features may scale\ndifferently from each other in the presence of outliers. 
For an example\nvisualization, refer to :ref:`Compare StandardScaler with other scalers\n`.\n\nThis scaler can also be applied to sparse CSR or CSC matrices by passing\n`with_mean=False` to avoid breaking the sparsity structure of the data.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": true, + "description": "If False, try to avoid a copy and do inplace scaling instead.\nThis is not guaranteed to always work inplace; e.g. if the data is\nnot a NumPy array or scipy.sparse CSR matrix, a copy may still be\nreturned.\n", + "name": "copy", + "optional": true, + "type": "boolean" + }, + { + "default": true, + "description": "If True, center the data before scaling.\nThis does not work (and will raise an exception) when attempted on\nsparse matrices, because centering them entails building a dense\nmatrix which in common use cases is likely to be too large to fit in\nmemory.\n", + "name": "with_mean", + "type": "boolean" + }, + { + "default": true, + "description": "If True, scale the data to unit variance (or equivalently,\nunit standard deviation).\n", + "name": "with_std", + "type": "boolean" + } + ] + }, + { + "name": "sklearn.preprocessing._encoders.OneHotEncoder", + "description": "\nEncode categorical features as a one-hot numeric array.\n\nThe input to this transformer should be an array-like of integers or\nstrings, denoting the values taken on by categorical (discrete) features.\nThe features are encoded using a one-hot (aka 'one-of-K' or 'dummy')\nencoding scheme. This creates a binary column for each category and\nreturns a sparse matrix or dense array (depending on the ``sparse_output``\nparameter).\n\nBy default, the encoder derives the categories based on the unique values\nin each feature. Alternatively, you can also specify the `categories`\nmanually.\n\nThis encoding is needed for feeding categorical data to many scikit-learn\nestimators, notably linear models and SVMs with the standard kernels.\n\nNote: a one-hot encoding of y labels should use a LabelBinarizer\ninstead.\n\nRead more in the :ref:`User Guide `.\nFor a comparison of different encoders, refer to:\n:ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`.\n", + "attributes": [ + { + "description": "Categories (unique values) per feature:\n\n- 'auto' : Determine categories automatically from the training data.\n- list : ``categories[i]`` holds the categories expected in the ith\ncolumn. The passed categories should not mix strings and numeric\nvalues within a single feature, and should be sorted in case of\nnumeric values.\n\nThe used categories can be found in the ``categories_`` attribute.\n\n.. versionadded:: 0.20\n", + "name": "categories", + "default": "auto" + }, + { + "description": "Specifies a methodology to use to drop one of the categories per\nfeature. This is useful in situations where perfectly collinear\nfeatures cause problems, such as when feeding the resulting data\ninto an unregularized linear regression model.\n\nHowever, dropping one category breaks the symmetry of the original\nrepresentation and can therefore induce a bias in downstream models,\nfor instance for penalized linear classification or regression models.\n\n- None : retain all features (the default).\n- 'first' : drop the first category in each feature. If only one\ncategory is present, the feature will be dropped entirely.\n- 'if_binary' : drop the first category in each feature with two\ncategories. 
Features with 1 or more than 2 categories are\nleft intact.\n- array : ``drop[i]`` is the category in feature ``X[:, i]`` that\nshould be dropped.\n\nWhen `max_categories` or `min_frequency` is configured to group\ninfrequent categories, the dropping behavior is handled after the\ngrouping.\n\n.. versionadded:: 0.21\nThe parameter `drop` was added in 0.21.\n\n.. versionchanged:: 0.23\nThe option `drop='if_binary'` was added in 0.23.\n\n.. versionchanged:: 1.1\nSupport for dropping infrequent categories.\n", + "name": "drop", + "default": null + }, + { + "default": true, + "description": "Will return sparse matrix if set True else will return an array.\n\n.. deprecated:: 1.2\n`sparse` is deprecated in 1.2 and will be removed in 1.4. Use\n`sparse_output` instead.\n", + "name": "sparse", + "type": "boolean" + }, + { + "default": "np.float64", + "description": "Desired dtype of output.\n", + "name": "dtype" + }, + { + "default": "error", + "description": "Specifies the way unknown categories are handled during :meth:`transform`.\n\n- 'error' : Raise an error if an unknown category is present during transform.\n- 'ignore' : When an unknown category is encountered during\ntransform, the resulting one-hot encoded columns for this feature\nwill be all zeros. In the inverse transform, an unknown category\nwill be denoted as None.\n- 'infrequent_if_exist' : When an unknown category is encountered\nduring transform, the resulting one-hot encoded columns for this\nfeature will map to the infrequent category if it exists. The\ninfrequent category will be mapped to the last position in the\nencoding. During inverse transform, an unknown category will be\nmapped to the category denoted `'infrequent'` if it exists. If the\n`'infrequent'` category does not exist, then :meth:`transform` and\n:meth:`inverse_transform` will handle an unknown category as with\n`handle_unknown='ignore'`. Infrequent categories exist based on\n`min_frequency` and `max_categories`. Read more in the\n:ref:`User Guide `.\n\n.. versionchanged:: 1.1\n`'infrequent_if_exist'` was added to automatically handle unknown\ncategories and infrequent categories.\n", + "name": "handle_unknown" + }, + { + "name": "min_frequency", + "description": "Specifies the minimum frequency below which a category will be\nconsidered infrequent.\n\n- If `int`, categories with a smaller cardinality will be considered\ninfrequent.\n\n- If `float`, categories with a smaller cardinality than\n`min_frequency * n_samples` will be considered infrequent.\n\n.. versionadded:: 1.1\nRead more in the :ref:`User Guide `.\n", + "default": null + }, + { + "name": "max_categories", + "description": "Specifies an upper limit to the number of output features for each input\nfeature when considering infrequent categories. If there are infrequent\ncategories, `max_categories` includes the category representing the\ninfrequent categories along with the frequent categories. If `None`,\nthere is no limit to the number of output features.\n\n.. versionadded:: 1.1\nRead more in the :ref:`User Guide `.\n", + "type": "int32", + "default": null + }, + { + "name": "sparse_output", + "default": true, + "description": "When ``True``, it returns a :class:`scipy.sparse.csr_matrix`,\ni.e. a sparse matrix in \"Compressed Sparse Row\" (CSR) format.\n\n.. versionadded:: 1.2\n`sparse` was renamed to `sparse_output`\n" + }, + { + "name": "feature_name_combiner", + "default": "\"concat\"", + "description": "Callable with signature `def callable(input_feature, category)` that returns a\nstring. 
This is used to create feature names to be returned by\n:meth:`get_feature_names_out`.\n\n`\"concat\"` concatenates encoded feature name and category with\n`feature + \"_\" + str(category)`. E.g., feature X with values 1, 6, 7 creates\nfeature names `X_1, X_6, X_7`.\n\n.. versionadded:: 1.3\n" + } + ] + }, + { + "name": "sklearn.preprocessing.Binarizer", + "description": "Binarize data (set feature values to 0 or 1) according to a threshold.\n\nValues greater than the threshold map to 1, while values less than\nor equal to the threshold map to 0. With the default threshold of 0,\nonly positive values map to 1.\n\nBinarization is a common operation on text count data where the\nanalyst can decide to only consider the presence or absence of a\nfeature rather than a quantified number of occurrences for instance.\n\nIt can also be used as a pre-processing step for estimators that\nconsider boolean random variables (e.g. modelled using the Bernoulli\ndistribution in a Bayesian setting).\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": true, + "description": "Set to False to perform inplace binarization and avoid a copy (if\nthe input is already a numpy array or a scipy.sparse CSR matrix).\n", + "name": "copy", + "optional": true, + "type": "boolean" + }, + { + "default": 0.0, + "description": "Feature values below or equal to this are replaced by 0, above it by 1.\nThreshold may not be less than 0 for operations on sparse matrices.\n", + "name": "threshold", + "optional": true, + "type": "float32" + } + ] + }, + { + "name": "sklearn.preprocessing.LabelEncoder", + "description": "Encode target labels with value between 0 and n_classes-1.\n\nThis transformer should be used to encode target values, *i.e.* `y`, and\nnot the input `X`.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.12\n" + }, + { + "name": "sklearn.preprocessing.MultiLabelBinarizer", + "description": "Transform between iterable of iterables and a multilabel format.\n\nAlthough a list of sets or tuples is a very intuitive format for multilabel\ndata, it is unwieldy to process. This transformer converts between this\nintuitive format and the supported multilabel format: a (samples x classes)\nbinary matrix indicating the presence of a class label.\n", + "attributes": [ + { + "default": null, + "description": "Indicates an ordering for the class labels.\nAll entries should be unique (cannot contain duplicate classes).\n", + "name": "classes", + "optional": true + }, + { + "default": false, + "description": "Set to True if output binary array is desired in CSR sparse format.\n", + "name": "sparse_output", + "type": "boolean" + } + ] + }, + { + "name": "sklearn.svm.classes.SVC", + "description": "C-Support Vector Classification.\n\nThe implementation is based on libsvm. The fit time scales at least\nquadratically with the number of samples and may be impractical\nbeyond tens of thousands of samples. 
For large datasets\nconsider using :class:`sklearn.svm.LinearSVC` or\n:class:`sklearn.linear_model.SGDClassifier` instead, possibly after a\n:class:`sklearn.kernel_approximation.Nystroem` transformer.\n\nThe multiclass support is handled according to a one-vs-one scheme.\n\nFor details on the precise mathematical formulation of the provided\nkernel functions and how `gamma`, `coef0` and `degree` affect each\nother, see the corresponding section in the narrative documentation:\n:ref:`svm_kernels`.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": 1, + "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive. The penalty\nis a squared l2 penalty.\n", + "name": "C", + "type": "float32" + }, + { + "default": "rbf", + "description": "Specifies the kernel type to be used in the algorithm.\nIt must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or\na callable.\nIf none is given, 'rbf' will be used. If a callable is given it is\nused to pre-compute the kernel matrix from data matrices; that matrix\nshould be an array of shape ``(n_samples, n_samples)``.\n", + "name": "kernel" + }, + { + "default": 3, + "description": "Degree of the polynomial kernel function ('poly').\nIgnored by all other kernels.\n", + "name": "degree", + "type": "int32" + }, + { + "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features.\n\n.. versionchanged:: 0.22\nThe default value of ``gamma`` changed from 'auto' to 'scale'.\n", + "name": "gamma" + }, + { + "default": 0, + "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'.\n", + "name": "coef0", + "type": "float32" + }, + { + "default": true, + "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide `.\n", + "name": "shrinking", + "type": "boolean" + }, + { + "default": false, + "description": "Whether to enable probability estimates. This must be enabled prior\nto calling `fit`, will slow down that method as it internally uses\n5-fold cross-validation, and `predict_proba` may be inconsistent with\n`predict`. Read more in the :ref:`User Guide `.\n", + "name": "probability", + "type": "boolean" + }, + { + "default": 0.001, + "description": "Tolerance for stopping criterion.\n", + "name": "tol", + "type": "float32" + }, + { + "default": 200, + "description": "Specify the size of the kernel cache (in MB).\n", + "name": "cache_size", + "type": "float32" + }, + { + "default": null, + "description": "Set the parameter C of class i to class_weight[i]*C for\nSVC. If not given, all classes are supposed to have\nweight one.\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n", + "name": "class_weight" + }, + { + "default": false, + "description": "Enable verbose output. 
Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context.\n", + "name": "verbose", + "type": "boolean" + }, + { + "default": -1, + "description": "Hard limit on iterations within solver, or -1 for no limit.\n", + "name": "max_iter", + "type": "int32" + }, + { + "default": "ovr", + "description": "Whether to return a one-vs-rest ('ovr') decision function of shape\n(n_samples, n_classes) as all other classifiers, or the original\none-vs-one ('ovo') decision function of libsvm which has shape\n(n_samples, n_classes * (n_classes - 1) / 2). However, one-vs-one\n('ovo') is always used as multi-class strategy. The parameter is\nignored for binary classification.\n\n.. versionchanged:: 0.19\ndecision_function_shape is 'ovr' by default.\n\n.. versionadded:: 0.17\n*decision_function_shape='ovr'* is recommended.\n\n.. versionchanged:: 0.17\nDeprecated *decision_function_shape='ovo' and None*.\n", + "name": "decision_function_shape" + }, + { + "default": false, + "description": "If true, ``decision_function_shape='ovr'``, and number of classes > 2,\n:term:`predict` will break ties according to the confidence values of\n:term:`decision_function`; otherwise the first class among the tied\nclasses is returned. Please note that breaking ties comes at a\nrelatively high computational cost compared to a simple predict.\n\n.. versionadded:: 0.22\n", + "name": "break_ties", + "type": "boolean" + }, + { + "default": null, + "description": "Controls the pseudo random number generation for shuffling the data for\nprobability estimates. Ignored when `probability` is False.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state" + } + ] + }, + { + "name": "sklearn.svm.SVC", + "description": "C-Support Vector Classification.\n\nThe implementation is based on libsvm. The fit time scales at least\nquadratically with the number of samples and may be impractical\nbeyond tens of thousands of samples. For large datasets\nconsider using :class:`~sklearn.svm.LinearSVC` or\n:class:`~sklearn.linear_model.SGDClassifier` instead, possibly after a\n:class:`~sklearn.kernel_approximation.Nystroem` transformer or\nother :ref:`kernel_approximation`.\n\nThe multiclass support is handled according to a one-vs-one scheme.\n\nFor details on the precise mathematical formulation of the provided\nkernel functions and how `gamma`, `coef0` and `degree` affect each\nother, see the corresponding section in the narrative documentation:\n:ref:`svm_kernels`.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": 1.0, + "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive. The penalty\nis a squared l2 penalty.\n", + "name": "C", + "optional": true, + "type": "float32" + }, + { + "default": "rbf", + "description": "Specifies the kernel type to be used in the algorithm. If\nnone is given, 'rbf' will be used. If a callable is given it is used to\npre-compute the kernel matrix from data matrices; that matrix should be\nan array of shape ``(n_samples, n_samples)``. For an intuitive\nvisualization of different kernel types see\n:ref:`sphx_glr_auto_examples_svm_plot_svm_kernels.py`.\n", + "name": "kernel", + "optional": true, + "type": "string" + }, + { + "default": 3, + "description": "Degree of the polynomial kernel function ('poly').\nMust be non-negative. 
Ignored by all other kernels.\n", + "name": "degree", + "optional": true, + "type": "int32" + }, + { + "default": "scale", + "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features\n- if float, must be non-negative.\n\n.. versionchanged:: 0.22\nThe default value of ``gamma`` changed from 'auto' to 'scale'.\n", + "name": "gamma", + "optional": true, + "type": "float32" + }, + { + "default": 0.0, + "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'.\n", + "name": "coef0", + "optional": true, + "type": "float32" + }, + { + "default": false, + "description": "Whether to enable probability estimates. This must be enabled prior\nto calling `fit`, will slow down that method as it internally uses\n5-fold cross-validation, and `predict_proba` may be inconsistent with\n`predict`. Read more in the :ref:`User Guide `.\n", + "name": "probability", + "optional": true, + "type": "boolean" + }, + { + "default": true, + "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide `.\n", + "name": "shrinking", + "optional": true, + "type": "boolean" + }, + { + "default": 0.001, + "description": "Tolerance for stopping criterion.\n", + "name": "tol", + "optional": true, + "type": "float32" + }, + { + "default": 200.0, + "description": "Specify the size of the kernel cache (in MB).\n", + "name": "cache_size", + "optional": true, + "type": "float32" + }, + { + "default": null, + "description": "Set the parameter C of class i to class_weight[i]*C for\nSVC. If not given, all classes are supposed to have\nweight one.\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``.\n", + "name": "class_weight", + "optional": true + }, + { + "default": false, + "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context.\n", + "name": "verbose", + "type": "boolean" + }, + { + "default": -1, + "description": "Hard limit on iterations within solver, or -1 for no limit.\n", + "name": "max_iter", + "optional": true, + "type": "int32" + }, + { + "default": "ovr", + "description": "Whether to return a one-vs-rest ('ovr') decision function of shape\n(n_samples, n_classes) as all other classifiers, or the original\none-vs-one ('ovo') decision function of libsvm which has shape\n(n_samples, n_classes * (n_classes - 1) / 2). However, note that\ninternally, one-vs-one ('ovo') is always used as a multi-class strategy\nto train models; an ovr matrix is only constructed from the ovo matrix.\nThe parameter is ignored for binary classification.\n\n.. versionchanged:: 0.19\ndecision_function_shape is 'ovr' by default.\n\n.. versionadded:: 0.17\n*decision_function_shape='ovr'* is recommended.\n\n.. versionchanged:: 0.17\nDeprecated *decision_function_shape='ovo' and None*.\n", + "name": "decision_function_shape" + }, + { + "default": null, + "description": "Controls the pseudo random number generation for shuffling the data for\nprobability estimates. 
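A quick worked example for the ovo/ovr shapes described above: the one-vs-one decision function has one column per unordered pair of classes, so with n_classes = 4 it is (n_samples, 6), while ovr gives (n_samples, 4). As a one-liner (illustrative):

const ovoColumns = (nClasses) => nClasses * (nClasses - 1) / 2; // ovoColumns(4) === 6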
Ignored when `probability` is False.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state", + "optional": true, + "type": "int32" + }, + { + "default": false, + "description": "If true, ``decision_function_shape='ovr'``, and number of classes > 2,\n:term:`predict` will break ties according to the confidence values of\n:term:`decision_function`; otherwise the first class among the tied\nclasses is returned. Please note that breaking ties comes at a\nrelatively high computational cost compared to a simple predict.\n\n.. versionadded:: 0.22\n", + "name": "break_ties", + "optional": true, + "type": "boolean" + } + ] + }, + { + "name": "sklearn.svm.SVC", + "description": "C-Support Vector Classification.\n\nThe implementation is based on libsvm. The fit time scales at least\nquadratically with the number of samples and may be impractical\nbeyond tens of thousands of samples. For large datasets\nconsider using :class:`~sklearn.svm.LinearSVC` or\n:class:`~sklearn.linear_model.SGDClassifier` instead, possibly after a\n:class:`~sklearn.kernel_approximation.Nystroem` transformer or\nother :ref:`kernel_approximation`.\n\nThe multiclass support is handled according to a one-vs-one scheme.\n\nFor details on the precise mathematical formulation of the provided\nkernel functions and how `gamma`, `coef0` and `degree` affect each\nother, see the corresponding section in the narrative documentation:\n:ref:`svm_kernels`.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": 1.0, + "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive. The penalty\nis a squared l2 penalty.\n", + "name": "C", + "optional": true, + "type": "float32" + }, + { + "default": "rbf", + "description": "Specifies the kernel type to be used in the algorithm. If\nnone is given, 'rbf' will be used. If a callable is given it is used to\npre-compute the kernel matrix from data matrices; that matrix should be\nan array of shape ``(n_samples, n_samples)``. For an intuitive\nvisualization of different kernel types see\n:ref:`sphx_glr_auto_examples_svm_plot_svm_kernels.py`.\n", + "name": "kernel", + "optional": true, + "type": "string" + }, + { + "default": 3, + "description": "Degree of the polynomial kernel function ('poly').\nMust be non-negative. Ignored by all other kernels.\n", + "name": "degree", + "optional": true, + "type": "int32" + }, + { + "default": "scale", + "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features\n- if float, must be non-negative.\n\n.. versionchanged:: 0.22\nThe default value of ``gamma`` changed from 'auto' to 'scale'.\n", + "name": "gamma", + "optional": true, + "type": "float32" + }, + { + "default": 0.0, + "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'.\n", + "name": "coef0", + "optional": true, + "type": "float32" + }, + { + "default": true, + "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide `.\n", + "name": "shrinking", + "optional": true, + "type": "boolean" + }, + { + "default": false, + "description": "Whether to enable probability estimates. 
This must be enabled prior\nto calling `fit`, will slow down that method as it internally uses\n5-fold cross-validation, and `predict_proba` may be inconsistent with\n`predict`. Read more in the :ref:`User Guide `.\n", + "name": "probability", + "optional": true, + "type": "boolean" + }, + { + "default": 0.001, + "description": "Tolerance for stopping criterion.\n", + "name": "tol", + "optional": true, + "type": "float32" + }, + { + "default": 200.0, + "description": "Specify the size of the kernel cache (in MB).\n", + "name": "cache_size", + "optional": true, + "type": "float32" + }, + { + "default": null, + "description": "Set the parameter C of class i to class_weight[i]*C for\nSVC. If not given, all classes are supposed to have\nweight one.\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``.\n", + "name": "class_weight", + "optional": true + }, + { + "default": false, + "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context.\n", + "name": "verbose", + "type": "boolean" + }, + { + "default": -1, + "description": "Hard limit on iterations within solver, or -1 for no limit.\n", + "name": "max_iter", + "optional": true, + "type": "int32" + }, + { + "default": "ovr", + "description": "Whether to return a one-vs-rest ('ovr') decision function of shape\n(n_samples, n_classes) as all other classifiers, or the original\none-vs-one ('ovo') decision function of libsvm which has shape\n(n_samples, n_classes * (n_classes - 1) / 2). However, note that\ninternally, one-vs-one ('ovo') is always used as a multi-class strategy\nto train models; an ovr matrix is only constructed from the ovo matrix.\nThe parameter is ignored for binary classification.\n\n.. versionchanged:: 0.19\ndecision_function_shape is 'ovr' by default.\n\n.. versionadded:: 0.17\n*decision_function_shape='ovr'* is recommended.\n\n.. versionchanged:: 0.17\nDeprecated *decision_function_shape='ovo' and None*.\n", + "name": "decision_function_shape" + }, + { + "default": null, + "description": "Controls the pseudo random number generation for shuffling the data for\nprobability estimates. Ignored when `probability` is False.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state", + "optional": true + }, + { + "default": false, + "description": "If true, ``decision_function_shape='ovr'``, and number of classes > 2,\n:term:`predict` will break ties according to the confidence values of\n:term:`decision_function`; otherwise the first class among the tied\nclasses is returned. Please note that breaking ties comes at a\nrelatively high computational cost compared to a simple predict.\n\n.. versionadded:: 0.22\n", + "name": "break_ties", + "optional": true, + "type": "boolean" + } + ] + }, + { + "name": "sklearn.tree.tree.DecisionTreeClassifier", + "description": "A decision tree classifier.\n\nRead more in the :ref:`User Guide `.\n", + "attributes": [ + { + "default": "\"gini\"", + "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"entropy\" for the information gain.\n", + "name": "criterion" + }, + { + "default": "\"best\"", + "description": "The strategy used to choose the split at each node. 
Supported\nstrategies are \"best\" to choose the best split and \"random\" to choose\nthe best random split.\n", + "name": "splitter" + }, + { + "default": null, + "description": "The maximum depth of the tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples.\n", + "name": "max_depth", + "type": "int32" + }, + { + "default": "2", + "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n`ceil(min_samples_split * n_samples)` are the minimum\nnumber of samples for each split.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_split" + }, + { + "default": "1", + "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n`ceil(min_samples_leaf * n_samples)` are the minimum\nnumber of samples for each node.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_leaf" + }, + { + "default": 0, + "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided.\n", + "name": "min_weight_fraction_leaf", + "type": "float32" + }, + { + "default": null, + "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n`int(max_features * n_features)` features are considered at each\nsplit.\n- If \"auto\", then `max_features=sqrt(n_features)`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features.\n", + "name": "max_features", + "type": "int32" + }, + { + "default": null, + "description": "Controls the randomness of the estimator. The features are always\nrandomly permuted at each split, even if ``splitter`` is set to\n``\"best\"``. When ``max_features < n_features``, the algorithm will\nselect ``max_features`` at random at each split before finding the best\nsplit among them. But the best found split may vary across different\nruns, even if ``max_features=n_features``. That is the case, if the\nimprovement of the criterion is identical for several splits and one\nsplit has to be selected at random. 
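To make the int/float duality above concrete: with n_samples = 100, min_samples_split = 0.05 means ceil(0.05 * 100) = 5 samples, while min_samples_split = 2 is used as-is. A rough sketch (JavaScript cannot distinguish 2 from 2.0, so the int/float test is only approximated here):

// Fractions are scaled by n_samples and rounded up; integers pass through.
const minSamples = (v, nSamples) => Number.isInteger(v) ? v : Math.ceil(v * nSamples);
// minSamples(0.05, 100) === 5; minSamples(2, 100) === 2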
To obtain a deterministic behaviour\nduring fitting, ``random_state`` has to be fixed to an integer.\nSee :term:`Glossary ` for details.\n", + "name": "random_state", + "type": "int32" + }, + { + "default": null, + "description": "Grow a tree with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes.\n", + "name": "max_leaf_nodes", + "type": "int32" + }, + { + "default": 0, + "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\nN_t / N * (impurity - N_t_R / N_t * right_impurity\n- N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19\n", + "name": "min_impurity_decrease", + "type": "float32" + }, + { + "default": 0, + "description": "Threshold for early stopping in tree growth. A node will split\nif its impurity is above the threshold, otherwise it is a leaf.\n\n.. deprecated:: 0.19\n``min_impurity_split`` has been deprecated in favor of\n``min_impurity_decrease`` in 0.19. The default value of\n``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it\nwill be removed in 0.25. Use ``min_impurity_decrease`` instead.\n", + "name": "min_impurity_split", + "type": "float32" + }, + { + "default": null, + "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf None, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n", + "name": "class_weight" + }, + { + "default": "deprecated", + "description": "This parameter is deprecated and will be removed in v0.24.\n\n.. deprecated:: 0.22\n", + "name": "presort" + }, + { + "default": "0.0", + "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. 
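Restating the weighted impurity-decrease equation above as a function, to make the nesting of the fractions unambiguous (a sketch; the argument names follow the docstring, not any real sklearn API):

// N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity)
function impurityDecrease(N, Nt, NtL, NtR, impurity, leftImpurity, rightImpurity) {
    return (Nt / N) * (impurity - (NtR / Nt) * rightImpurity - (NtL / Nt) * leftImpurity);
}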
versionadded:: 0.22\n", + "name": "ccp_alpha" + } + ] + } +] \ No newline at end of file diff --git a/sklearn.js b/sklearn.js new file mode 100644 index 00000000000..5f44c5801f5 --- /dev/null +++ b/sklearn.js @@ -0,0 +1,351 @@ + +// Experimental + +const sklearn = {}; + +sklearn.ModelFactory = class { + + match(context) { + const obj = context.peek('pkl'); + const validate = (obj, name) => { + if (obj && obj.__class__ && obj.__class__.__module__ && obj.__class__.__name__) { + const key = `${obj.__class__.__module__}.${obj.__class__.__name__}`; + return key.startsWith(name); + } + return false; + }; + const formats = [ + { name: 'sklearn.', format: 'sklearn' }, + { name: 'xgboost.sklearn.', format: 'sklearn' }, + { name: 'lightgbm.sklearn.', format: 'sklearn' }, + { name: 'scipy.', format: 'scipy' }, + { name: 'hmmlearn.', format: 'hmmlearn' } + ]; + for (const format of formats) { + if (validate(obj, format.name)) { + return format.format; + } + if (Array.isArray(obj) && obj.length > 0 && obj.every((item) => validate(item, format.name))) { + return `${format.format}.list`; + } + if (Object(obj) === obj) { + const entries = Object.entries(obj); + if (entries.length > 0 && entries.every(([, value]) => validate(value, format.name))) { + return `${format.format}.map`; + } + } + } + return null; + } + + async open(context, target) { + const metadata = await context.metadata('sklearn-metadata.json'); + const obj = context.peek('pkl'); + return new sklearn.Model(metadata, target, obj); + } +}; + +sklearn.Model = class { + + constructor(metadata, target, obj) { + const formats = new Map([ + [ 'sklearn', 'scikit-learn' ], + [ 'scipy', 'SciPy' ], + [ 'hmmlearn', 'hmmlearn' ] + ]); + this.format = formats.get(target.split('.').shift()); + this.graphs = []; + const version = []; + switch (target) { + case 'sklearn': + case 'scipy': + case 'hmmlearn': { + if (obj._sklearn_version) { + version.push(` v${obj._sklearn_version}`); + } + this.graphs.push(new sklearn.Graph(metadata, '', obj)); + break; + } + case 'sklearn.list': + case 'scipy.list': { + const list = obj; + for (let i = 0; i < list.length; i++) { + const obj = list[i]; + this.graphs.push(new sklearn.Graph(metadata, i.toString(), obj)); + if (obj._sklearn_version) { + version.push(` v${obj._sklearn_version}`); + } + } + break; + } + case 'sklearn.map': + case 'scipy.map': { + for (const [name, value] of Object.entries(obj)) { + this.graphs.push(new sklearn.Graph(metadata, name, value)); + if (value._sklearn_version) { + version.push(` v${value._sklearn_version}`); + } + } + break; + } + default: { + throw new sklearn.Error(`Unsupported scikit-learn format '${target}'.`); + } + } + if (version.length > 0 && version.every((value) => value === version[0])) { + this.format += version[0]; + } + } +}; + +sklearn.Graph = class { + + constructor(metadata, name, obj) { + this.name = name || ''; + this.nodes = []; + this.inputs = []; + this.outputs = []; + this.groups = false; + const values = new Map(); + values.map = (name) => { + if (!values.has(name)) { + values.set(name, new sklearn.Value(name, null, null)); + } + return values.get(name); + }; + const concat = (parent, name) => { + return (parent === '' ? 
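For reference, the match() logic above keys purely off the unpickled object's __class__ metadata. A hand-written mock (not a real pickle) that it would classify as 'sklearn':

const mock = {
    __class__: { __module__: 'sklearn.svm', __name__: 'SVC' },
    _sklearn_version: '0.24.2'
};
// `${__module__}.${__name__}` -> 'sklearn.svm.SVC', which startsWith('sklearn.'),
// so match() returns 'sklearn'. An array of such objects yields 'sklearn.list';
// a plain object whose values all validate yields 'sklearn.map'.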
name : `${parent}/${name}`); + }; + const process = (group, name, obj, inputs) => { + const type = `${obj.__class__.__module__}.${obj.__class__.__name__}`; + switch (type) { + case 'sklearn.pipeline.Pipeline': { + this.groups = true; + name = name || 'pipeline'; + const childGroup = concat(group, name); + for (const step of obj.steps) { + inputs = process(childGroup, step[0], step[1], inputs); + } + return inputs; + } + case 'sklearn.pipeline.FeatureUnion': { + this.groups = true; + const outputs = []; + name = name || 'union'; + const output = concat(group, name); + const subgroup = concat(group, name); + const node = new sklearn.Node(metadata, subgroup, output, obj, inputs, [ output ], values); + this.nodes.push(node); + for (const transformer of obj.transformer_list) { + outputs.push(...process(subgroup, transformer[0], transformer[1], [ output ])); + } + return outputs; + } + case 'sklearn.compose._column_transformer.ColumnTransformer': { + this.groups = true; + name = name || 'transformer'; + const output = concat(group, name); + const subgroup = concat(group, name); + const outputs = []; + const node = new sklearn.Node(metadata, subgroup, output, obj, inputs, [ output ], values); + this.nodes.push(node); + for (const transformer of obj.transformers) { + if (transformer[1] !== 'passthrough') { + outputs.push(...process(subgroup, transformer[0], transformer[1], [ output ])); + } + } + return outputs; + } + default: { + const output = concat(group, name); + const node = new sklearn.Node(metadata, group, output, obj, inputs, output === '' ? [] : [ output ], values); + this.nodes.push(node); + return [ output ]; + } + } + }; + process('', '', obj, []); + } +}; + +sklearn.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +sklearn.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new sklearn.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this._type = type || null; + this.initializer = initializer || null; + } + + get type() { + if (this.initializer) { + return this.initializer.type; + } + return this._type; + } +}; + +sklearn.Node = class { + + constructor(metadata, group, name, obj, inputs, outputs, values, stack) { + this.group = group || null; + this.name = name || ''; + const type = obj.__class__ ? 
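To see how the process() recursion above flattens composite estimators: a Pipeline contributes one node per step, with each step's output list threaded into the next step's inputs. A mock input (shape only, hypothetical values):

const pipeline = {
    __class__: { __module__: 'sklearn.pipeline', __name__: 'Pipeline' },
    steps: [
        ['scaler', { __class__: { __module__: 'sklearn.preprocessing', __name__: 'StandardScaler' } }],
        ['svc', { __class__: { __module__: 'sklearn.svm', __name__: 'SVC' } }]
    ]
};
// process('', '', pipeline, []) emits nodes named 'pipeline/scaler' and
// 'pipeline/svc', wiring the first node's output into the second's input.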
`${obj.__class__.__module__}.${obj.__class__.__name__}` : 'builtins.dict'; + this.type = metadata.type(type) || { name: type }; + this.inputs = inputs.map((input) => new sklearn.Argument(input, [ values.map(input) ])); + this.outputs = outputs.map((output) => new sklearn.Argument(output, [ values.map(output) ])); + this.attributes = []; + const isArray = (obj) => { + return obj && obj.__class__ && + obj.__class__.__module__ === 'numpy' && obj.__class__.__name__ === 'ndarray'; + }; + const isObject = (obj) => { + if (obj && typeof obj === 'object') { + const proto = Object.getPrototypeOf(obj); + return proto === Object.prototype || proto === null; + } + return false; + }; + const entries = Object.entries(obj); + for (const [name, value] of entries) { + if (name === '__class__') { + continue; + } else if (value && isArray(value)) { + const tensor = new sklearn.Tensor(value); + const attribute = new sklearn.Attribute(name, tensor, 'tensor'); + this.attributes.push(attribute); + } else if (Array.isArray(value) && value.length > 0 && value.every((obj) => isArray(obj))) { + const tensors = value.map((obj) => new sklearn.Tensor(obj)); + const attribute = new sklearn.Attribute(name, tensors, 'tensor[]'); + this.attributes.push(attribute); + } else { + stack = stack || new Set(); + if (value && Array.isArray(value) && value.every((obj) => typeof obj === 'string')) { + const attribute = new sklearn.Attribute(name, value, 'string[]'); + this.attributes.push(attribute); + } else if (value && Array.isArray(value) && value.every((obj) => typeof obj === 'number')) { + const attribute = new sklearn.Attribute(name, value); + this.attributes.push(attribute); + } else if (value && value.__class__ && value.__class__.__module__ === 'builtins' && (value.__class__.__name__ === 'function' || value.__class__.__name__ === 'type')) { + const obj = {}; + obj.__class__ = value; + const node = new sklearn.Node(metadata, group, '', obj, [], [], null, stack); + const attribute = new sklearn.Attribute(name, node, 'object'); + this.attributes.push(attribute); + } else if (value && Array.isArray(value) && value.length > 0 && value.every((obj) => obj && (obj.__class__ || obj === Object(obj)))) { + const values = value.filter((value) => !stack.has(value)); + const nodes = values.map((value) => { + stack.add(value); + const node = new sklearn.Node(metadata, group, '', value, [], [], null, stack); + stack.delete(value); + return node; + }); + const attribute = new sklearn.Attribute(name, nodes, 'object[]'); + this.attributes.push(attribute); + } else if (value && (value.__class__ || isObject(value))) { + if (!stack.has(value)) { + stack.add(value); + const node = new sklearn.Node(metadata, group, '', value, [], [], null, stack); + const attribute = new sklearn.Attribute(name, node, 'object'); + this.attributes.push(attribute); + stack.delete(value); + } + } else { + const schema = metadata.attribute(type, name); + if (schema) { + let type = undefined; + let visible = undefined; + if (schema.type) { + type = schema.type; + } + if (schema.visible === false || (schema.optional && value == null)) { + visible = false; + } else if (schema.default !== undefined) { + if (Array.isArray(value)) { + if (Array.isArray(schema.default)) { + visible = value.length !== schema.default || !value.every((item, index) => item == metadata.default[index]); + } else { + visible = !value.every((item) => item == schema.default); + } + } else { + visible = value !== schema.default; + } + } + const attribute = new sklearn.Attribute(name, value, type, 
visible); + this.attributes.push(attribute); + } else { + const attribute = new sklearn.Attribute(name, value); + this.attributes.push(attribute); + } + } + } + } + } +}; + +sklearn.Attribute = class { + + constructor(name, value, type, visible) { + this.name = name; + this.value = value; + if (type) { + this.type = type; + } + if (visible === false) { + this.visible = visible; + } + } +}; + +sklearn.Tensor = class { + + constructor(array) { + this.type = new sklearn.TensorType(array.dtype.__name__, new sklearn.TensorShape(array.shape)); + this.stride = array.strides.map((stride) => stride / array.itemsize); + this.encoding = this.type.dataType == 'string' || this.type.dataType == 'object' ? '|' : array.dtype.byteorder; + this.values = this.type.dataType == 'string' || this.type.dataType == 'object' ? array.tolist() : array.tobytes(); + } +}; + +sklearn.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType; + this.shape = shape; + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +sklearn.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + return this.dimensions ? (`[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`) : ''; + } +}; + +sklearn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading scikit-learn model.'; + } +}; + +export const ModelFactory = sklearn.ModelFactory; diff --git a/tar.js b/tar.js new file mode 100644 index 00000000000..b35d2145eb1 --- /dev/null +++ b/tar.js @@ -0,0 +1,165 @@ + +const tar = {}; + +tar.Archive = class { + + static open(data) { + const stream = data instanceof Uint8Array ? new tar.BinaryReader(data) : data; + if (stream && stream.length > 512) { + const buffer = stream.peek(512); + const sum = buffer.map((value, index) => (index >= 148 && index < 156) ? 32 : value).reduce((a, b) => a + b, 0); + const checksum = parseInt(Array.from(buffer.slice(148, 156)).map((c) => String.fromCharCode(c)).join('').split('\0').shift(), 8); + if (!isNaN(checksum) && sum === checksum) { + return new tar.Archive(stream); + } + } + return null; + } + + constructor(stream) { + this._entries = new Map(); + const position = stream.position; + while (stream.position < stream.length) { + const entry = new tar.Entry(stream); + if (entry.type === '0' || entry.type === '1' || entry.type === '2') { + this._entries.set(entry.name, entry.stream); + } + if (stream.position + 512 > stream.length || + stream.peek(512).every((value) => value === 0x00)) { + break; + } + } + stream.seek(position); + } + + get entries() { + return this._entries; + } +}; + +tar.Entry = class { + + constructor(stream) { + const buffer = stream.read(512); + const reader = new tar.BinaryReader(buffer); + const sum = buffer.map((value, index) => (index >= 148 && index < 156) ? 
32 : value).reduce((a, b) => a + b, 0); + let checksum = ''; + for (let i = 148; i < 156 && buffer[i] !== 0x00; i++) { + checksum += String.fromCharCode(buffer[i]); + } + checksum = parseInt(checksum, 8); + if (isNaN(checksum) || sum !== checksum) { + throw new tar.Error('Invalid tar archive.'); + } + this._name = reader.string(100); + reader.string(8); // file mode + reader.string(8); // owner + reader.string(8); // group + const size = parseInt(reader.string(12).trim(), 8); + reader.string(12); // timestamp + reader.string(8); // checksum + this._type = reader.string(1); + reader.string(100); // name of linked file + if (reader.string(6) === 'ustar') { + reader.string(2); // ustar version + reader.string(32); // owner user name + reader.string(32); // owner group name + reader.string(8); // device major number + reader.string(8); // device number number + this._name = reader.string(155) + this._name; + } + this._stream = stream.stream(size); + stream.read(((size % 512) != 0) ? (512 - (size % 512)) : 0); + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get stream() { + return this._stream; + } +}; + +tar.BinaryReader = class { + + constructor(buffer) { + this._buffer = buffer; + this._length = buffer.length; + this._position = 0; + this._view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + } + + get position() { + return this._position; + } + + get length() { + return this._length; + } + + create(buffer) { + return new tar.BinaryReader(buffer); + } + + stream(length) { + return this.create(this.read(length)); + } + + seek(position) { + this._position = position >= 0 ? position : this._length + position; + } + + skip(offset) { + this._position += offset; + } + + peek(length) { + if (this._position === 0 && length === undefined) { + return this._buffer; + } + const position = this._position; + this.skip(length !== undefined ? length : this._length - this._position); + const end = this._position; + this.seek(position); + return this._buffer.subarray(position, end); + } + + read(length) { + if (this._position === 0 && length === undefined) { + this._position = this._length; + return this._buffer; + } + const position = this._position; + this.skip(length !== undefined ? 
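The validation above is the standard tar header checksum: all 512 header bytes are summed with the checksum field itself (offsets 148-155) counted as ASCII spaces (0x20), and the sum must equal the octal number stored in that field. Restated standalone (a sketch; the function name is illustrative):

// Sum of the 512-byte header with the checksum field treated as spaces.
function tarChecksum(header) {
    let sum = 0;
    for (let i = 0; i < 512; i++) {
        sum += (i >= 148 && i < 156) ? 0x20 : header[i];
    }
    return sum; // compare against parseInt(checksumFieldText, 8)
}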
length : this._length - this._position); + return this._buffer.subarray(position, this._position); + } + + string(length) { + const buffer = this.read(length); + let position = 0; + let content = ''; + for (let i = 0; i < length; i++) { + const c = buffer[position++]; + if (c === 0) { + break; + } + content += String.fromCharCode(c); + } + return content; + } +}; + +tar.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'tar Error'; + } +}; + +export const Archive = tar.Archive; \ No newline at end of file diff --git a/tengine-metadata.json b/tengine-metadata.json new file mode 100755 index 00000000000..06831be0136 --- /dev/null +++ b/tengine-metadata.json @@ -0,0 +1,933 @@ +[ + { + "name": "Absval", + "category": "Data" + }, + { + "name": "Accuracy" + }, + { + "name": "Addn", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "ArgMax", + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "ArgMin", + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "BatchNormalization", + "category": "Normalization", + "attributes": [ + { "name": "rescale_factor", "type": "float32", "default": 1 }, + { "name": "eps", "type": "float32", "default": 0.00001 }, + { "name": "caffe_flavor", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" }, + { "name": "mean" }, + { "name": "var" } + ] + }, + { + "name": "BatchToSpaceND", + "category": "Shape", + "attributes": [ + { "name": "dilation_x", "type": "int32", "default": 0 }, + { "name": "dilation_y", "type": "int32", "default": 0 }, + { "name": "crop_top", "type": "int32", "default": 0 }, + { "name": "crop_bottom", "type": "int32", "default": 0 }, + { "name": "crop_left", "type": "int32", "default": 0 }, + { "name": "crop_right", "type": "int32", "default": 0 } + ] + }, + { + "name": "Bias", + "category": "Layer", + "attributes": [ + { "name": "bias_size", "type": "int32", "default": 0 } + ] + }, + { + "name": "BilinearResize", + "category": "Shape", + "attributes": [ + { "name": "scale_x", "type": "float32", "default": 0 }, + { "name": "scale_y", "type": "float32", "default": 0 }, + { "name": "type", "type": "int32", "default": 0 } + ] + }, + { + "name": "BroadMul", + "category": "Layer" + }, + { + "name": "Cast", + "attributes": [ + { "name": "type_from", "type": "int32", "default": 0 }, + { "name": "type_to", "type": "int32", "default": 0 } + ] + }, + { + "name": "Ceil", + "category": "Layer" + }, + { + "name": "Clip", + "category": "Layer", + "attributes": [ + { "name": "max", "type": "float32", "default": 0 }, + { "name": "min", "type": "float32", "default": 0 } + ] + }, + { + "name": "Comparison", + "category": "Layer", + "attributes": [ + { "name": "type", "type": "int32", "default": 0 } + ] + }, + { + "name": "Concat", + "category": "Shape", + "attributes": [ + { "name": "axis", "type": "int32", "default": 1 } + ], + "inputs": [ + { "name": "inputs", "option": "variadic" } + ] + }, + { + "name": "Const" + }, + { + "name": "Convolution", + "category": "Layer", + "attributes": [ + { "name": "kernel_h", "type": "int32", "default": 1 }, + { "name": "kernel_w", "type": "int32", "default": 1 }, + { "name": "stride_h", "type": "int32", "default": 1 }, + { "name": "stride_w", "type": "int32", "default": 1 }, + { "name": "dilation_h", "type": "int32", "default": 1, "visible": false }, + { "name": 
"dilation_w", "type": "int32", "default": 1, "visible": false }, + { "name": "input_channel", "type": "int32", "default": 1 }, + { "name": "output_channel", "type": "int32", "default": 1 }, + { "name": "group", "type": "int32", "default": 1, "visible": false }, + { "name": "activation", "type": "int32", "default": -1 }, + { "name": "pad_h0", "type": "int32", "default": 0, "visible": false }, + { "name": "pad_w0", "type": "int32", "default": 0, "visible": false }, + { "name": "pad_h1", "type": "int32", "default": 0, "visible": false }, + { "name": "pad_w1", "type": "int32", "default": 0, "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "Crop", + "category": "Shape", + "attributes": [ + { "name": "num_args", "type": "int32", "default": 0 }, + { "name": "offset_c", "type": "int32", "default": 0 }, + { "name": "offset_h", "type": "int32", "default": 0 }, + { "name": "offset_w", "type": "int32", "default": 0 }, + { "name": "crop_h", "type": "int32", "default": 0 }, + { "name": "crop_w", "type": "int32", "default": 0 }, + { "name": "center_crop", "type": "bool", "default": 0 }, + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "flag", "type": "int32", "default": 0 } + ] + }, + { + "name": "Deconvolution", + "category": "Layer", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 1 }, + { "name": "kernel_h", "type": "int32", "default": 1 }, + { "name": "kernel_w", "type": "int32", "default": 1 }, + { "name": "stride_h", "type": "int32", "default": 1 }, + { "name": "stride_w", "type": "int32", "default": 1 }, + { "name": "pad_w0", "type": "int32", "default": 0 }, + { "name": "pad_h0", "type": "int32", "default": 0 }, + { "name": "pad_w1", "type": "int32", "default": 0 }, + { "name": "pad_h1", "type": "int32", "default": 0 }, + { "name": "dilation_h", "type": "int32", "default": 1 }, + { "name": "dilation_w", "type": "int32", "default": 1 }, + { "name": "group", "type": "int32", "default": 1 }, + { "name": "activation", "type": "int32", "default": -1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "DepthToSpace", + "category": "Shape", + "attributes": [ + { "name": "block_size", "type": "int32", "default": 0 } + ] + }, + { + "name": "DetectionOutput", + "category": "Layer", + "attributes": [ + { "name": "num_classes", "type": "int32", "default": 0 }, + { "name": "keep_top_k", "type": "int32", "default": 0 }, + { "name": "nms_top_k", "type": "int32", "default": 0 }, + { "name": "confidence_threshold", "type": "float32", "default": 0 }, + { "name": "nms_threshold", "type": "float32", "default": 0 } + ] + }, + { + "name": "DetectionPostProcess", + "category": "Layer", + "attributes": [ + { "name": "max_detections", "type": "int32", "default": 0 }, + { "name": "max_classes_per_detection", "type": "int32", "default": 0 }, + { "name": "nms_score_threshold", "type": "float32", "default": 0 }, + { "name": "nms_iou_threshold", "type": "float32", "default": 0 }, + { "name": "num_classes", "type": "int32", "default": 0 }, + { "name": "scales", "type": "float32[]", "default": [] } + ], + "inputs": [ + { "name": "input" }, + { "name": "score" }, + { "name": "anchor" } + ], + "outputs": [ + { "name": "detect_boxes" }, + { "name": "detect_classes" }, + { "name": "detect_scores" }, + { "name": "detect_num" } + ] + }, + { + "name": "DropOut", + "category": "Dropout" + }, + { + "name": "Eltwise", + "attributes": [ + { "name": "type", 
"type": "uint32", "default": 0 }, + { "name": "caffe_flavor", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "inputs", "option": "variadic" } + ] + }, + { + "name": "ELU", + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 } + ] + }, + { + "name": "Embed", + "category": "Transform", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 0 }, + { "name": "input_dim", "type": "int32", "default": 0 }, + { "name": "bias_term", "type": "int32", "default": 0 }, + { "name": "weight_data_size", "type": "int32", "default": 0 } + ] + }, + { + "name": "Expand", + "category": "Layer", + "attributes": [ + { "name": "v_shape[]", "type": "int32[]", "default": [] } + ] + }, + { + "name": "Expanddims", + "category": "Shape", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "Flatten", + "category": "Shape", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "end_axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "FullyConnected", + "category": "Layer", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 10 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ] + }, + { + "name": "FusedbnScaleRelu", + "category": "Activation" + }, + { + "name": "Gather", + "category": "Transform", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "indices_num", "type": "int32", "default": 0 } + ] + }, + { + "name": "Gemm", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 }, + { "name": "transA", "type": "int32", "default": 0 }, + { "name": "transB", "type": "int32", "default": 0 } + ] + }, + { + "name": "Generic", + "attributes": [ + { "name": "max_input_num", "type": "int32", "default": 0 }, + { "name": "max_output_num", "type": "int32", "default": 0 }, + { "name": "opname", "type": "string", "default": "" } + ] + }, + { + "name": "GRU", + "category": "Layer", + "attributes": [ + { "name": "clip", "type": "float32", "default": 0 }, + { "name": "output_len", "type": "int32", "default": 0 }, + { "name": "sequence_len", "type": "int32", "default": 0 }, + { "name": "input_size", "type": "int32", "default": 0 }, + { "name": "hidden_size", "type": "int32", "default": 0 }, + { "name": "has_clip", "type": "int32", "default": 0 }, + { "name": "has_gate_bias", "type": "int32", "default": 0 }, + { "name": "has_candidate_bias", "type": "int32", "default": 0 }, + { "name": "has_init_state", "type": "int32", "default": 0 }, + { "name": "mxnet_flag", "type": "int32", "default": 0 } + ] + }, + { + "name": "Hardsigmoid", + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 } + ] + }, + { + "name": "HardSwish", + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 } + ] + }, + { + "name": "InstanceNorm", + "category": "Normalization", + "attributes": [ + { "name": "eps", "type": "float32", "default": 0 } + ] + }, + { + "name": "Interp", + "category": "Layer", + "attributes": [ + { "name": "resize_type", "type": "int32", "default": 0 }, + { "name": "width_scale", "type": "float32", "default": 0 }, + { "name": "height_scale", "type": "float32", "default": 0 }, + { "name": "output_width", "type": "int32", "default": 0 
}, + { "name": "output_height", "type": "int32", "default": 0 } + ] + }, + { + "name": "L2Normalization", + "category": "Layer" + }, + { + "name": "L2Pool", + "category": "Layer" + }, + { + "name": "Logical", + "category": "Layer", + "attributes": [ + { "name": "type", "type": "int32", "default": 0 } + ] + }, + { + "name": "Logistic", + "category": "Activation" + }, + { + "name": "LogSoftmax", + "category": "Layer" + }, + { + "name": "LRN", + "category": "Normalization", + "attributes": [ + { "name": "local_size", "type": "int32", "default": 0 }, + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 }, + { "name": "norm_region", "type": "int32", "default": 0 }, + { "name": "k", "type": "float32", "default": 0 } + ] + }, + { + "name": "LSTM", + "category": "Layer", + "attributes": [ + { "name": "forget_bias", "type": "float32", "default": 0 }, + { "name": "clip", "type": "float32", "default": 0 }, + { "name": "output_len", "type": "int32", "default": 0 }, + { "name": "sequence_len", "type": "int32", "default": 0 }, + { "name": "input_size", "type": "int32", "default": 0 }, + { "name": "hidden_size", "type": "int32", "default": 0 }, + { "name": "cell_size", "type": "int32", "default": 0 }, + { "name": "has_peephole", "type": "int32", "default": 0 }, + { "name": "has_projection", "type": "int32", "default": 0 }, + { "name": "has_clip", "type": "int32", "default": 0 }, + { "name": "has_bias", "type": "int32", "default": 0 }, + { "name": "has_init_state", "type": "int32", "default": 0 }, + { "name": "forget_act", "type": "int32", "default": 0 }, + { "name": "input_act", "type": "int32", "default": 0 }, + { "name": "output_act", "type": "int32", "default": 0 }, + { "name": "cellin_act", "type": "int32", "default": 0 }, + { "name": "cellout_act", "type": "int32", "default": 0 }, + { "name": "mxnet_flag", "type": "int32", "default": 0 } + ] + }, + { + "name": "MatMul", + "category": "Layer" + }, + { + "name": "Max", + "category": "Layer" + }, + { + "name": "Mean", + "category": "Layer" + }, + { + "name": "Min", + "category": "Layer" + }, + { + "name": "Mish", + "category": "Activation" + }, + { + "name": "MVN", + "attributes": [ + { "name": "across_channels", "type": "int32", "default": 0 }, + { "name": "normalize_variance", "type": "int32", "default": 0 }, + { "name": "eps", "type": "float32", "default": 0 } + ] + }, + { + "name": "Noop", + "category": "Layer" + }, + { + "name": "Normalize", + "category": "Normalization", + "attributes": [ + { "name": "across_spatial", "type": "int32", "default": 0 }, + { "name": "channel_shared", "type": "int32", "default": 0 } + ] + }, + { + "name": "Num", + "category": "Layer" + }, + + { + "name": "PackModel", + "category": "Layer", + "attributes": [ + { "name": "buffer size", "type": "int32", "default": 1 }, + { "name": "model version", "type": "int32", "default": 1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "Source Model" } + ] + }, + { + "name": "Pad", + "category": "Tensor", + "attributes": [ + { "name": "pad_n_0", "type": "int32", "default": -1 }, + { "name": "pad_n_1", "type": "int32", "default": -1 }, + { "name": "pad_c_0", "type": "int32", "default": -1 }, + { "name": "pad_c_1", "type": "int32", "default": -1 }, + { "name": "pad_h_0", "type": "int32", "default": -1 }, + { "name": "pad_h_1", "type": "int32", "default": -1 }, + { "name": "pad_w_0", "type": "int32", "default": -1 }, + { "name": "pad_w_1", "type": "int32", "default": -1 }, + { "name": "mode", "type": "int32", "default": 0 
}, + { "name": "value", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "paddings" } + ] + }, + { + "name": "Permute", + "category": "Shape", + "attributes": [ + { "name": "flag", "type": "int32", "default": 0 }, + { "name": "order0", "type": "int32", "default": 0 }, + { "name": "order1", "type": "int32", "default": 0 }, + { "name": "order2", "type": "int32", "default": 0 }, + { "name": "order3", "type": "int32", "default": 0 } + ] + }, + { + "name": "Pooling", + "category": "Pool", + "attributes": [ + { "name": "alg", "type": "int32", "default": 0 }, + { "name": "kernel_h", "type": "int32", "default": 0 }, + { "name": "kernel_w", "type": "int32", "default": 0 }, + { "name": "stride_h", "type": "int32", "default": 0 }, + { "name": "stride_w", "type": "int32", "default": 0 }, + { "name": "global", "type": "int32", "default": 0 }, + { "name": "caffe_flavor", "type": "int32", "default": 0 }, + { "name": "pad_h0", "type": "int32", "default": 0 }, + { "name": "pad_w0", "type": "int32", "default": 0 }, + { "name": "pad_h1", "type": "int32", "default": 0 }, + { "name": "pad_w1", "type": "int32", "default": 0 } + ] + }, + { + "name": "Prelu", + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "slope" } + ] + }, + { + "name": "PriorBox", + "attributes": [ + { "name": "min_size", "type": "float32[]", "default": [] }, + { "name": "max_size", "type": "float32[]", "default": [] }, + { "name": "variance", "type": "float32[]", "default": [] }, + { "name": "aspect_ratio", "type": "float32[]", "default": [] }, + { "name": "flip", "type": "int32", "default": 0 }, + { "name": "clip", "type": "int32", "default": 0 }, + { "name": "img_size", "type": "int32", "default": 0 }, + { "name": "img_h", "type": "int32", "default": 0 }, + { "name": "img_w", "type": "int32", "default": 0 }, + { "name": "step_w", "type": "float32", "default": 0 }, + { "name": "step_h", "type": "float32", "default": 0 }, + { "name": "offset", "type": "float32", "default": 0 }, + { "name": "num_priors", "type": "int32", "default": 0 }, + { "name": "out_dim", "type": "int32", "default": 0 } + ] + }, + { + "name": "Psroipooling", + "category": "Pool", + "attributes": [ + { "name": "pooled_w", "type": "int32", "default": 0 }, + { "name": "pooled_h", "type": "int32", "default": 0 }, + { "name": "spatial_scale", "type": "float32", "default": 0 }, + { "name": "output_dim", "type": "int32", "default": 0 } + ] + }, + { + "name": "ReduceL2", + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "keepdim", "type": "int32", "default": 0 } + ] + }, + { + "name": "Reduction", + "attributes": [ + { "name": "dim_0", "type": "int32", "default": -2 }, + { "name": "dim_1", "type": "int32", "default": -2 }, + { "name": "dim_2", "type": "int32", "default": -2 }, + { "name": "dim_3", "type": "int32", "default": -2 }, + { "name": "type", "type": "int32", "default": 0 }, + { "name": "keepdim", "type": "int32", "default": 0 } + ] + }, + { + "name": "Region", + "attributes": [ + { "name": "num_classes", "type": "int32", "default": 0 }, + { "name": "side", "type": "int32", "default": 0 }, + { "name": "num_box", "type": "int32", "default": 0 }, + { "name": "coords", "type": "int32", "default": 0 }, + { "name": "confidence_threshold", "type": "float32", "default": 0 }, + { "name": "nms_threshold", "type": "float32", "default": 0 }, + { "name": "biases", "type": "float32[]", "default": [] } + ] + }, + { + "name": "ReLU", + "category": "Activation", + 
"attributes": [ + { "name": "negative_slope", "type": "float32", "default": 0 } + ] + }, + { + "name": "Relu1", + "category": "Layer" + }, + { + "name": "ReLU6", + "category": "Activation" + }, + { + "name": "Reorg", + "category": "Shape", + "attributes": [ + { "name": "stride", "type": "int32", "default": 0 } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "attributes": [ + { "name": "is_mxnet", "type": "int32", "default": 0 }, + { "name": "reverse", "type": "int32", "default": 0 }, + { "name": "shape", "type": "int32[]", "default": [] } + ], + "inputs": [ + { "name": "input" }, + { "name": "shape" } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "attributes": [ + { "name": "dim_0", "type": "int32", "default": 0 }, + { "name": "dim_1", "type": "int32", "default": 0 }, + { "name": "dim_2", "type": "int32", "default": 0 }, + { "name": "dim_3", "type": "int32", "default": 0 }, + { "name": "dim_size", "type": "int32", "default": 0 }, + { "name": "axis", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "shape" } + ] + }, + { + "name": "Reshape", + "version": 2, + "category": "Shape", + "attributes": [ + { "name": "is_mxnet", "type": "int32", "default": 0 }, + { "name": "reverse", "type": "int32", "default": 0 }, + { "name": "shape", "type": "int32[]", "default": [] } + ], + "inputs": [ + { "name": "input" }, + { "name": "shape" } + ] + }, + { + "name": "Resize", + "category": "Data", + "attributes": [ + { "name": "scale_x", "type": "float32", "default": 0 }, + { "name": "scale_y", "type": "float32", "default": 0 }, + { "name": "type", "type": "int32", "default": 0 } + ] + }, + { + "name": "Reverse", + "category": "Shape" + }, + { + "name": "RNN", + "category": "Layer", + "attributes": [ + { "name": "clip", "type": "float32", "default": 0 }, + { "name": "output_len", "type": "int32", "default": 0 }, + { "name": "sequence_len", "type": "int32", "default": 0 }, + { "name": "input_size", "type": "int32", "default": 0 }, + { "name": "hidden_size", "type": "int32", "default": 0 }, + { "name": "has_clip", "type": "int32", "default": 0 }, + { "name": "has_bias", "type": "int32", "default": 0 }, + { "name": "has_init_state", "type": "int32", "default": 0 }, + { "name": "activation", "type": "int32", "default": 0 } + ] + }, + { + "name": "ROIAlign", + "attributes": [ + { "name": "pooled_width", "type": "int32", "default": 0 }, + { "name": "pooled_height", "type": "int32", "default": 0 }, + { "name": "spatial_scale", "type": "float32", "default": 0 } + ] + }, + { + "name": "RoiPooling", + "category": "Pool", + "attributes": [ + { "name": "pooled_h", "type": "int32", "default": 0 }, + { "name": "pooled_w", "type": "int32", "default": 0 }, + { "name": "spatial_scale", "type": "float32", "default": 0 } + ] + }, + { + "name": "Round", + "category": "Layer" + }, + { + "name": "RPN", + "attributes": [ + { "name": "ratios", "type": "float32[]", "default": [] }, + { "name": "anchor_scales", "type": "float32[]", "default": [] }, + { "name": "feat_stride", "type": "int32", "default": 0 }, + { "name": "basesize", "type": "int32", "default": 0 }, + { "name": "min_size", "type": "int32", "default": 0 }, + { "name": "per_nms_topn", "type": "int32", "default": 0 }, + { "name": "post_nms_topn", "type": "int32", "default": 0 }, + { "name": "nms_thresh", "type": "float32", "default": 0 }, + { "name": "anchors", "default": 0 } + ] + }, + { + "name": "Scale", + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": 
"num_axes", "type": "int32", "default": 0 }, + { "name": "bias_term", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" } + ] + }, + { + "name": "Scatter", + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "is_onnx", "type": "boolean", "default": false } + ] + }, + { + "name": "SELU", + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "lambda", "type": "float32", "default": 0 } + ] + }, + { + "name": "Shape", + "category": "Shape" + }, + { + "name": "ShuffleChannel", + "category": "shape", + "attributes": [ + { "name": "group", "type": "int32", "default": 0 } + ] + }, + { + "name": "Sigmoid", + "category": "Activation" + }, + { + "name": "Slice", + "category": "Shape", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "slice_points", "type": "int32[]", "default": [] }, + { "name": "begins", "type": "int32[]", "default": [] }, + { "name": "sizes", "type": "int32[]", "default": [] }, + { "name": "iscaffe", "type": "int32", "default": 0 }, + { "name": "ismxnet", "type": "int32", "default": 0 }, + { "name": "isonnx", "type": "int32", "default": 0 }, + { "name": "begin", "type": "int32", "default": 0 }, + { "name": "end", "type": "int32", "default": 0 } + ], + "outputs": [ + { "name": "outputs", "option": "variadic" } + ] + }, + { + "name": "SoftMax", + "category": "Activation", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "SpaceToBatchND", + "category": "Shape", + "attributes": [ + { "name": "dilation_x", "type": "int32", "default": 0 }, + { "name": "dilation_y", "type": "int32", "default": 0 }, + { "name": "pad_top", "type": "int32", "default": 0 }, + { "name": "pad_bottom", "type": "int32", "default": 0 }, + { "name": "pad_left", "type": "int32", "default": 0 }, + { "name": "pad_right", "type": "int32", "default": 0 } + ] + }, + { + "name": "SpaceToDepth", + "category": "Shape", + "attributes": [ + { "name": "block_size", "type": "int32", "default": 0 } + ] + }, + { + "name": "SparseToDense", + "category": "Shape", + "attributes": [ + { "name": "output_shape_size0", "type": "int32", "default": 0 }, + { "name": "output_shape_size1", "type": "int32", "default": 0 }, + { "name": "default_value", "type": "int32", "default": 0 } + ] + }, + { + "name": "Split", + "category": "Shape", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "split_dim", "type": "int32", "default": 0 }, + { "name": "is_caffe", "type": "boolean", "default": false }, + { "name": "is_onnx", "type": "boolean", "default": false }, + { "name": "split_sizes", "type": "int32[]", "default": [] } + ] + }, + { + "name": "SquaredDifference", + "category": "Layer" + }, + { + "name": "Squeeze", + "category": "Transform", + "attributes": [ + { "name": "dim_0", "type": "int32", "default": 0 }, + { "name": "dim_1", "type": "int32", "default": 0 }, + { "name": "dim_2", "type": "int32", "default": 0 }, + { "name": "dim_3", "type": "int32", "default": 0 } + ] + }, + { + "name": "StridedSlice", + "category": "Shape", + "attributes": [ + { "name": "begine_n", "type": "int32", "default": 0 }, + { "name": "end_n", "type": "int32", "default": 0 }, + { "name": "stride_n", "type": "int32", "default": 0 }, + { "name": "begine_c", "type": "int32", "default": 0 }, + { "name": "end_c", "type": "int32", "default": 0 }, + { "name": "stride_c", "type": "int32", 
"default": 0 }, + { "name": "begine_h", "type": "int32", "default": 0 }, + { "name": "end_h", "type": "int32", "default": 0 }, + { "name": "stride_h", "type": "int32", "default": 0 }, + { "name": "begine_w", "type": "int32", "default": 0 }, + { "name": "end_w", "type": "int32", "default": 0 }, + { "name": "stride_w", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "begin" }, + { "name": "end" }, + { "name": "strides" } + ] + }, + { + "name": "SwapAxis", + "category": "Shape", + "attributes": [ + { "name": "dim_0", "type": "int32", "default": 0 }, + { "name": "dim_1", "type": "int32", "default": 0 } + ] + }, + { + "name": "TanH", + "category": "Activation" + }, + { + "name": "Threshold", + "category": "Activation", + "attributes": [ + { "name": "Threshold", "type": "float32", "default": 0 } + ] + }, + { + "name": "TopKV2", + "category": "Layer", + "attributes": [ + { "name": "k", "type": "int32", "default": 0 }, + { "name": "sorted", "type": "int32", "default": 0 } + ] + }, + { + "name": "Transpose", + "category": "Transform", + "attributes": [ + { "name": "shape", "type": "int32[]", "default": [] } + ] + }, + { + "name": "Unary", + "attributes": [ + { "name": "type", "type": "int32", "default": 0 } + ] + }, + { + "name": "Unsqueeze", + "category": "Transform", + "attributes": [ + { "name": "axises[]", "type": "int32[]", "default": [] } + ] + }, + { + "name": "Upsample", + "category": "Data", + "attributes": [ + { "name": "scale", "type": "float32", "default": 0 } + ] + }, + { + "name": "Where", + "category": "Layer" + }, + { + "name": "ZerosLike", + "category": "Layer" + } +] \ No newline at end of file diff --git a/tengine.js b/tengine.js new file mode 100755 index 00000000000..b0f70d7dfde --- /dev/null +++ b/tengine.js @@ -0,0 +1,665 @@ + +// Experimental + +import * as base from './base.js'; + +const tengine = {}; + +tengine.ModelFactory = class { + + match(context) { + return tengine.Reader.open(context.stream); + } + + async open(context, target) { + const metadata = await tengine.Metadata.open(context); + const reader = target; + reader.read(); + return new tengine.Model(metadata, reader); + } +}; + +tengine.Model = class { + + constructor(metadata, reader) { + this.format = `Tengine v${reader.version}`; + this.metadata = new Map(); + this.metadata.set('source', reader.source); + this.graphs = reader.graphs.map((graph) => new tengine.Graph(metadata, graph)); + } +}; + +tengine.Graph = class { + + constructor(metadata, graph) { + this.name = graph.id.toString(); + this.inputs = []; + this.outputs = []; + this.nodes = []; + const tensors = graph.tensors.map((tensor) => new tengine.Value(tensor)); + for (const input of graph.inputs) { + const node = graph.nodes[input]; + this.inputs.push(new tengine.Argument(node.name, node.outputs.map((output) => tensors[output]))); + } + for (const output of graph.outputs) { + const node = graph.nodes[output]; + this.outputs.push(new tengine.Argument(node.name, node.outputs.map((output) => tensors[output]))); + } + for (const node of graph.nodes) { + switch (node.type) { + case 'INPUT': + case 'Const': + break; + default: + this.nodes.push(new tengine.Node(metadata, node, tensors)); + break; + } + } + } +}; + +tengine.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +tengine.Value = class { + + constructor(tensor) { + this.name = tensor.name; + this.type = new tengine.TensorType(tensor.dataType, new tengine.TensorShape(tensor.dims)); + this.initializer = 
(tensor.type === 2) ? new tengine.Tensor(this.type, tensor.buffer) : null; + } +}; + +tengine.Node = class { + + constructor(metadata, node, tensors) { + this.name = node.name; + const type = node.type; + const version = node.version; + this.inputs = []; + this.outputs = []; + this.attributes = []; + this.type = metadata.type(type, version) || { name: type }; + for (let i = 0; i < node.params.length; i++) { + const metadata = (this.type && this.type.attributes && i < this.type.attributes.length) ? this.type.attributes[i] : null; + const name = metadata ? metadata.name : i.toString(); + this.attributes.push(new tengine.Attribute(metadata, name, node.params[i])); + } + const inputs = node.inputs; + let inputIndex = 0; + if (this.type && this.type.inputs) { + for (const inputDef of this.type.inputs) { + if (inputIndex < inputs.length || inputDef.option != 'optional') { + const inputCount = (inputDef.option == 'variadic') ? (inputs.length - inputIndex) : 1; + const inputArguments = inputs.slice(inputIndex, inputIndex + inputCount).filter((id) => id != '' || inputDef.option != 'optional').map((id) => tensors[id]); + this.inputs.push(new tengine.Argument(inputDef.name, inputArguments)); + inputIndex += inputCount; + } + } + } else { + this.inputs.push(...inputs.slice(inputIndex).map((id, index) => { + const inputName = ((inputIndex + index) == 0) ? 'input' : (inputIndex + index).toString(); + return new tengine.Argument(inputName, [ tensors[id] ]); + })); + } + const outputs = node.outputs; + let outputIndex = 0; + if (this.type && this.type.outputs) { + for (const outputDef of this.type.outputs) { + if (outputIndex < outputs.length || outputDef.option != 'optional') { + const outputCount = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1; + const outputArguments = outputs.slice(outputIndex, outputIndex + outputCount).map((id) => tensors[id]); + this.outputs.push(new tengine.Argument(outputDef.name, outputArguments)); + outputIndex += outputCount; + } + } + } else { + this.outputs.push(...outputs.slice(outputIndex).map((id, index) => { + const outputName = ((outputIndex + index) == 0) ? 
'output' : (outputIndex + index).toString();
+                return new tengine.Argument(outputName, [ tensors[id] ]);
+            }));
+        }
+    }
+};
+
+tengine.Attribute = class {
+
+    constructor(metadata, key, value) {
+        this.type = '';
+        this.name = key;
+        this.value = value;
+        if (metadata) {
+            this.name = metadata.name;
+            if (metadata.type) {
+                this.type = metadata.type;
+            }
+            if (metadata.visible === false) {
+                this.visible = false;
+            } else if (Object.prototype.hasOwnProperty.call(metadata, 'default')) {
+                if (this.value == metadata.default || (this.value && this.value.toString() == metadata.default.toString())) {
+                    this.visible = false;
+                }
+            }
+        }
+    }
+};
+
+tengine.Tensor = class {
+
+    constructor(type, values) {
+        this.type = type;
+        this.values = values;
+    }
+};
+
+tengine.TensorType = class {
+
+    constructor(dataType, shape) {
+        switch (dataType) {
+            case 0: this.dataType = 'float32'; break;
+            case 1: this.dataType = 'float16'; break;
+            case 2: this.dataType = 'int8'; break;
+            case 3: this.dataType = 'uint8'; break;
+            case 4: this.dataType = 'int32'; break;
+            case 5: this.dataType = 'int16'; break;
+            default: throw new tengine.Error(`Unsupported data type '${dataType}'.`);
+        }
+        this.shape = shape;
+    }
+
+    toString() {
+        return this.dataType + this.shape.toString();
+    }
+};
+
+tengine.TensorShape = class {
+
+    constructor(dimensions) {
+        this.dimensions = dimensions;
+    }
+
+    toString() {
+        return this.dimensions ? (`[${this.dimensions.map((dimension) => dimension ? dimension.toString() : '?').join(',')}]`) : '';
+    }
+};
+
+tengine.Metadata = class {
+
+    static async open(context) {
+        if (tengine.Metadata._metadata) {
+            return tengine.Metadata._metadata;
+        }
+        try {
+            const data = await context.request('tengine-metadata.json');
+            tengine.Metadata._metadata = new tengine.Metadata(data);
+            return tengine.Metadata._metadata;
+        } catch (error) {
+            tengine.Metadata._metadata = new tengine.Metadata(null);
+            return tengine.Metadata._metadata;
+        }
+    }
+
+    constructor(data) {
+        this._map = new Map();
+        if (data) {
+            const metadata = JSON.parse(data);
+            for (const item of metadata) {
+                if (item.name) {
+                    const version = item.version || 0;
+                    const name = `${item.name}:${version}`;
+                    this._map.set(name, item);
+                }
+            }
+        }
+    }
+
+    type(name, version) {
+        let current = version;
+        while (current >= 0) {
+            if (this._map.has(`${name}:${current}`)) {
+                break;
+            }
+            current--;
+        }
+        if (current >= 0) {
+            const schema = this._map.get(`${name}:${current}`);
+            if (current !== version) {
+                this._map.set(`${name}:${version}`, schema);
+            }
+            return schema;
+        }
+        return null;
+    }
+};
+
+tengine.Reader = class {
+
+    static open(stream) {
+        if (stream && stream.length > 4) {
+            const buffer = stream.peek(2);
+            if (buffer[0] < 4 && buffer[1] === 0) {
+                return new tengine.Reader(stream);
+            }
+        }
+        return null;
+    }
+
+    constructor(stream) {
+        this._stream = stream;
+        // https://github.com/OAID/Tengine/wiki/The-format-of-tmfile
+        // https://github.com/OAID/Tengine/blob/tengine-lite/source/serializer/tmfile/tm2_format.h
+    }
+
+    read() {
+        if (this._stream) {
+            const types = new Map();
+            const register = (index, version, name, params) => {
+                types.set(`${index}:${version}`, { name: name, params: params });
+            };
+            const operator = (index, version) => {
+                let current = version;
+                while (current >= 0) {
+                    if (types.has(`${index}:${current}`)) {
+                        break;
+                    }
+                    current--;
+                }
+                if (current >= 0) {
+                    const schema = types.get(`${index}:${current}`);
+                    if (current !== version) {
+                        types.set(`${index}:${version}`, schema);
+                    }
+                    return schema;
+                }
+                return null;
+            };
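+            // Parameter type codes used by the decoding switch further below:
+            // 'i' int32, 'f' float32, 'i[]'/'f[]' offset-prefixed integer and
+            // float arrays, 'boolean' a single-byte flag, 'string' an
+            // offset-prefixed string, and 'anchors' a table of float32
+            // quadruples (see tengine.BinaryReader).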
+            register(0, 0, 'Accuracy', []);
+            register(1, 0, 'BatchNormalization', [ 'f', 'f', 'i' ]);
+            register(2, 0, 'BilinearResize', [ 'f', 'f', 'i' ]);
+            register(3, 0, 'Concat', [ 'i' ]);
+            register(4, 0, 'Const', []);
+            register(5, 0, 'Convolution', [ 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]);
+            register(6, 0, 'Deconvolution', [ 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]);
+            register(7, 0, 'DetectionOutput', [ 'i', 'i', 'i', 'f', 'f' ]);
+            register(8, 0, 'DropOut', []);
+            register(9, 0, 'Eltwise', [ 'i', 'i' ]);
+            register(10, 0, 'Flatten', [ 'i' ]);
+            register(11, 0, 'FullyConnected', [ 'i' ]);
+            register(12, 0, 'INPUT', []);
+            register(13, 0, 'LRN', [ 'i', 'f', 'f', 'i', 'f' ]);
+            register(14, 0, 'Normalize', [ 'i', 'i' ]);
+            register(15, 0, 'Permute', [ 'i', 'i', 'i', 'i', 'i' ]);
+            register(16, 0, 'Pooling', [ 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]);
+            register(17, 0, 'Prelu', []);
+            register(18, 0, 'PriorBox', [ 'f[]', 'f[]', 'f[]', 'f[]', 'i', 'i', 'i', 'i', 'i', 'f', 'f', 'f', 'i', 'i' ]);
+            register(19, 0, 'Region', [ 'i', 'i', 'i', 'i', 'f', 'f', 'f[]' ]);
+            register(20, 0, 'ReLU', [ 'f' ]);
+            register(21, 0, 'ReLU6', []);
+            register(22, 0, 'Reorg', [ 'i' ]);
+            register(23, 0, 'Reshape', [ 'i', 'i', 'i', 'i', 'i', 'i' ]);
+            // register(23, 0, 'Reshape', [ 'i', 'i', 'i[]' ]);
+            register(24, 0, 'RoiPooling', [ 'i', 'i', 'f' ]);
+            register(25, 0, 'RPN', [ 'f[]', 'f[]', 'i', 'i', 'i', 'i', 'i', 'f', 'anchors' ]);
+            register(26, 0, 'Scale', [ 'i', 'i', 'i' ]);
+            register(27, 0, 'Slice', [ 'i', 'i[]', 'i[]', 'i[]', 'i', 'i', 'i', 'i', 'i' ]);
+            register(28, 0, 'SoftMax', [ 'i' ]);
+            register(29, 0, 'Split', [ 'i', 'i', 'boolean', 'boolean', 'i[]' ]);
+            register(30, 0, 'DetectionPostProcess', [ 'i', 'i', 'f', 'f', 'i', 'f[]' ]);
+            register(31, 0, 'Gemm', [ 'f', 'f', 'i', 'i' ]);
+            register(32, 0, 'Generic', [ 'i', 'i', 'string' ]);
+            register(33, 0, 'Logistic', []);
+            register(34, 0, 'LSTM', [ 'f', 'f', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]);
+            register(35, 0, 'RNN', [ 'f', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]);
+            register(36, 0, 'TanH', []);
+            register(37, 0, 'Sigmoid', []);
+            register(38, 0, 'Squeeze', [ 'i', 'i', 'i', 'i' ]);
+            register(39, 0, 'FusedbnScaleRelu', []);
+            register(40, 0, 'Pad', [ 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'f' ]);
+            register(41, 0, 'StridedSlice', [ 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]);
+            register(42, 0, 'ArgMax', [ 'i' ]);
+            register(43, 0, 'ArgMin', [ 'i' ]);
+            register(44, 0, 'TopKV2', [ 'i', 'i' ]);
+            register(45, 0, 'Reduction', [ 'i', 'i', 'i', 'i', 'i', 'i' ]);
+            register(46, 0, 'Max', []);
+            register(47, 0, 'Min', []);
+            register(48, 0, 'GRU', [ 'f', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]);
+            register(49, 0, 'Addn', [ 'i' ]);
+            register(50, 0, 'SwapAxis', [ 'i', 'i' ]);
+            register(51, 0, 'Upsample', [ 'f' ]);
+            register(52, 0, 'SpaceToBatchND', [ 'i', 'i', 'i', 'i', 'i', 'i' ]);
+            register(53, 0, 'BatchToSpaceND', [ 'i', 'i', 'i', 'i', 'i', 'i' ]);
+            register(54, 0, 'Resize', [ 'f', 'f', 'i' ]);
+            register(55, 0, 'ShuffleChannel', [ 'i' ]);
+            register(56, 0, 'Crop', [ 'i', 'i', 'i', 'i', 'i', 'i', 'boolean', 'i', 'i' ]);
+            register(57, 0, 'ROIAlign', [ 'i', 'i', 'f' ]);
+            register(58, 0, 'Psroipooling', [ 'i', 'i', 'f', 'i' ]);
+            register(59, 0, 'Unary', [ 'i' ]);
+            register(60, 0, 'Expanddims', [ 'i' ]);
+            register(61, 0, 'Bias', [ 'i' ]);
+            register(62, 0, 'Noop', []);
register(63, 0, 'Threshold', [ 'f' ]); + register(64, 0, 'Hardsigmoid', [ 'f', 'f' ]); + register(65, 0, 'Embed', [ 'f', 'f', 'f', 'f' ]); + register(66, 0, 'InstanceNorm', [ 'f' ]); + register(67, 0, 'MVN', [ 'i', 'i', 'f' ]); + register(68, 0, 'Absval', []); + register(69, 0, 'Cast', [ 'i', 'i' ]); + register(70, 0, 'HardSwish', [ 'f', 'f' ]); + register(71, 0, 'Interp', [ 'i', 'f', 'f', 'i', 'i' ]); + register(72, 0, 'SELU', [ 'f', 'f' ]); + register(73, 0, 'ELU', [ 'f' ]); + register(74, 0, 'BroadMul', []); + register(75, 0, 'Logical', [ 'i' ]); + register(76, 0, 'Gather', [ 'i', 'i' ]); + register(77, 0, 'Transpose', [ 'i[]' ]); + register(78, 0, 'Comparison', [ 'i' ]); + register(79, 0, 'SpaceToDepth', [ 'i' ]); + register(80, 0, 'DepthToSpace', [ 'i' ]); + register(81, 0, 'Reverse', []); + register(82, 0, 'SparseToDense', [ 'i','i','i' ]); + register(83, 0, 'Ceil', []); + register(84, 0, 'SquaredDifference', []); + register(85, 0, 'Round', []); + register(86, 0, 'ZerosLike', []); + register(87, 0, 'Clip', [ 'f','f' ]); + register(88, 0, 'Unsqueeze', [ 'i[]' ]); + register(89, 0, 'ReduceL2', [ 'i','i' ]); + register(90, 0, 'Mean', []); + register(91, 0, 'MatMul', []); + register(92, 0, 'Expand', ['i[]']); + register(93, 0, 'Scatter', ['i','boolean']); + register(94, 0, 'Shape', []); + register(95, 0, 'Where', []); + register(96, 0, 'Tile', ['i','i']); + register(97, 0, 'Mish', []); + register(98, 0, 'L2Pool', []); + register(99, 0, 'LogSoftmax', []); + register(100, 0, 'ReLU1', []); + register(101, 0, 'L2Normalization', []); + register(102, 0, 'PackModel', ['i','i']); + register(103, 0, 'Num', []); + const buffer = this._stream.peek(); + const reader = new tengine.BinaryReader(buffer); + this._majorVersion = reader.uint16(); + this._minorVersion = reader.uint16(); + if (this._majorVersion !== 2) { + throw new tengine.Error(`Unsupported format version 'v${this._majorVersion}.${this._minorVersion}'.`); + } + this._compileVersion = reader.uint16(); + reader.skip(2); // struct align + reader.seek(reader.uint32()); // root table + this._originalFormat = reader.int32(); + this._subFormat = reader.int32(); + this._graphs = []; + const subgraphOffsets = reader.uint32s(); + for (const subgraphOffset of subgraphOffsets) { + reader.seek(subgraphOffset); + + const subgraph = {}; + subgraph.id = reader.int32(); + subgraph.graphLayout = reader.int32(); + /* + if (graphLayout == 0) { + return "NCHW"; + } + if (graphLayout == 1) { + return "NHWC"; + } + */ + subgraph.originalLayout = reader.int32(); + subgraph.inputs = reader.uint32s(); + subgraph.outputs = reader.uint32s(); + const nodeOffsets = reader.uint32s(); + const tensorOffsets = reader.uint32s(); + const bufferOffsets = reader.uint32s(); + subgraph.name = reader.string(); + subgraph.nodes = []; + subgraph.tensors = []; + this._graphs.push(subgraph); + // nodes + for (const nodeOffset of nodeOffsets) { + reader.seek(nodeOffset); + const node = {}; + node.id = reader.int32(); + node.inputs = reader.uint32s(); + node.outputs = reader.uint32s(); + const typeOffset = reader.int32(); + node.name = reader.string(); + const attributeOffsets = reader.uint32s(); + node.dynamicShape = reader.boolean(); + reader.seek(typeOffset); + node.version = reader.int32(); + const index = reader.int32(); + const paramsOffset = reader.uint32(); + const schema = operator(index, node.version); + node.type = schema ? schema.name : index.toString(); + const paramTypes = schema ? 
schema.params : [];
+                    node.params = [];
+                    if (paramsOffset) {
+                        reader.seek(paramsOffset);
+                        for (const paramType of paramTypes) {
+                            if (paramType !== 'boolean') {
+                                reader.align(4);
+                            }
+                            switch (paramType) {
+                                case 'i':
+                                    node.params.push(reader.int32());
+                                    break;
+                                case 'f':
+                                    node.params.push(reader.float32());
+                                    break;
+                                case 'i[]':
+                                    node.params.push(reader.int32s());
+                                    break;
+                                case 'f[]':
+                                    node.params.push(reader.float32s());
+                                    break;
+                                case 'boolean':
+                                    node.params.push(reader.boolean());
+                                    break;
+                                case 'string':
+                                    node.params.push(reader.string());
+                                    break;
+                                case 'anchors':
+                                    node.params.push(reader.anchors(4));
+                                    break;
+                                default:
+                                    throw new tengine.Error(`Unsupported param type '${paramType}' in '${node.type}'.`);
+                            }
+                        }
+                    }
+                    if (node.type === 'Slice') {
+                        node.params[6] = (this._originalFormat == 5) ? node.params[6] : 0;
+                    }
+                    node.attributes = attributeOffsets.map((attributeOffset) => {
+                        reader.seek(attributeOffset);
+                        const name = reader.string();
+                        const value = reader.string();
+                        const type = reader.int32();
+                        return { name: name, value: value, type: type };
+                    });
+                    subgraph.nodes.push(node);
+                }
+                // buffers
+                const buffers = bufferOffsets.map((bufferOffset) => {
+                    reader.seek(bufferOffset);
+                    const size = reader.uint32();
+                    const offset = reader.int32();
+                    if (offset !== 0) {
+                        reader.seek(offset);
+                        return reader.read(size);
+                    }
+                    return null;
+                });
+                // tensors
+                subgraph.tensors = tensorOffsets.map((tensorOffset) => {
+                    reader.seek(tensorOffset);
+                    const tensor = {};
+                    tensor.id = reader.int32();
+                    tensor.buffer = buffers[reader.int32()];
+                    tensor.dims = reader.int32s();
+                    tensor.name = reader.string();
+                    const quantparamsOffset = reader.int32();
+                    tensor.layout = reader.int32();
+                    tensor.type = reader.int32(); // var = 1, const = 2, input = 3, vdep, unknown
+                    tensor.dataType = reader.int32();
+                    if (quantparamsOffset) {
+                        reader.seek(quantparamsOffset);
+                        tensor.quantparams = {
+                            zeroPoint: reader.int32(),
+                            scale: reader.float32(),
+                            width: reader.int32()
+                        };
+                    }
+                    return tensor;
+                });
+                for (const node of subgraph.nodes) {
+                    if (node.type === 'Convolution') {
+                        switch (subgraph.graphLayout) {
+                            case 0: // NCHW
+                                /* eslint-disable prefer-destructuring */
+                                node.params[6] = subgraph.tensors[node.inputs[1]].dims[1];
+                                /* eslint-enable prefer-destructuring */
+                                break;
+                            case 1: // NHWC
+                                /* eslint-disable prefer-destructuring */
+                                node.params[6] = subgraph.tensors[node.inputs[1]].dims[3];
+                                /* eslint-enable prefer-destructuring */
+                                break;
+                            default:
+                                throw new tengine.Error(`Unsupported 'Convolution' layout '${subgraph.graphLayout}'.`);
+                        }
+                    }
+                }
+            }
+            delete this._stream;
+        }
+    }
+
+    get version() {
+        return `${this._majorVersion}.${this._minorVersion}`;
+    }
+
+    get source() {
+        switch (this._originalFormat) {
+            case 0: return '';
+            case 1: return 'Tengine';
+            case 2: return 'Caffe';
+            case 3: return 'ONNX';
+            case 4: return 'MXNet';
+            case 5: return 'TensorFlow';
+            case 6: return 'TensorFlow Lite';
+            case 7: return 'Darknet';
+            case 8: return `DLA v${this._subFormat}`;
+            case 9: return 'ncnn';
+            case 10: return 'MegEngine';
+            case 11: return 'OneFlow';
+            case 12: return 'Horizon';
+            case 13: return 'Bitman';
+            default: throw new tengine.Error(`Unsupported source '${this._originalFormat}'.`);
+        }
+    }
+
+    get graphs() {
+        return this._graphs;
+    }
+};
+
+tengine.BinaryReader = class extends base.BinaryReader {
+
+    string() {
+        const position = this.uint32();
+        let content = '';
+        if (position) {
+            const next = this._position;
+            this.seek(position);
+            const
size = this.uint32(); + this.seek(this.uint32()); + for (let i = 0; i < size - 1; i++) { + content += String.fromCharCode(this._buffer[this._position++]); + } + this.seek(next); + } + return content; + } + + uint32s() { + const values = []; + const offset = this.uint32(); + if (offset) { + const next = this.position; + this.seek(offset); + const count = this.uint32(); + for (let i = 0; i < count; i++) { + values.push(this.uint32()); + } + this.seek(next); + } + return values; + } + + int32s() { + const values = []; + const offset = this.uint32(); + if (offset) { + const next = this.position; + this.seek(offset); + const count = this.uint32(); + for (let i = 0; i < count; i++) { + values.push(this.int32()); + } + this.seek(next); + } + return values; + } + + float32s() { + const values = []; + const offset = this.uint32(); + if (offset) { + const next = this.position; + this.seek(offset); + const count = this.uint32(); + for (let i = 0; i < count; i++) { + values.push(this.float32()); + } + this.seek(next); + } + return values; + } + + anchors(length) { + const arrays = []; + const offset = this.uint32(); + if (offset) { + const next = this._position; + this.seek(offset); + const count = this.uint32(); + for (let i = 0; i < count; i++) { + const array = []; + for (let j = 0; j < length; j++) { + array.push(this.float32()); + } + arrays.push(array); + } + this.seek(next); + } + return arrays; + } +}; + +tengine.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Tengine model.'; + } +}; + +export const ModelFactory = tengine.ModelFactory; + diff --git a/tensorrt.js b/tensorrt.js new file mode 100644 index 00000000000..30473941e17 --- /dev/null +++ b/tensorrt.js @@ -0,0 +1,182 @@ + +import * as base from './base.js'; + +const tensorrt = {}; + +tensorrt.ModelFactory = class { + + match(context) { + const stream = context.stream; + return tensorrt.Engine.open(stream) || tensorrt.Container.open(stream); + } + + async open(context, target) { + return new tensorrt.Model(null, target); + } +}; + +tensorrt.Model = class { + + constructor(metadata, model) { + this._format = model.format; + this._graphs = [ new tensorrt.Graph(metadata, model) ]; + } + + get format() { + return this._format; + } + + get graphs() { + return this._graphs; + } +}; + +tensorrt.Graph = class { + + constructor(/* metadata, model */) { + this._inputs = []; + this._outputs = []; + this._nodes = []; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +tensorrt.Engine = class { + + static open(stream) { + if (stream && stream.length >= 24) { + const signatures = [ + [ 0x70, 0x74, 0x72, 0x74 ], // ptrt + [ 0x66, 0x74, 0x72, 0x74 ] // ftrt + ]; + const buffer = stream.peek(4); + for (const signature of signatures) { + if (buffer.every((value, index) => value === signature[index])) { + return new tensorrt.Engine(stream); + } + } + } + return null; + } + + constructor(stream) { + this._stream = stream; + } + + get format() { + this._read(); + return 'TensorRT Engine'; + } + + _read() { + if (this._stream) { + const buffer = this._stream.peek(24); + const reader = new base.BinaryReader(buffer); + reader.skip(4); + const version = reader.uint32(); + reader.uint32(); + // let size = 0; + switch (version) { + case 0x0000: + case 0x002B: { + reader.uint32(); + /* size = */ reader.uint64(); + break; + } + case 0x0057: + case 0x0059: + case 0x0060: + case 0x0061: { + /* size = */ 
reader.uint64(); + reader.uint32(); + break; + } + default: { + const content = Array.from(buffer).map((c) => (c < 16 ? '0' : '') + c.toString(16)).join(''); + throw new tensorrt.Error(`Unsupported TensorRT engine signature (${content.substring(8)}).`); + } + } + // const content = Array.from(buffer).map((c) => (c < 16 ? '0' : '') + c.toString(16)).join(''); + // buffer = this._stream.read(24 + size); + // reader = new tensorrt.BinaryReader(buffer); + throw new tensorrt.Error('Invalid file content. File contains undocumented TensorRT engine data.'); + } + } +}; + +tensorrt.Container = class { + + static open(stream) { + if (stream) { + const buffer = stream.peek(Math.min(512, stream.length)); + if (buffer.length > 12 && buffer[6] === 0x00 && buffer[7] === 0x00) { + const reader = new base.BinaryReader(buffer); + const length = reader.uint64(); + if (length === stream.length) { + let position = reader.position + reader.uint32(); + if (position < reader.length) { + reader.seek(position); + const offset = reader.uint32(); + position = reader.position - offset - 4; + if (position > 0 && position < reader.length) { + reader.seek(position); + const length = reader.uint16(); + if (offset === length) { + return new tensorrt.Container(stream); + } + } + } + } + } + } + return null; + } + + constructor(stream) { + this._stream = stream; + } + + get format() { + this._read(); + return 'TensorRT FlatBuffers'; + } + + _read() { + // const buffer = this._stream.peek(Math.min(24, this._stream.length)); + // const content = Array.from(buffer).map((c) => (c < 16 ? '0' : '') + c.toString(16)).join(''); + throw new tensorrt.Error('Invalid file content. File contains undocumented TensorRT data.'); + } +}; + +tensorrt.BinaryReader = class extends base.BinaryReader { + + string() { + const length = this.uint64(); + const position = this._position; + this.skip(length); + const data = this._buffer.subarray(position, this._position); + this._decoder = this._decoder || new TextDecoder('utf-8'); + return this._decoder.decode(data); + } +}; + +tensorrt.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading TensorRT model.'; + } +}; + +export const ModelFactory = tensorrt.ModelFactory; diff --git a/text.js b/text.js new file mode 100644 index 00000000000..09c7dad06d2 --- /dev/null +++ b/text.js @@ -0,0 +1,344 @@ + +const text = {}; + +text.Decoder = class { + + static open(data, encoding) { + if (typeof data === 'string') { + return new text.Decoder.String(data); + } + const assert = (encoding, condition) => { + if (encoding && encoding !== condition) { + throw new text.Error(`Invalid encoding '${encoding}'.`); + } + }; + const buffer = data instanceof Uint8Array ? 
data : data.peek();
+        const length = buffer.length;
+        if (length >= 3 && buffer[0] === 0xef && buffer[1] === 0xbb && buffer[2] === 0xbf) {
+            assert(encoding, 'utf-8');
+            return new text.Decoder.Utf8(buffer, 3, true);
+        }
+        // The UTF-32 byte order marks begin with the UTF-16 ones, so they must
+        // be tested first. 00 00 FE FF is UTF-32 big-endian, FF FE 00 00 is
+        // UTF-32 little-endian, and both marks are four bytes long.
+        if (length >= 4 && buffer[0] === 0x00 && buffer[1] === 0x00 && buffer[2] === 0xfe && buffer[3] === 0xff) {
+            assert(encoding, 'utf-32');
+            return new text.Decoder.Utf32BE(buffer, 4);
+        }
+        if (length >= 4 && buffer[0] === 0xff && buffer[1] === 0xfe && buffer[2] === 0x00 && buffer[3] === 0x00) {
+            assert(encoding, 'utf-32');
+            return new text.Decoder.Utf32LE(buffer, 4);
+        }
+        if (length >= 2 && buffer[0] === 0xff && buffer[1] === 0xfe) {
+            assert(encoding, 'utf-16');
+            return new text.Decoder.Utf16LE(buffer, 2);
+        }
+        if (length >= 2 && buffer[0] === 0xfe && buffer[1] === 0xff) {
+            assert(encoding, 'utf-16');
+            return new text.Decoder.Utf16BE(buffer, 2);
+        }
+        if (length >= 5 && buffer[0] === 0x2B && buffer[1] === 0x2F && buffer[2] === 0x76 && buffer[3] === 0x38 && buffer[4] === 0x2D) {
+            throw new text.Error("Unsupported UTF-7 encoding.");
+        }
+        if (length >= 4 && buffer[0] === 0x2B && buffer[1] === 0x2F && buffer[2] === 0x76 && (buffer[3] === 0x38 || buffer[3] === 0x39 || buffer[3] === 0x2B || buffer[3] === 0x2F)) {
+            throw new text.Error("Unsupported UTF-7 encoding.");
+        }
+        if (length >= 4 && buffer[0] === 0x84 && buffer[1] === 0x31 && buffer[2] === 0x95 && buffer[3] === 0x33) {
+            throw new text.Error("Unsupported GB-18030 encoding.");
+        }
+        if (length > 4 && (length % 2) == 0 && (buffer[0] === 0x00 || buffer[1] === 0x00 || buffer[2] === 0x00 || buffer[3] === 0x00)) {
+            // Heuristic for BOM-less UTF-16: sample the low and high bytes of
+            // each 16-bit unit and compare against the sampled size.
+            const lo = new Uint32Array(256);
+            const hi = new Uint32Array(256);
+            const size = Math.min(1024, length);
+            for (let i = 0; i < size; i += 2) {
+                lo[buffer[i]]++;
+                hi[buffer[i + 1]]++;
+            }
+            if (lo[0x00] === 0 && (hi[0x00] / (size >> 1)) > 0.5) {
+                assert(encoding, 'utf-16');
+                return new text.Decoder.Utf16LE(buffer, 0);
+            }
+            if (hi[0x00] === 0 && (lo[0x00] / (size >> 1)) > 0.5) {
+                assert(encoding, 'utf-16');
+                return new text.Decoder.Utf16BE(buffer, 0);
+            }
+        }
+        if (encoding && (encoding.startsWith('iso-8859-') || encoding.startsWith('latin-'))) {
+            return new text.Decoder.Latin1(buffer, 0);
+        }
+        assert(encoding, 'utf-8');
+        return new text.Decoder.Utf8(buffer, 0, encoding === 'utf-8');
+    }
+};
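+
+// A minimal usage sketch (the byte values are illustrative): a UTF-8 byte
+// order mark selects the Utf8 decoder, and decode() then yields one code
+// point per call until the buffer is exhausted.
+//
+//   const decoder = text.Decoder.open(new Uint8Array([ 0xef, 0xbb, 0xbf, 0x48, 0x69 ]));
+//   decoder.decode(); // 'H'
+//   decoder.decode(); // 'i'
+//   decoder.decode(); // undefined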
+
+text.Decoder.String = class {
+
+    constructor(buffer) {
+        // Split into an array of code points when the string contains
+        // surrogates, so that decode() never returns half of a pair.
+        this.buffer = /[\uD800-\uDFFF]/.test(buffer) ? buffer.match(/[\uD800-\uDBFF][\uDC00-\uDFFF]|[^\uD800-\uDFFF]/g) : buffer;
+        this.position = 0;
+        this.length = this.buffer.length;
+    }
+
+    get encoding() {
+        return null;
+    }
+
+    decode() {
+        if (this.position < this.length) {
+            return this.buffer[this.position++];
+        }
+        return undefined;
+    }
+};
+
+text.Decoder.Utf8 = class {
+
+    constructor(buffer, position, fatal) {
+        this.position = position || 0;
+        this.buffer = buffer;
+        this.fatal = fatal;
+    }
+
+    get encoding() {
+        return 'utf-8';
+    }
+
+    decode() {
+        const c = this.buffer[this.position];
+        if (c === undefined) {
+            return c;
+        }
+        this.position++;
+        if (c < 0x80) {
+            return String.fromCodePoint(c);
+        }
+        if (c >= 0xC2 && c <= 0xDF) {
+            if (this.buffer[this.position] !== undefined) {
+                const c2 = this.buffer[this.position];
+                this.position++;
+                return String.fromCharCode(((c & 0x1F) << 6) | (c2 & 0x3F));
+            }
+        }
+        if (c >= 0xE0 && c <= 0xEF) {
+            if (this.buffer[this.position + 1] !== undefined) {
+                const c2 = this.buffer[this.position];
+                if ((c !== 0xE0 || c2 >= 0xA0) && (c !== 0xED || c2 <= 0x9f)) {
+                    const c3 = this.buffer[this.position + 1];
+                    if (c3 >= 0x80 && c3 <= 0xBF) {
+                        this.position += 2;
+                        return String.fromCharCode(((c & 0x0F) << 12) | ((c2 & 0x3F) << 6) | ((c3 & 0x3F) << 0));
+                    }
+                }
+            }
+        }
+        if (c >= 0xF0 && c <= 0xF4) {
+            if (this.buffer[this.position + 2] !== undefined) {
+                const c2 = this.buffer[this.position];
+                if (c2 >= 0x80 && c2 <= 0xBF) {
+                    const c3 = this.buffer[this.position + 1];
+                    if (c3 >= 0x80 && c3 <= 0xBF) {
+                        const c4 = this.buffer[this.position + 2];
+                        if (c4 >= 0x80 && c4 <= 0xBF) {
+                            const codePoint = ((c & 0x07) << 18) | ((c2 & 0x3F) << 12) | ((c3 & 0x3F) << 6) | (c4 & 0x3F);
+                            if (codePoint <= 0x10FFFF) {
+                                this.position += 3;
+                                return String.fromCodePoint(codePoint);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        if (this.fatal) {
+            throw new text.Error('Invalid utf-8 character.');
+        }
+        return String.fromCharCode(0xfffd);
+    }
+};
+
+text.Decoder.Latin1 = class {
+
+    constructor(buffer, position) {
+        this.position = position || 0;
+        this.buffer = buffer;
+    }
+
+    get encoding() {
+        return 'latin-1';
+    }
+
+    decode() {
+        const c = this.buffer[this.position];
+        if (c === undefined) {
+            return c;
+        }
+        this.position++;
+        return String.fromCodePoint(c);
+    }
+};
+
+text.Decoder.Utf16LE = class {
+
+    constructor(buffer, position) {
+        this.buffer = buffer;
+        this.position = position || 0;
+        this.length = buffer.length;
+    }
+
+    get encoding() {
+        return 'utf-16';
+    }
+
+    decode() {
+        if (this.position + 1 < this.length) {
+            const c = this.buffer[this.position++] | (this.buffer[this.position++] << 8);
+            if (c < 0xD800 || c >= 0xE000) {
+                return String.fromCharCode(c);
+            }
+            if (c >= 0xD800 && c < 0xDC00) {
+                if (this.position + 1 < this.length) {
+                    const c2 = this.buffer[this.position++] | (this.buffer[this.position++] << 8);
+                    if (c2 >= 0xDC00 && c2 < 0xE000) {
+                        return String.fromCodePoint(0x10000 + ((c & 0x3ff) << 10) + (c2 & 0x3ff));
+                    }
+                }
+            }
+            return String.fromCharCode(0xfffd);
+        }
+        return undefined;
+    }
+};
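+
+// Surrogate decoding, worked through for U+1F600 in UTF-16: the high unit
+// 0xD83D contributes (0xD83D & 0x3FF) << 10 = 0xF400, the low unit 0xDE00
+// contributes 0xDE00 & 0x3FF = 0x200, so the decoded code point is
+// 0x10000 + 0xF400 + 0x200 = 0x1F600. The little- and big-endian decoders
+// differ only in how the two bytes of each 16-bit unit are assembled.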
+
+text.Decoder.Utf16BE = class {
+
+    constructor(buffer, position) {
+        this.buffer = buffer;
+        this.position = position || 0;
+        this.length = buffer.length;
+    }
+
+    get encoding() {
+        return 'utf-16';
+    }
+
+    decode() {
+        if (this.position + 1 < this.length) {
+            const c = (this.buffer[this.position++] << 8) | this.buffer[this.position++];
+            if (c < 0xD800 || c >= 0xE000) {
+                return String.fromCharCode(c);
+            }
+            if (c >= 0xD800 && c < 0xDC00) {
+                if (this.position + 1 < this.length) {
+                    const c2 = (this.buffer[this.position++] << 8) | this.buffer[this.position++];
+                    if (c2 >= 0xDC00 && c2 < 0xE000) {
+                        return String.fromCodePoint(0x10000 + ((c & 0x3ff) << 10) + (c2 & 0x3ff));
+                    }
+                }
+            }
+            return String.fromCharCode(0xfffd);
+        }
+        return undefined;
+    }
+};
+
+text.Decoder.Utf32LE = class {
+
+    constructor(buffer, position) {
+        this.buffer = buffer;
+        this.position = position || 0;
+        this.length = buffer.length;
+    }
+
+    get encoding() {
+        return 'utf-32';
+    }
+
+    decode() {
+        if (this.position + 3 < this.length) {
+            const c = this.buffer[this.position++] | (this.buffer[this.position++] << 8) | (this.buffer[this.position++] << 16) | (this.buffer[this.position++] << 24);
+            if (c >= 0 && c <= 0x10FFFF) {
+                return String.fromCodePoint(c);
+            }
+            return String.fromCharCode(0xfffd);
+        }
+        return undefined;
+    }
+};
+
+text.Decoder.Utf32BE = class {
+
+    constructor(buffer, position) {
+        this.buffer = buffer;
+        this.position = position || 0;
+        this.length = buffer.length;
+    }
+
+    get encoding() {
+        return 'utf-32';
+    }
+
+    decode() {
+        if (this.position + 3 < this.length) {
+            const c = (this.buffer[this.position++] << 24) | (this.buffer[this.position++] << 16) | (this.buffer[this.position++] << 8) | this.buffer[this.position++];
+            if (c >= 0 && c <= 0x10FFFF) {
+                return String.fromCodePoint(c);
+            }
+            return String.fromCharCode(0xfffd);
+        }
+        return undefined;
+    }
+};
+
+text.Reader = class {
+
+    constructor(data, length) {
+        this._decoder = text.Decoder.open(data);
+        this._position = 0;
+        this._length = length || Number.MAX_SAFE_INTEGER;
+    }
+
+    static open(data, length) {
+        return new text.Reader(data, length);
+    }
+
+    read() {
+        if (this._position >= this._length) {
+            return undefined;
+        }
+        let line = '';
+        let buffer = null;
+        for (;;) {
+            const c = this._decoder.decode();
+            if (c === undefined) {
+                this._length = this._position;
+                break;
+            }
+            this._position++;
+            if (this._position > this._length) {
+                break;
+            }
+            if (c === '\n') {
+                break;
+            }
+            line += c;
+            if (line.length >= 32) {
+                buffer = buffer || [];
+                buffer.push(line);
+                line = '';
+            }
+        }
+        if (buffer) {
+            buffer.push(line);
+            return buffer.join('');
+        }
+        return line;
+    }
+};
+
+text.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'Text Error';
+    }
+};
+
+export const Decoder = text.Decoder;
+export const Reader = text.Reader;
diff --git a/tf-metadata.json b/tf-metadata.json
new file mode 100644
index 00000000000..9c7ee3cf812
--- /dev/null
+++ b/tf-metadata.json
@@ -0,0 +1,65228 @@
+[
+  {
+    "name": "Abort",
+    "summary": "Raise an exception to abort the process when called.",
+    "description": "If exit_without_error is true, the process will exit normally,\notherwise it will exit with a SIGABRT signal.\n\nReturns nothing but an exception.",
+    "attributes": [
+      {
+        "name": "error_msg",
+        "type": "string",
+        "description": "A string which is the message associated with the exception.",
+        "default": ""
+      },
+      {
+        "name": "exit_without_error",
+        "type": "boolean",
+        "default": false
+      }
+    ]
+  },
+  {
+    "name": "Abs",
+    "summary": "Computes the absolute value of a tensor.",
+    "description": "Given a tensor `x`, this operation returns a tensor containing the absolute\nvalue of each element in `x`. For example, if x is an input element and y is\nan output element, this operation computes \\\\(y = |x|\\\\).",
+    "attributes": [
+      {
+        "name": "T",
+        "type": "type",
+        "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`."
+      }
+    ],
+    "inputs": [
+      {
+        "name": "x",
+        "typeAttr": "T"
+      }
+    ],
+    "outputs": [
+      {
+        "name": "y",
+        "typeAttr": "T"
+      }
+    ]
+  },
+  {
+    "name": "AccumulateNV2",
+    "summary": "Returns the element-wise sum of a list of tensors.",
+    "description": "`tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not\nwait for all of its inputs to be ready before beginning to sum. This can\nsave memory if inputs are ready at different times, since minimum temporary\nstorage is proportional to the output size rather than the inputs size.\n\nUnlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.\n\nReturns a `Tensor` of same shape and type as the elements of `inputs`.",
+    "attributes": [
+      {
+        "name": "N",
+        "type": "int64",
+        "minimum": 1
+      },
+      {
+        "name": "T",
+        "type": "type",
+        "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`."
+      },
+      {
+        "name": "shape",
+        "type": "shape",
+        "description": "Shape of elements of `inputs`."
+      }
+    ],
+    "inputs": [
+      {
+        "name": "inputs",
+        "description": "A list of `Tensor` objects, each with same shape and type.",
+        "numberAttr": "N",
+        "typeAttr": "T"
+      }
+    ],
+    "outputs": [
+      {
+        "name": "sum",
+        "typeAttr": "T"
+      }
+    ]
+  },
+  {
+    "name": "AccumulatorApplyGradient",
+    "summary": "Applies a gradient to a given accumulator.",
+    "description": "Does not add if local_step is less than the accumulator's global_step.",
+    "attributes": [
+      {
+        "name": "dtype",
+        "type": "type",
+        "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`."
+      }
+    ],
+    "inputs": [
+      {
+        "name": "handle",
+        "description": "The handle to an accumulator.",
+        "type": 7,
+        "isRef": true
+      },
+      {
+        "name": "local_step",
+        "description": "The local_step value at which the gradient was computed.",
+        "type": 9
+      },
+      {
+        "name": "gradient",
+        "description": "A tensor of the gradient to be accumulated.",
+        "typeAttr": "dtype"
+      }
+    ]
+  },
+  {
+    "name": "AccumulatorNumAccumulated",
+    "summary": "Returns the number of gradients aggregated in the given accumulators.",
+    "inputs": [
+      {
+        "name": "handle",
+        "description": "The handle to an accumulator.",
+        "type": 7,
+        "isRef": true
+      }
+    ],
+    "outputs": [
+      {
+        "name": "num_accumulated",
+        "description": "The number of gradients aggregated in the given accumulator.",
+        "type": 3
+      }
+    ]
+  },
+  {
+    "name": "AccumulatorSetGlobalStep",
+    "summary": "Updates the accumulator with a new value for global_step.",
+    "description": "Logs warning if the accumulator's value is already higher than\nnew_global_step.",
+    "inputs": [
+      {
+        "name": "handle",
+        "description": "The handle to an accumulator.",
+        "type": 7,
+        "isRef": true
+      },
+      {
+        "name": "new_global_step",
+        "description": "The new global_step value to set.",
+        "type": 9
+      }
+    ]
+  },
+  {
+    "name": "AccumulatorTakeGradient",
+    "summary": "Extracts the average gradient in the given ConditionalAccumulator.",
+    "description": "The op blocks until sufficient (i.e., more than num_required)\ngradients have been accumulated. 
If the accumulator has already\naggregated more than num_required gradients, it returns the average of\nthe accumulated gradients. Also automatically increments the recorded\nglobal_step in the accumulator by 1, and resets the aggregate to 0.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to an accumulator.", + "type": 7, + "isRef": true + }, + { + "name": "num_required", + "description": "Number of gradients required before we return an aggregate.", + "type": 3 + } + ], + "outputs": [ + { + "name": "average", + "description": "The average of the accumulated gradients.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "Acos", + "summary": "Computes acos of x element-wise.", + "description": "\n Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`.\n\n Input range is `[-1, 1]` and the output has a range of `[0, pi]`.\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Acosh", + "summary": "Computes inverse hyperbolic cosine of x element-wise.", + "description": "Given an input tensor, the function computes inverse hyperbolic cosine of every element.\nInput range is `[1, inf]`. It returns `nan` if the input lies outside the range.\n\n```python\nx = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\ntf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Add", + "summary": "Returns x + y element-wise.", + "description": "*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\nGiven two input tensors, the `tf.add` operation computes the sum for every element in the tensor.\n\nBoth input and output have a range `(-inf, inf)`.\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `string`." 
+      }
+    ],
+    "inputs": [
+      {
+        "name": "x",
+        "typeAttr": "T"
+      },
+      {
+        "name": "y",
+        "typeAttr": "T"
+      }
+    ],
+    "outputs": [
+      {
+        "name": "z",
+        "typeAttr": "T"
+      }
+    ]
+  },
+  {
+    "name": "AddManySparseToTensorsMap",
+    "summary": "Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.",
+    "description": "A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,\n`sparse_values`, and `sparse_shape`, where\n\n```sparse_indices.shape[1] == sparse_shape.shape[0] == R```\n\nAn `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`\nhaving a first `sparse_indices` column taking values between `[0, N)`, where\nthe minibatch size `N == sparse_shape[0]`.\n\nThe input `SparseTensor` must have rank `R` greater than 1, and the first\ndimension is treated as the minibatch dimension. Elements of the `SparseTensor`\nmust be sorted in increasing order of this first dimension. The stored\n`SparseTensor` objects pointed to by each row of the output `sparse_handles`\nwill have rank `R-1`.\n\nThe `SparseTensor` values can then be read out as part of a minibatch by passing\nthe given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure\nthe correct `SparseTensorsMap` is accessed, ensure that the same\n`container` and `shared_name` are passed to that Op. If no `shared_name`\nis provided here, instead use the *name* of the Operation created by calling\n`AddManySparseToTensorsMap` as the `shared_name` passed to\n`TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.",
+    "attributes": [
+      {
+        "name": "T",
+        "type": "type"
+      },
+      {
+        "name": "container",
+        "type": "string",
+        "description": "The container name for the `SparseTensorsMap` created by this op.",
+        "default": ""
+      },
+      {
+        "name": "shared_name",
+        "type": "string",
+        "description": "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation's unique name is used.",
+        "default": ""
+      }
+    ],
+    "inputs": [
+      {
+        "name": "sparse_indices",
+        "description": "2-D. The `indices` of the minibatch `SparseTensor`.\n`sparse_indices[:, 0]` must be ordered values in `[0, N)`.",
+        "type": 9
+      },
+      {
+        "name": "sparse_values",
+        "description": "1-D. The `values` of the minibatch `SparseTensor`.",
+        "typeAttr": "T"
+      },
+      {
+        "name": "sparse_shape",
+        "description": "1-D. The `shape` of the minibatch `SparseTensor`.\nThe minibatch size `N == sparse_shape[0]`.",
+        "type": 9
+      }
+    ],
+    "outputs": [
+      {
+        "name": "sparse_handles",
+        "description": "1-D. The handles of the `SparseTensor` now stored in the\n`SparseTensorsMap`. Shape: `[N]`.",
+        "type": 9
+      }
+    ]
+  },
+  {
+    "name": "AddN",
+    "summary": "Add all input tensors element-wise.",
+    "description": " Inputs must be of the same size and shape.\n\n ```python\n x = [9, 7, 10]\n tf.math.add_n(x) ==> 26\n ```",
+    "attributes": [
+      {
+        "name": "N",
+        "type": "int64",
+        "minimum": 1
+      },
+      {
+        "name": "T",
+        "type": "type",
+        "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`, `variant`."
+ } + ], + "inputs": [ + { + "name": "inputs", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "sum", + "typeAttr": "T" + } + ] + }, + { + "name": "AddSparseToTensorsMap", + "summary": "Add a `SparseTensor` to a `SparseTensorsMap` return its handle.", + "description": "A `SparseTensor` is represented by three tensors: `sparse_indices`,\n`sparse_values`, and `sparse_shape`.\n\nThis operator takes the given `SparseTensor` and adds it to a container\nobject (a `SparseTensorsMap`). A unique key within this container is generated\nin the form of an `int64`, and this is the value that is returned.\n\nThe `SparseTensor` can then be read out as part of a minibatch by passing\nthe key as a vector element to `TakeManySparseFromTensorsMap`. To ensure\nthe correct `SparseTensorsMap` is accessed, ensure that the same\n`container` and `shared_name` are passed to that Op. If no `shared_name`\nis provided here, instead use the *name* of the Operation created by calling\n`AddSparseToTensorsMap` as the `shared_name` passed to\n`TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "container", + "type": "string", + "description": "The container name for the `SparseTensorsMap` created by this op.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation's unique name is used.", + "default": "" + } + ], + "inputs": [ + { + "name": "sparse_indices", + "description": "2-D. The `indices` of the `SparseTensor`.", + "type": 9 + }, + { + "name": "sparse_values", + "description": "1-D. The `values` of the `SparseTensor`.", + "typeAttr": "T" + }, + { + "name": "sparse_shape", + "description": "1-D. The `shape` of the `SparseTensor`.", + "type": 9 + } + ], + "outputs": [ + { + "name": "sparse_handle", + "description": "0-D. The handle of the `SparseTensor` now stored in the\n`SparseTensorsMap`.", + "type": 9 + } + ] + }, + { + "name": "AddV2", + "summary": "Returns x + y element-wise.", + "description": "*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "AdjustContrast", + "summary": "Deprecated. Disallowed in GraphDef version >= 2.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `int8`, `int16`, `int32`, `int64`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "images", + "typeAttr": "T" + }, + { + "name": "contrast_factor", + "type": 1 + }, + { + "name": "min_value", + "type": 1 + }, + { + "name": "max_value", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "type": 1 + } + ] + }, + { + "name": "AdjustContrastv2", + "summary": "Adjust the contrast of one or more images.", + "description": "`images` is a tensor of at least 3 dimensions. The last 3 dimensions are\ninterpreted as `[height, width, channels]`. 
The other dimensions only\nrepresent a collection of images, such as `[batch, height, width, channels]`.\n\nContrast is adjusted independently for each channel of each image.\n\nFor each channel, the Op first computes the mean of the image pixels in the\nchannel and then adjusts each component of each pixel to\n`(x - mean) * contrast_factor + mean`.",
+    "attributes": [
+      {
+        "name": "T",
+        "type": "type",
+        "description": "Must be one of the following: `float16`, `float32`.",
+        "default": {
+          "type": "type",
+          "value": 1
+        }
+      }
+    ],
+    "inputs": [
+      {
+        "name": "images",
+        "description": "Images to adjust. At least 3-D.",
+        "typeAttr": "T"
+      },
+      {
+        "name": "contrast_factor",
+        "description": "A float multiplier for adjusting contrast.",
+        "type": 1
+      }
+    ],
+    "outputs": [
+      {
+        "name": "output",
+        "description": "The contrast-adjusted image or images.",
+        "typeAttr": "T"
+      }
+    ]
+  },
+  {
+    "name": "AdjustHue",
+    "summary": "Adjust the hue of one or more images.",
+    "description": "`images` is a tensor of at least 3 dimensions. The last dimension is\ninterpreted as channels, and must be three.\n\nThe input image is considered in the RGB colorspace. Conceptually, the RGB\ncolors are first mapped into HSV. A delta is then applied to all the hue values,\nand then remapped back to RGB colorspace.",
+    "attributes": [
+      {
+        "name": "T",
+        "type": "type",
+        "description": "Must be one of the following: `float16`, `float32`.",
+        "default": {
+          "type": "type",
+          "value": 1
+        }
+      }
+    ],
+    "inputs": [
+      {
+        "name": "images",
+        "description": "Images to adjust. At least 3-D.",
+        "typeAttr": "T"
+      },
+      {
+        "name": "delta",
+        "description": "A float delta to add to the hue.",
+        "type": 1
+      }
+    ],
+    "outputs": [
+      {
+        "name": "output",
+        "description": "The hue-adjusted image or images.",
+        "typeAttr": "T"
+      }
+    ]
+  },
+  {
+    "name": "AdjustSaturation",
+    "summary": "Adjust the saturation of one or more images.",
+    "description": "`images` is a tensor of at least 3 dimensions. The last dimension is\ninterpreted as channels, and must be three.\n\nThe input image is considered in the RGB colorspace. Conceptually, the RGB\ncolors are first mapped into HSV. A scale is then applied to all the saturation\nvalues, and then remapped back to RGB colorspace.",
+    "attributes": [
+      {
+        "name": "T",
+        "type": "type",
+        "description": "Must be one of the following: `float16`, `float32`.",
+        "default": {
+          "type": "type",
+          "value": 1
+        }
+      }
+    ],
+    "inputs": [
+      {
+        "name": "images",
+        "description": "Images to adjust. At least 3-D.",
+        "typeAttr": "T"
+      },
+      {
+        "name": "scale",
+        "description": "A float scale to add to the saturation.",
+        "type": 1
+      }
+    ],
+    "outputs": [
+      {
+        "name": "output",
+        "description": "The saturation-adjusted image or images.",
+        "typeAttr": "T"
+      }
+    ]
+  },
+  {
+    "name": "All",
+    "summary": "Computes the \"logical and\" of elements across dimensions of a tensor.",
+    "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. 
If `keep_dims` is true, the reduced dimensions are\nretained with length 1.",
+    "attributes": [
+      {
+        "name": "keep_dims",
+        "type": "boolean",
+        "description": "If true, retain reduced dimensions with length 1.",
+        "default": false
+      },
+      {
+        "name": "Tidx",
+        "type": "type",
+        "description": "Must be one of the following: `int32`, `int64`.",
+        "default": {
+          "type": "type",
+          "value": 3
+        }
+      }
+    ],
+    "inputs": [
+      {
+        "name": "input",
+        "description": "The tensor to reduce.",
+        "type": 10
+      },
+      {
+        "name": "reduction_indices",
+        "description": "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`.",
+        "typeAttr": "Tidx"
+      }
+    ],
+    "outputs": [
+      {
+        "name": "output",
+        "description": "The reduced tensor.",
+        "type": 10
+      }
+    ]
+  },
+  {
+    "name": "AllCandidateSampler",
+    "summary": "Generates labels for candidate sampling with a learned unigram distribution.",
+    "description": "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.",
+    "attributes": [
+      {
+        "name": "num_true",
+        "type": "int64",
+        "description": "Number of true labels per context.",
+        "minimum": 1
+      },
+      {
+        "name": "num_sampled",
+        "type": "int64",
+        "description": "Number of candidates to produce.",
+        "minimum": 1
+      },
+      {
+        "name": "unique",
+        "type": "boolean",
+        "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
+      },
+      {
+        "name": "seed",
+        "type": "int64",
+        "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.",
+        "default": 0
+      },
+      {
+        "name": "seed2",
+        "type": "int64",
+        "description": "A second seed to avoid seed collision.",
+        "default": 0
+      }
+    ],
+    "inputs": [
+      {
+        "name": "true_classes",
+        "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.",
+        "type": 9
+      }
+    ],
+    "outputs": [
+      {
+        "name": "sampled_candidates",
+        "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.",
+        "type": 9
+      },
+      {
+        "name": "true_expected_count",
+        "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.",
+        "type": 1
+      },
+      {
+        "name": "sampled_expected_count",
+        "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability.",
+        "type": 1
+      }
+    ]
+  },
+  {
+    "name": "AllToAll",
+    "summary": "An Op to exchange data across TPU replicas.",
+    "description": "On each replica, the input is split into `split_count` blocks along\n`split_dimension` and sent to the other replicas given group_assignment. 
After\nreceiving `split_count` - 1 blocks from other replicas, we concatenate the\nblocks along `concat_dimension` as the output.\n\nFor example, suppose there are 2 TPU replicas:\nreplica 0 receives input: `[[A, B]]`\nreplica 1 receives input: `[[C, D]]`\n\ngroup_assignment=`[[0, 1]]`\nconcat_dimension=0\nsplit_dimension=1\nsplit_count=2\n\nreplica 0's output: `[[A], [C]]`\nreplica 1's output: `[[B], [D]]`",
+    "attributes": [
+      {
+        "name": "T",
+        "type": "type",
+        "description": "The type of elements to be exchanged. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`, `bool`."
+      },
+      {
+        "name": "concat_dimension",
+        "type": "int64",
+        "description": "The dimension number to concatenate."
+      },
+      {
+        "name": "split_dimension",
+        "type": "int64",
+        "description": "The dimension number to split."
+      },
+      {
+        "name": "split_count",
+        "type": "int64",
+        "description": "The number of splits, this number must equal the sub-group\nsize (group_assignment.get_shape()[1])"
+      }
+    ],
+    "inputs": [
+      {
+        "name": "input",
+        "description": "The local input to the sum.",
+        "typeAttr": "T"
+      },
+      {
+        "name": "group_assignment",
+        "description": "An int32 tensor with shape\n[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the\nreplica ids in the ith subgroup.",
+        "type": 3
+      }
+    ],
+    "outputs": [
+      {
+        "name": "output",
+        "description": "The exchanged result.",
+        "typeAttr": "T"
+      }
+    ]
+  },
+  {
+    "name": "Angle",
+    "summary": "Returns the argument of a complex number.",
+    "description": "Given a tensor `input` of complex numbers, this operation returns a tensor of\ntype `float` that is the argument of each element in `input`. All elements in\n`input` must be complex numbers of the form \\\\(a + bj\\\\), where *a*\nis the real part and *b* is the imaginary part.\n\nThe argument returned by this operation is of the form \\\\(atan2(b, a)\\\\).\n\nFor example:\n\n```\n# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.math.angle(input) ==> [2.0132, 1.056]\n```\n\n@compatibility(numpy)\nEquivalent to np.angle.\n@end_compatibility",
+    "attributes": [
+      {
+        "name": "T",
+        "type": "type",
+        "description": "Must be one of the following: `complex64`, `complex128`.",
+        "default": {
+          "type": "type",
+          "value": 8
+        }
+      },
+      {
+        "name": "Tout",
+        "type": "type",
+        "description": "Must be one of the following: `float32`, `float64`.",
+        "default": {
+          "type": "type",
+          "value": 1
+        }
+      }
+    ],
+    "inputs": [
+      {
+        "name": "input",
+        "typeAttr": "T"
+      }
+    ],
+    "outputs": [
+      {
+        "name": "output",
+        "typeAttr": "Tout"
+      }
+    ]
+  },
+  {
+    "name": "AnonymousHashTable",
+    "summary": "Creates an uninitialized anonymous hash table.",
+    "description": "This op creates a new anonymous hash table (as a resource) every time\nit is executed, with the specified dtype of its keys and values,\nreturning the resource handle. Before using the table you will have\nto initialize it. After initialization the table will be\nimmutable. The table is anonymous in the sense that it can only be\naccessed by the returned resource handle (e.g. it cannot be looked up\nby a name in a resource manager). The table will be automatically\ndeleted when all resource handles pointing to it are gone.",
+    "attributes": [
+      {
+        "name": "key_dtype",
+        "type": "type",
+        "description": "Type of the table keys."
+ }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." + } + ], + "outputs": [ + { + "name": "table_handle", + "description": "The resource handle to the newly created hash-table resource.", + "type": 20 + } + ] + }, + { + "name": "AnonymousIterator", + "summary": "A container for an iterator resource.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "outputs": [ + { + "name": "handle", + "description": "A handle to the iterator that can be passed to a \"MakeIterator\" or\n\"IteratorGetNext\" op. In contrast to Iterator, AnonymousIterator prevents\nresource sharing by name, and does not keep a reference to the resource\ncontainer.", + "type": 20 + } + ] + }, + { + "name": "AnonymousIteratorV2", + "summary": "A container for an iterator resource.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "outputs": [ + { + "name": "handle", + "description": "A handle to the iterator that can be passed to a \"MakeIterator\" or\n\"IteratorGetNext\" op. In contrast to Iterator, AnonymousIterator prevents\nresource sharing by name, and does not keep a reference to the resource\ncontainer.", + "type": 20 + }, + { + "name": "deleter", + "description": "A variant deleter that should be passed into the op that deletes the iterator.", + "type": 21 + } + ] + }, + { + "name": "AnonymousIteratorV3", + "summary": "A container for an iterator resource.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "outputs": [ + { + "name": "handle", + "description": "A handle to the iterator that can be passed to a \"MakeIterator\" or\n\"IteratorGetNext\" op. In contrast to Iterator, AnonymousIterator prevents\nresource sharing by name, and does not keep a reference to the resource\ncontainer.", + "type": 20 + } + ] + }, + { + "name": "AnonymousMemoryCache", + "outputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + }, + { + "name": "AnonymousMultiDeviceIterator", + "summary": "A container for a multi device iterator resource.", + "attributes": [ + { + "name": "devices", + "type": "string[]", + "minimum": 1 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "outputs": [ + { + "name": "handle", + "description": "A handle to a multi device iterator that can be passed to a\n\"MultiDeviceIteratorGetNextFromShard\" op. 
In contrast to MultiDeviceIterator,\nAnonymousIterator prevents resource sharing by name, and does not keep a\nreference to the resource container.", + "type": 20 + }, + { + "name": "deleter", + "description": "A variant deleter that should be passed into the op that deletes the iterator.", + "type": 21 + } + ] + }, + { + "name": "AnonymousMultiDeviceIteratorV3", + "summary": "A container for a multi device iterator resource.", + "attributes": [ + { + "name": "devices", + "type": "string[]", + "minimum": 1 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "outputs": [ + { + "name": "handle", + "description": "A handle to a multi device iterator that can be passed to a\n\"MultiDeviceIteratorGetNextFromShard\" op. In contrast to MultiDeviceIterator,\nAnonymousIterator prevents resource sharing by name, and does not keep a\nreference to the resource container.", + "type": 20 + } + ] + }, + { + "name": "AnonymousMutableDenseHashTable", + "summary": "Creates an empty anonymous mutable hash table that uses tensors as the backing store.", + "description": "This op creates a new anonymous mutable hash table (as a resource) every time\nit is executed, with the specified dtype of its keys and values,\nreturning the resource handle. Each value must be a scalar.\nData can be inserted into the table using\nthe insert operations. It does not support the initialization operation.\n\nIt uses \"open addressing\" with quadratic reprobing to resolve\ncollisions.\n\nThe table is anonymous in the sense that it can only be\naccessed by the returned resource handle (e.g. it cannot be looked up\nby a name in a resource manager). The table will be automatically\ndeleted when all resource handles pointing to it are gone.", + "attributes": [ + { + "name": "key_dtype", + "type": "type", + "description": "Type of the table keys." + }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." + }, + { + "name": "value_shape", + "type": "shape", + "description": "The shape of each value.", + "default": { + "type": "shape", + "value": "?" + } + }, + { + "name": "initial_num_buckets", + "type": "int64", + "description": "The initial number of hash table buckets. Must be a power\nof 2.", + "default": 131072 + }, + { + "name": "max_load_factor", + "type": "float32", + "description": "The maximum ratio between number of entries and number of\nbuckets before growing the table. Must be between 0 and 1.", + "default": 0.800000011920929 + } + ], + "inputs": [ + { + "name": "empty_key", + "description": "The key used to represent empty key buckets internally. Must not\nbe used in insert or lookup operations.", + "typeAttr": "key_dtype" + }, + { + "name": "deleted_key", + "typeAttr": "key_dtype" + } + ], + "outputs": [ + { + "name": "table_handle", + "description": "The resource handle to the newly created hash-table resource.", + "type": 20 + } + ] + }, + { + "name": "AnonymousMutableHashTable", + "summary": "Creates an empty anonymous mutable hash table.", + "description": "This op creates a new anonymous mutable hash table (as a resource) every time\nit is executed, with the specified dtype of its keys and values,\nreturning the resource handle. Each value must be a scalar.\nData can be inserted into the table using\nthe insert operations. It does not support the initialization operation.\nThe table is anonymous in the sense that it can only be\naccessed by the returned resource handle (e.g. 
it cannot be looked up\nby a name in a resource manager). The table will be automatically\ndeleted when all resource handles pointing to it are gone.", + "attributes": [ + { + "name": "key_dtype", + "type": "type", + "description": "Type of the table keys." + }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." + } + ], + "outputs": [ + { + "name": "table_handle", + "description": "The resource handle to the newly created hash-table resource.", + "type": 20 + } + ] + }, + { + "name": "AnonymousMutableHashTableOfTensors", + "summary": "Creates an empty anonymous mutable hash table of vector values.", + "description": "This op creates a new anonymous mutable hash table (as a resource) every time\nit is executed, with the specified dtype of its keys and values,\nreturning the resource handle. Each value must be a vector.\nData can be inserted into the table using\nthe insert operations. It does not support the initialization operation.\nThe table is anonymous in the sense that it can only be\naccessed by the returned resource handle (e.g. it cannot be looked up\nby a name in a resource manager). The table will be automatically\ndeleted when all resource handles pointing to it are gone.", + "attributes": [ + { + "name": "key_dtype", + "type": "type", + "description": "Type of the table keys." + }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." + }, + { + "name": "value_shape", + "type": "shape", + "default": { + "type": "shape", + "value": "?" + } + } + ], + "outputs": [ + { + "name": "table_handle", + "description": "The resource handle to the newly created hash-table resource.", + "type": 20 + } + ] + }, + { + "name": "AnonymousRandomSeedGenerator", + "inputs": [ + { + "name": "seed", + "type": 9 + }, + { + "name": "seed2", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + }, + { + "name": "AnonymousSeedGenerator", + "inputs": [ + { + "name": "seed", + "type": 9 + }, + { + "name": "seed2", + "type": 9 + }, + { + "name": "reshuffle", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + }, + { + "name": "Any", + "summary": "Computes the \"logical or\" of elements across dimensions of a tensor.", + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If true, retain reduced dimensions with length 1.", + "default": false + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "The tensor to reduce.", + "type": 10 + }, + { + "name": "reduction_indices", + "description": "The dimensions to reduce. 
Must be in the range\n`[-rank(input), rank(input))`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "The reduced tensor.", + "type": 10 + } + ] + }, + { + "name": "ApplyAdaMax", + "summary": "Update '*var' according to the AdaMax algorithm.", + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nv_t <- max(beta2 * v_{t-1}, abs(g))\nvariable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "m", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "v", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "beta1_power", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta1", + "description": "Momentum factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta2", + "description": "Momentum factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyAdadelta", + "summary": "Update '*var' according to the adadelta scheme.", + "description": "accum = rho() * accum + (1 - rho()) * grad.square();\nupdate = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;\nupdate_accum = rho() * update_accum + (1 - rho()) * update.square();\nvar -= update;", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum_update", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Constant factor. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyAdagrad", + "summary": "Update '*var' according to the adagrad scheme.", + "description": "accum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "update_slots", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyAdagradDA", + "summary": "Update '*var' according to the proximal adagrad scheme.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "gradient_accumulator", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "gradient_squared_accumulator", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "global_step", + "description": "Training step number. 
Must be a scalar.", + "type": 9 + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyAdagradV2", + "summary": "Update '*var' according to the adagrad scheme.", + "description": "accum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "update_slots", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Constant factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyAdam", + "summary": "Update '*var' according to the Adam algorithm.", + "description": "$$\\text{lr}_t := \\mathrm{lr} \\cdot \\frac{\\sqrt{1 - \\beta_2^t}}{1 - \\beta_1^t}$$\n$$m_t := \\beta_1 \\cdot m_{t-1} + (1 - \\beta_1) \\cdot g$$\n$$v_t := \\beta_2 \\cdot v_{t-1} + (1 - \\beta_2) \\cdot g^2$$\n$$\\text{var} := \\begin{cases} \\text{var} - (m_t \\beta_1 + g \\cdot (1 - \\beta_1))\\cdot\\text{lr}_t/(\\sqrt{v_t} + \\epsilon), &\\text{if use_nesterov}\\\\\\\\ \\text{var} - m_t \\cdot \\text{lr}_t /(\\sqrt{v_t} + \\epsilon), &\\text{otherwise} \\end{cases}$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "use_nesterov", + "type": "boolean", + "description": "If `True`, uses the nesterov update.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "m", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "v", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "beta1_power", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta2_power", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta1", + "description": "Momentum factor. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta2", + "description": "Momentum factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyAddSign", + "summary": "Update '*var' according to the AddSign update.", + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nupdate <- (alpha + sign_decay * sign(g) *sign(m)) * g\nvariable <- variable - lr_t * update", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and m tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "m", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "alpha", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "sign_decay", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyCenteredRMSProp", + "summary": "Update '*var' according to the centered RMSProp algorithm.", + "description": "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\n\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nmg <- rho * mg_{t-1} + (1-rho) * grad\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)\nvar <- var - mom", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." 
+ }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "mg", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "ms", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "mom", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "momentum", + "description": "Momentum Scale. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyFtrl", + "summary": "Update '*var' according to the Ftrl-proximal scheme.", + "description": "accum_new = accum + grad * grad\nlinear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "multiply_linear_by_lr", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "linear", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "lr_power", + "description": "Scaling factor. 
Must be a scalar.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyFtrlV2", + "summary": "Update '*var' according to the Ftrl-proximal scheme.", + "description": "grad_with_shrinkage = grad + 2 * l2_shrinkage * var\naccum_new = accum + grad * grad\nlinear += grad_with_shrinkage -\n (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "multiply_linear_by_lr", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "linear", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 shrinkage regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2_shrinkage", + "typeAttr": "T" + }, + { + "name": "lr_power", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyGradientDescent", + "summary": "Update '*var' by subtracting 'alpha' * 'delta' from it.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "alpha", + "description": "Scaling factor. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "delta", + "description": "The change.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyMomentum", + "summary": "Update '*var' according to the momentum scheme.", + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\naccum = accum * momentum + grad\nvar -= lr * accum", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "use_nesterov", + "type": "boolean", + "description": "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "momentum", + "description": "Momentum. Must be a scalar.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyPowerSign", + "summary": "Update '*var' according to the AddSign update.", + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nupdate <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g\nvariable <- variable - lr_t * update", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and m tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "m", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Scaling factor. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "logbase", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "sign_decay", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyProximalAdagrad", + "summary": "Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.", + "description": "accum += grad * grad\nprox_v = var - lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyProximalGradientDescent", + "summary": "Update '*var' as FOBOS algorithm with fixed learning rate.", + "description": "prox_v = var - alpha * delta\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "alpha", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "delta", + "description": "The change.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApplyRMSProp", + "summary": "Update '*var' according to the RMSProp algorithm.", + "description": "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "ms", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "mom", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ApproxTopK", + "summary": "Returns min/max k values and their indices of the input operand in an approximate manner.", + "description": "See https://arxiv.org/abs/2206.14286 for the algorithm details.\nThis op is only optimized on TPU currently.", + "attributes": [ + { + "name": "k", + "type": "int64", + "description": "Specifies the number of min/max-k.", + "minimum": 0 + }, + { + "name": "reduction_dimension", + "type": "int64", + "description": "Integer dimension along which to search. Default: -1.", + "default": -1 + }, + { + "name": "recall_target", + "type": "float32", + "description": "Recall target for the approximation. Range in (0,1]", + "default": 0.949999988079071 + }, + { + "name": "is_max_k", + "type": "boolean", + "description": "When true, computes max-k; otherwise computes min-k.", + "default": true + }, + { + "name": "reduction_input_size_override", + "type": "int64", + "description": "When set to a positive value, it overrides the size determined by\n`input[reduction_dim]` for evaluating the recall. 
This option is useful when\nthe given `input` is only a subset of the overall computation in SPMD or\ndistributed pipelines, where the true input size cannot be inferred from the\n`input` shape.", + "default": -1 + }, + { + "name": "aggregate_to_topk", + "type": "boolean", + "description": "When true, aggregates approximate results to top-k. When false, returns the\napproximate results. The number of approximate results is implementation\ndefined and is greater than or equal to the specified `k`.", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`." + } + ], + "inputs": [ + { + "name": "input", + "description": "Array to search. Must be at least 1-D of a floating-point type.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "values", + "description": "The min/max k values along the `reduction_dimension` of the `input` operand.\nThe dimensions are the same as the `input` operand except for the\n`reduction_dimension`: when `aggregate_to_topk` is true, the reduction\ndimension is `k`; otherwise, it is greater than or equal to `k`, where the size is\nimplementation-defined.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "The indices of `values` along the `reduction_dimension` of the `input` operand.", + "type": 3 + } + ] + }, + { + "name": "ApproximateEqual", + "summary": "Returns the truth value of abs(x-y) < tolerance element-wise.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "tolerance", + "type": "float32", + "default": 9.999999747378752e-06 + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "ArgMax", + "summary": "Returns the index with the largest value across dimensions of a tensor.", + "description": "Note that in case of ties the identity of the return value is not guaranteed.\n\nUsage:\n ```python\n import tensorflow as tf\n a = [1, 10, 26.9, 2.8, 166.32, 62.3]\n b = tf.math.argmax(input = a)\n c = tf.keras.backend.eval(b)\n # c = 4\n # here a[4] = 166.32 which is the largest element of a across axis 0\n ```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`, `qint8`, `quint8`, `qint32`, `qint16`, `quint16`, `bool`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int16`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "output_type", + "type": "type", + "description": "Must be one of the following: `int16`, `uint16`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "dimension", + "description": "int16, int32 or int64, must be in the range `[-rank(input), rank(input))`.\nDescribes which dimension of the input Tensor to reduce across. 
For vectors,\nuse dimension = 0.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "output_type" + } + ] + }, + { + "name": "ArgMin", + "summary": "Returns the index with the smallest value across dimensions of a tensor.", + "description": "Note that in case of ties the identity of the return value is not guaranteed.\n\nUsage:\n ```python\n import tensorflow as tf\n a = [1, 10, 26.9, 2.8, 166.32, 62.3]\n b = tf.math.argmin(input = a)\n c = tf.keras.backend.eval(b)\n # c = 0\n # here a[0] = 1 which is the smallest element of a across axis 0\n ```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`, `qint8`, `quint8`, `qint32`, `qint16`, `quint16`, `bool`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "output_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "dimension", + "description": "int32 or int64, must be in the range `[-rank(input), rank(input))`.\nDescribes which dimension of the input Tensor to reduce across. For vectors,\nuse dimension = 0.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "output_type" + } + ] + }, + { + "name": "AsString", + "summary": "Converts each entry in the given tensor to strings.", + "description": "Supports many numeric types and boolean.\n\nFor Unicode, see the\n[Working with Unicode text](https://www.tensorflow.org/tutorials/representation/unicode)\ntutorial.\n\nExamples:\n\n>>> tf.strings.as_string([3, 2])\n\n>>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy()\narray([b'3.14', b'2.72'], dtype=object)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`, `complex64`, `complex128`, `bool`, `variant`, `string`." + }, + { + "name": "precision", + "type": "int64", + "description": "The post-decimal precision to use for floating point numbers.\nOnly used if precision > -1.", + "default": -1 + }, + { + "name": "scientific", + "type": "boolean", + "description": "Use scientific notation for floating point numbers.", + "default": false + }, + { + "name": "shortest", + "type": "boolean", + "description": "Use shortest representation (either scientific or standard) for\nfloating point numbers.", + "default": false + }, + { + "name": "width", + "type": "int64", + "description": "Pad pre-decimal numbers to this width.\nApplies to both floating point and integer numbers.\nOnly used if width > -1.", + "default": -1 + }, + { + "name": "fill", + "type": "string", + "description": "The value to pad if width > -1. If empty, pads with spaces.\nAnother typical value is '0'. 
String cannot be longer than 1 character.", + "default": "" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": 7 + } + ] + }, + { + "name": "Asin", + "summary": "Computes the trigonometric inverse sine of x element-wise.", + "description": "The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that\nif `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`.\n\n**Note**: The output of `tf.math.asin` will lie within the invertible range\nof sine, i.e. [-pi/2, pi/2].\n\nFor example:\n\n```python\n# Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]\nx = tf.constant([1.047, 0.785])\ny = tf.math.sin(x) # [0.8659266, 0.7068252]\n\ntf.math.asin(y) # [1.047, 0.785] = x\n```\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Asinh", + "summary": "Computes inverse hyperbolic sine of x element-wise.", + "description": " Given an input tensor, this function computes inverse hyperbolic sine\n for every element in the tensor. Both input and output have a range of\n `[-inf, inf]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -2, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\n tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]\n ```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Assert", + "summary": "Asserts that the given condition is true.", + "description": "If `condition` evaluates to false, print the list of tensors in `data`.\n`summarize` determines how many entries of the tensors to print.", + "attributes": [ + { + "name": "T", + "type": "type[]", + "minimum": 1 + }, + { + "name": "summarize", + "type": "int64", + "description": "Print this many entries of each tensor.", + "default": 3 + } + ], + "inputs": [ + { + "name": "condition", + "description": "The condition to evaluate.", + "type": 10 + }, + { + "name": "data", + "description": "The tensors to print out when condition is false.", + "typeListAttr": "T" + } + ] + }, + { + "name": "AssertCardinalityDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "cardinality", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "AssertNextDataset", + "summary": "A transformation that asserts which transformations happen next.", + "description": "This transformation checks whether the camel-case names (i.e. \"FlatMap\", not\n\"flat_map\") of the transformations following this transformation match the list\nof names in the `transformations` argument. 
If there is a mismatch, the\ntransformation raises an exception.\n\nThe check occurs when iterating over the contents of the dataset, which\nmeans that the check happens *after* any static optimizations are applied\nto the dataset graph.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.\n`AssertNextDataset` passes through the outputs of its input dataset.", + "type": 21 + }, + { + "name": "transformations", + "description": "A `tf.string` vector `tf.Tensor` identifying the transformations that are\nexpected to happen next.", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "AssertPrevDataset", + "summary": "A transformation that asserts which transformations happened previously.", + "description": "This transformation checks the names and, optionally, the attribute name-value\npairs in the `transformations` argument against those of the transformations\nthat preceded this transformation. If there is a mismatch, the transformation\nraises an exception.\n\nThe check occurs when iterating over the contents of the dataset, which\nmeans that the check happens *after* any static optimizations are applied\nto the dataset graph.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.\n`AssertPrevDataset` passes through the outputs of its input dataset.", + "type": 21 + }, + { + "name": "transformations", + "description": "A `tf.string` vector `tf.Tensor` identifying the transformations, with optional\nattribute name-value pairs, that are expected to have happened previously.", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Assign", + "category": "Control", + "summary": "Update 'ref' by assigning 'value' to it.", + "description": "This operation outputs \"ref\" after the assignment is done.\nThis makes it easier to chain operations that need to use the reset value.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "validate_shape", + "type": "boolean", + "description": "If true, the operation will validate that the shape\nof 'value' matches the shape of the Tensor being assigned to. If false,\n'ref' will take on the shape of 'value'.", + "default": true + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": true + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a `Variable` node. May be uninitialized.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "value", + "description": "The value to be assigned to the variable.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "= Same as \"ref\". 
Returned as a convenience for operations that want\nto use the new value after the variable has been reset.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "AssignAdd", + "summary": "Update 'ref' by adding 'value' to it.", + "description": "This operation outputs \"ref\" after the update is done.\nThis makes it easier to chain operations that need to use the reset value.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a `Variable` node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "value", + "description": "The value to be added to the variable.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "= Same as \"ref\". Returned as a convenience for operations that want\nto use the new value after the variable has been updated.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "AssignAddVariableOp", + "summary": "Adds a value to the current value of a variable.", + "description": "Any ReadVariableOp with a control dependency on this op is guaranteed to\nsee the incremented value or a subsequent newer one.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "the dtype of the value." + } + ], + "inputs": [ + { + "name": "resource", + "description": "handle to the resource in which to store the variable.", + "type": 20 + }, + { + "name": "value", + "description": "the value by which the variable will be incremented.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "AssignSub", + "summary": "Update 'ref' by subtracting 'value' from it.", + "description": "This operation outputs \"ref\" after the update is done.\nThis makes it easier to chain operations that need to use the reset value.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a `Variable` node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "value", + "description": "The value to be subtracted from the variable.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "= Same as \"ref\". 
Returned as a convenience for operations that want\nto use the new value after the variable has been updated.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "AssignSubVariableOp", + "summary": "Subtracts a value from the current value of a variable.", + "description": "Any ReadVariableOp with a control dependency on this op is guaranteed to\nsee the decremented value or a subsequent newer one.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "the dtype of the value." + } + ], + "inputs": [ + { + "name": "resource", + "description": "handle to the resource in which to store the variable.", + "type": 20 + }, + { + "name": "value", + "description": "the value by which the variable will be decremented.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "AssignVariableOp", + "summary": "Assigns a new value to a variable.", + "description": "Any ReadVariableOp with a control dependency on this op is guaranteed to return\nthis value or a subsequent newer value of the variable.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "the dtype of the value." + }, + { + "name": "validate_shape", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "resource", + "description": "handle to the resource in which to store the variable.", + "type": 20 + }, + { + "name": "value", + "description": "the value to set the new tensor to use.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "AssignVariableXlaConcatND", + "summary": "Concats input tensor across all dimensions.", + "description": "An op which merges slices of the input tensor based on the given num_concats\nattribute, strips paddings optionally, and writes the merged tensor without\npaddings to the resource variable.\n\nThis op may be generated via the TPU bridge.\n\nFor example, with `input` tensor:\n```\n[[0, 1],\n [4, 5]]\n[[2, 3],\n [6, 7]]\n[[8, 9],\n [12, 13]]\n[[10, 11],\n [14, 15]]\n```\n`num_concats`:\n```\n[2, 2]\n```\nand `paddings`:\n```\n[1, 1]\n```\nthe expected `outputs` is:\n```\n[[0, 1, 2],\n [4, 5, 6],\n [8, 9, 10]]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "num_concats", + "type": "int64[]", + "description": "Number of ways to merge per dimension." + }, + { + "name": "paddings", + "type": "int64[]", + "description": "Optional list of right paddings per dimension to strip from the final merged\ntensor. These paddings must not exceed the dimension size of the merged result\nprior to stripping paddings.", + "default": [] + } + ], + "inputs": [ + { + "name": "resource", + "description": "Resource variable for concatenated input tensors across all dimensions.", + "type": 20 + }, + { + "name": "inputs", + "description": "Input tensor slices in row-major order to merge across all dimensions.", + "typeAttr": "T", + "numberAttr": "N" + } + ] + }, + { + "name": "Atan2", + "summary": "Computes arctangent of y/x element-wise, respecting signs of the arguments.", + "description": "This is the angle \\\\( \\theta \\in [-\\pi, \\pi] \\\\) such that \\\\[ x = r \\cos(\\theta) \\\\]\nand \\\\[ y = r \\sin(\\theta) \\\\]\nwhere \\\\(r = \\sqrt{x^2 + y^2} \\\\).\n\nFor example:\n\n>>> x = [1., 1.]\n>>> y = [1., -1.]\n>>> print((tf.math.atan2(y,x) * (180 / np.pi)).numpy())\n[ 45. -45.]\n\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "Atanh", + "summary": "Computes inverse hyperbolic tangent of x element-wise.", + "description": " Given an input tensor, this function computes inverse hyperbolic tangent\n for every element in the tensor. Input range is `[-1,1]` and output range is\n `[-inf, inf]`. 
If input is `-1`, output will be `-inf` and if the\n input is `1`, output will be `inf`. Values outside the range will have\n `nan` as output.\n\n ```python\n x = tf.constant([-float(\"inf\"), -1, -0.5, 1, 0, 0.5, 10, float(\"inf\")])\n tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan]\n ```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "AudioSpectrogram", + "summary": "Produces a visualization of audio data over time.", + "description": "Spectrograms are a standard way of representing audio information as a series of\nslices of frequency information, one slice for each window of time. By joining\nthese together into a sequence, they form a distinctive fingerprint of the sound\nover time.\n\nThis op expects to receive audio data as an input, stored as floats in the range\n-1 to 1, together with a window width in samples, and a stride specifying how\nfar to move the window between slices. From this it generates a three\ndimensional output. The first dimension is for the channels in the input, so a\nstereo audio input would have two here for example. The second dimension is time,\nwith successive frequency slices. The third dimension has an amplitude value for\neach frequency during that time slice.\n\nThis means the layout when converted and saved as an image is rotated 90 degrees\nclockwise from a typical spectrogram. Time is descending down the Y axis, and\nthe frequency decreases from left to right.\n\nEach value in the result represents the square root of the sum of the squares of the real and\nimaginary parts of an FFT on the current window of samples. In this way, the\nlowest dimension represents the power of each frequency in the current window,\nand adjacent windows are concatenated in the next dimension.\n\nTo get a more intuitive and visual look at what this operation does, you can run\ntensorflow/examples/wav_to_spectrogram to read in an audio file and save out the\nresulting spectrogram as a PNG image.", + "attributes": [ + { + "name": "window_size", + "type": "int64", + "description": "How wide the input window is in samples. For the highest efficiency\nthis should be a power of two, but other values are accepted." + }, + { + "name": "stride", + "type": "int64", + "description": "How widely apart the center of adjacent sample windows should be." + }, + { + "name": "magnitude_squared", + "type": "boolean", + "description": "Whether to return the squared magnitude or just the\nmagnitude. Using squared magnitude can avoid extra calculations.", + "default": false + } + ], + "inputs": [ + { + "name": "input", + "description": "Float representation of audio data.", + "type": 1 + } + ], + "outputs": [ + { + "name": "spectrogram", + "description": "3D representation of the audio frequencies as an image.", + "type": 1 + } + ] + }, + { + "name": "AudioSummary", + "summary": "Outputs a `Summary` protocol buffer with audio.", + "description": "The summary has up to `max_outputs` summary values containing audio. The\naudio is built from `tensor` which must be 3-D with shape `[batch_size,\nframes, channels]` or 2-D with shape `[batch_size, frames]`. The values are\nassumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.\n\nThe `tag` argument is a scalar `Tensor` of type `string`. 
It is used to\nbuild the `tag` of the summary values:\n\n* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.\n* If `max_outputs` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.", + "attributes": [ + { + "name": "sample_rate", + "type": "float32", + "description": "The sample rate of the signal in hertz." + }, + { + "name": "max_outputs", + "type": "int64", + "description": "Max number of batch elements to generate audio for.", + "minimum": 1, + "default": 3 + } + ], + "inputs": [ + { + "name": "tag", + "description": "Scalar. Used to build the `tag` attribute of the summary values.", + "type": 7 + }, + { + "name": "tensor", + "description": "2-D of shape `[batch_size, frames]` or 3-D of shape\n`[batch_size, frames, channels]`.", + "type": 1 + } + ], + "outputs": [ + { + "name": "summary", + "description": "Scalar. Serialized `Summary` protocol buffer.", + "type": 7 + } + ] + }, + { + "name": "AudioSummaryV2", + "summary": "Outputs a `Summary` protocol buffer with audio.", + "description": "The summary has up to `max_outputs` summary values containing audio. The\naudio is built from `tensor` which must be 3-D with shape `[batch_size,\nframes, channels]` or 2-D with shape `[batch_size, frames]`. The values are\nassumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.\n\nThe `tag` argument is a scalar `Tensor` of type `string`. It is used to\nbuild the `tag` of the summary values:\n\n* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.\n* If `max_outputs` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.", + "attributes": [ + { + "name": "max_outputs", + "type": "int64", + "description": "Max number of batch elements to generate audio for.", + "minimum": 1, + "default": 3 + } + ], + "inputs": [ + { + "name": "tag", + "description": "Scalar. Used to build the `tag` attribute of the summary values.", + "type": 7 + }, + { + "name": "tensor", + "description": "2-D of shape `[batch_size, frames]` or 3-D of shape\n`[batch_size, frames, channels]`.", + "type": 1 + }, + { + "name": "sample_rate", + "description": "The sample rate of the signal in hertz.", + "type": 1 + } + ], + "outputs": [ + { + "name": "summary", + "description": "Scalar. Serialized `Summary` protocol buffer.", + "type": 7 + } + ] + }, + { + "name": "AutoShardDataset", + "summary": "Creates a dataset that shards the input dataset.", + "description": "Creates a dataset that shards the input dataset by num_workers, returning a\nsharded dataset for the index-th worker. This attempts to automatically shard\na dataset by examining the Dataset graph and inserting a shard op before the\ninputs to a reader Dataset (e.g. 
CSVDataset, TFRecordDataset).\n\nThis dataset will throw a NotFound error if we cannot shard the dataset\nautomatically.", + "attributes": [ + { + "name": "auto_shard_policy", + "type": "int64", + "default": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "num_replicas", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "num_workers", + "description": "A scalar representing the number of workers to distribute this dataset across.", + "type": 9 + }, + { + "name": "index", + "description": "A scalar representing the index of the current worker out of num_workers.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "AvgPool", + "category": "Pool", + "summary": "Performs average pooling on the input.", + "description": "Each entry in `output` is the mean of the corresponding size `ksize`\nwindow in `value`.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "The size of the sliding window for each dimension of `value`.", + "minimum": 4 + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of `value`.", + "minimum": 4 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "value", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The average pooled output tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "AvgPool3D", + "summary": "Performs 3D average pooling on the input.", + "description": "Each entry in `output` is the mean of the corresponding size `ksize` window in\n`value`.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`.", + "minimum": 5 + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "The data format of the input and output data. 
With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "default": "NDHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape `[batch, depth, rows, cols, channels]` tensor to pool over.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The average pooled output tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "AvgPool3DGrad", + "summary": "Computes gradients of average pooling function.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`.", + "minimum": 5 + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "default": "NDHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "orig_input_shape", + "description": "The original input dimensions.", + "type": 3 + }, + { + "name": "grad", + "description": "Output backprop of shape `[batch, depth, rows, cols, channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The backprop for input.", + "typeAttr": "T" + } + ] + }, + { + "name": "AvgPoolGrad", + "summary": "Computes gradients of the average pooling function.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "The size of the sliding window for each dimension of the input.", + "minimum": 4 + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the input.", + "minimum": 4 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." 
+ } + ], + "inputs": [ + { + "name": "orig_input_shape", + "description": "1-D. Shape of the original input to `avg_pool`.", + "type": 3 + }, + { + "name": "grad", + "description": "4-D with shape `[batch, height, width, channels]`. Gradients w.r.t.\nthe output of `avg_pool`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D. Gradients w.r.t. the input of `avg_pool`.", + "typeAttr": "T" + } + ] + }, + { + "name": "BandedTriangularSolve", + "attributes": [ + { + "name": "lower", + "type": "boolean", + "default": true + }, + { + "name": "adjoint", + "type": "boolean", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "matrix", + "typeAttr": "T" + }, + { + "name": "rhs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "Barrier", + "summary": "Defines a barrier that persists across different graph executions.", + "description": "A barrier represents a key-value map, where each key is a string, and\neach value is a tuple of tensors.\n\nAt runtime, the barrier contains 'complete' and 'incomplete'\nelements. A complete element has defined tensors for all components of\nits value tuple, and may be accessed using BarrierTakeMany. An\nincomplete element has some undefined components in its value tuple,\nand may be updated using BarrierInsertMany.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a value.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shape of each component in a value. Each shape must be 1 in the\nfirst dimension. The length of this attr must be the same as the length of\ncomponent_types.", + "minimum": 0, + "default": [] + }, + { + "name": "capacity", + "type": "int64", + "description": "The capacity of the barrier. The default capacity is MAX_INT32,\nwhich is the largest capacity of the underlying queue.", + "default": -1 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this barrier is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this barrier will be shared under the given name\nacross multiple sessions.", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the barrier.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "BarrierClose", + "summary": "Closes the given barrier.", + "description": "This operation signals that no more new elements will be inserted in the\ngiven barrier. Subsequent InsertMany that try to introduce a new key will fail.\nSubsequent InsertMany operations that just add missing components to already\nexisting elements will continue to succeed. Subsequent TakeMany operations will\ncontinue to succeed if sufficient completed elements remain in the barrier.\nSubsequent TakeMany operations that would block will fail immediately.", + "attributes": [ + { + "name": "cancel_pending_enqueues", + "type": "boolean", + "description": "If true, all pending enqueue requests that are\nblocked on the barrier's queue will be canceled. 
InsertMany will fail, even\nif no new key is introduced.", + "default": false + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a barrier.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "BarrierIncompleteSize", + "summary": "Computes the number of incomplete elements in the given barrier.", + "inputs": [ + { + "name": "handle", + "description": "The handle to a barrier.", + "type": 7, + "isRef": true + } + ], + "outputs": [ + { + "name": "size", + "description": "The number of incomplete elements (i.e. those with some of their value\ncomponents not set) in the barrier.", + "type": 3 + } + ] + }, + { + "name": "BarrierInsertMany", + "summary": "For each key, assigns the respective value to the specified component.", + "description": "If a key is not found in the barrier, this operation will create a new\nincomplete element. If a key is found in the barrier, and the element\nalready has a value at component_index, this operation will fail with\nINVALID_ARGUMENT, and leave the barrier in an undefined state.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "component_index", + "type": "int64", + "description": "The component of the barrier elements that is being assigned." + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a barrier.", + "type": 7, + "isRef": true + }, + { + "name": "keys", + "description": "A one-dimensional tensor of keys, with length n.", + "type": 7 + }, + { + "name": "values", + "description": "An any-dimensional tensor of values, which are associated with the\nrespective keys. The 0th dimension must have length n.", + "typeAttr": "T" + } + ] + }, + { + "name": "BarrierReadySize", + "summary": "Computes the number of complete elements in the given barrier.", + "inputs": [ + { + "name": "handle", + "description": "The handle to a barrier.", + "type": 7, + "isRef": true + } + ], + "outputs": [ + { + "name": "size", + "description": "The number of complete elements (i.e. those with all of their value\ncomponents set) in the barrier.", + "type": 3 + } + ] + }, + { + "name": "BarrierTakeMany", + "summary": "Takes the given number of completed elements from a barrier.", + "description": "This operation concatenates completed-element component tensors along\nthe 0th dimension to make a single component tensor.\n\nElements come out of the barrier when they are complete, and in the order\nin which they were placed into the barrier. 
The indices output provides\ninformation about the batch in which each element was originally inserted\ninto the barrier.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a value.", + "minimum": 1 + }, + { + "name": "allow_small_batch", + "type": "boolean", + "description": "Allow returning fewer than num_elements items if the barrier is\nalready closed.", + "default": false + }, + { + "name": "wait_for_incomplete", + "type": "boolean", + "default": false + }, + { + "name": "timeout_ms", + "type": "int64", + "description": "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet.", + "default": -1 + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a barrier.", + "type": 7, + "isRef": true + }, + { + "name": "num_elements", + "description": "A single-element tensor containing the number of elements to\ntake.", + "type": 3 + } + ], + "outputs": [ + { + "name": "indices", + "description": "A one-dimensional tensor of indices, with length num_elems.\nThese indices refer to the batch in which the values were placed into the\nbarrier (starting with MIN_LONG and increasing with each BarrierInsertMany).", + "type": 9 + }, + { + "name": "keys", + "description": "A one-dimensional tensor of keys, with length num_elements.", + "type": 7 + }, + { + "name": "values", + "description": "One any-dimensional tensor per component in a barrier element. All\nvalues have length num_elements in the 0th dimension.", + "typeListAttr": "component_types" + } + ] + }, + { + "name": "Batch", + "summary": "Batches all input tensors nondeterministically.", + "description": "When many instances of this Op are being run concurrently with the same\ncontainer/shared_name in the same device, some will output zero-shaped Tensors\nand others will output Tensors of size up to max_batch_size.\n\nAll Tensors in in_tensors are batched together (so, for example, labels and\nfeatures should be batched with a single instance of this operation).\n\nEach invocation of batch emits an `id` scalar which will be used to identify\nthis particular invocation when doing unbatch or its gradient.\n\nEach op which emits a non-empty batch will also emit a non-empty batch_index\nTensor, which is a [K, 3] matrix where each row contains the invocation's id,\nstart, and length of elements of each set of Tensors present in batched_tensors.\n\nBatched tensors are concatenated along the first dimension, and all tensors in\nin_tensors must have the first dimension of the same size.\n\nin_tensors: The tensors to be batched.\nnum_batch_threads: Number of scheduling threads for processing batches of work.\n Determines the number of batches processed in parallel.\nmax_batch_size: Batch sizes will never be bigger than this.\nbatch_timeout_micros: Maximum number of microseconds to wait before outputting\n an incomplete batch.\nallowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does\n nothing. Otherwise, supplies a list of batch sizes, causing the op to pad\n batches up to one of those sizes. The entries must increase monotonically, and\n the final entry must equal max_batch_size.\ngrad_timeout_micros: The timeout to use for the gradient. 
See Unbatch.\nbatched_tensors: Either empty tensors or a batch of concatenated Tensors.\nbatch_index: If out_tensors is non-empty, has information to invert it.\ncontainer: Controls the scope of sharing of this batch.\nid: always contains a scalar with a unique ID for this invocation of Batch.\nshared_name: Concurrently running instances of batch in the same device with the\n same container and shared_name will batch their elements together. If left\n empty, the op name will be used as the shared name.\nT: the types of tensors to be batched.", + "attributes": [ + { + "name": "num_batch_threads", + "type": "int64" + }, + { + "name": "max_batch_size", + "type": "int64" + }, + { + "name": "max_enqueued_batches", + "type": "int64", + "default": 10 + }, + { + "name": "batch_timeout_micros", + "type": "int64" + }, + { + "name": "allowed_batch_sizes", + "type": "int64[]", + "default": [] + }, + { + "name": "grad_timeout_micros", + "type": "int64" + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + }, + { + "name": "batching_queue", + "type": "string", + "default": "" + }, + { + "name": "T", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "in_tensors", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "name": "batched_tensors", + "typeListAttr": "T" + }, + { + "name": "batch_index", + "type": 9 + }, + { + "name": "id", + "type": 9 + } + ] + }, + { + "name": "BatchCholesky", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchCholeskyGrad", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`." 
+ } + ], + "inputs": [ + { + "name": "l", + "typeAttr": "T" + }, + { + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchDataset", + "summary": "Creates a dataset that batches `batch_size` elements from `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "batch_size", + "description": "A scalar representing the number of elements to accumulate in a\nbatch.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "BatchDatasetV2", + "summary": "Creates a dataset that batches `batch_size` elements from `input_dataset`.", + "attributes": [ + { + "name": "parallel_copy", + "type": "boolean", + "default": false + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "batch_size", + "description": "A scalar representing the number of elements to accumulate in a batch.", + "type": 9 + }, + { + "name": "drop_remainder", + "description": "A scalar representing whether the last batch should be dropped in case its size\nis smaller than desired.", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "BatchFFT", + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + }, + { + "name": "BatchFFT2D", + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + }, + { + "name": "BatchFFT3D", + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + }, + { + "name": "BatchFunction", + "summary": "Batches all the inputs tensors to the computation done by the function.", + "description": "So, for example, in the following code\n\n ```python\n\n # This input will be captured.\n y = tf.placeholder_with_default(1.0, shape=[])\n\n @tf.Defun(tf.float32)\n def computation(a):\n return tf.matmul(a, a) + y\n\n b = gen_batch_ops.batch_function(\n f=computation\n in_tensors=[a],\n captured_tensors=computation.captured_inputs,\n Tout=[o.type for o in computation.definition.signature.output_arg],\n num_batch_threads=1,\n max_batch_size=10,\n batch_timeout_micros=100000, # 100ms\n allowed_batch_sizes=[3, 10],\n batching_queue=\"\")\n ```\n\nIf more than one session.run call is simultaneously trying to compute `b`\nthe values of `a` will be gathered, non-deterministically concatenated\nalong the first axis, and only one thread will run the computation.\n\nAssumes that all arguments of the function are Tensors which will be batched\nalong their first dimension.\n\nArguments that are captured, are not batched. The session.run call which does\nthe concatenation, will use the values of the captured tensors available to it.\nTherefore, typical uses of captured tensors should involve values which remain\nunchanged across session.run calls. Inference is a good example of this.\n\nSparseTensor is not supported. 
The return value of the decorated function\nmust be a Tensor or a list/tuple of Tensors.", + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "name": "num_batch_threads", + "type": "int64", + "description": "Number of scheduling threads for processing batches of work.\nDetermines the number of batches processed in parallel." + }, + { + "name": "max_batch_size", + "type": "int64", + "description": "Batch sizes will never be bigger than this." + }, + { + "name": "batch_timeout_micros", + "type": "int64", + "description": "Maximum number of microseconds to wait before outputting\nan incomplete batch." + }, + { + "name": "max_enqueued_batches", + "type": "int64", + "description": "Maximum number of batches enqueued. Default: 10.", + "default": 10 + }, + { + "name": "allowed_batch_sizes", + "type": "int64[]", + "description": "Optional list of allowed batch sizes. If left empty, does\nnothing. Otherwise, supplies a list of batch sizes, causing the op to pad\nbatches up to one of those sizes. The entries must increase monotonically.\nIf enable_large_batch_splitting is false (i.e., large-input-split is not\nenabled) the final entry must equal max_batch_size.", + "default": [] + }, + { + "name": "container", + "type": "string", + "description": "Controls the scope of sharing of this batch.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "Concurrently running instances of batch in the same device with the\nsame container and shared_name will batch their elements together. If left\nempty, the op name will be used as the shared name.", + "default": "" + }, + { + "name": "batching_queue", + "type": "string", + "default": "" + }, + { + "name": "low_priority_max_batch_size", + "type": "int64", + "default": 0 + }, + { + "name": "low_priority_batch_timeout_micros", + "type": "int64", + "default": 0 + }, + { + "name": "low_priority_allowed_batch_sizes", + "type": "int64[]", + "default": [] + }, + { + "name": "low_priority_max_enqueued_batches", + "type": "int64", + "default": 0 + }, + { + "name": "Tin", + "type": "type[]", + "description": "the types of tensors to be batched.", + "minimum": 1 + }, + { + "name": "Tcaptured", + "type": "type[]", + "description": "the types of the captured tensors.", + "minimum": 0 + }, + { + "name": "Tout", + "type": "type[]", + "description": "the types of the output tensors.", + "minimum": 1 + }, + { + "name": "enable_large_batch_splitting", + "type": "boolean", + "description": "input with a large size (i.e., larger than the largest value of\n`allowed_batch_sizes`) will be split into multiple batches with allowed batch sizes.", + "default": false + } + ], + "inputs": [ + { + "name": "in_tensors", + "description": "The tensors to be batched.", + "typeListAttr": "Tin" + }, + { + "name": "captured_tensors", + "description": "The tensors which are captured in the function, and don't need\nto be batched.", + "typeListAttr": "Tcaptured" + } + ], + "outputs": [ + { + "name": "out_tensors", + "description": "The output tensors.", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "BatchIFFT", + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + }, + { + "name": "BatchIFFT2D", + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + }, + { + "name": "BatchIFFT3D", + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + }, + { + "name": 
"BatchMatMul", + "summary": "Multiplies slices of two tensors in batches.", + "description": "Multiplies all slices of `Tensor` `x` and `y` (each slice can be\nviewed as an element of a batch), and arranges the individual results\nin a single output tensor of the same batch size. Each of the\nindividual slices can optionally be adjointed (to adjoint a matrix\nmeans to transpose and conjugate it) before multiplication by setting\nthe `adj_x` or `adj_y` flag to `True`, which are by default `False`.\n\nThe input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`\nand `[..., r_y, c_y]`.\n\nThe output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:\n\n r_o = c_x if adj_x else r_x\n c_o = r_y if adj_y else c_y\n\nIt is computed as:\n\n output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`." + }, + { + "name": "adj_x", + "type": "boolean", + "description": "If `True`, adjoint the slices of `x`. Defaults to `False`.", + "default": false + }, + { + "name": "adj_y", + "type": "boolean", + "description": "If `True`, adjoint the slices of `y`. Defaults to `False`.", + "default": false + }, + { + "name": "grad_x", + "type": "boolean", + "default": false + }, + { + "name": "grad_y", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "x", + "description": "2-D or higher with shape `[..., r_x, c_x]`.", + "typeAttr": "T" + }, + { + "name": "y", + "description": "2-D or higher with shape `[..., r_y, c_y]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "3-D or higher with shape `[..., r_o, c_o]`", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchMatMulV2", + "summary": "Multiplies slices of two tensors in batches.", + "description": "Multiplies all slices of `Tensor` `x` and `y` (each slice can be\nviewed as an element of a batch), and arranges the individual results\nin a single output tensor of the same batch size. Each of the\nindividual slices can optionally be adjointed (to adjoint a matrix\nmeans to transpose and conjugate it) before multiplication by setting\nthe `adj_x` or `adj_y` flag to `True`, which are by default `False`.\n\nThe input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`\nand `[..., r_y, c_y]`.\n\nThe output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:\n\n r_o = c_x if adj_x else r_x\n c_o = r_y if adj_y else c_y\n\nIt is computed as:\n\n output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])\n\n*NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More\nabout broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `complex64`, `complex128`." + }, + { + "name": "adj_x", + "type": "boolean", + "description": "If `True`, adjoint the slices of `x`. Defaults to `False`.", + "default": false + }, + { + "name": "adj_y", + "type": "boolean", + "description": "If `True`, adjoint the slices of `y`. 
Defaults to `False`.", + "default": false + }, + { + "name": "grad_x", + "type": "boolean", + "default": false + }, + { + "name": "grad_y", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "x", + "description": "2-D or higher with shape `[..., r_x, c_x]`.", + "typeAttr": "T" + }, + { + "name": "y", + "description": "2-D or higher with shape `[..., r_y, c_y]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "3-D or higher with shape `[..., r_o, c_o]`", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchMatMulV3", + "summary": "Multiplies slices of two tensors in batches.", + "description": "Multiplies all slices of `Tensor` `x` and `y` (each slice can be\nviewed as an element of a batch), and arranges the individual results\nin a single output tensor of the same batch size. Each of the\nindividual slices can optionally be adjointed (to adjoint a matrix\nmeans to transpose and conjugate it) before multiplication by setting\nthe `adj_x` or `adj_y` flag to `True`, which are by default `False`.\n\nThe input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`\nand `[..., r_y, c_y]`.\n\nThe output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:\n\n r_o = c_x if adj_x else r_x\n c_o = r_y if adj_y else c_y\n\nIt is computed as:\n\n output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])\n\n*NOTE*: `BatchMatMulV3` supports broadcasting in the batch dimensions. More\nabout broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).\n", + "attributes": [ + { + "name": "Ta", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`." + }, + { + "name": "Tb", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`." + }, + { + "name": "Tout", + "type": "type", + "description": "If not spcified, Tout is the same type to input type. Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int16`, `int32`, `int64`, `complex64`, `complex128`." + }, + { + "name": "adj_x", + "type": "boolean", + "description": "If `True`, adjoint the slices of `x`. Defaults to `False`.", + "default": false + }, + { + "name": "adj_y", + "type": "boolean", + "description": "If `True`, adjoint the slices of `y`. 
Defaults to `False`.", + "default": false + }, + { + "name": "grad_x", + "type": "boolean", + "default": false + }, + { + "name": "grad_y", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "x", + "description": "2-D or higher with shape `[..., r_x, c_x]`.", + "typeAttr": "Ta" + }, + { + "name": "y", + "description": "2-D or higher with shape `[..., r_y, c_y]`.", + "typeAttr": "Tb" + } + ], + "outputs": [ + { + "name": "output", + "description": "3-D or higher with shape `[..., r_o, c_o]`", + "typeAttr": "Tout" + } + ] + }, + { + "name": "BatchMatrixBandPart", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "num_lower", + "type": 9 + }, + { + "name": "num_upper", + "type": 9 + } + ], + "outputs": [ + { + "name": "band", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchMatrixDeterminant", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchMatrixDiag", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "diagonal", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchMatrixDiagPart", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "diagonal", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchMatrixInverse", + "attributes": [ + { + "name": "adjoint", + "type": "boolean", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchMatrixSetDiag", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "diagonal", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchMatrixSolve", + "attributes": [ + { + "name": "adjoint", + "type": "boolean", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`." + } + ], + "inputs": [ + { + "name": "matrix", + "typeAttr": "T" + }, + { + "name": "rhs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchMatrixSolveLs", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`." + }, + { + "name": "fast", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "matrix", + "typeAttr": "T" + }, + { + "name": "rhs", + "typeAttr": "T" + }, + { + "name": "l2_regularizer", + "type": 2 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchMatrixTriangularSolve", + "attributes": [ + { + "name": "lower", + "type": "boolean", + "default": true + }, + { + "name": "adjoint", + "type": "boolean", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`." 
+ } + ], + "inputs": [ + { + "name": "matrix", + "typeAttr": "T" + }, + { + "name": "rhs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchNormWithGlobalNormalization", + "category": "Normalization", + "summary": "Batch normalization.", + "description": "This op is deprecated. Prefer `tf.nn.batch_normalization`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "variance_epsilon", + "type": "float32", + "description": "A small float number to avoid dividing by 0." + }, + { + "name": "scale_after_normalization", + "type": "boolean", + "description": "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma." + } + ], + "inputs": [ + { + "name": "t", + "description": "A 4D input Tensor.", + "typeAttr": "T" + }, + { + "name": "m", + "description": "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof.", + "typeAttr": "T" + }, + { + "name": "v", + "description": "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof.", + "typeAttr": "T" + }, + { + "name": "beta", + "description": "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor.", + "typeAttr": "T" + }, + { + "name": "gamma", + "description": "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "result", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchNormWithGlobalNormalizationGrad", + "summary": "Gradients for batch normalization.", + "description": "This op is deprecated. See `tf.nn.batch_normalization`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "variance_epsilon", + "type": "float32", + "description": "A small float number to avoid dividing by 0." + }, + { + "name": "scale_after_normalization", + "type": "boolean", + "description": "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma." 
+ } + ], + "inputs": [ + { + "name": "t", + "description": "A 4D input Tensor.", + "typeAttr": "T" + }, + { + "name": "m", + "description": "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof.", + "typeAttr": "T" + }, + { + "name": "v", + "description": "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof.", + "typeAttr": "T" + }, + { + "name": "gamma", + "description": "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this Tensor will be multiplied\nwith the normalized Tensor.", + "typeAttr": "T" + }, + { + "name": "backprop", + "description": "4D backprop Tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "dx", + "description": "4D backprop tensor for input.", + "typeAttr": "T" + }, + { + "name": "dm", + "description": "1D backprop tensor for mean.", + "typeAttr": "T" + }, + { + "name": "dv", + "description": "1D backprop tensor for variance.", + "typeAttr": "T" + }, + { + "name": "db", + "description": "1D backprop tensor for beta.", + "typeAttr": "T" + }, + { + "name": "dg", + "description": "1D backprop tensor for gamma.", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchSelfAdjointEig", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchSelfAdjointEigV2", + "attributes": [ + { + "name": "compute_v", + "type": "boolean", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "e", + "typeAttr": "T" + }, + { + "name": "v", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchSvd", + "attributes": [ + { + "name": "compute_uv", + "type": "boolean", + "default": true + }, + { + "name": "full_matrices", + "type": "boolean", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "s", + "typeAttr": "T" + }, + { + "name": "u", + "typeAttr": "T" + }, + { + "name": "v", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchToSpace", + "summary": "BatchToSpace for 4-D tensors of type T.", + "description": "This is a legacy version of the more general BatchToSpaceND.\n\nRearranges (permutes) data from batch into blocks of spatial data, followed by\ncropping. This is the reverse transformation of SpaceToBatch. 
More specifically,\nthis op outputs a copy of the input tensor where values from the `batch`\ndimension are moved in spatial blocks to the `height` and `width` dimensions,\nfollowed by cropping along the `height` and `width` dimensions.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "block_size", + "type": "int64", + "minimum": 2 + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D tensor with shape\n`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n depth]`. Note that the batch size of the input tensor must be divisible by\n`block_size * block_size`.", + "typeAttr": "T" + }, + { + "name": "crops", + "description": "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\nhow many elements to crop from the intermediate result across the spatial\ndimensions as follows:\n\n crops = [[crop_top, crop_bottom], [crop_left, crop_right]]", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with shape `[batch, height, width, depth]`, where:\n\n height = height_pad - crop_top - crop_bottom\n width = width_pad - crop_left - crop_right\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:\n\n```\n[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\n(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```", + "typeAttr": "T" + } + ] + }, + { + "name": "BatchToSpaceND", + "summary": "BatchToSpace for N-D tensors of type T.", + "description": "This operation reshapes the \"batch\" dimension 0 into `M + 1` dimensions of shape\n`block_shape + [batch]`, interleaves these blocks back into the grid defined by\nthe spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as\nthe input. The spatial dimensions of this intermediate result are then\noptionally cropped according to `crops` to produce the output. This is the\nreverse of SpaceToBatch. 
See below for a precise description.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tblock_shape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tcrops", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has M dimensions.", + "typeAttr": "T" + }, + { + "name": "block_shape", + "description": "1-D with shape `[M]`, all values must be >= 1.", + "typeAttr": "Tblock_shape" + }, + { + "name": "crops", + "description": "2-D with shape `[M, 2]`, all values must be >= 0.\n `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input\n dimension `i + 1`, which corresponds to spatial dimension `i`. It is\n required that\n `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\n\nThis operation is equivalent to the following steps:\n\n1. Reshape `input` to `reshaped` of shape:\n [block_shape[0], ..., block_shape[M-1],\n batch / prod(block_shape),\n input_shape[1], ..., input_shape[N-1]]\n\n2. Permute dimensions of `reshaped` to produce `permuted` of shape\n [batch / prod(block_shape),\n\n input_shape[1], block_shape[0],\n ...,\n input_shape[M], block_shape[M-1],\n\n input_shape[M+1], ..., input_shape[N-1]]\n\n3. Reshape `permuted` to produce `reshaped_permuted` of shape\n [batch / prod(block_shape),\n\n input_shape[1] * block_shape[0],\n ...,\n input_shape[M] * block_shape[M-1],\n\n input_shape[M+1],\n ...,\n input_shape[N-1]]\n\n4. Crop the start and end of dimensions `[1, ..., M]` of\n `reshaped_permuted` according to `crops` to produce the output of shape:\n [batch / prod(block_shape),\n\n input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],\n ...,\n input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],\n\n input_shape[M+1], ..., input_shape[N-1]]\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [0, 0]]`:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [0, 0]]`:\n\n```\n[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\n(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [2, 0]]`:\n\n```\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n [[[0], [2], [4]]], [[[0], [10], [12]]],\n [[[0], [5], [7]]], [[[0], [13], [15]]],\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], 
[10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```", + "typeAttr": "Tcrops" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselI0", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselI0e", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselI1", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselI1e", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselJ0", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselJ1", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselK0", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselK0e", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselK1", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselK1e", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselY0", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "BesselY1", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Betainc", + "summary": "Compute the regularized incomplete beta integral \\\\(I_x(a, b)\\\\).", + "description": "The regularized incomplete beta integral is defined as:\n\n\n\\\\(I_x(a, b) = \\frac{B(x; a, b)}{B(a, b)}\\\\)\n\nwhere\n\n\n\\\\(B(x; a, b) = \\int_0^x t^{a-1} (1 - t)^{b-1} dt\\\\)\n\n\nis the incomplete beta function and \\\\(B(a, b)\\\\) is the *complete*\nbeta function.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "b", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "BiasAdd", + "category": "Layer", + "summary": "Adds `bias` to `value`.", + "description": "This is a special case of `tf.add` where `bias` is restricted to be 1-D.\nBroadcasting is supported, so `value` may have any number of dimensions.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n dimension. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + } + ], + "inputs": [ + { + "name": "value", + "description": "Any number of dimensions.", + "typeAttr": "T" + }, + { + "name": "bias", + "description": "1-D with size the last dimension of `value`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Broadcasted sum of `value` and `bias`.", + "typeAttr": "T" + } + ] + }, + { + "name": "BiasAddGrad", + "summary": "The backward operation for \"BiasAdd\" on the \"bias\" tensor.", + "description": "It accumulates all the values from out_backprop into the feature dimension.\nFor NHWC data format, the feature dimension is the last. For NCHW data format,\nthe feature dimension is the third-to-last.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. 
With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n dimension. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + } + ], + "inputs": [ + { + "name": "out_backprop", + "description": "Any number of dimensions.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "1-D with size the feature dimension of `out_backprop`.", + "typeAttr": "T" + } + ] + }, + { + "name": "BiasAddV1", + "summary": "Adds `bias` to `value`.", + "description": "This is a deprecated version of BiasAdd and will soon be removed.\n\nThis is a special case of `tf.add` where `bias` is restricted to be 1-D.\nBroadcasting is supported, so `value` may have any number of dimensions.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "value", + "description": "Any number of dimensions.", + "typeAttr": "T" + }, + { + "name": "bias", + "description": "1-D with size the last dimension of `value`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Broadcasted sum of `value` and `bias`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Bincount", + "summary": "Counts the number of occurrences of each value in an integer array.", + "description": "Outputs a vector with length `size` and the same dtype as `weights`. If\n`weights` are empty, then index `i` stores the number of times the value `i` is\ncounted in `arr`. If `weights` are non-empty, then index `i` stores the sum of\nthe value in `weights` at each index where the corresponding value in `arr` is\n`i`.\n\nValues in `arr` outside of the range [0, size) are ignored.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "arr", + "description": "int32 `Tensor`.", + "type": 3 + }, + { + "name": "size", + "description": "non-negative int32 scalar `Tensor`.", + "type": 3 + }, + { + "name": "weights", + "description": "is an int32, int64, float32, or float64 `Tensor` with the same\nshape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights\nequal to 1.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "bins", + "description": "1D `Tensor` with length equal to `size`. The counts or summed weights for\neach value in the range [0, size).", + "typeAttr": "T" + } + ] + }, + { + "name": "Bitcast", + "summary": "Bitcasts a tensor from one type to another without copying data.", + "description": "Given a tensor `input`, this operation returns a tensor that has the same buffer\ndata as `input` with datatype `type`.\n\nIf the input datatype `T` is larger than the output datatype `type` then the\nshape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].\n\nIf `T` is smaller than `type`, the operator requires that the rightmost\ndimension be equal to sizeof(`type`)/sizeof(`T`). 
The shape then goes from\n[..., sizeof(`type`)/sizeof(`T`)] to [...].\n\ntf.bitcast() and tf.cast() work differently when a real dtype is cast to a complex dtype\n(e.g. tf.complex64 or tf.complex128): tf.cast() makes the imaginary part 0, while tf.bitcast()\nraises an InvalidArgumentError.\nFor example:\n\nExample 1:\n\n>>> a = [1., 2., 3.]\n>>> equality_bitcast = tf.bitcast(a, tf.complex128)\nTraceback (most recent call last):\n...\nInvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]\n>>> equality_cast = tf.cast(a, tf.complex128)\n>>> print(equality_cast)\ntf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)\n\nExample 2:\n\n>>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)\n\n\nExample 3:\n\n>>> x = [1., 2., 3.]\n>>> y = [0., 2., 3.]\n>>> equality= tf.equal(x,y)\n>>> equality_cast = tf.cast(equality,tf.float32)\n>>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8)\n>>> print(equality)\ntf.Tensor([False True True], shape=(3,), dtype=bool)\n>>> print(equality_cast)\ntf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)\n>>> print(equality_bitcast)\ntf.Tensor(\n [[ 0 0 0 0]\n [ 0 0 128 63]\n [ 0 0 128 63]], shape=(3, 4), dtype=uint8)\n\n*NOTE*: Bitcast is implemented as a low-level cast, so machines with different\nendian orderings will give different results. A copy from input buffer to output\nbuffer is made on BE machines when types are of different sizes in order to get\nthe same casting results as on LE machines.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `complex64`, `complex128`, `qint8`, `quint8`, `qint16`, `quint16`, `qint32`." + }, + { + "name": "type", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `complex64`, `complex128`, `qint8`, `quint8`, `qint16`, `quint16`, `qint32`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "type" + } + ] + }, + { + "name": "BitwiseAnd", + "summary": "Elementwise computes the bitwise AND of `x` and `y`.", + "description": "The result will have those bits set that are set in both `x` and `y`. The\ncomputation is performed on the underlying representations of `x` and `y`.\n\nFor example:\n\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\ndtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,\n tf.uint8, tf.uint16, tf.uint32, tf.uint64]\n\nfor dtype in dtype_list:\n lhs = tf.constant([0, 5, 3, 14], dtype=dtype)\n rhs = tf.constant([5, 0, 7, 11], dtype=dtype)\n exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)\n\n res = bitwise_ops.bitwise_and(lhs, rhs)\n tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE\n```\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "BitwiseOr", + "summary": "Elementwise computes the bitwise OR of `x` and `y`.", + "description": "The result will have those bits set that are set in `x`, `y` or both. 
The\ncomputation is performed on the underlying representations of `x` and `y`.\n\nFor example:\n\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\ndtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,\n tf.uint8, tf.uint16, tf.uint32, tf.uint64]\n\nfor dtype in dtype_list:\n lhs = tf.constant([0, 5, 3, 14], dtype=dtype)\n rhs = tf.constant([5, 0, 7, 11], dtype=dtype)\n exp = tf.constant([5, 5, 7, 15], dtype=tf.float32)\n\n res = bitwise_ops.bitwise_or(lhs, rhs)\n tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE\n```\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "BitwiseXor", + "summary": "Elementwise computes the bitwise XOR of `x` and `y`.", + "description": "The result will have those bits set that are different in `x` and `y`. The\ncomputation is performed on the underlying representations of `x` and `y`.\n\nFor example:\n\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\ndtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,\n tf.uint8, tf.uint16, tf.uint32, tf.uint64]\n\nfor dtype in dtype_list:\n lhs = tf.constant([0, 5, 3, 14], dtype=dtype)\n rhs = tf.constant([5, 0, 7, 11], dtype=dtype)\n exp = tf.constant([5, 5, 4, 5], dtype=tf.float32)\n\n res = bitwise_ops.bitwise_xor(lhs, rhs)\n tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE\n```\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "BlockLSTM", + "summary": "Computes the LSTM cell forward propagation for all the time steps.", + "description": "This is equivalent to applying LSTMBlockCell in a loop, like so:\n\n```python\nfor x1 in unpack(x):\n i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(\n x1, cs_prev, h_prev, w, wci, wcf, wco, b)\n cs_prev = cs1\n h_prev = h1\n i.append(i1)\n cs.append(cs1)\n f.append(f1)\n o.append(o1)\n ci.append(ci1)\n co.append(co1)\n h.append(h1)\nreturn pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)\n```", + "attributes": [ + { + "name": "forget_bias", + "type": "float32", + "description": "The forget gate bias.", + "default": 1.0 + }, + { + "name": "cell_clip", + "type": "float32", + "description": "Value to clip the 'cs' value to.", + "default": 3.0 + }, + { + "name": "use_peephole", + "type": "boolean", + "description": "Whether to use peephole weights.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`." + } + ], + "inputs": [ + { + "name": "seq_len_max", + "description": "Maximum time length actually used by this input. 
Outputs are padded\nwith zeros beyond this length.", + "type": 9 + }, + { + "name": "x", + "description": "The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).", + "typeAttr": "T" + }, + { + "name": "cs_prev", + "description": "Value of the initial cell state.", + "typeAttr": "T" + }, + { + "name": "h_prev", + "description": "Initial output of cell (to be used for peephole).", + "typeAttr": "T" + }, + { + "name": "w", + "description": "The weight matrix.", + "typeAttr": "T" + }, + { + "name": "wci", + "description": "The weight matrix for input gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wcf", + "description": "The weight matrix for forget gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wco", + "description": "The weight matrix for output gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "b", + "description": "The bias vector.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "i", + "description": "The input gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "cs", + "description": "The cell state before the tanh over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "f", + "description": "The forget gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "o", + "description": "The output gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "ci", + "description": "The cell input over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "co", + "description": "The cell after the tanh over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "h", + "description": "The output h vector over the whole time sequence.", + "typeAttr": "T" + } + ] + }, + { + "name": "BlockLSTMGrad", + "summary": "Computes the LSTM cell backward propagation for the entire time sequence.", + "description": "This implementation is to be used in conjunction with BlockLSTM.", + "attributes": [ + { + "name": "use_peephole", + "type": "boolean", + "description": "Whether to use peephole weights." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`." + } + ], + "inputs": [ + { + "name": "seq_len_max", + "description": "Maximum time length actually used by this input. 
Outputs are padded\nwith zeros beyond this length.", + "type": 9 + }, + { + "name": "x", + "description": "The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).", + "typeAttr": "T" + }, + { + "name": "cs_prev", + "description": "Value of the initial cell state.", + "typeAttr": "T" + }, + { + "name": "h_prev", + "description": "Initial output of cell (to be used for peephole).", + "typeAttr": "T" + }, + { + "name": "w", + "description": "The weight matrix.", + "typeAttr": "T" + }, + { + "name": "wci", + "description": "The weight matrix for input gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wcf", + "description": "The weight matrix for forget gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wco", + "description": "The weight matrix for output gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "b", + "description": "The bias vector.", + "typeAttr": "T" + }, + { + "name": "i", + "description": "The input gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "cs", + "description": "The cell state before the tanh over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "f", + "description": "The forget gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "o", + "description": "The output gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "ci", + "description": "The cell input over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "co", + "description": "The cell after the tanh over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "h", + "description": "The output h vector over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "cs_grad", + "description": "The current gradient of cs.", + "typeAttr": "T" + }, + { + "name": "h_grad", + "description": "The gradient of h vector.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "x_grad", + "description": "The gradient of x to be back-propped.", + "typeAttr": "T" + }, + { + "name": "cs_prev_grad", + "description": "The gradient of cs_prev to be back-propped.", + "typeAttr": "T" + }, + { + "name": "h_prev_grad", + "description": "The gradient of h_prev to be back-propped.", + "typeAttr": "T" + }, + { + "name": "w_grad", + "description": "The gradient for w to be back-propped.", + "typeAttr": "T" + }, + { + "name": "wci_grad", + "description": "The gradient for wci to be back-propped.", + "typeAttr": "T" + }, + { + "name": "wcf_grad", + "description": "The gradient for wcf to be back-propped.", + "typeAttr": "T" + }, + { + "name": "wco_grad", + "description": "The gradient for wco to be back-propped.", + "typeAttr": "T" + }, + { + "name": "b_grad", + "description": "The gradient for b to be back-propped.", + "typeAttr": "T" + } + ] + }, + { + "name": "BlockLSTMGradV2", + "summary": "Computes the LSTM cell backward propagation for the entire time sequence.", + "description": "This implementation is to be used in conjunction with BlockLSTMV2.", + "attributes": [ + { + "name": "use_peephole", + "type": "boolean", + "description": "Whether to use peephole weights." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`." + } + ], + "inputs": [ + { + "name": "seq_len_max", + "description": "Maximum time length actually used by this input. 
Outputs are padded\nwith zeros beyond this length.", + "type": 9 + }, + { + "name": "x", + "description": "The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).", + "typeAttr": "T" + }, + { + "name": "cs_prev", + "description": "Value of the initial cell state.", + "typeAttr": "T" + }, + { + "name": "h_prev", + "description": "Initial output of cell (to be used for peephole).", + "typeAttr": "T" + }, + { + "name": "w", + "description": "The weight matrix.", + "typeAttr": "T" + }, + { + "name": "wci", + "description": "The weight matrix for input gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wcf", + "description": "The weight matrix for forget gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wco", + "description": "The weight matrix for output gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "b", + "description": "The bias vector.", + "typeAttr": "T" + }, + { + "name": "i", + "description": "The input gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "cs", + "description": "The cell state before the tanh over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "f", + "description": "The forget gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "o", + "description": "The output gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "ci", + "description": "The cell input over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "co", + "description": "The cell after the tanh over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "h", + "description": "The output h vector over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "cs_grad", + "description": "The current gradient of cs.", + "typeAttr": "T" + }, + { + "name": "h_grad", + "description": "The gradient of h vector.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "x_grad", + "description": "The gradient of x to be back-propped.", + "typeAttr": "T" + }, + { + "name": "cs_prev_grad", + "description": "The gradient of cs_prev to be back-propped.", + "typeAttr": "T" + }, + { + "name": "h_prev_grad", + "description": "The gradient of h_prev to be back-propped.", + "typeAttr": "T" + }, + { + "name": "w_grad", + "description": "The gradient for w to be back-propped.", + "typeAttr": "T" + }, + { + "name": "wci_grad", + "description": "The gradient for wci to be back-propped.", + "typeAttr": "T" + }, + { + "name": "wcf_grad", + "description": "The gradient for wcf to be back-propped.", + "typeAttr": "T" + }, + { + "name": "wco_grad", + "description": "The gradient for wco to be back-propped.", + "typeAttr": "T" + }, + { + "name": "b_grad", + "description": "The gradient for b to be back-propped.", + "typeAttr": "T" + } + ] + }, + { + "name": "BlockLSTMV2", + "summary": "Computes the LSTM cell forward propagation for all the time steps.", + "description": "This is equivalent to applying LSTMBlockCell in a loop, like so:\n\n```python\nfor x1 in unpack(x):\n i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(\n x1, cs_prev, h_prev, w, wci, wcf, wco, b)\n cs_prev = cs1\n h_prev = h1\n i.append(i1)\n cs.append(cs1)\n f.append(f1)\n o.append(o1)\n ci.append(ci1)\n co.append(co1)\n h.append(h1)\nreturn pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)\n```\n\nNote that unlike LSTMBlockCell (and BlockLSTM) which uses ICFO gate layout,\nthis op uses IFCO. 
So in order for the above snippet to be equivalent,\nall gate-related outputs should be reordered.", + "attributes": [ + { + "name": "cell_clip", + "type": "float32", + "description": "Value to clip the 'cs' value to.", + "default": 0.0 + }, + { + "name": "use_peephole", + "type": "boolean", + "description": "Whether to use peephole weights.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`." + } + ], + "inputs": [ + { + "name": "seq_len_max", + "description": "Maximum time length actually used by this input. Outputs are padded\nwith zeros beyond this length.", + "type": 9 + }, + { + "name": "x", + "description": "The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).", + "typeAttr": "T" + }, + { + "name": "cs_prev", + "description": "Value of the initial cell state.", + "typeAttr": "T" + }, + { + "name": "h_prev", + "description": "Initial output of cell (to be used for peephole).", + "typeAttr": "T" + }, + { + "name": "w", + "description": "The weight matrix.", + "typeAttr": "T" + }, + { + "name": "wci", + "description": "The weight matrix for input gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wcf", + "description": "The weight matrix for forget gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wco", + "description": "The weight matrix for output gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "b", + "description": "The bias vector.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "i", + "description": "The input gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "cs", + "description": "The cell state before the tanh over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "f", + "description": "The forget gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "o", + "description": "The output gate over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "ci", + "description": "The cell input over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "co", + "description": "The cell after the tanh over the whole time sequence.", + "typeAttr": "T" + }, + { + "name": "h", + "description": "The output h vector over the whole time sequence.", + "typeAttr": "T" + } + ] + }, + { + "name": "BoostedTreesAggregateStats", + "summary": "Aggregates the summary of accumulated stats for the batch.", + "description": "The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket.", + "attributes": [ + { + "name": "max_splits", + "type": "int64", + "description": "int; the maximum number of splits possible in the whole tree.", + "minimum": 1 + }, + { + "name": "num_buckets", + "type": "int64", + "description": "int; equals to the maximum possible value of bucketized feature.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "node_ids", + "description": "int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].", + "type": 3 + }, + { + "name": "gradients", + "description": "float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.", + "type": 1 + }, + { + "name": "hessians", + "description": "float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.", + "type": 1 + }, + { + "name": "feature", + "description": "int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]).", + "type": 3 + } + ], + "outputs": [ + 
{ + "name": "stats_summary", + "description": "output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension])\ncontaining accumulated stats for each node, feature dimension and bucket.", + "type": 1 + } + ] + }, + { + "name": "BoostedTreesBucketize", + "summary": "Bucketize each feature based on bucket boundaries.", + "description": "An op that returns a list of float tensors, where each tensor represents the\nbucketized values for a single feature.", + "attributes": [ + { + "name": "num_features", + "type": "int64", + "description": "inferred int; number of features.", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "float_values", + "description": "float; List of Rank 1 Tensor each containing float values for a single feature.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "bucket_boundaries", + "description": "float; List of Rank 1 Tensors each containing the bucket boundaries for a single\nfeature.", + "numberAttr": "num_features", + "type": 1 + } + ], + "outputs": [ + { + "name": "buckets", + "description": "int; List of Rank 1 Tensors each containing the bucketized values for a single feature.", + "numberAttr": "num_features", + "type": 3 + } + ] + }, + { + "name": "BoostedTreesCalculateBestFeatureSplit", + "summary": "Calculates gains for each feature and returns the best possible split information for the feature.", + "description": "The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.\n\nIt is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.\n\nIn this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).\n\nThe output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.", + "attributes": [ + { + "name": "logits_dimension", + "type": "int64", + "description": "The dimension of logit, i.e., number of classes.", + "minimum": 1 + }, + { + "name": "split_type", + "type": "string", + "description": "A string indicating if this Op should perform inequality split or equality split. Must be one of the following: `inequality`, `equality`.", + "default": "inequality" + } + ], + "inputs": [ + { + "name": "node_id_range", + "description": "A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. 
The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).", + "type": 3 + }, + { + "name": "stats_summary", + "description": "A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature.\nThe first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.", + "type": 1 + }, + { + "name": "l1", + "description": "l1 regularization factor on leaf weights, per instance based.", + "type": 1 + }, + { + "name": "l2", + "description": "l2 regularization factor on leaf weights, per instance based.", + "type": 1 + }, + { + "name": "tree_complexity", + "description": "adjustment to the gain, per leaf based.", + "type": 1 + }, + { + "name": "min_node_weight", + "description": "minimum avg of hessians in a node before required for the node to be considered for splitting.", + "type": 1 + } + ], + "outputs": [ + { + "name": "node_ids", + "description": "A Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.", + "type": 3 + }, + { + "name": "gains", + "description": "A Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.", + "type": 1 + }, + { + "name": "feature_dimensions", + "description": "A Rank 1 tensors indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimension. See above for details like shapes and sizes.", + "type": 3 + }, + { + "name": "thresholds", + "description": "A Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.", + "type": 3 + }, + { + "name": "left_node_contribs", + "description": "A Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.", + "type": 1 + }, + { + "name": "right_node_contribs", + "description": "A Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.", + "type": 1 + }, + { + "name": "split_with_default_directions", + "description": "A Rank 1 tensors indicating the which direction to go if data is missing. See above for details like shapes and sizes.\nInequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.", + "type": 7 + } + ] + }, + { + "name": "BoostedTreesCalculateBestFeatureSplitV2", + "summary": "Calculates gains for each feature and returns the best possible split information for each node. 
However, if no split is found, then no split information is returned for that node.", + "description": "The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.\n\nIt is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.\n\nIn this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).\n\nThe output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.", + "attributes": [ + { + "name": "num_features", + "type": "int64", + "description": "inferred from the size of `stats_summary_list`; the number of total features.", + "minimum": 1 + }, + { + "name": "logits_dimension", + "type": "int64", + "description": "The dimension of logit, i.e., number of classes.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "node_id_range", + "description": "A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).", + "type": 3 + }, + { + "name": "stats_summaries_list", + "description": "A list of Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature.\nThe first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "split_types", + "description": "A Rank 1 tensor indicating if this Op should perform inequality split or equality split per feature.", + "type": 7 + }, + { + "name": "candidate_feature_ids", + "description": "Rank 1 tensor with ids for each feature. This is the real id of the feature.", + "type": 3 + }, + { + "name": "l1", + "description": "l1 regularization factor on leaf weights, per instance based.", + "type": 1 + }, + { + "name": "l2", + "description": "l2 regularization factor on leaf weights, per instance based.", + "type": 1 + }, + { + "name": "tree_complexity", + "description": "adjustment to the gain, per leaf based.", + "type": 1 + }, + { + "name": "min_node_weight", + "description": "minimum avg of hessians in a node before required for the node to be considered for splitting.", + "type": 1 + } + ], + "outputs": [ + { + "name": "node_ids", + "description": "A Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.", + "type": 3 + }, + { + "name": "gains", + "description": "A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.", + "type": 1 + }, + { + "name": "feature_ids", + "description": "A Rank 1 tensors indicating the best feature id for each node. 
See above for details like shapes and sizes.", + "type": 3 + }, + { + "name": "feature_dimensions", + "description": "A Rank 1 tensors indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimension. See above for details like shapes and sizes.", + "type": 3 + }, + { + "name": "thresholds", + "description": "A Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.", + "type": 3 + }, + { + "name": "left_node_contribs", + "description": "A Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.", + "type": 1 + }, + { + "name": "right_node_contribs", + "description": "A Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.", + "type": 1 + }, + { + "name": "split_with_default_directions", + "description": "A Rank 1 tensors indicating the which direction to go if data is missing. See above for details like shapes and sizes.\nInequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.", + "type": 7 + } + ] + }, + { + "name": "BoostedTreesCalculateBestGainsPerFeature", + "summary": "Calculates gains for each feature and returns the best possible split information for the feature.", + "description": "The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.\n\nIt is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.\n\nIn this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).\n\nThe length of output lists are all of the same length, `num_features`.\nThe output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature.", + "attributes": [ + { + "name": "max_splits", + "type": "int64", + "description": "the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.", + "minimum": 1 + }, + { + "name": "num_features", + "type": "int64", + "description": "inferred from the size of `stats_summary_list`; the number of total features.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "node_id_range", + "description": "A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).", + "type": 3 + }, + { + "name": "stats_summary_list", + "description": "A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. 
The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "l1", + "description": "l1 regularization factor on leaf weights, per instance based.", + "type": 1 + }, + { + "name": "l2", + "description": "l2 regularization factor on leaf weights, per instance based.", + "type": 1 + }, + { + "name": "tree_complexity", + "description": "adjustment to the gain, per leaf based.", + "type": 1 + }, + { + "name": "min_node_weight", + "description": "minimum avg of hessians in a node before required for the node to be considered for splitting.", + "type": 1 + } + ], + "outputs": [ + { + "name": "node_ids_list", + "description": "An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.", + "numberAttr": "num_features", + "type": 3 + }, + { + "name": "gains_list", + "description": "An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "thresholds_list", + "description": "An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.", + "numberAttr": "num_features", + "type": 3 + }, + { + "name": "left_node_contribs_list", + "description": "A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "right_node_contribs_list", + "description": "A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.", + "numberAttr": "num_features", + "type": 1 + } + ] + }, + { + "name": "BoostedTreesCenterBias", + "summary": "Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. 
Returns a boolean indicating whether to continue centering.", + "inputs": [ + { + "name": "tree_ensemble_handle", + "description": "Handle to the tree ensemble.", + "type": 20 + }, + { + "name": "mean_gradients", + "description": "A tensor with shape=[logits_dimension] with mean of gradients for a first node.", + "type": 1 + }, + { + "name": "mean_hessians", + "description": "A tensor with shape=[logits_dimension] mean of hessians for a first node.", + "type": 1 + }, + { + "name": "l1", + "description": "l1 regularization factor on leaf weights, per instance based.", + "type": 1 + }, + { + "name": "l2", + "description": "l2 regularization factor on leaf weights, per instance based.", + "type": 1 + } + ], + "outputs": [ + { + "name": "continue_centering", + "description": "Bool, whether to continue bias centering.", + "type": 10 + } + ] + }, + { + "name": "BoostedTreesCreateEnsemble", + "summary": "Creates a tree ensemble model and returns a handle to it.", + "inputs": [ + { + "name": "tree_ensemble_handle", + "description": "Handle to the tree ensemble resource to be created.", + "type": 20 + }, + { + "name": "stamp_token", + "description": "Token to use as the initial value of the resource stamp.", + "type": 9 + }, + { + "name": "tree_ensemble_serialized", + "description": "Serialized proto of the tree ensemble.", + "type": 7 + } + ] + }, + { + "name": "BoostedTreesCreateQuantileStreamResource", + "summary": "Create the Resource for Quantile Streams.", + "attributes": [ + { + "name": "max_elements", + "type": "int64", + "description": "int; The maximum number of data points that can be fed to the stream.", + "default": 1099511627776 + } + ], + "inputs": [ + { + "name": "quantile_stream_resource_handle", + "description": "resource; Handle to quantile stream resource.", + "type": 20 + }, + { + "name": "epsilon", + "description": "float; The required approximation error of the stream resource.", + "type": 1 + }, + { + "name": "num_streams", + "description": "int; The number of streams managed by the resource that shares the same epsilon.", + "type": 9 + } + ] + }, + { + "name": "BoostedTreesDeserializeEnsemble", + "summary": "Deserializes a serialized tree ensemble config and replaces current tree", + "description": "ensemble.", + "inputs": [ + { + "name": "tree_ensemble_handle", + "description": "Handle to the tree ensemble.", + "type": 20 + }, + { + "name": "stamp_token", + "description": "Token to use as the new value of the resource stamp.", + "type": 9 + }, + { + "name": "tree_ensemble_serialized", + "description": "Serialized proto of the ensemble.", + "type": 7 + } + ] + }, + { + "name": "BoostedTreesEnsembleResourceHandleOp", + "summary": "Creates a handle to a BoostedTreesEnsembleResource", + "attributes": [ + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "resource", + "type": 20 + } + ] + }, + { + "name": "BoostedTreesExampleDebugOutputs", + "summary": "Debugging/model interpretability outputs for each example.", + "description": "It traverses all the trees and computes debug metrics for individual examples,\nsuch as getting split feature ids and logits after each split along the decision\npath used to compute directional feature contributions.", + "attributes": [ + { + "name": "num_bucketized_features", + "type": "int64", + "description": "Inferred.", + "minimum": 1 + }, + { + "name": "logits_dimension", + "type": "int64", + "description": "scalar, dimension 
of the logits, to be used for constructing the protos in\nexamples_debug_outputs_serialized." + } + ], + "inputs": [ + { + "name": "tree_ensemble_handle", + "type": 20 + }, + { + "name": "bucketized_features", + "description": "A list of rank 1 Tensors containing bucket id for each\nfeature.", + "numberAttr": "num_bucketized_features", + "type": 3 + } + ], + "outputs": [ + { + "name": "examples_debug_outputs_serialized", + "description": "Output rank 1 Tensor containing a proto serialized as a string for each example.", + "type": 7 + } + ] + }, + { + "name": "BoostedTreesFlushQuantileSummaries", + "summary": "Flush the quantile summaries from each quantile stream resource.", + "description": "An op that outputs a list of quantile summaries of a quantile stream resource.\nEach summary Tensor is rank 2, containing summaries (value, weight, min_rank,\nmax_rank) for a single feature.", + "attributes": [ + { + "name": "num_features", + "type": "int64", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "quantile_stream_resource_handle", + "description": "resource handle referring to a QuantileStreamResource.", + "type": 20 + } + ], + "outputs": [ + { + "name": "summaries", + "numberAttr": "num_features", + "type": 1 + } + ] + }, + { + "name": "BoostedTreesGetEnsembleStates", + "summary": "Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.", + "inputs": [ + { + "name": "tree_ensemble_handle", + "description": "Handle to the tree ensemble.", + "type": 20 + } + ], + "outputs": [ + { + "name": "stamp_token", + "description": "Stamp token of the tree ensemble resource.", + "type": 9 + }, + { + "name": "num_trees", + "description": "The number of trees in the tree ensemble resource.", + "type": 3 + }, + { + "name": "num_finalized_trees", + "description": "The number of trees that were finished successfully.", + "type": 3 + }, + { + "name": "num_attempted_layers", + "description": "The number of layers we attempted to build (but not necessarily succeeded).", + "type": 3 + }, + { + "name": "last_layer_nodes_range", + "description": "Rank size 2 tensor that contains start and end ids of the nodes in the latest\nlayer.", + "type": 3 + } + ] + }, + { + "name": "BoostedTreesMakeQuantileSummaries", + "summary": "Makes the summary of quantiles for the batch.", + "description": "An op that takes a list of tensors (one tensor per feature) and outputs the\nquantile summaries for each tensor.", + "attributes": [ + { + "name": "num_features", + "type": "int64", + "description": "int; Inferred from the size of float_values.\nThe number of float features.", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "float_values", + "description": "float; List of Rank 1 Tensors each containing values for a single feature.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "example_weights", + "description": "float; Rank 1 Tensor with weights per instance.", + "type": 1 + }, + { + "name": "epsilon", + "description": "float; The required maximum approximation error.", + "type": 1 + } + ], + "outputs": [ + { + "name": "summaries", + "description": "float; List of Rank 2 Tensors each containing the quantile summary\n(value, weight, min_rank, max_rank) of a single feature.", + "numberAttr": "num_features", + "type": 1 + } + ] + }, + { + "name": "BoostedTreesMakeStatsSummary", + "summary": "Makes the summary of accumulated stats for the batch.", + "description": "The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each 
example.", + "attributes": [ + { + "name": "max_splits", + "type": "int64", + "description": "int; the maximum number of splits possible in the whole tree.", + "minimum": 1 + }, + { + "name": "num_buckets", + "type": "int64", + "description": "int; equals to the maximum possible value of bucketized feature.", + "minimum": 1 + }, + { + "name": "num_features", + "type": "int64", + "description": "int; inferred from the size of bucketized_features_list; the number of features.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "node_ids", + "description": "int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.", + "type": 3 + }, + { + "name": "gradients", + "description": "float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.", + "type": 1 + }, + { + "name": "hessians", + "description": "float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.", + "type": 1 + }, + { + "name": "bucketized_features_list", + "description": "int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).", + "numberAttr": "num_features", + "type": 3 + } + ], + "outputs": [ + { + "name": "stats_summary", + "description": "output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of 4th dimension refers to gradients, and the second to hessians.", + "type": 1 + } + ] + }, + { + "name": "BoostedTreesPredict", + "summary": "Runs multiple additive regression ensemble predictors on input instances and", + "description": "computes the logits. It is designed to be used during prediction.\nIt traverses all the trees and calculates the final score for each instance.", + "attributes": [ + { + "name": "num_bucketized_features", + "type": "int64", + "description": "Inferred.", + "minimum": 1 + }, + { + "name": "logits_dimension", + "type": "int64", + "description": "scalar, dimension of the logits, to be used for partial logits\nshape." + } + ], + "inputs": [ + { + "name": "tree_ensemble_handle", + "type": 20 + }, + { + "name": "bucketized_features", + "description": "A list of rank 1 Tensors containing bucket id for each\nfeature.", + "numberAttr": "num_bucketized_features", + "type": 3 + } + ], + "outputs": [ + { + "name": "logits", + "description": "Output rank 2 Tensor containing logits for each example.", + "type": 1 + } + ] + }, + { + "name": "BoostedTreesQuantileStreamResourceAddSummaries", + "summary": "Add the quantile summaries to each quantile stream resource.", + "description": "An op that adds a list of quantile summaries to a quantile stream resource. 
Each\nsummary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank)\nfor a single feature.", + "attributes": [ + { + "name": "num_features", + "type": "int64", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "quantile_stream_resource_handle", + "description": "resource handle referring to a QuantileStreamResource.", + "type": 20 + }, + { + "name": "summaries", + "description": "float; List of Rank 2 Tensors each containing the summaries for a single feature.", + "numberAttr": "num_features", + "type": 1 + } + ] + }, + { + "name": "BoostedTreesQuantileStreamResourceDeserialize", + "summary": "Deserialize bucket boundaries and ready flag into current QuantileAccumulator.", + "description": "An op that deserializes bucket boundaries and the 'boundaries ready' flag into the current QuantileAccumulator.", + "attributes": [ + { + "name": "num_streams", + "type": "int64", + "description": "inferred int; number of features to get bucket boundaries for.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "quantile_stream_resource_handle", + "description": "resource handle referring to a QuantileStreamResource.", + "type": 20 + }, + { + "name": "bucket_boundaries", + "description": "float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.", + "numberAttr": "num_streams", + "type": 1 + } + ] + }, + { + "name": "BoostedTreesQuantileStreamResourceFlush", + "summary": "Flush the summaries for a quantile stream resource.", + "description": "An op that flushes the summaries for a quantile stream resource.", + "attributes": [ + { + "name": "generate_quantiles", + "type": "boolean", + "description": "bool; If True, the output will be the num_quantiles for each stream where the ith\nentry is the ith quantile of the input with an approximation error of epsilon.\nDuplicate values may be present.\nIf False, the output will be the points in the histogram that we got, which roughly\ntranslates to 1/epsilon boundaries, without any duplicates.\nDefaults to False.", + "default": false + } + ], + "inputs": [ + { + "name": "quantile_stream_resource_handle", + "description": "resource handle referring to a QuantileStreamResource.", + "type": 20 + }, + { + "name": "num_buckets", + "description": "int; approximate number of buckets unless using generate_quantiles.", + "type": 9 + } + ] + }, + { + "name": "BoostedTreesQuantileStreamResourceGetBucketBoundaries", + "summary": "Generate the bucket boundaries for each feature based on accumulated summaries.", + "description": "An op that returns a list of float tensors for a quantile stream resource. 
Each\ntensor is Rank 1 containing bucket boundaries for a single feature.", + "attributes": [ + { + "name": "num_features", + "type": "int64", + "description": "inferred int; number of features to get bucket boundaries for.", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "quantile_stream_resource_handle", + "description": "resource handle referring to a QuantileStreamResource.", + "type": 20 + } + ], + "outputs": [ + { + "name": "bucket_boundaries", + "description": "float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.", + "numberAttr": "num_features", + "type": 1 + } + ] + }, + { + "name": "BoostedTreesQuantileStreamResourceHandleOp", + "summary": "Creates a handle to a BoostedTreesQuantileStreamResource.", + "attributes": [ + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "resource", + "type": 20 + } + ] + }, + { + "name": "BoostedTreesSerializeEnsemble", + "summary": "Serializes the tree ensemble to a proto.", + "inputs": [ + { + "name": "tree_ensemble_handle", + "description": "Handle to the tree ensemble.", + "type": 20 + } + ], + "outputs": [ + { + "name": "stamp_token", + "description": "Stamp token of the tree ensemble resource.", + "type": 9 + }, + { + "name": "tree_ensemble_serialized", + "description": "Serialized proto of the ensemble.", + "type": 7 + } + ] + }, + { + "name": "BoostedTreesSparseAggregateStats", + "summary": "Aggregates the summary of accumulated stats for the batch.", + "description": "The summary stats contains gradients and hessians accumulated for each node, bucket and dimension id.", + "attributes": [ + { + "name": "max_splits", + "type": "int64", + "description": "int; the maximum number of splits possible in the whole tree.", + "minimum": 1 + }, + { + "name": "num_buckets", + "type": "int64", + "description": "int; equals to the maximum possible value of bucketized feature + 1.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "node_ids", + "description": "int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].", + "type": 3 + }, + { + "name": "gradients", + "description": "float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.", + "type": 1 + }, + { + "name": "hessians", + "description": "float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.", + "type": 1 + }, + { + "name": "feature_indices", + "description": "int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]).\nNumber of sparse entries across all instances from the batch. The first value is\nthe index of the instance, the second is dimension of the feature. The second axis\ncan only have 2 values, i.e., the input dense version of Tensor can only be matrix.", + "type": 3 + }, + { + "name": "feature_values", + "description": "int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]).\nNumber of sparse entries across all instances from the batch. 
The first value is\nthe index of the instance, the second is dimension of the feature.", + "type": 3 + }, + { + "name": "feature_shape", + "description": "int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]).\nThe first axis can only have 2 values, [batch_size, feature_dimension].", + "type": 3 + } + ], + "outputs": [ + { + "name": "stats_summary_indices", + "description": "int32; Rank 2 indices of summary sparse Tensors (shape=[number of non zero statistics, 4])\nThe second axis can only be 4 including node id, feature dimension, bucket id, and statistics_dimension.\nstatistics_dimension = logits_dimension + hessian_dimension.", + "type": 3 + }, + { + "name": "stats_summary_values", + "description": "output Rank 1 Tensor (shape=[number of non zero statistics])", + "type": 1 + }, + { + "name": "stats_summary_shape", + "description": "output Rank 1 Tensor (shape=[4])\nThe tensor has following 4 values: [max_splits, feature_dimension, num_buckets, statistics_dimension],\nwhere statistics_dimension = gradient_dimension + hessian_dimension. gradient_dimension\nis the same as label_dimension, i.e., the output space. hessian_dimension can be the same\nas logits dimension when diagonal hessian is used, or label_dimension^2 when full\nhessian is used.", + "type": 3 + } + ] + }, + { + "name": "BoostedTreesSparseCalculateBestFeatureSplit", + "summary": "Calculates gains for each feature and returns the best possible split information for the feature.", + "description": "The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.\n\nIt is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.\n\nIn this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).\n\nThe output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.", + "attributes": [ + { + "name": "logits_dimension", + "type": "int64", + "description": "The dimension of logit, i.e., number of classes.", + "minimum": 1 + }, + { + "name": "split_type", + "type": "string", + "description": "A string indicating if this Op should perform inequality split or equality split. Must be one of the following: `inequality`.", + "default": "inequality" + } + ], + "inputs": [ + { + "name": "node_id_range", + "description": "A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).", + "type": 3 + }, + { + "name": "stats_summary_indices", + "description": "A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. 
The second dimension contains node id, feature dimension, bucket id, and stats dim.\nstats dim is the sum of logits dimension and hessian dimension, hessian dimension can either be logits dimension if diagonal hessian is used, or logits dimension^2 if full hessian is used.", + "type": 3 + }, + { + "name": "stats_summary_values", + "description": "A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices.", + "type": 1 + }, + { + "name": "stats_summary_shape", + "description": "A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim].", + "type": 3 + }, + { + "name": "l1", + "description": "l1 regularization factor on leaf weights, per instance based.", + "type": 1 + }, + { + "name": "l2", + "description": "l2 regularization factor on leaf weights, per instance based.", + "type": 1 + }, + { + "name": "tree_complexity", + "description": "adjustment to the gain, per leaf based.", + "type": 1 + }, + { + "name": "min_node_weight", + "description": "minimum avg of hessians in a node before required for the node to be considered for splitting.", + "type": 1 + } + ], + "outputs": [ + { + "name": "node_ids", + "description": "A Rank 1 tensor indicating possible node ids that can be split.", + "type": 3 + }, + { + "name": "gains", + "description": "A Rank 1 tensor indicating the best gains to split each node.", + "type": 1 + }, + { + "name": "feature_dimensions", + "description": "A Rank 1 tensor indicating the best feature dimension for each feature to split for each node.", + "type": 3 + }, + { + "name": "thresholds", + "description": "A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node.", + "type": 3 + }, + { + "name": "left_node_contribs", + "description": "A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes to the left direction by the given threshold for each feature.\nThis value will be used to make the left node value by adding to the parent node value. Second dimension size is logits dimension.", + "type": 1 + }, + { + "name": "right_node_contribs", + "description": "A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.", + "type": 1 + }, + { + "name": "split_with_default_directions", + "description": "A Rank 1 tensor indicating which direction to go if data is missing.\nInequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.", + "type": 7 + } + ] + }, + { + "name": "BoostedTreesTrainingPredict", + "summary": "Runs multiple additive regression ensemble predictors on input instances and", + "description": "computes the update to cached logits. It is designed to be used during training.\nIt traverses the trees starting from cached tree id and cached node id and\ncalculates the updates to be pushed to the cache.", + "attributes": [ + { + "name": "num_bucketized_features", + "type": "int64", + "description": "Inferred.", + "minimum": 1 + }, + { + "name": "logits_dimension", + "type": "int64", + "description": "scalar, dimension of the logits, to be used for partial logits\nshape." 
+ } + ], + "inputs": [ + { + "name": "tree_ensemble_handle", + "type": 20 + }, + { + "name": "cached_tree_ids", + "description": "Rank 1 Tensor containing cached tree ids which is the starting\ntree of prediction.", + "type": 3 + }, + { + "name": "cached_node_ids", + "description": "Rank 1 Tensor containing cached node id which is the starting\nnode of prediction.", + "type": 3 + }, + { + "name": "bucketized_features", + "description": "A list of rank 1 Tensors containing bucket id for each\nfeature.", + "numberAttr": "num_bucketized_features", + "type": 3 + } + ], + "outputs": [ + { + "name": "partial_logits", + "description": "Rank 2 Tensor containing logits update (with respect to cached\nvalues stored) for each example.", + "type": 1 + }, + { + "name": "tree_ids", + "description": "Rank 1 Tensor containing new tree ids for each example.", + "type": 3 + }, + { + "name": "node_ids", + "description": "Rank 1 Tensor containing new node ids in the new tree_ids.", + "type": 3 + } + ] + }, + { + "name": "BoostedTreesUpdateEnsemble", + "summary": "Updates the tree ensemble by either adding a layer to the last tree being grown", + "description": "or by starting a new tree.", + "attributes": [ + { + "name": "pruning_mode", + "type": "int64", + "description": "0-No pruning, 1-Pre-pruning, 2-Post-pruning.", + "minimum": 0 + }, + { + "name": "num_features", + "type": "int64", + "description": "Number of features that have best splits returned. INFERRED.", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "tree_ensemble_handle", + "description": "Handle to the ensemble variable.", + "type": 20 + }, + { + "name": "feature_ids", + "description": "Rank 1 tensor with ids for each feature. This is the real id of\nthe feature that will be used in the split.", + "type": 3 + }, + { + "name": "node_ids", + "description": "List of rank 1 tensors representing the nodes for which this feature\nhas a split.", + "numberAttr": "num_features", + "type": 3 + }, + { + "name": "gains", + "description": "List of rank 1 tensors representing the gains for each of the feature's\nsplit.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "thresholds", + "description": "List of rank 1 tensors representing the thresholds for each of the\nfeature's split.", + "numberAttr": "num_features", + "type": 3 + }, + { + "name": "left_node_contribs", + "description": "List of rank 2 tensors with left leaf contribs for each of\nthe feature's splits. Will be added to the previous node values to constitute\nthe values of the left nodes.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "right_node_contribs", + "description": "List of rank 2 tensors with right leaf contribs for each\nof the feature's splits. Will be added to the previous node values to constitute\nthe values of the right nodes.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "max_depth", + "description": "Max depth of the tree to build.", + "type": 3 + }, + { + "name": "learning_rate", + "description": "shrinkage constant for each new tree.", + "type": 1 + } + ] + }, + { + "name": "BoostedTreesUpdateEnsembleV2", + "summary": "Updates the tree ensemble by adding a layer to the last tree being grown", + "description": "or by starting a new tree.", + "attributes": [ + { + "name": "num_features", + "type": "int64", + "description": "Number of features that have best splits returned.
INFERRED.", + "minimum": 0 + }, + { + "name": "logits_dimension", + "type": "int64", + "description": "scalar, dimension of the logits", + "default": 1 + }, + { + "name": "num_groups", + "type": "int64", + "description": "Number of groups of split information to process, where a group contains feature\nids that are processed together in BoostedTreesCalculateBestFeatureSplitOpV2.\nINFERRED.", + "minimum": 1, + "default": 1 + } + ], + "inputs": [ + { + "name": "tree_ensemble_handle", + "description": "Handle to the ensemble variable.", + "type": 20 + }, + { + "name": "feature_ids", + "description": "Rank 1 tensor with ids for each feature. This is the real id of\nthe feature that will be used in the split.", + "numberAttr": "num_groups", + "type": 3 + }, + { + "name": "dimension_ids", + "description": "List of rank 1 tensors representing the dimension in each feature.", + "numberAttr": "num_features", + "type": 3 + }, + { + "name": "node_ids", + "description": "List of rank 1 tensors representing the nodes for which this feature\nhas a split.", + "numberAttr": "num_features", + "type": 3 + }, + { + "name": "gains", + "description": "List of rank 1 tensors representing the gains for each of the feature's\nsplit.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "thresholds", + "description": "List of rank 1 tensors representing the thesholds for each of the\nfeature's split.", + "numberAttr": "num_features", + "type": 3 + }, + { + "name": "left_node_contribs", + "description": "List of rank 2 tensors with left leaf contribs for each of\nthe feature's splits. Will be added to the previous node values to constitute\nthe values of the left nodes.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "right_node_contribs", + "description": "List of rank 2 tensors with right leaf contribs for each\nof the feature's splits. Will be added to the previous node values to constitute\nthe values of the right nodes.", + "numberAttr": "num_features", + "type": 1 + }, + { + "name": "split_types", + "description": "List of rank 1 tensors representing the split type for each feature.", + "numberAttr": "num_features", + "type": 7 + }, + { + "name": "max_depth", + "description": "Max depth of the tree to build.", + "type": 3 + }, + { + "name": "learning_rate", + "description": "shrinkage const for each new tree.", + "type": 1 + }, + { + "name": "pruning_mode", + "description": "0-No pruning, 1-Pre-pruning, 2-Post-pruning.", + "type": 3 + } + ] + }, + { + "name": "BroadcastArgs", + "summary": "Return the shape of s0 op s1 with broadcast.", + "description": "Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the\nbroadcasted shape. 
`s0`, `s1` and `r0` are all integer vectors.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "s0", + "typeAttr": "T" + }, + { + "name": "s1", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "r0", + "typeAttr": "T" + } + ] + }, + { + "name": "BroadcastGradientArgs", + "summary": "Return the reduction indices for computing gradients of s0 op s1 with broadcast.", + "description": "This is typically used by gradient computations for a broadcasting operation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "s0", + "typeAttr": "T" + }, + { + "name": "s1", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "r0", + "typeAttr": "T" + }, + { + "name": "r1", + "typeAttr": "T" + } + ] + }, + { + "name": "BroadcastTo", + "summary": "Broadcast an array for a compatible shape.", + "description": "Broadcasting is the process of making arrays have compatible shapes\nfor arithmetic operations. Two shapes are compatible if for each\ndimension pair they are either equal or one of them is one.\n\nFor example:\n\n>>> x = tf.constant([[1, 2, 3]]) # Shape (1, 3,)\n>>> y = tf.broadcast_to(x, [2, 3])\n>>> print(y)\ntf.Tensor(\n [[1 2 3]\n [1 2 3]], shape=(2, 3), dtype=int32)\n\nIn the above example, the input Tensor with the shape of `[1, 3]`\nis broadcast to an output Tensor with shape `[2, 3]`.\n\nWhen broadcasting, if a tensor has fewer axes than necessary its shape is\npadded on the left with ones. So this gives the same result as the previous\nexample:\n\n>>> x = tf.constant([1, 2, 3]) # Shape (3,)\n>>> y = tf.broadcast_to(x, [2, 3])\n\n\nWhen doing broadcasted operations such as multiplying a tensor\nby a scalar, broadcasting (usually) confers some time or space\nbenefit, as the broadcasted tensor is never materialized.\n\nHowever, `broadcast_to` does not carry with it any such benefits.\nThe newly-created tensor takes the full memory of the broadcasted\nshape. (In a graph context, `broadcast_to` might be fused into a\nsubsequent operation and then be optimized away, however.)", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A Tensor to broadcast.", + "typeAttr": "T" + }, + { + "name": "shape", + "description": "A 1-D `int` Tensor. The shape of the desired output.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "A Tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "Bucketize", + "summary": "Bucketizes 'input' based on 'boundaries'.", + "description": "For example, if the inputs are\n boundaries = [0, 10, 100]\n input = [[-5, 10000]\n [150, 10]\n [5, 100]]\n\nthen the output will be\n output = [[0, 3]\n [3, 2]\n [1, 3]]", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`." + }, + { + "name": "boundaries", + "type": "float32[]", + "description": "A sorted list of floats giving the boundaries of the buckets."
+ } + ], + "inputs": [ + { + "name": "input", + "description": "A Tensor of any shape containing int or float values.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Same shape as 'input', with each value of input replaced by its bucket index.\n\n@compatibility(numpy)\nEquivalent to np.digitize.\n@end_compatibility", + "type": 3 + } + ] + }, + { + "name": "BytesProducedStatsDataset", + "summary": "Records the bytes size of each element of `input_dataset` in a StatsAggregator.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "tag", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "CSRSparseMatrixComponents", + "summary": "Reads out the CSR components at batch `index`.", + "description": "This op is meant only for debugging / testing, and its interface is not expected\nto be stable.", + "attributes": [ + { + "name": "type", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "csr_sparse_matrix", + "description": "A batched CSRSparseMatrix.", + "type": 21 + }, + { + "name": "index", + "description": "The index in `csr_sparse_matrix`'s batch.", + "type": 3 + } + ], + "outputs": [ + { + "name": "row_ptrs", + "description": "An array containing CSR matrix row pointers.", + "type": 3 + }, + { + "name": "col_inds", + "description": "An array containing CSR matrix column indices.", + "type": 3 + }, + { + "name": "values", + "description": "An array containing CSR matrix nonzero values.", + "typeAttr": "type" + } + ] + }, + { + "name": "CSRSparseMatrixToDense", + "summary": "Convert a (possibly batched) CSRSparseMatrix to dense.", + "attributes": [ + { + "name": "type", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "sparse_input", + "description": "A batched CSRSparseMatrix.", + "type": 21 + } + ], + "outputs": [ + { + "name": "dense_output", + "description": "A dense tensor.", + "typeAttr": "type" + } + ] + }, + { + "name": "CSRSparseMatrixToSparseTensor", + "summary": "Converts a (possibly batched) CSRSparseMatrix to a SparseTensor.", + "attributes": [ + { + "name": "type", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "sparse_matrix", + "description": "A (possibly batched) CSRSparseMatrix.", + "type": 21 + } + ], + "outputs": [ + { + "name": "indices", + "description": "SparseTensor indices.", + "type": 9 + }, + { + "name": "values", + "description": "SparseTensor values.", + "typeAttr": "type" + }, + { + "name": "dense_shape", + "description": "SparseTensor dense shape.", + "type": 9 + } + ] + }, + { + "name": "CSVDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1, + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`, `string`."
+ }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "filenames", + "type": 7 + }, + { + "name": "compression_type", + "type": 7 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "header", + "type": 10 + }, + { + "name": "field_delim", + "type": 7 + }, + { + "name": "use_quote_delim", + "type": 10 + }, + { + "name": "na_value", + "type": 7 + }, + { + "name": "select_cols", + "type": 9 + }, + { + "name": "record_defaults", + "typeListAttr": "output_types" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "CSVDatasetV2", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1, + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`, `string`." + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "filenames", + "type": 7 + }, + { + "name": "compression_type", + "type": 7 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "header", + "type": 10 + }, + { + "name": "field_delim", + "type": 7 + }, + { + "name": "use_quote_delim", + "type": 10 + }, + { + "name": "na_value", + "type": 7 + }, + { + "name": "select_cols", + "type": 9 + }, + { + "name": "record_defaults", + "typeListAttr": "output_types" + }, + { + "name": "exclude_cols", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "CTCBeamSearchDecoder", + "summary": "Performs beam search decoding on the logits given in input.", + "description": "A note about the attribute merge_repeated: For the beam search decoder,\nthis means that if consecutive entries in a beam are the same, only\nthe first of these is emitted. That is, when the top path is \"A B B B B\",\n\"A B\" is returned if merge_repeated = True but \"A B B B B\" is\nreturned if merge_repeated = False.", + "attributes": [ + { + "name": "beam_width", + "type": "int64", + "description": "A scalar >= 0 (beam search beam width).", + "minimum": 1 + }, + { + "name": "top_paths", + "type": "int64", + "description": "A scalar >= 0, <= beam_width (controls output size).", + "minimum": 1 + }, + { + "name": "merge_repeated", + "type": "boolean", + "description": "If true, merge repeated classes in output.", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "inputs", + "description": "3-D, shape: `(max_time x batch_size x num_classes)`, the logits.", + "typeAttr": "T" + }, + { + "name": "sequence_length", + "description": "A vector containing sequence lengths, size `(batch)`.", + "type": 3 + } + ], + "outputs": [ + { + "name": "decoded_indices", + "description": "A list (length: top_paths) of indices matrices. Matrix j,\nsize `(total_decoded_outputs[j] x 2)`, has indices of a\n`SparseTensor`. The rows store: [batch, time].", + "numberAttr": "top_paths", + "type": 9 + }, + { + "name": "decoded_values", + "description": "A list (length: top_paths) of values vectors. Vector j,\nsize `(length total_decoded_outputs[j])`, has the values of a\n`SparseTensor`. The vector stores the decoded classes for beam j.", + "numberAttr": "top_paths", + "type": 9 + }, + { + "name": "decoded_shape", + "description": "A list (length: top_paths) of shape vectors.
Vector j,\nsize `(2)`, stores the shape of the decoded `SparseTensor[j]`.\nIts values are: `[batch_size, max_decoded_length[j]]`.", + "numberAttr": "top_paths", + "type": 9 + }, + { + "name": "log_probability", + "description": "A matrix, shaped: `(batch_size x top_paths)`. The\nsequence log-probabilities.", + "typeAttr": "T" + } + ] + }, + { + "name": "CTCGreedyDecoder", + "summary": "Performs greedy decoding on the logits given in inputs.", + "description": "A note about the attribute merge_repeated: if enabled, when\nconsecutive logits' maximum indices are the same, only the first of\nthese is emitted. Labeling the blank '*', the sequence \"A B B * B B\"\nbecomes \"A B B\" if merge_repeated = True and \"A B B B B\" if\nmerge_repeated = False.\n\nRegardless of the value of merge_repeated, if the maximum index of a given\ntime and batch corresponds to the blank, index `(num_classes - 1)`, no new\nelement is emitted.", + "attributes": [ + { + "name": "merge_repeated", + "type": "boolean", + "description": "If True, merge repeated classes in output.", + "default": false + }, + { + "name": "blank_index", + "type": "int64", + "default": -1 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "inputs", + "description": "3-D, shape: `(max_time x batch_size x num_classes)`, the logits.", + "typeAttr": "T" + }, + { + "name": "sequence_length", + "description": "A vector containing sequence lengths, size `(batch_size)`.", + "type": 3 + } + ], + "outputs": [ + { + "name": "decoded_indices", + "description": "Indices matrix, size `(total_decoded_outputs x 2)`,\nof a `SparseTensor`. The rows store: [batch, time].", + "type": 9 + }, + { + "name": "decoded_values", + "description": "Values vector, size: `(total_decoded_outputs)`,\nof a `SparseTensor`. The vector stores the decoded classes.", + "type": 9 + }, + { + "name": "decoded_shape", + "description": "Shape vector, size `(2)`, of the decoded SparseTensor.\nValues are: `[batch_size, max_decoded_length]`.", + "type": 9 + }, + { + "name": "log_probability", + "description": "Matrix, size `(batch_size x 1)`, containing sequence\nlog-probabilities.", + "typeAttr": "T" + } + ] + }, + { + "name": "CTCLoss", + "summary": "Calculates the CTC Loss (log probability) for each batch entry. Also calculates", + "description": "the gradient. This class performs the softmax operation for you, so inputs\nshould be e.g. linear projections of outputs by an LSTM.", + "attributes": [ + { + "name": "preprocess_collapse_repeated", + "type": "boolean", + "description": "Scalar, if true then repeated labels are\ncollapsed prior to the CTC calculation.", + "default": false + }, + { + "name": "ctc_merge_repeated", + "type": "boolean", + "description": "Scalar. If set to false, *during* CTC calculation\nrepeated non-blank labels will not be merged and are interpreted as\nindividual labels. This is a simplified version of CTC.", + "default": true + }, + { + "name": "ignore_longer_outputs_than_inputs", + "type": "boolean", + "description": "Scalar. 
If set to true, during CTC\ncalculation, items that have longer output sequences than input sequences\nare skipped: they don't contribute to the loss term and have zero-gradient.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "inputs", + "description": "3-D, shape: `(max_time x batch_size x num_classes)`, the logits.", + "typeAttr": "T" + }, + { + "name": "labels_indices", + "description": "The indices of a `SparseTensor`.\n`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for\n`(batch b, time t)`.", + "type": 9 + }, + { + "name": "labels_values", + "description": "The values (labels) associated with the given batch and time.", + "type": 3 + }, + { + "name": "sequence_length", + "description": "A vector containing sequence lengths (batch).", + "type": 3 + } + ], + "outputs": [ + { + "name": "loss", + "description": "A vector (batch) containing log-probabilities.", + "typeAttr": "T" + }, + { + "name": "gradient", + "description": "The gradient of `loss`. 3-D, shape:\n`(max_time x batch_size x num_classes)`.", + "typeAttr": "T" + } + ] + }, + { + "name": "CTCLossV2", + "summary": "Calculates the CTC Loss (log probability) for each batch entry. Also calculates", + "description": "the gradient. This class performs the softmax operation for you, so inputs\nshould be e.g. linear projections of outputs by an LSTM.", + "attributes": [ + { + "name": "preprocess_collapse_repeated", + "type": "boolean", + "description": "Scalar, if true then repeated labels are\ncollapsed prior to the CTC calculation.", + "default": false + }, + { + "name": "ctc_merge_repeated", + "type": "boolean", + "description": "Scalar. If set to false, *during* CTC calculation\nrepeated non-blank labels will not be merged and are interpreted as\nindividual labels. This is a simplified version of CTC.", + "default": true + }, + { + "name": "ignore_longer_outputs_than_inputs", + "type": "boolean", + "description": "Scalar. If set to true, during CTC\ncalculation, items that have longer output sequences than input sequences\nare skipped: they don't contribute to the loss term and have zero-gradient.", + "default": false + } + ], + "inputs": [ + { + "name": "inputs", + "description": "3-D, shape: `(max_time x batch_size x num_classes)`, the logits. Default blank\nlabel is 0 rather than num_classes - 1.", + "type": 1 + }, + { + "name": "labels_indices", + "description": "The indices of a `SparseTensor`.\n`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for\n`(batch b, time t)`.", + "type": 9 + }, + { + "name": "labels_values", + "description": "The values (labels) associated with the given batch and time.", + "type": 3 + }, + { + "name": "sequence_length", + "description": "A vector containing sequence lengths (batch).", + "type": 3 + } + ], + "outputs": [ + { + "name": "loss", + "description": "A vector (batch) containing log-probabilities.", + "type": 1 + }, + { + "name": "gradient", + "description": "The gradient of `loss`. 3-D, shape:\n`(max_time x batch_size x num_classes)`.", + "type": 1 + } + ] + }, + { + "name": "CacheDataset", + "summary": "Creates a dataset that caches elements from `input_dataset`.", + "description": "A CacheDataset will iterate over the input_dataset, and store tensors. If the\ncache already exists, the cache will be used. If the cache is inappropriate\n(e.g.
cannot be opened, contains tensors of the wrong shape / size), an error\nwill be returned when used.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "filename", + "description": "A path on the filesystem where we should cache the dataset. Note: this\nwill be a directory.", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "CacheDatasetV2", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "filename", + "type": 7 + }, + { + "name": "cache", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Case", + "summary": "An n-way switch statement which calls a single branch function.", + "description": " An n-way switch statement, implementing the following:\n ```\n switch (branch_index) {\n case 0:\n output = branches[0](input);\n break;\n case 1:\n output = branches[1](input);\n break;\n ...\n case [[nbranches-1]]:\n default:\n output = branches[nbranches-1](input);\n break;\n }\n ```", + "attributes": [ + { + "name": "Tin", + "type": "type[]", + "description": "A list of input types.", + "minimum": 0 + }, + { + "name": "Tout", + "type": "type[]", + "description": "A list of output types.", + "minimum": 0 + }, + { + "name": "branches", + "type": "function[]", + "description": " A list of functions each of which takes 'inputs' and returns a list of\n tensors, whose types are the same as what every other branch returns.", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "default": [] + } + ], + "inputs": [ + { + "name": "branch_index", + "description": "The branch selector, an int32 Tensor.", + "type": 3 + }, + { + "name": "input", + "description": "A list of input tensors passed to the branch function.", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of return values.", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "Cast", + "summary": "Cast x of type SrcT to y of DstT.", + "attributes": [ + { + "name": "SrcT", + "type": "type" + }, + { + "name": "DstT", + "type": "type" + }, + { + "name": "Truncate", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "SrcT" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "DstT" + } + ] + }, + { + "name": "Ceil", + "summary": "Returns element-wise smallest integer not less than x.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "CheckNumerics", + "summary": "Checks a tensor for NaN and Inf values.", + "description": "When run, reports an `InvalidArgument` error if `tensor` has any values\nthat are not a number (NaN) or infinity (Inf).
Otherwise, returns the input\ntensor.\n\nExample usage:\n\n``` python\na = tf.Variable(1.0)\ntf.debugging.check_numerics(a, message='')\n\nb = tf.Variable(np.nan)\ntry:\n tf.debugging.check_numerics(b, message='Checking b')\nexcept Exception as e:\n assert \"Checking b : Tensor had NaN values\" in e.message\n\nc = tf.Variable(np.inf)\ntry:\n tf.debugging.check_numerics(c, message='Checking c')\nexcept Exception as e:\n assert \"Checking c : Tensor had Inf values\" in e.message\n```\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "message", + "type": "string", + "description": "Prefix of the error message." + } + ], + "inputs": [ + { + "name": "tensor", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "CheckNumericsV2", + "summary": "Checks a tensor for NaN, -Inf and +Inf values.", + "description": "When run, reports an `InvalidArgument` error if `tensor` has any values\nthat are not a number (NaN) or infinity (Inf). Otherwise, returns the input\ntensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf\nin the errors it throws.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "message", + "type": "string", + "description": "Prefix of the error message." + } + ], + "inputs": [ + { + "name": "tensor", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "Cholesky", + "summary": "Computes the Cholesky decomposition of one or more square matrices.", + "description": "The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices.\n\nThe input has to be symmetric and positive definite. Only the lower-triangular\npart of the input will be used for this operation. The upper-triangular part\nwill not be read.\n\nThe output is a tensor of the same shape as the input\ncontaining the Cholesky decompositions for all input submatrices `[..., :, :]`.\n\n**Note**: The gradient computation on GPU is faster for large matrices but\nnot for large batch dimensions when the submatrices are small. In this\ncase it might be faster to use the CPU.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape is `[..., M, M]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Shape is `[..., M, M]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "CholeskyGrad", + "summary": "Computes the reverse mode backpropagated gradient of the Cholesky algorithm.", + "description": "For an explanation see \"Differentiation of the Cholesky algorithm\" by\nIain Murray http://arxiv.org/abs/1602.07527.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "l", + "description": "Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "df/dl where f is some scalar function. 
Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Symmetrized version of df/dA . Shape is `[..., M, M]`", + "typeAttr": "T" + } + ] + }, + { + "name": "ChooseFastestBranchDataset", + "attributes": [ + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "num_elements_per_branch", + "type": "int64", + "minimum": 1 + }, + { + "name": "branches", + "type": "function[]", + "minimum": 1 + }, + { + "name": "other_arguments_lengths", + "type": "int64[]", + "minimum": 1 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "ratio_numerator", + "type": 9 + }, + { + "name": "ratio_denominator", + "type": 9 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ChooseFastestDataset", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 2 + }, + { + "name": "num_experiments", + "type": "int64" + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_datasets", + "numberAttr": "N", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ClipByValue", + "summary": "Clips tensor values to a specified min and max.", + "description": "Given a tensor `t`, this operation returns a tensor of the same type and\nshape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.\nAny values less than `clip_value_min` are set to `clip_value_min`. Any values\ngreater than `clip_value_max` are set to `clip_value_max`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "t", + "description": "A `Tensor`.", + "typeAttr": "T" + }, + { + "name": "clip_value_min", + "description": "A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape\nas `t`. The minimum value to clip by.", + "typeAttr": "T" + }, + { + "name": "clip_value_max", + "description": "A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape\nas `t`. 
The maximum value to clip by.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A clipped `Tensor` with the same shape as input 't'.", + "typeAttr": "T" + } + ] + }, + { + "name": "CloseSummaryWriter", + "inputs": [ + { + "name": "writer", + "type": 20 + } + ] + }, + { + "name": "CollateTPUEmbeddingMemory", + "summary": "An op that merges the string-encoded memory config protos from all hosts.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "memory_configs", + "description": "String-encoded memory config protos containing metadata about\nthe memory allocations reserved for TPUEmbedding across all hosts.", + "numberAttr": "N", + "type": 7 + } + ], + "outputs": [ + { + "name": "merged_memory_config", + "type": 7 + } + ] + }, + { + "name": "CollectiveAllToAllV2", + "summary": "Mutually exchanges multiple tensors of identical type and shape.", + "description": "`is_stateless` means each op does not need control dependencies to other\ncollective ops. In this case, keys that are unique at runtime\n(e.g. `instance_key`) should be used to distinguish collective groups.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float32`, `float16`, `float64`, `int32`, `int64`." + }, + { + "name": "communication_hint", + "type": "string", + "default": "auto" + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + }, + { + "name": "is_stateless", + "type": "boolean", + "default": false + }, + { + "name": "Nordering_token", + "type": "int64", + "minimum": 0, + "default": 0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "group_size", + "type": 3 + }, + { + "name": "group_key", + "type": 3 + }, + { + "name": "instance_key", + "type": 3 + }, + { + "name": "ordering_token", + "numberAttr": "Nordering_token", + "type": 20 + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveAllToAllV3", + "summary": "Mutually exchanges multiple tensors of identical type and shape.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float32`, `float16`, `float64`, `int32`, `int64`." + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "communicator", + "type": 20 + }, + { + "name": "group_assignment", + "type": 3 + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveAssignGroupV2", + "summary": "Assign group keys based on group assignment.", + "inputs": [ + { + "name": "group_assignment", + "type": 3 + }, + { + "name": "device_index", + "type": 3 + }, + { + "name": "base_key", + "type": 3 + } + ], + "outputs": [ + { + "name": "group_size", + "type": 3 + }, + { + "name": "group_key", + "type": 3 + } + ] + }, + { + "name": "CollectiveBcastRecv", + "summary": "Receives a tensor value broadcast from another device.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bool`, `float32`, `float16`, `float64`, `int32`, `int64`." 
+ }, + { + "name": "group_size", + "type": "int64" + }, + { + "name": "group_key", + "type": "int64" + }, + { + "name": "instance_key", + "type": "int64" + }, + { + "name": "shape", + "type": "shape" + }, + { + "name": "communication_hint", + "type": "string", + "default": "auto" + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveBcastRecvV2", + "summary": "Receives a tensor value broadcast from another device.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bool`, `float32`, `float16`, `float64`, `int32`, `int64`." + }, + { + "name": "Tshape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "communication_hint", + "type": "string", + "default": "auto" + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + } + ], + "inputs": [ + { + "name": "group_size", + "type": 3 + }, + { + "name": "group_key", + "type": 3 + }, + { + "name": "instance_key", + "type": 3 + }, + { + "name": "shape", + "typeAttr": "Tshape" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveBcastSend", + "summary": "Broadcasts a tensor value to one or more other devices.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bool`, `float32`, `float16`, `float64`, `int32`, `int64`." + }, + { + "name": "group_size", + "type": "int64" + }, + { + "name": "group_key", + "type": "int64" + }, + { + "name": "instance_key", + "type": "int64" + }, + { + "name": "shape", + "type": "shape" + }, + { + "name": "communication_hint", + "type": "string", + "default": "auto" + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveBcastSendV2", + "summary": "Broadcasts a tensor value to one or more other devices.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bool`, `float32`, `float16`, `float64`, `int32`, `int64`." + }, + { + "name": "communication_hint", + "type": "string", + "default": "auto" + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "group_size", + "type": 3 + }, + { + "name": "group_key", + "type": 3 + }, + { + "name": "instance_key", + "type": 3 + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveGather", + "summary": "Mutually accumulates multiple tensors of identical type and shape.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float16`, `float64`, `int32`, `int64`." 
+ }, + { + "name": "group_size", + "type": "int64" + }, + { + "name": "group_key", + "type": "int64" + }, + { + "name": "instance_key", + "type": "int64" + }, + { + "name": "shape", + "type": "shape" + }, + { + "name": "communication_hint", + "type": "string", + "default": "auto" + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveGatherV2", + "summary": "Mutually accumulates multiple tensors of identical type and shape.", + "description": "`is_stateless` means each op does not need control dependencies to other\ncollective ops. In this case, keys that are unique at runtime\n(e.g. `instance_key`) should be used to distinguish collective groups.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float16`, `float64`, `int32`, `int64`." + }, + { + "name": "communication_hint", + "type": "string", + "default": "auto" + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + }, + { + "name": "is_stateless", + "type": "boolean", + "default": false + }, + { + "name": "Nordering_token", + "type": "int64", + "minimum": 0, + "default": 0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "group_size", + "type": 3 + }, + { + "name": "group_key", + "type": 3 + }, + { + "name": "instance_key", + "type": 3 + }, + { + "name": "ordering_token", + "numberAttr": "Nordering_token", + "type": 20 + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveInitializeCommunicator", + "summary": "Initializes a group for collective operations.", + "attributes": [ + { + "name": "communication_hint", + "type": "string", + "default": "auto" + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + } + ], + "inputs": [ + { + "name": "group_key", + "type": 3 + }, + { + "name": "rank", + "type": 3 + }, + { + "name": "group_size", + "type": 3 + } + ], + "outputs": [ + { + "name": "communicator", + "type": 20 + } + ] + }, + { + "name": "CollectivePermute", + "summary": "An Op to permute tensors across replicated TPU instances.", + "description": "Each instance supplies its own input.\n\nFor example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing\nsource_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs:\n`[D, A, B, C]`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The type of elements to be exchanged. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "input", + "description": "The local input to be permuted. Currently only supports float and\nbfloat16.", + "typeAttr": "T" + }, + { + "name": "source_target_pairs", + "description": "A tensor with shape [num_pairs, 2].", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The permuted input.", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveReduce", + "summary": "Mutually reduces multiple tensors of identical type and shape.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float32`, `float16`, `float64`, `int32`, `int64`." 
+ }, + { + "name": "group_size", + "type": "int64" + }, + { + "name": "group_key", + "type": "int64" + }, + { + "name": "instance_key", + "type": "int64" + }, + { + "name": "merge_op", + "type": "string", + "description": "Must be one of the following: `Min`, `Max`, `Mul`, `Add`." + }, + { + "name": "final_op", + "type": "string", + "description": "Must be one of the following: `Id`, `Div`." + }, + { + "name": "subdiv_offsets", + "type": "int64[]" + }, + { + "name": "wait_for", + "type": "int64[]", + "default": [] + }, + { + "name": "communication_hint", + "type": "string", + "default": "auto" + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveReduceScatterV2", + "summary": "Mutually reduces multiple tensors of identical type and shape and scatters the result.", + "description": "`is_stateless` means each op does not need control dependencies to other\ncollective ops. In this case, keys that are unique at runtime\n(e.g. `instance_key`) should be used to distinguish collective groups.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float32`, `float16`, `float64`, `int32`, `int64`." + }, + { + "name": "merge_op", + "type": "string", + "description": "Must be one of the following: `Min`, `Max`, `Mul`, `Add`." + }, + { + "name": "final_op", + "type": "string", + "description": "Must be one of the following: `Id`, `Div`." + }, + { + "name": "communication_hint", + "type": "string", + "default": "auto" + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + }, + { + "name": "is_stateless", + "type": "boolean", + "default": false + }, + { + "name": "Nordering_token", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "max_subdivs_per_device", + "type": "int64", + "default": -1 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "group_size", + "type": 3 + }, + { + "name": "group_key", + "type": 3 + }, + { + "name": "instance_key", + "type": 3 + }, + { + "name": "ordering_token", + "numberAttr": "Nordering_token", + "type": 20 + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveReduceV2", + "summary": "Mutually reduces multiple tensors of identical type and shape.", + "description": "`is_stateless` means each op does not need control dependencies to other\ncollective ops. In this case, keys that are unique at runtime\n(e.g. `instance_key`) should be used to distinguish collective groups.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float32`, `float16`, `float64`, `int32`, `int64`." + }, + { + "name": "merge_op", + "type": "string", + "description": "Must be one of the following: `Min`, `Max`, `Mul`, `Add`." + }, + { + "name": "final_op", + "type": "string", + "description": "Must be one of the following: `Id`, `Div`." 
+ }, + { + "name": "communication_hint", + "type": "string", + "default": "auto" + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + }, + { + "name": "is_stateless", + "type": "boolean", + "default": false + }, + { + "name": "Nordering_token", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "max_subdivs_per_device", + "type": "int64", + "default": -1 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "group_size", + "type": 3 + }, + { + "name": "group_key", + "type": 3 + }, + { + "name": "instance_key", + "type": 3 + }, + { + "name": "ordering_token", + "numberAttr": "Nordering_token", + "type": 20 + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CollectiveReduceV3", + "summary": "Mutually reduces multiple tensors of identical type and shape.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float32`, `float16`, `float64`, `int32`, `int64`." + }, + { + "name": "reduction", + "type": "string", + "description": "Must be one of the following: `Min`, `Max`, `Mul`, `Add`." + }, + { + "name": "timeout_seconds", + "type": "float32", + "default": 0.0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "communicator", + "type": 20 + }, + { + "name": "group_assignment", + "type": 3 + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "CombinedNonMaxSuppression", + "summary": "Greedily selects a subset of bounding boxes in descending order of score.", + "description": "This operation performs non_max_suppression on the inputs per batch, across\nall classes.\nPrunes away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system. Also note that\nthis algorithm is invariant to orthogonal transformations and translations\nof the coordinate system; thus translations or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\nThe output of this operation is the final boxes, scores and classes tensor\nreturned after performing non_max_suppression.", + "attributes": [ + { + "name": "pad_per_class", + "type": "boolean", + "description": "If false, the output nmsed boxes, scores and classes\nare padded/clipped to `max_total_size`. If true, the\noutput nmsed boxes, scores and classes are padded to be of length\n`max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in\nwhich case it is clipped to `max_total_size`. Defaults to false.", + "default": false + }, + { + "name": "clip_boxes", + "type": "boolean", + "description": "If true, assume the box coordinates are between [0, 1] and clip the output boxes\nif they fall beyond [0, 1]. If false, do not do clipping and output the box\ncoordinates as they are.", + "default": true + } + ], + "inputs": [ + { + "name": "boxes", + "description": "A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`.
If `q` is 1 then the\nsame boxes are used for all classes; otherwise, if `q` is equal to the number of\nclasses, class-specific boxes are used.", + "type": 1 + }, + { + "name": "scores", + "description": "A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]`\nrepresenting a single score corresponding to each box (each row of boxes).", + "type": 1 + }, + { + "name": "max_output_size_per_class", + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression per class.", + "type": 3 + }, + { + "name": "max_total_size", + "description": "An int32 scalar representing the maximum number of boxes retained over all\nclasses. Note that setting this value to a large number may result in OOM error\ndepending on the system workload.", + "type": 3 + }, + { + "name": "iou_threshold", + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU.", + "type": 1 + }, + { + "name": "score_threshold", + "description": "A 0-D float tensor representing the threshold for deciding when to remove\nboxes based on score.", + "type": 1 + } + ], + "outputs": [ + { + "name": "nmsed_boxes", + "description": "A [batch_size, max_detections, 4] float32 tensor\ncontaining the non-max suppressed boxes.", + "type": 1 + }, + { + "name": "nmsed_scores", + "description": "A [batch_size, max_detections] float32 tensor\ncontaining the scores for the boxes.", + "type": 1 + }, + { + "name": "nmsed_classes", + "description": "A [batch_size, max_detections] float32 tensor\ncontaining the classes for the boxes.", + "type": 1 + }, + { + "name": "valid_detections", + "description": "A [batch_size] int32 tensor indicating the number of\nvalid detections per batch item. Only the top num_detections[i] entries in\nnms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the\nentries are zero paddings.", + "type": 3 + } + ] + }, + { + "name": "Complex", + "summary": "Converts two real numbers to a complex number.", + "description": "Given a tensor `real` representing the real part of a complex number, and a\ntensor `imag` representing the imaginary part of a complex number, this\noperation returns complex numbers elementwise of the form \\\\(a + bj\\\\), where\n*a* represents the `real` part and *b* represents the `imag` part.\n\nThe input tensors `real` and `imag` must have the same shape.\n\nFor example:\n\n```\n# tensor 'real' is [2.25, 3.25]\n# tensor `imag` is [4.75, 5.75]\ntf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tout", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "real", + "typeAttr": "T" + }, + { + "name": "imag", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Tout" + } + ] + }, + { + "name": "ComplexAbs", + "summary": "Computes the complex absolute value of a tensor.", + "description": "Given a tensor `x` of complex numbers, this operation returns a tensor of type\n`float` or `double` that is the absolute value of each element in `x`. All\nelements in `x` must be complex numbers of the form \\\\(a + bj\\\\).
The absolute\nvalue is computed as \\\\( \\sqrt{a^2 + b^2}\\\\).\n\nFor example:\n\n>>> x = tf.complex(3.0, 4.0)\n>>> print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy())\n5.0\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + }, + { + "name": "Tout", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "Tout" + } + ] + }, + { + "name": "CompositeTensorVariantFromComponents", + "summary": "Encodes an `ExtensionType` value into a `variant` scalar Tensor.", + "description": "Returns a scalar variant tensor containing a single `CompositeTensorVariant`\nwith the specified Tensor components and TypeSpec.", + "attributes": [ + { + "name": "metadata", + "type": "string", + "description": "String serialization for the TypeSpec. (Note: the encoding for the TypeSpec\nmay change in future versions of TensorFlow.)" + }, + { + "name": "Tcomponents", + "type": "type[]", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "components", + "description": "The component tensors for the extension type value.", + "typeListAttr": "Tcomponents" + } + ], + "outputs": [ + { + "name": "encoded", + "description": "A `variant` Tensor containing the encoded value.", + "type": 21 + } + ] + }, + { + "name": "CompositeTensorVariantToComponents", + "summary": "Decodes a `variant` scalar Tensor into an `ExtensionType` value.", + "description": "Returns the Tensor components encoded in a `CompositeTensorVariant`.\n\nRaises an error if `type_spec_proto` doesn't match the TypeSpec\nin `encoded`.", + "attributes": [ + { + "name": "metadata", + "type": "string", + "description": "String serialization for the TypeSpec. Must be compatible with the\n`TypeSpec` contained in `encoded`. (Note: the encoding for the TypeSpec\nmay change in future versions of TensorFlow.)" + }, + { + "name": "Tcomponents", + "type": "type[]", + "description": "Expected dtypes for components.", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "encoded", + "description": "A scalar `variant` Tensor containing an encoded ExtensionType value.", + "type": 21 + } + ], + "outputs": [ + { + "name": "components", + "description": "The component tensors for the ExtensionType value in `encoded`.", + "typeListAttr": "Tcomponents" + } + ] + }, + { + "name": "CompressElement", + "summary": "Compresses a dataset element.", + "attributes": [ + { + "name": "input_types", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "components", + "typeListAttr": "input_types" + } + ], + "outputs": [ + { + "name": "compressed", + "type": 21 + } + ] + }, + { + "name": "ComputeAccidentalHits", + "summary": "Computes the ids of the positions in sampled_candidates that match true_labels.", + "description": "When doing log-odds NCE, the result of this op should be passed through a\nSparseToDense op, then added to the logits of the sampled candidates. This has\nthe effect of 'removing' the sampled labels that match the true labels by\nmaking the classifier sure that they are sampled labels.", + "attributes": [ + { + "name": "num_true", + "type": "int64", + "description": "Number of true labels per context."
+ }, + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + } + ], + "inputs": [ + { + "name": "true_classes", + "description": "The true_classes output of UnpackSparseLabels.", + "type": 9 + }, + { + "name": "sampled_candidates", + "description": "The sampled_candidates output of CandidateSampler.", + "type": 9 + } + ], + "outputs": [ + { + "name": "indices", + "description": "A vector of indices corresponding to rows of true_candidates.", + "type": 3 + }, + { + "name": "ids", + "description": "A vector of IDs of positions in sampled_candidates that match a true_label\nfor the row with the corresponding index in indices.", + "type": 9 + }, + { + "name": "weights", + "description": "A vector of the same length as indices and ids, in which each element\nis -FLOAT_MAX.", + "type": 1 + } + ] + }, + { + "name": "ComputeBatchSize", + "summary": "Computes the static batch size of a dataset sans partial batches.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "batch_size", + "type": 9 + } + ] + }, + { + "name": "ComputeDedupDataSize", + "summary": "An op that computes the size of the deduplication data from the embedding core and returns the updated config.", + "description": "This op computes the size of the deduplication data and provides this\ninformation to the op that computes the tuple mask of the deduplication data, so that op can\nhave a static output shape.", + "attributes": [ + { + "name": "config", + "type": "string", + "description": "Serialized TPUEmbeddingConfiguration proto." + } + ], + "outputs": [ + { + "name": "num_elements", + "description": "The size of the deduplicated data from infeed.", + "type": 3 + } + ] + }, + { + "name": "ComputeDedupDataTupleMask", + "summary": "An op that computes the tuple mask of the deduplication data from the embedding core.", + "description": "The deduplication data received from the embedding core is a Tensor with\ntype=DT_VARIANT. The tensor itself is an XLA nested tuple, whose elements are\nrank 1 tensors. This op represents the types and lengths of these elements.", + "attributes": [ + { + "name": "config", + "type": "string", + "description": "Serialized TPUEmbeddingConfiguration proto." + } + ], + "outputs": [ + { + "name": "output_shape", + "description": "A 2-D int tensor representing the mask of the deduplication data tuple generated by\n`XlaRecvTPUEmbeddingDeduplicationData`. The tuple has several integer and float\ntype 1-D tensor tuple elements. The first dimension of this output_shape 2-D\ntensor is the tensor type of the tuple elements: `0` represents an integer tensor, `1`\nrepresents a float tensor. The second dimension of `output_shape` gives the length of\neach tuple element.", + "type": 3 + } + ] + }, + { + "name": "Concat", + "category": "Tensor", + "summary": "Concatenates tensors along one dimension.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 2 + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "concat_dim", + "description": "0-D. The dimension along which to concatenate. Must be in the\nrange [0, rank(values)).", + "type": 3 + }, + { + "name": "values", + "description": "The `N` Tensors to concatenate.
Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`.", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension. This tensor's shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes.", + "typeAttr": "T" + } + ] + }, + { + "name": "ConcatOffset", + "summary": "Computes offsets of concat inputs within its output.", + "description": "For example:\n\n>>> x = [2, 2, 7]\n>>> y = [2, 3, 7]\n>>> z = [2, 9, 7]\n>>> offsets = concat_offset(1, [x, y, z])\n>>> [list(off.numpy()) for off in offsets]\n[[0, 0, 0], [0, 2, 0], [0, 5, 0]]\n\nThis is typically used by gradient computations for a concat operation.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 2 + }, + { + "name": "shape_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "concat_dim", + "description": "The dimension along which to concatenate.", + "type": 3 + }, + { + "name": "shape", + "description": "The `N` int32 or int64 vectors representing the shape of the tensors being concatenated.", + "numberAttr": "N", + "typeAttr": "shape_type" + } + ], + "outputs": [ + { + "name": "offset", + "description": "The `N` vectors representing the starting offset\nof input tensors within the concatenated output with type matching `shape`.", + "numberAttr": "N", + "typeAttr": "shape_type" + } + ] + }, + { + "name": "ConcatV2", + "category": "Tensor", + "summary": "Concatenates tensors along one dimension.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 2 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "values", + "description": "List of `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`.", + "numberAttr": "N", + "typeAttr": "T" + }, + { + "name": "axis", + "description": "0-D. The dimension along which to concatenate. Must be in the\nrange [-rank(values), rank(values)).", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension. This tensor's shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes.", + "typeAttr": "T" + } + ] + }, + { + "name": "ConcatenateDataset", + "summary": "Creates a dataset that concatenates `input_dataset` with `another_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "another_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ConditionalAccumulator", + "summary": "A conditional accumulator for aggregating gradients.", + "description": "The accumulator accepts gradients marked with local_step greater than or\nequal to the most recent global_step known to the accumulator. 
The\naverage can be extracted from the accumulator, provided sufficient\ngradients have been accumulated. Extracting the average automatically\nresets the aggregate to 0, and increments the global_step recorded by\nthe accumulator.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the value being accumulated. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "shape", + "type": "shape", + "description": "The shape of the values, can be [], in which case shape is unknown." + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this accumulator will be shared under the\ngiven name across multiple sessions.", + "default": "" + }, + { + "name": "reduction_type", + "type": "string", + "description": "Must be one of the following: `MEAN`, `SUM`.", + "default": "MEAN" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the accumulator.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "ConfigureAndInitializeGlobalTPU", + "summary": "An op that sets up the centralized structures for a distributed TPU system.", + "attributes": [ + { + "name": "use_tfrt_host_runtime", + "type": "boolean", + "default": true + } + ], + "outputs": [ + { + "name": "output", + "description": "A vector containing the global TPU id of each TPU on the host.", + "type": 3 + } + ] + }, + { + "name": "ConfigureDistributedTPU", + "summary": "Sets up the centralized structures for a distributed TPU system.", + "attributes": [ + { + "name": "embedding_config", + "type": "string", + "description": "Reserved. Do not use.", + "default": "" + }, + { + "name": "tpu_embedding_config", + "type": "string", + "description": "Serialized tensorflow.tpu.TPUEmbeddingConfiguration that\ndescribes the embedding lookups of the program.", + "default": "" + }, + { + "name": "is_global_init", + "type": "boolean", + "description": "Reserved. Do not use.", + "default": false + }, + { + "name": "enable_whole_mesh_compilations", + "type": "boolean", + "default": false + }, + { + "name": "compilation_failure_closes_chips", + "type": "boolean", + "default": true + }, + { + "name": "tpu_cancellation_closes_chips", + "type": "int64", + "default": 0 + } + ], + "outputs": [ + { + "name": "topology", + "description": "A serialized tensorflow.tpu.TopologyProto that describes the TPU\ntopology.", + "type": 7 + } + ] + }, + { + "name": "ConfigureTPUEmbedding", + "summary": "Sets up TPUEmbedding in a distributed TPU system.", + "attributes": [ + { + "name": "config", + "type": "string", + "description": "Serialized tensorflow.tpu.TPUEmbeddingConfiguration that\ndescribes the embedding lookups of the program." + } + ] + }, + { + "name": "ConfigureTPUEmbeddingHost", + "summary": "An op that configures the TPUEmbedding software on a host.", + "attributes": [ + { + "name": "config", + "type": "string", + "description": "A TPUEmbeddingConfiguration proto serialized to a string,\ndescribing the desired TPUEmbedding configuration." 
+ } + ], + "inputs": [ + { + "name": "common_config", + "description": "A string-encoded common configuration proto containing metadata\nabout the TPUEmbedding partitioner output.", + "type": 7 + }, + { + "name": "memory_config", + "description": "A string-encoded memory config proto containing metadata about\nthe memory allocations reserved for TPUEmbedding.", + "type": 7 + } + ], + "outputs": [ + { + "name": "network_config", + "description": "A string containing metadata about the hostname and RPC port\nused for communication with this host.", + "type": 7 + } + ] + }, + { + "name": "ConfigureTPUEmbeddingMemory", + "summary": "An op that configures the TPUEmbedding software on a host.", + "inputs": [ + { + "name": "common_config", + "description": "A string-encoded CommonConfiguration proto containing metadata\nabout the TPUEmbedding partitioner output and the HBM size (in bytes) required\nfor operation.", + "type": 7 + } + ], + "outputs": [ + { + "name": "memory_config", + "description": "A string-encoded memory configuration containing metadata about\nthe memory allocations reserved for TPUEmbedding.", + "type": 7 + } + ] + }, + { + "name": "Conj", + "summary": "Returns the complex conjugate of a complex number.", + "description": "Given a tensor `input` of complex numbers, this operation returns a tensor of\ncomplex numbers that are the complex conjugate of each element in `input`. The\ncomplex numbers in `input` must be of the form \\\\(a + bj\\\\), where *a* is the\nreal part and *b* is the imaginary part.\n\nThe complex conjugate returned by this operation is of the form \\\\(a - bj\\\\).\n\nFor example:\n\n```\n# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`, `variant`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "ConjugateTranspose", + "summary": "Shuffle dimensions of x according to a permutation and conjugate the result.", + "description": "The output `y` has the same rank as `x`. 
The shapes of `x` and `y` satisfy:\n `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`\n `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tperm", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "perm", + "typeAttr": "Tperm" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "ConnectTPUEmbeddingHosts", + "summary": "An op that sets up communication between TPUEmbedding host software instances", + "description": "after ConfigureTPUEmbeddingHost has been called on each host.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "network_configs", + "description": "Strings containing metadata about the hostname and RPC port\nused for communication with all hosts.", + "numberAttr": "N", + "type": 7 + } + ] + }, + { + "name": "Const", + "category": "Constant", + "summary": "Returns a constant tensor.", + "attributes": [ + { + "name": "value", + "type": "tensor", + "description": "Attr `value` is the tensor to return." + }, + { + "name": "dtype", + "type": "type" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ConsumeMutexLock", + "summary": "This op consumes a lock created by `MutexLock`.", + "description": "This op exists to consume a tensor created by `MutexLock` (other than\ndirect control dependencies). It should be the only op that consumes the tensor,\nand will raise an error if it is not. Its only purpose is to keep the\nmutex lock tensor alive until it is consumed by this op.\n\n**NOTE**: This operation must run on the same device as its input. This may\nbe enforced via the `colocate_with` mechanism.", + "inputs": [ + { + "name": "mutex_lock", + "description": "A tensor returned by `MutexLock`.", + "type": 21 + } + ] + }, + { + "name": "ControlTrigger", + "summary": "Does nothing. Serves as a control trigger for scheduling.", + "description": "Only useful as a placeholder for control edges." + }, + { + "name": "Conv", + "summary": "Computes an N-D convolution given (N+1+batch_dims)-D `input` and (N+2)-D `filter` tensors.", + "description": "General function for computing an N-D convolution. It is required that\n`1 <= N <= 3`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length `N+2`. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[N+1] = 1`." + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`." + }, + { + "name": "explicit_paddings", + "type": "int64[]", + "description": "If `padding` is `\"EXPLICIT\"`, the list of explicit padding amounts. For the ith\ndimension, the amount of padding inserted before and after the dimension is\n`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If\n`padding` is not `\"EXPLICIT\"`, `explicit_paddings` must be empty.", + "default": [] + }, + { + "name": "data_format", + "type": "string", + "description": "Used to set the data format. 
By default (`CHANNELS_LAST`), uses \n`NHWC (2D) / NDHWC (3D)`; if `CHANNELS_FIRST`, uses `NCHW (2D) / NCDHW (3D)`. Must be one of the following: `CHANNELS_FIRST`, `CHANNELS_LAST`.", + "default": "CHANNELS_LAST" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length `N+2`. The dilation factor for each dimension of\n`input`. If set to `k > 1`, there will be `k-1` skipped cells between each\nfilter element on that dimension. The dimension order is determined by the\nvalue of `channels_last_format`, see above for details. Dilations in the batch\nand depth dimensions must be 1.", + "default": [] + }, + { + "name": "batch_dims", + "type": "int64", + "description": "A positive integer specifying the number of batch dimensions for the input\ntensor. Should be less than the rank of the input tensor.", + "default": 1 + }, + { + "name": "groups", + "type": "int64", + "description": "A positive integer specifying the number of groups in which the input is split\nalong the channel axis. Each group is convolved separately with\n`filters / groups` filters. The output is the concatenation of all the groups\nresults along the channel axis. Input channels and filters must both be\ndivisible by groups.", + "default": 1 + } + ], + "inputs": [ + { + "name": "input", + "description": "Tensor of type T and shape `batch_shape + spatial_shape + [in_channels]` in the\ncase that `channels_last_format = true` or shape\n`batch_shape + [in_channels] + spatial_shape` if `channels_last_format = false`.\nspatial_shape is N-dimensional with `N=2` or `N=3`.\nAlso note that `batch_shape` is dictated by the parameter `batch_dims`\nand defaults to 1.", + "typeAttr": "T" + }, + { + "name": "filter", + "description": "An `(N+2)-D` Tensor with the same type as `input` and shape\n`spatial_filter_shape + [in_channels, out_channels]`, where spatial_filter_shape\nis N-dimensional with `N=2` or `N=3`.\n", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A (N+1+batch_dims)-D tensor. The dimension order is determined by the value of\n`channels_last_format`, see below for details.", + "typeAttr": "T" + } + ] + }, + { + "name": "Conv2D", + "category": "Layer", + "summary": "Computes a 2-D convolution given 4-D `input` and `filter` tensors.", + "description": "Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\nand a filter / kernel tensor of shape\n`[filter_height, filter_width, in_channels, out_channels]`, this op\nperforms the following:\n\n1. Flattens the filter to a 2-D matrix with shape\n `[filter_height * filter_width * in_channels, output_channels]`.\n2. Extracts image patches from the input tensor to form a *virtual*\n tensor of shape `[batch, out_height, out_width,\n filter_height * filter_width * in_channels]`.\n3. For each patch, right-multiplies the filter matrix and the image patch\n vector.\n\nIn detail, with the default NHWC format,\n\n output[b, i, j, k] =\n sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *\n filter[di, dj, q, k]\n\nMust have `strides[0] = strides[3] = 1`. For the most common case of the same\nhorizontal and vertical strides, `strides = [1, stride, stride, 1]`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length 4. The stride of the sliding window for each\ndimension of `input`. 
The dimension order is determined by the value of\n`data_format`, see below for details." + }, + { + "name": "use_cudnn_on_gpu", + "type": "boolean", + "default": true + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`." + }, + { + "name": "explicit_paddings", + "type": "int64[]", + "description": "If `padding` is `\"EXPLICIT\"`, the list of explicit padding amounts. For the ith\ndimension, the amount of padding inserted before and after the dimension is\n`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If\n`padding` is not `\"EXPLICIT\"`, `explicit_paddings` must be empty.", + "default": [] + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each\nfilter element on that dimension. The dimension order is determined by the\nvalue of `data_format`, see above for details. Dilations in the batch and\ndepth dimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "A 4-D tensor. The dimension order is interpreted according to the value\nof `data_format`, see below for details.", + "typeAttr": "T" + }, + { + "name": "filter", + "description": "A 4-D tensor of shape\n`[filter_height, filter_width, in_channels, out_channels]`", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A 4-D tensor. The dimension order is determined by the value of\n`data_format`, see below for details.", + "typeAttr": "T" + } + ] + }, + { + "name": "Conv2DBackpropFilter", + "summary": "Computes the gradients of convolution with respect to the filter.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat." + }, + { + "name": "use_cudnn_on_gpu", + "type": "boolean", + "default": true + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`." + }, + { + "name": "explicit_paddings", + "type": "int64[]", + "description": "If `padding` is `\"EXPLICIT\"`, the list of explicit padding amounts. For the ith\ndimension, the amount of padding inserted before and after the dimension is\n`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If\n`padding` is not `\"EXPLICIT\"`, `explicit_paddings` must be empty.", + "default": [] + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. 
With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. The dimension order is determined by the value of\n`data_format`, see above for details. Dilations in the batch and depth\ndimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape `[batch, in_height, in_width, in_channels]`.", + "typeAttr": "T" + }, + { + "name": "filter_sizes", + "description": "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, out_channels]` tensor.", + "type": 3 + }, + { + "name": "out_backprop", + "description": "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.\nthe `filter` input of the convolution.", + "typeAttr": "T" + } + ] + }, + { + "name": "Conv2DBackpropFilterV2", + "summary": "Computes the gradients of convolution with respect to the filter.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat." + }, + { + "name": "use_cudnn_on_gpu", + "type": "boolean", + "default": true + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`." + }, + { + "name": "explicit_paddings", + "type": "int64[]", + "description": "If `padding` is `\"EXPLICIT\"`, the list of explicit padding amounts. For the ith\ndimension, the amount of padding inserted before and after the dimension is\n`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If\n`padding` is not `\"EXPLICIT\"`, `explicit_paddings` must be empty.", + "default": [] + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. The dimension order is determined by the value of\n`data_format`, see above for details. 
Dilations in the batch and depth\ndimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape `[batch, in_height, in_width, in_channels]`.", + "typeAttr": "T" + }, + { + "name": "filter", + "description": "4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.\nOnly shape of tensor is used.", + "typeAttr": "T" + }, + { + "name": "out_backprop", + "description": "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.\nthe `filter` input of the convolution.", + "typeAttr": "T" + } + ] + }, + { + "name": "Conv2DBackpropInput", + "summary": "Computes the gradients of convolution with respect to the input.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat." + }, + { + "name": "use_cudnn_on_gpu", + "type": "boolean", + "default": true + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`." + }, + { + "name": "explicit_paddings", + "type": "int64[]", + "description": "If `padding` is `\"EXPLICIT\"`, the list of explicit padding amounts. For the ith\ndimension, the amount of padding inserted before and after the dimension is\n`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If\n`padding` is not `\"EXPLICIT\"`, `explicit_paddings` must be empty.", + "default": [] + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. The dimension order is determined by the value of\n`data_format`, see above for details. Dilations in the batch and depth\ndimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input_sizes", + "description": "An integer vector representing the shape of `input`,\nwhere `input` is a 4-D `[batch, height, width, channels]` tensor.", + "type": 3 + }, + { + "name": "filter", + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.", + "typeAttr": "T" + }, + { + "name": "out_backprop", + "description": "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient\nw.r.t. 
the input of the convolution.", + "typeAttr": "T" + } + ] + }, + { + "name": "Conv2DBackpropInputV2", + "summary": "Computes the gradients of convolution with respect to the input.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat." + }, + { + "name": "use_cudnn_on_gpu", + "type": "boolean", + "default": true + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`." + }, + { + "name": "explicit_paddings", + "type": "int64[]", + "description": "If `padding` is `\"EXPLICIT\"`, the list of explicit padding amounts. For the ith\ndimension, the amount of padding inserted before and after the dimension is\n`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If\n`padding` is not `\"EXPLICIT\"`, `explicit_paddings` must be empty.", + "default": [] + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. The dimension order is determined by the value of\n`data_format`, see above for details. Dilations in the batch and depth\ndimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape `[batch, in_height, in_width, in_channels]`.\nOnly shape of tensor is used.", + "typeAttr": "T" + }, + { + "name": "filter", + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.", + "typeAttr": "T" + }, + { + "name": "out_backprop", + "description": "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient\nw.r.t. the input of the convolution.", + "typeAttr": "T" + } + ] + }, + { + "name": "Conv3D", + "summary": "Computes a 3-D convolution given 5-D `input` and `filter` tensors.", + "description": "In signal processing, cross-correlation is a measure of similarity of\ntwo waveforms as a function of a time-lag applied to one of them. This\nis also known as a sliding dot product or sliding inner-product.\n\nOur Conv3D implements a form of cross-correlation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. 
Must have `strides[0] = strides[4] = 1`.", + "minimum": 5 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "default": "NDHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 5. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each\nfilter element on that dimension. The dimension order is determined by the\nvalue of `data_format`, see above for details. Dilations in the batch and\ndepth dimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape `[batch, in_depth, in_height, in_width, in_channels]`.", + "typeAttr": "T" + }, + { + "name": "filter", + "description": "Shape `[filter_depth, filter_height, filter_width, in_channels,\nout_channels]`. `in_channels` must match between `input` and `filter`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "Conv3DBackpropFilter", + "summary": "Computes the gradients of 3-D convolution with respect to the filter.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape `[batch, depth, rows, cols, in_channels]`.", + "typeAttr": "T" + }, + { + "name": "filter", + "description": "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`.", + "typeAttr": "T" + }, + { + "name": "out_backprop", + "description": "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "Conv3DBackpropFilterV2", + "summary": "Computes the gradients of 3-D convolution with respect to the filter.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "The data format of the input and output data. 
With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "default": "NDHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 5. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each\nfilter element on that dimension. The dimension order is determined by the\nvalue of `data_format`, see above for details. Dilations in the batch and\ndepth dimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape `[batch, depth, rows, cols, in_channels]`.", + "typeAttr": "T" + }, + { + "name": "filter_sizes", + "description": "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 5-D\n`[filter_depth, filter_height, filter_width, in_channels, out_channels]`\ntensor.", + "type": 3 + }, + { + "name": "out_backprop", + "description": "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "Conv3DBackpropInput", + "summary": "Computes the gradients of 3-D convolution with respect to the input.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape `[batch, depth, rows, cols, in_channels]`.", + "typeAttr": "T" + }, + { + "name": "filter", + "description": "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`.", + "typeAttr": "T" + }, + { + "name": "out_backprop", + "description": "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "Conv3DBackpropInputV2", + "summary": "Computes the gradients of 3-D convolution with respect to the input.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "The data format of the input and output data. 
With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "default": "NDHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 5. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each\nfilter element on that dimension. The dimension order is determined by the\nvalue of `data_format`, see above for details. Dilations in the batch and\ndepth dimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1, + 1 + ] + }, + { + "name": "Tshape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input_sizes", + "description": "An integer vector representing the tensor shape of `input`,\nwhere `input` is a 5-D\n`[batch, depth, rows, cols, in_channels]` tensor.", + "typeAttr": "Tshape" + }, + { + "name": "filter", + "description": "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`.", + "typeAttr": "T" + }, + { + "name": "out_backprop", + "description": "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "ConvertToCooTensor", + "attributes": [ + { + "name": "sample_count", + "type": "int64", + "minimum": 1 + }, + { + "name": "combiner", + "type": "string" + } + ], + "inputs": [ + { + "name": "indices_or_row_splits", + "type": 3 + }, + { + "name": "values", + "type": 3 + }, + { + "name": "weights", + "type": 1 + } + ], + "outputs": [ + { + "name": "row_ids", + "type": 3 + }, + { + "name": "col_ids", + "type": 3 + }, + { + "name": "gains", + "type": 1 + } + ] + }, + { + "name": "Copy", + "summary": "Copy a tensor from CPU-to-CPU or GPU-to-GPU.", + "description": "Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the\ndevice on which the tensor is allocated.\nN.B.: If the all downstream attached debug ops are disabled given the current\ngRPC gating status, the output will simply forward the input tensor without\ndeep-copying. See the documentation of Debug* ops for more details.\n\nUnlike the CopyHost Op, this op does not have HostMemory constraint on its\ninput or output.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "tensor_name", + "type": "string", + "description": "The name of the input tensor.", + "default": "" + }, + { + "name": "debug_ops_spec", + "type": "string[]", + "description": "A list of debug op spec (op, url, gated_grpc) for attached debug\nops. Each element of the list has the format\n;;, wherein gated_grpc is boolean represented\nas 0/1. 
E.g., \"DebugIdentity;grpc://foo:3333;1\",\n\"DebugIdentity;file:///tmp/tfdbg_1;0\".", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "description": "Input tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "CopyHost", + "summary": "Copy a tensor to host.", + "description": "Performs CPU-to-CPU deep-copying of tensor.\nN.B.: If the all downstream attached debug ops are disabled given the current\ngRPC gating status, the output will simply forward the input tensor without\ndeep-copying. See the documentation of Debug* ops for more details.\n\nUnlike the Copy Op, this op has HostMemory constraint on its input or output.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "tensor_name", + "type": "string", + "description": "The name of the input tensor.", + "default": "" + }, + { + "name": "debug_ops_spec", + "type": "string[]", + "description": "A list of debug op spec (op, url, gated_grpc) for attached debug\nops. Each element of the list has the format\n;;, wherein gated_grpc is boolean represented\nas 0/1. E.g., \"DebugIdentity;grpc://foo:3333;1\",\n\"DebugIdentity;file:///tmp/tfdbg_1;0\".", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "description": "Input tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "CopyToMesh", + "attributes": [ + { + "name": "mesh", + "type": "string" + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "CopyToMeshGrad", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "forward_input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "Cos", + "summary": "Computes cos of x element-wise.", + "description": " Given an input tensor, this function computes cosine of every\n element in the tensor. Input range is `(-inf, inf)` and\n output range is `[-1,1]`. If input lies outside the boundary, `nan`\n is returned.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\n tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]\n ```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Cosh", + "summary": "Computes hyperbolic cosine of x element-wise.", + "description": " Given an input tensor, this function computes hyperbolic cosine of every\n element in the tensor. Input range is `[-inf, inf]` and output range\n is `[1, inf]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 2, 10, float(\"inf\")])\n tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]\n ```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." 
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "CountUpTo", + "summary": "Increments 'ref' until it reaches 'limit'.", + "attributes": [ + { + "name": "limit", + "type": "int64", + "description": "If incrementing ref would bring it above limit, instead generates an\n'OutOfRange' error." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a scalar `Variable` node.", + "typeAttr": "T", + "isRef": true + } + ], + "outputs": [ + { + "name": "output", + "description": "A copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct.", + "typeAttr": "T" + } + ] + }, + { + "name": "CreateSummaryDbWriter", + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "db_uri", + "type": 7 + }, + { + "name": "experiment_name", + "type": 7 + }, + { + "name": "run_name", + "type": 7 + }, + { + "name": "user_name", + "type": 7 + } + ] + }, + { + "name": "CreateSummaryFileWriter", + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "logdir", + "type": 7 + }, + { + "name": "max_queue", + "type": 3 + }, + { + "name": "flush_millis", + "type": 3 + }, + { + "name": "filename_suffix", + "type": 7 + } + ] + }, + { + "name": "CropAndResize", + "summary": "Extracts crops from the input image tensor and resizes them.", + "description": "Extracts crops from the input image tensor and resizes them using bilinear\nsampling or nearest neighbor sampling (possibly with aspect ratio change) to a\ncommon output size specified by `crop_size`. This is more general than the\n`crop_to_bounding_box` op which extracts a fixed size slice from the input image\nand does not allow resizing or aspect ratio change.\n\nReturns a tensor with `crops` from the input `image` at positions defined at the\nbounding box locations in `boxes`. The cropped boxes are all resized (with\nbilinear or nearest neighbor interpolation) to a fixed\n`size = [crop_height, crop_width]`. The result is a 4-D tensor\n`[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned.\nIn particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical\nresults to using `tf.image.resize_bilinear()` or\n`tf.image.resize_nearest_neighbor()`(depends on the `method` argument) with\n`align_corners=True`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `uint16`, `int8`, `int16`, `int32`, `int64`, `float16`, `float32`, `float64`." + }, + { + "name": "method", + "type": "string", + "description": "A string specifying the sampling method for resizing. It can be either\n`\"bilinear\"` or `\"nearest\"` and default to `\"bilinear\"`. Currently two sampling\nmethods are supported: Bilinear and Nearest Neighbor. Must be one of the following: `bilinear`, `nearest`.", + "default": "bilinear" + }, + { + "name": "extrapolation_value", + "type": "float32", + "description": "Value used for extrapolation, when applicable.", + "default": 0.0 + } + ], + "inputs": [ + { + "name": "image", + "description": "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive.", + "typeAttr": "T" + }, + { + "name": "boxes", + "description": "A 2-D tensor of shape `[num_boxes, 4]`. 
The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values.", + "type": 1 + }, + { + "name": "box_ind", + "description": "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to.", + "type": 3 + }, + { + "name": "crop_size", + "description": "A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All\ncropped image patches are resized to this size. The aspect ratio of the image\ncontent is not preserved. Both `crop_height` and `crop_width` need to be\npositive.", + "type": 3 + } + ], + "outputs": [ + { + "name": "crops", + "description": "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.", + "type": 1 + } + ] + }, + { + "name": "CropAndResizeGradBoxes", + "summary": "Computes the gradient of the crop_and_resize op wrt the input boxes tensor.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `uint16`, `int8`, `int16`, `int32`, `int64`, `float16`, `float32`, `float64`." + }, + { + "name": "method", + "type": "string", + "description": "A string specifying the interpolation method. Only 'bilinear' is\nsupported for now. Must be one of the following: `bilinear`.", + "default": "bilinear" + } + ], + "inputs": [ + { + "name": "grads", + "description": "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.", + "type": 1 + }, + { + "name": "image", + "description": "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive.", + "typeAttr": "T" + }, + { + "name": "boxes", + "description": "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. 
Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values.", + "type": 1 + }, + { + "name": "box_ind", + "description": "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A 2-D tensor of shape `[num_boxes, 4]`.", + "type": 1 + } + ] + }, + { + "name": "CropAndResizeGradImage", + "summary": "Computes the gradient of the crop_and_resize op wrt the input image tensor.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float16`, `float64`." + }, + { + "name": "method", + "type": "string", + "description": "A string specifying the interpolation method. Only 'bilinear' is\nsupported for now. Must be one of the following: `bilinear`, `nearest`.", + "default": "bilinear" + } + ], + "inputs": [ + { + "name": "grads", + "description": "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.", + "type": 1 + }, + { + "name": "boxes", + "description": "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values.", + "type": 1 + }, + { + "name": "box_ind", + "description": "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to.", + "type": 3 + }, + { + "name": "image_size", + "description": "A 1-D tensor with value `[batch, image_height, image_width, depth]`\ncontaining the original image size. Both `image_height` and `image_width` need\nto be positive.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Cross", + "summary": "Compute the pairwise cross product.", + "description": "`a` and `b` must be the same shape; they can either be simple 3-element vectors,\nor any shape where the innermost dimension is 3. In the latter case, each pair\nof corresponding 3-element vectors is cross-multiplied independently.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." 
+ } + ], + "inputs": [ + { + "name": "a", + "description": "A tensor containing 3-element vectors.", + "typeAttr": "T" + }, + { + "name": "b", + "description": "Another tensor, of same type and shape as `a`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "product", + "description": "Pairwise cross product of the vectors in `a` and `b`.", + "typeAttr": "T" + } + ] + }, + { + "name": "CrossReplicaSum", + "summary": "An Op to sum inputs across replicated TPU instances.", + "description": "Each instance supplies its own input.\n\nFor example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`.\nPassing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0,\nand `B, D, F, H` as group 1. Thus we get the outputs:\n`[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The type of elements to be summed. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`, `uint32`." + } + ], + "inputs": [ + { + "name": "input", + "description": "The local input to the sum.", + "typeAttr": "T" + }, + { + "name": "group_assignment", + "description": "An int32 tensor with shape\n[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the\nreplica ids in the ith subgroup.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The sum of all the distributed inputs.", + "typeAttr": "T" + } + ] + }, + { + "name": "CudnnRNN", + "summary": "A RNN backed by cuDNN.", + "description": "Computes the RNN from the input and initial states, with respect to the params\nbuffer.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicate whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: A 3-D tensor with the shape of [seq_length, batch_size, input_size].\ninput_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,\n num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore\noutput: A 3-D tensor with the shape of [seq_length, batch_size,\n dir * num_units].\noutput_h: The same shape has input_h.\noutput_c: The same shape as input_c for LSTM. An empty tensor for other models.\nis_training: Indicates whether this operation is used for inference or\n training.\nreserve_space: An opaque tensor that can be used in backprop calculation. It\n is only produced if is_training is false.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
+ }, + { + "name": "rnn_mode", + "type": "string", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "default": "lstm" + }, + { + "name": "input_mode", + "type": "string", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "default": "linear_input" + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "default": "unidirectional" + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0 + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + }, + { + "name": "is_training", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + } + ] + }, + { + "name": "CudnnRNNBackprop", + "summary": "Backprop step of CudnnRNN.", + "description": "Compute the backprop of both data and weights in a RNN.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicate whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: A 3-D tensor with the shape of [seq_length, batch_size, input_size].\ninput_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,\n num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore\noutput: A 3-D tensor with the shape of [seq_length, batch_size,\n dir * num_units].\noutput_h: The same shape has input_h.\noutput_c: The same shape as input_c for LSTM. An empty tensor for other models.\noutput_backprop: A 3-D tensor with the same shape as output in the forward pass.\noutput_h_backprop: A 3-D tensor with the same shape as output_h in the forward\n pass.\noutput_c_backprop: A 3-D tensor with the same shape as output_c in the forward\n pass.\nreserve_space: The same reserve_space produced in for forward operation.\ninput_backprop: The backprop to input in the forward pass. Has the same shape\n as input.\ninput_h_backprop: The backprop to input_h in the forward pass. Has the same\n shape as input_h.\ninput_c_backprop: The backprop to input_c in the forward pass. Has the same\n shape as input_c.\nparams_backprop: The backprop to the params buffer in the forward pass. 
Has the\n same shape as params.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "rnn_mode", + "type": "string", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "default": "lstm" + }, + { + "name": "input_mode", + "type": "string", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "default": "linear_input" + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "default": "unidirectional" + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0 + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + }, + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "output_backprop", + "typeAttr": "T" + }, + { + "name": "output_h_backprop", + "typeAttr": "T" + }, + { + "name": "output_c_backprop", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "input_backprop", + "typeAttr": "T" + }, + { + "name": "input_h_backprop", + "typeAttr": "T" + }, + { + "name": "input_c_backprop", + "typeAttr": "T" + }, + { + "name": "params_backprop", + "typeAttr": "T" + } + ] + }, + { + "name": "CudnnRNNBackpropV2", + "summary": "Backprop step of CudnnRNN.", + "description": "Compute the backprop of both data and weights in an RNN. Takes an extra\n \"host_reserved\" input compared to CudnnRNNBackprop, which is used to determine RNN\n cudnnRNNAlgo_t and cudnnMathType_t.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: A 3-D tensor with the shape of [seq_length, batch_size, input_size].\ninput_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,\n num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore them in the canonical form.\noutput: A 3-D tensor with the shape of [seq_length, batch_size,\n dir * num_units].\noutput_h: The same shape as input_h.\noutput_c: The same shape as input_c for LSTM.
An empty tensor for other models.\noutput_backprop: A 3-D tensor with the same shape as output in the forward pass.\noutput_h_backprop: A 3-D tensor with the same shape as output_h in the forward\n pass.\noutput_c_backprop: A 3-D tensor with the same shape as output_c in the forward\n pass.\nreserve_space: The same reserve_space produced in the forward operation.\nhost_reserved: The same host_reserved produced in the forward operation.\ninput_backprop: The backprop to input in the forward pass. Has the same shape\n as input.\ninput_h_backprop: The backprop to input_h in the forward pass. Has the same\n shape as input_h.\ninput_c_backprop: The backprop to input_c in the forward pass. Has the same\n shape as input_c.\nparams_backprop: The backprop to the params buffer in the forward pass. Has the\n same shape as params.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "rnn_mode", + "type": "string", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "default": "lstm" + }, + { + "name": "input_mode", + "type": "string", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "default": "linear_input" + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "default": "unidirectional" + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0 + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + }, + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "output_backprop", + "typeAttr": "T" + }, + { + "name": "output_h_backprop", + "typeAttr": "T" + }, + { + "name": "output_c_backprop", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + }, + { + "name": "host_reserved", + "type": 6 + } + ], + "outputs": [ + { + "name": "input_backprop", + "typeAttr": "T" + }, + { + "name": "input_h_backprop", + "typeAttr": "T" + }, + { + "name": "input_c_backprop", + "typeAttr": "T" + }, + { + "name": "params_backprop", + "typeAttr": "T" + } + ] + }, + { + "name": "CudnnRNNBackpropV3", + "summary": "Backprop step of CudnnRNNV3.", + "description": "Compute the backprop of both data and weights in an RNN. Takes an extra\n \"sequence_lengths\" input compared to CudnnRNNBackprop.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: If time_major is true, this is a 3-D tensor with the shape of\n [seq_length, batch_size, input_size].
If time_major is false, the shape is\n [batch_size, seq_length, input_size].\ninput_h: If time_major is true, this is a 3-D tensor with the shape of\n [num_layer * dir, batch_size, num_units]. If time_major is false, the shape\n is [batch_size, num_layer * dir, num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore them in the canonical form.\nsequence_lengths: a vector of lengths of each input sequence.\noutput: If time_major is true, this is a 3-D tensor with the shape of\n [seq_length, batch_size, dir * num_units]. If time_major is false, the\n shape is [batch_size, seq_length, dir * num_units].\noutput_h: The same shape as input_h.\noutput_c: The same shape as input_c for LSTM. An empty tensor for other models.\noutput_backprop: A 3-D tensor with the same shape as output in the forward pass.\noutput_h_backprop: A 3-D tensor with the same shape as output_h in the forward\n pass.\noutput_c_backprop: A 3-D tensor with the same shape as output_c in the forward\n pass.\ntime_major: Indicates whether the input/output format is time major or batch\n major.\nreserve_space: The same reserve_space produced in the forward operation.\ninput_backprop: The backprop to input in the forward pass. Has the same shape\n as input.\ninput_h_backprop: The backprop to input_h in the forward pass. Has the same\n shape as input_h.\ninput_c_backprop: The backprop to input_c in the forward pass. Has the same\n shape as input_c.\nparams_backprop: The backprop to the params buffer in the forward pass. Has the\n same shape as params.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`."
+ }, + { + "name": "rnn_mode", + "type": "string", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "default": "lstm" + }, + { + "name": "input_mode", + "type": "string", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "default": "linear_input" + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "default": "unidirectional" + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0 + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + }, + { + "name": "num_proj", + "type": "int64", + "default": 0 + }, + { + "name": "time_major", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + }, + { + "name": "sequence_lengths", + "type": 3 + }, + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "output_backprop", + "typeAttr": "T" + }, + { + "name": "output_h_backprop", + "typeAttr": "T" + }, + { + "name": "output_c_backprop", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + }, + { + "name": "host_reserved", + "type": 6 + } + ], + "outputs": [ + { + "name": "input_backprop", + "typeAttr": "T" + }, + { + "name": "input_h_backprop", + "typeAttr": "T" + }, + { + "name": "input_c_backprop", + "typeAttr": "T" + }, + { + "name": "params_backprop", + "typeAttr": "T" + } + ] + }, + { + "name": "CudnnRNNCanonicalToParams", + "summary": "Converts CudnnRNN params from canonical form to usable form.", + "description": "Writes a set of weights into the opaque params buffer so they can be used in\nupcoming training or inferences.\n\nNote that the params buffer may not be compatible across different GPUs. So any\nsave and restoration should be converted to and from the canonical weights and\nbiases.\n\nnum_layers: Specifies the number of layers in the RNN model.\nnum_units: Specifies the size of the hidden state.\ninput_size: Specifies the size of the input state.\nweights: the canonical form of weights that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nbiases: the canonical form of biases that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nnum_params: number of parameter sets for all layers.\n Each layer may contain multiple parameter sets, with each set consisting of\n a weight matrix and a bias vector.\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used.\n dir = (direction == bidirectional) ? 2 : 1\ndropout: dropout probability.
When set to 0., dropout is disabled.\nseed: the 1st part of a seed to initialize dropout.\nseed2: the 2nd part of a seed to initialize dropout.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "num_params", + "type": "int64", + "minimum": 1 + }, + { + "name": "rnn_mode", + "type": "string", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "default": "lstm" + }, + { + "name": "input_mode", + "type": "string", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "default": "linear_input" + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "default": "unidirectional" + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0 + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "num_layers", + "type": 3 + }, + { + "name": "num_units", + "type": 3 + }, + { + "name": "input_size", + "type": 3 + }, + { + "name": "weights", + "numberAttr": "num_params", + "typeAttr": "T" + }, + { + "name": "biases", + "numberAttr": "num_params", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "params", + "typeAttr": "T" + } + ] + }, + { + "name": "CudnnRNNCanonicalToParamsV2", + "summary": "Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM.", + "description": "Writes a set of weights into the opaque params buffer so they can be used in\nupcoming training or inferences.\n\nNote that the params buffer may not be compatible across different GPUs. So any\nsave and restoration should be converted to and from the canonical weights and\nbiases.\n\nnum_layers: Specifies the number of layers in the RNN model.\nnum_units: Specifies the size of the hidden state.\ninput_size: Specifies the size of the input state.\nweights: the canonical form of weights that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nbiases: the canonical form of biases that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nnum_params_weights: number of weight parameter matrices for all layers.\nnum_params_biases: number of bias parameter vectors for all layers.\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used.\n dir = (direction == bidirectional) ? 2 : 1\ndropout: dropout probability. When set to 0., dropout is disabled.\nseed: the 1st part of a seed to initialize dropout.\nseed2: the 2nd part of a seed to initialize dropout.\nnum_proj: The output dimensionality for the projection matrices. If None or 0,\n no projection is performed.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`."
+ }, + { + "name": "num_params_weights", + "type": "int64", + "minimum": 1 + }, + { + "name": "num_params_biases", + "type": "int64", + "minimum": 1 + }, + { + "name": "rnn_mode", + "type": "string", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "default": "lstm" + }, + { + "name": "input_mode", + "type": "string", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "default": "linear_input" + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "default": "unidirectional" + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0 + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + }, + { + "name": "num_proj", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "num_layers", + "type": 3 + }, + { + "name": "num_units", + "type": 3 + }, + { + "name": "input_size", + "type": 3 + }, + { + "name": "weights", + "numberAttr": "num_params_weights", + "typeAttr": "T" + }, + { + "name": "biases", + "numberAttr": "num_params_biases", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "params", + "typeAttr": "T" + } + ] + }, + { + "name": "CudnnRNNParamsSize", + "summary": "Computes size of weights that can be used by a Cudnn RNN model.", + "description": "Return the params size that can be used by the Cudnn RNN model. Subsequent\nweight allocation and initialization should use this size.\n\nnum_layers: Specifies the number of layers in the RNN model.\nnum_units: Specifies the size of the hidden state.\ninput_size: Specifies the size of the input state.\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used.\n dir = (direction == bidirectional) ? 2 : 1\ndropout: dropout probability. When set to 0., dropout is disabled.\nseed: the 1st part of a seed to initialize dropout.\nseed2: the 2nd part of a seed to initialize dropout.\nparams_size: The size of the params buffer that should be allocated and\n initialized for this RNN model. Note that this params buffer may not be\n compatible across GPUs. Please use CudnnRNNParamsWeights and\n CudnnRNNParamsBiases to save and restore them in a way that is compatible\n across different runs.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "S", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`."
+ }, + { + "name": "rnn_mode", + "type": "string", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "default": "lstm" + }, + { + "name": "input_mode", + "type": "string", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "default": "linear_input" + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "default": "unidirectional" + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0 + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + }, + { + "name": "num_proj", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "num_layers", + "type": 3 + }, + { + "name": "num_units", + "type": 3 + }, + { + "name": "input_size", + "type": 3 + } + ], + "outputs": [ + { + "name": "params_size", + "typeAttr": "S" + } + ] + }, + { + "name": "CudnnRNNParamsToCanonical", + "summary": "Retrieves CudnnRNN params in canonical form.", + "description": "Retrieves a set of weights from the opaque params buffer that can be saved and\nrestored in a way compatible with future runs.\n\nNote that the params buffer may not be compatible across different GPUs. So any\nsave and restoration should be converted to and from the canonical weights and\nbiases.\n\nnum_layers: Specifies the number of layers in the RNN model.\nnum_units: Specifies the size of the hidden state.\ninput_size: Specifies the size of the input state.\nnum_params: number of parameter sets for all layers.\n Each layer may contain multiple parameter sets, with each set consisting of\n a weight matrix and a bias vector.\nweights: the canonical form of weights that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nbiases: the canonical form of biases that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used.\n dir = (direction == bidirectional) ? 2 : 1\ndropout: dropout probability. When set to 0., dropout is disabled.\nseed: the 1st part of a seed to initialize dropout.\nseed2: the 2nd part of a seed to initialize dropout.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`."
+ }, + { + "name": "num_params", + "type": "int64", + "minimum": 1 + }, + { + "name": "rnn_mode", + "type": "string", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "default": "lstm" + }, + { + "name": "input_mode", + "type": "string", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "default": "linear_input" + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "default": "unidirectional" + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0 + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "num_layers", + "type": 3 + }, + { + "name": "num_units", + "type": 3 + }, + { + "name": "input_size", + "type": 3 + }, + { + "name": "params", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "weights", + "numberAttr": "num_params", + "typeAttr": "T" + }, + { + "name": "biases", + "numberAttr": "num_params", + "typeAttr": "T" + } + ] + }, + { + "name": "CudnnRNNParamsToCanonicalV2", + "summary": "Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM.", + "description": "Retrieves a set of weights from the opaque params buffer that can be saved and\nrestored in a way compatible with future runs.\n\nNote that the params buffer may not be compatible across different GPUs. So any\nsave and restoration should be converted to and from the canonical weights and\nbiases.\n\nnum_layers: Specifies the number of layers in the RNN model.\nnum_units: Specifies the size of the hidden state.\ninput_size: Specifies the size of the input state.\nnum_params_weights: number of weight parameter matrices for all layers.\nnum_params_biases: number of bias parameter vectors for all layers.\nweights: the canonical form of weights that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nbiases: the canonical form of biases that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used.\n dir = (direction == bidirectional) ? 2 : 1\ndropout: dropout probability. When set to 0., dropout is disabled.\nseed: the 1st part of a seed to initialize dropout.\nseed2: the 2nd part of a seed to initialize dropout.\nnum_proj: The output dimensionality for the projection matrices. If None or 0,\n no projection is performed.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`."
+ }, + { + "name": "num_params_weights", + "type": "int64", + "minimum": 1 + }, + { + "name": "num_params_biases", + "type": "int64", + "minimum": 1 + }, + { + "name": "rnn_mode", + "type": "string", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "default": "lstm" + }, + { + "name": "input_mode", + "type": "string", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "default": "linear_input" + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "default": "unidirectional" + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0 + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + }, + { + "name": "num_proj", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "num_layers", + "type": 3 + }, + { + "name": "num_units", + "type": 3 + }, + { + "name": "input_size", + "type": 3 + }, + { + "name": "params", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "weights", + "numberAttr": "num_params_weights", + "typeAttr": "T" + }, + { + "name": "biases", + "numberAttr": "num_params_biases", + "typeAttr": "T" + } + ] + }, + { + "name": "CudnnRNNV2", + "summary": "An RNN backed by cuDNN.", + "description": "Computes the RNN from the input and initial states, with respect to the params\nbuffer. Produces one extra output \"host_reserved\" compared to CudnnRNN.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: A 3-D tensor with the shape of [seq_length, batch_size, input_size].\ninput_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,\n num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore them in the canonical form.\noutput: A 3-D tensor with the shape of [seq_length, batch_size,\n dir * num_units].\noutput_h: The same shape as input_h.\noutput_c: The same shape as input_c for LSTM. An empty tensor for other models.\nis_training: Indicates whether this operation is used for inference or\n training.\nreserve_space: An opaque tensor that can be used in backprop calculation. It\n is only produced if is_training is true.\nhost_reserved: An opaque tensor that can be used in backprop calculation. It is\n only produced if is_training is true. It is output on host memory rather than\n device memory.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`."
+ }, + { + "name": "rnn_mode", + "type": "string", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "default": "lstm" + }, + { + "name": "input_mode", + "type": "string", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "default": "linear_input" + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "default": "unidirectional" + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0 + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + }, + { + "name": "is_training", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + }, + { + "name": "host_reserved", + "type": 6 + } + ] + }, + { + "name": "CudnnRNNV3", + "summary": "An RNN backed by cuDNN.", + "description": "Computes the RNN from the input and initial states, with respect to the params\nbuffer. Accepts one extra input \"sequence_lengths\" compared to CudnnRNN.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: If time_major is true, this is a 3-D tensor with the shape of\n [seq_length, batch_size, input_size]. If time_major is false, the shape is\n [batch_size, seq_length, input_size].\ninput_h: If time_major is true, this is a 3-D tensor with the shape of\n [num_layer * dir, batch_size, num_units]. If time_major is false, the shape\n is [batch_size, num_layer * dir, num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore them in the canonical form.\nsequence_lengths: a vector of lengths of each input sequence.\noutput: If time_major is true, this is a 3-D tensor with the shape of\n [seq_length, batch_size, dir * num_units]. If time_major is false, the\n shape is [batch_size, seq_length, dir * num_units].\noutput_h: The same shape as input_h.\noutput_c: The same shape as input_c for LSTM. An empty tensor for other models.\nis_training: Indicates whether this operation is used for inference or\n training.\ntime_major: Indicates whether the input/output format is time major or batch\n major.\nreserve_space: An opaque tensor that can be used in backprop calculation.
It\n is only produced if is_training is true.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "rnn_mode", + "type": "string", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "default": "lstm" + }, + { + "name": "input_mode", + "type": "string", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "default": "linear_input" + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "default": "unidirectional" + }, + { + "name": "dropout", + "type": "float32", + "default": 0.0 + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + }, + { + "name": "num_proj", + "type": "int64", + "default": 0 + }, + { + "name": "is_training", + "type": "boolean", + "default": true + }, + { + "name": "time_major", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + }, + { + "name": "sequence_lengths", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + }, + { + "name": "host_reserved", + "type": 6 + } + ] + }, + { + "name": "Cumprod", + "summary": "Compute the cumulative product of the tensor `x` along `axis`.", + "description": "By default, this op performs an inclusive cumprod, which means that the first\nelement of the input is identical to the first element of the output:\n\n```python\ntf.cumprod([a, b, c]) # => [a, a * b, a * b * c]\n```\n\nBy setting the `exclusive` kwarg to `True`, an exclusive cumprod is\nperformed instead:\n\n```python\ntf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b]\n```\n\nBy setting the `reverse` kwarg to `True`, the cumprod is performed in the\nopposite direction:\n\n```python\ntf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c]\n```\n\nThis is more efficient than using separate `tf.reverse` ops.\n\nThe `reverse` and `exclusive` kwargs can also be combined:\n\n```python\ntf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1]\n```", + "attributes": [ + { + "name": "exclusive", + "type": "boolean", + "description": "If `True`, perform exclusive cumprod.", + "default": false + }, + { + "name": "reverse", + "type": "boolean", + "description": "A `bool` (default: False).", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "description": "A `Tensor`. 
Must be one of the following types: `float32`, `float64`,\n`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n`complex128`, `qint8`, `quint8`, `qint32`, `half`.", + "typeAttr": "T" + }, + { + "name": "axis", + "description": "A `Tensor` of type `int32` (default: 0). Must be in the range\n`[-rank(x), rank(x))`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "T" + } + ] + }, + { + "name": "Cumsum", + "summary": "Compute the cumulative sum of the tensor `x` along `axis`.", + "description": "By default, this op performs an inclusive cumsum, which means that the first\nelement of the input is identical to the first element of the output:\n\n```python\ntf.cumsum([a, b, c]) # => [a, a + b, a + b + c]\n```\n\nBy setting the `exclusive` kwarg to `True`, an exclusive cumsum is\nperformed instead:\n\n```python\ntf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b]\n```\n\nBy setting the `reverse` kwarg to `True`, the cumsum is performed in the\nopposite direction:\n\n```python\ntf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c]\n```\n\nThis is more efficient than using separate `tf.reverse` ops.\n\nThe `reverse` and `exclusive` kwargs can also be combined:\n\n```python\ntf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0]\n```", + "attributes": [ + { + "name": "exclusive", + "type": "boolean", + "description": "If `True`, perform exclusive cumsum.", + "default": false + }, + { + "name": "reverse", + "type": "boolean", + "description": "A `bool` (default: False).", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "description": "A `Tensor`. Must be one of the following types: `float32`, `float64`,\n`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n`complex128`, `qint8`, `quint8`, `qint32`, `half`.", + "typeAttr": "T" + }, + { + "name": "axis", + "description": "A `Tensor` of type `int32` (default: 0). 
Must be in the range\n`[-rank(x), rank(x))`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "T" + } + ] + }, + { + "name": "CumulativeLogsumexp", + "summary": "Compute the cumulative log-sum-exp of the tensor `x` along `axis`.", + "description": "By default, this op performs an inclusive cumulative log-sum-exp,\nwhich means that the first\nelement of the input is identical to the first element of the output:\n```python\ntf.math.cumulative_logsumexp([a, b, c]) # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))]\n```\n\nBy setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is\nperformed instead:\n```python\ntf.cumulative_logsumexp([a, b, c], exclusive=True) # => [-inf, a, log(exp(a) + exp(b))]\n```\nNote that the neutral element of the log-sum-exp operation is `-inf`,\nhowever, for performance reasons, the minimal value representable by the\nfloating point type is used instead.\n\nBy setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the\nopposite direction.", + "attributes": [ + { + "name": "exclusive", + "type": "boolean", + "description": "If `True`, perform exclusive cumulative log-sum-exp.", + "default": false + }, + { + "name": "reverse", + "type": "boolean", + "description": "A `bool` (default: False).", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "description": "A `Tensor`. Must be one of the following types: `float16`, `float32`, `float64`.", + "typeAttr": "T" + }, + { + "name": "axis", + "description": "A `Tensor` of type `int32` (default: 0).
Must be in the range\n`[-rank(x), rank(x))`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "T" + } + ] + }, + { + "name": "DTensorRestoreV2", + "attributes": [ + { + "name": "input_shapes", + "type": "shape[]" + }, + { + "name": "input_layouts", + "type": "string[]" + }, + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "prefix", + "type": 7 + }, + { + "name": "tensor_names", + "type": 7 + }, + { + "name": "shape_and_slices", + "type": 7 + } + ], + "outputs": [ + { + "name": "tensors", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "DTensorSetGlobalTPUArray", + "summary": "An op that informs a host of the global ids of all of the TPUs in the system.", + "inputs": [ + { + "name": "topology", + "description": "A serialized tensorflow.tpu.TopologyProto that describes the TPU topology.", + "type": 7 + } + ] + }, + { + "name": "DataFormatDimMap", + "summary": "Returns the dimension index in the destination data format given the one in", + "description": "the source data format.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "src_format", + "type": "string", + "description": "source data format.", + "default": "NHWC" + }, + { + "name": "dst_format", + "type": "string", + "description": "destination data format.", + "default": "NCHW" + } + ], + "inputs": [ + { + "name": "x", + "description": "A Tensor with each element as a dimension index in source data format.\nMust be in the range [-4, 4).", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "description": "A Tensor with each element as a dimension index in destination data format.", + "typeAttr": "T" + } + ] + }, + { + "name": "DataFormatVecPermute", + "summary": "Permute input tensor from `src_format` to `dst_format`.", + "description": "Given source and destination format strings of length n=4 or 5, the input\ntensor must be a vector of size n or n-2, or a 2D tensor of shape\n(n, 2) or (n-2, 2).\n\nIf the first dimension of the input tensor is n-2, it is assumed that\nnon-spatial dimensions are omitted (i.e `N`, `C`).\n\nFor example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and input:\n```\n[1, 2, 3, 4]\n```\n, the output will be:\n```\n[1, 4, 2, 3]\n```\nWith `src_format` of `NDHWC`, `dst_format` of `NCDHW`, and input:\n```\n[[1, 6], [2, 7], [3, 8], [4, 9], [5, 10]]\n```\n, the output will be:\n```\n[[1, 6], [5, 10], [2, 7], [3, 8], [4, 9]]\n```\nWith `src_format` of `NHWC`, `dst_format` of `NCHW`, and input:\n```\n[1, 2]\n```\n, the output will be:\n```\n[1, 2]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "src_format", + "type": "string", + "description": "source data format.", + "default": "NHWC" + }, + { + "name": "dst_format", + "type": "string", + "description": "destination data format.", + "default": "NCHW" + } + ], + "inputs": [ + { + "name": "x", + "description": "Tensor of rank 1 or 2 in source data format.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "description": "Tensor of rank 1 or 2 in destination data format.", + "typeAttr": "T" + } + ] + }, + { + "name": "DataServiceDataset", + "summary": "Creates a dataset that reads data from the tf.data service.", + "attributes": [ + { + "name":
"task_refresh_interval_hint_ms", + "type": "int64", + "default": -1 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "data_transfer_protocol", + "type": "string", + "default": "" + }, + { + "name": "target_workers", + "type": "string", + "default": "AUTO" + }, + { + "name": "cross_trainer_cache_options", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "dataset_id", + "type": 9 + }, + { + "name": "processing_mode", + "type": 7 + }, + { + "name": "address", + "type": 7 + }, + { + "name": "protocol", + "type": 7 + }, + { + "name": "job_name", + "type": 7 + }, + { + "name": "max_outstanding_requests", + "type": 9 + }, + { + "name": "iteration_counter", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "DataServiceDatasetV2", + "summary": "Creates a dataset that reads data from the tf.data service.", + "attributes": [ + { + "name": "task_refresh_interval_hint_ms", + "type": "int64", + "default": -1 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "data_transfer_protocol", + "type": "string", + "default": "" + }, + { + "name": "target_workers", + "type": "string", + "default": "AUTO" + }, + { + "name": "cross_trainer_cache_options", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "dataset_id", + "type": 9 + }, + { + "name": "processing_mode", + "type": 7 + }, + { + "name": "address", + "type": 7 + }, + { + "name": "protocol", + "type": 7 + }, + { + "name": "job_name", + "type": 7 + }, + { + "name": "consumer_index", + "type": 9 + }, + { + "name": "num_consumers", + "type": 9 + }, + { + "name": "max_outstanding_requests", + "type": 9 + }, + { + "name": "iteration_counter", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "DataServiceDatasetV3", + "summary": "Creates a dataset that reads data from the tf.data service.", + "attributes": [ + { + "name": "task_refresh_interval_hint_ms", + "type": "int64", + "default": -1 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "data_transfer_protocol", + "type": "string", + "default": "" + }, + { + "name": "target_workers", + "type": "string", + "default": "AUTO" + }, + { + "name": "uncompress", + "type": "boolean", + "default": false + }, + { + "name": "uncompress_fn", + "type": "function" + }, + { + "name": "cross_trainer_cache_options", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "dataset_id", + "type": 9 + }, + { + "name": "processing_mode", + "type": 7 + }, + { + "name": "address", + "type": 7 + }, + { + "name": "protocol", + "type": 7 + }, + { + "name": "job_name", + "type": 7 + }, + { + "name": "consumer_index", + "type": 9 + }, + { + "name": "num_consumers", + "type": 9 + }, + { + "name": "max_outstanding_requests", + "type": 9 + }, + { + "name": "iteration_counter", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "DataServiceDatasetV4", + "summary": "Creates a dataset that reads data from the tf.data service.", + "attributes": [ + { + "name": "task_refresh_interval_hint_ms", + "type": "int64", + "default": -1 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + 
"name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "data_transfer_protocol", + "type": "string", + "default": "" + }, + { + "name": "target_workers", + "type": "string", + "default": "AUTO" + }, + { + "name": "uncompress", + "type": "boolean", + "default": false + }, + { + "name": "uncompress_fn", + "type": "function" + }, + { + "name": "cross_trainer_cache_options", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "dataset_id", + "type": 7 + }, + { + "name": "processing_mode", + "type": 7 + }, + { + "name": "address", + "type": 7 + }, + { + "name": "protocol", + "type": 7 + }, + { + "name": "job_name", + "type": 7 + }, + { + "name": "consumer_index", + "type": 9 + }, + { + "name": "num_consumers", + "type": 9 + }, + { + "name": "max_outstanding_requests", + "type": 9 + }, + { + "name": "iteration_counter", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "DatasetCardinality", + "summary": "Returns the cardinality of `input_dataset`.", + "description": "Returns the cardinality of `input_dataset`.", + "attributes": [ + { + "name": "cardinality_options", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the dataset to return cardinality for.", + "type": 21 + } + ], + "outputs": [ + { + "name": "cardinality", + "description": "The cardinality of `input_dataset`. Named constants are used to represent\ninfinite and unknown cardinality.", + "type": 9 + } + ] + }, + { + "name": "DatasetFingerprint", + "summary": "Returns the fingerprint of `input_dataset`.", + "description": "Returns the fingerprint of `input_dataset`.", + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the dataset to return fingerprint for.", + "type": 21 + } + ], + "outputs": [ + { + "name": "fingerprint", + "description": "The fingerprint of `input_dataset` in `uint64`", + "type": 23 + } + ] + }, + { + "name": "DatasetFromGraph", + "summary": "Creates a dataset from the given `graph_def`.", + "description": "Creates a dataset from the provided `graph_def`.", + "inputs": [ + { + "name": "graph_def", + "description": "The graph representation of the dataset (as serialized GraphDef).", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "description": "A variant tensor representing the dataset.", + "type": 21 + } + ] + }, + { + "name": "DatasetToGraph", + "summary": "Returns a serialized GraphDef representing `input_dataset`.", + "description": "Returns a graph representation for `input_dataset`.", + "attributes": [ + { + "name": "stateful_whitelist", + "type": "string[]", + "minimum": 0, + "default": [] + }, + { + "name": "allow_stateful", + "type": "boolean", + "default": false + }, + { + "name": "strip_device_assignment", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the dataset to return the graph representation for.", + "type": 21 + } + ], + "outputs": [ + { + "name": "graph", + "description": "The graph representation of the dataset (as serialized GraphDef).", + "type": 7 + } + ] + }, + { + "name": "DatasetToGraphV2", + "summary": "Returns a serialized GraphDef representing `input_dataset`.", + "description": "Returns a graph representation for `input_dataset`.", + "attributes": [ + { + "name": "external_state_policy", + "type": "int64", + "default": 0 + }, + { + "name": 
"strip_device_assignment", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the dataset to return the graph representation for.", + "type": 21 + } + ], + "outputs": [ + { + "name": "graph", + "description": "The graph representation of the dataset (as serialized GraphDef).", + "type": 7 + } + ] + }, + { + "name": "DatasetToSingleElement", + "summary": "Outputs the single element from the given dataset.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "dataset", + "description": "A handle to a dataset that contains a single element.", + "type": 21 + } + ], + "outputs": [ + { + "name": "components", + "description": "The components of the single element of `input`.", + "typeListAttr": "output_types" + } + ] + }, + { + "name": "DatasetToTFRecord", + "summary": "Writes the given dataset to the given file using the TFRecord format.", + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the dataset to write.", + "type": 21 + }, + { + "name": "filename", + "description": "A scalar string tensor representing the filename to use.", + "type": 7 + }, + { + "name": "compression_type", + "description": "A scalar string tensor containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\".", + "type": 7 + } + ] + }, + { + "name": "Dawsn", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "DebugGradientIdentity", + "summary": "Identity op for gradient debugging.", + "description": "This op is hidden from public in Python. It is used by TensorFlow Debugger to\nregister gradient tensors for gradient debugging.\nThis op operates on non-reference-type tensors.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "DebugGradientRefIdentity", + "summary": "Identity op for gradient debugging.", + "description": "This op is hidden from public in Python. 
It is used by TensorFlow Debugger to\nregister gradient tensors for gradient debugging.\nThis op operates on reference-type tensors.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T", + "isRef": true + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "DebugIdentity", + "summary": "Provides an identity mapping of the non-Ref type input tensor for debugging.", + "description": "Provides an identity mapping of the non-Ref type input tensor for debugging.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "device_name", + "type": "string", + "description": "Name of the device on which the tensor resides.", + "default": "" + }, + { + "name": "tensor_name", + "type": "string", + "description": "Name of the input tensor.", + "default": "" + }, + { + "name": "debug_urls", + "type": "string[]", + "description": "List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc://localhost:11011", + "default": [] + }, + { + "name": "gated_grpc", + "type": "boolean", + "description": "Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.", + "default": false + } + ], + "inputs": [ + { + "name": "input", + "description": "Input tensor, non-Reference type", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "DebugIdentityV2", + "summary": "Debug Identity V2 Op.", + "description": "Provides an identity mapping from input to output, while writing the content of\nthe input tensor by calling DebugEventsWriter.\n\nThe semantics of the input tensor depends on tensor_debug_mode. In typical\nusage, the input tensor comes directly from the user computation only when\ngraph_debug_mode is FULL_TENSOR (see protobuf/debug_event.proto for a\nlist of all the possible values of graph_debug_mode). For the other debug modes,\nthe input tensor should be produced by an additional op or subgraph that\ncomputes summary information about one or more tensors.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "tfdbg_context_id", + "type": "string", + "description": "A tfdbg-generated ID for the context that the op belongs to,\n e.g., a concrete compiled tf.function.", + "default": "" + }, + { + "name": "op_name", + "type": "string", + "description": "Optional. Name of the op that the debug op is concerned with.\n Used only for single-tensor trace.", + "default": "" + }, + { + "name": "output_slot", + "type": "int64", + "description": "Optional. Output slot index of the tensor that the debug op\n is concerned with. Used only for single-tensor trace.", + "default": -1 + }, + { + "name": "tensor_debug_mode", + "type": "int64", + "description": "TensorDebugMode enum value.
See debug_event.proto for details.", + "default": -1 + }, + { + "name": "debug_urls", + "type": "string[]", + "description": "List of URLs to debug targets, e.g., file:///foo/tfdbg_dump.", + "default": [] + }, + { + "name": "circular_buffer_size", + "type": "int64", + "default": 1000 + }, + { + "name": "tfdbg_run_id", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input", + "description": "Input tensor, non-Reference type", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "DebugIdentityV3", + "summary": "Provides an identity mapping of the non-Ref type input tensor for debugging.", + "description": "Provides an identity mapping of the non-Ref type input tensor for debugging.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "device_name", + "type": "string", + "description": "Name of the device on which the tensor resides.", + "default": "" + }, + { + "name": "tensor_name", + "type": "string", + "description": "Name of the input tensor.", + "default": "" + }, + { + "name": "io_of_node", + "type": "string", + "description": "Name of the node of which the tensor is an input or output.", + "default": "" + }, + { + "name": "is_input", + "type": "boolean", + "description": "If true, the tensor is an input of the node; otherwise the output.", + "default": false + }, + { + "name": "io_index", + "type": "int64", + "description": "The index of which the tensor is an input or output of the node.", + "default": -1 + }, + { + "name": "debug_urls", + "type": "string[]", + "description": "List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc://localhost:11011", + "default": [] + }, + { + "name": "gated_grpc", + "type": "boolean", + "description": "Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.", + "default": false + } + ], + "inputs": [ + { + "name": "input", + "description": "Input tensor, non-Reference type", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "DebugNanCount", + "summary": "Debug NaN Value Counter Op.", + "description": "Counts number of NaNs in the input tensor, for debugging.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "device_name", + "type": "string", + "default": "" + }, + { + "name": "tensor_name", + "type": "string", + "description": "Name of the input tensor.", + "default": "" + }, + { + "name": "debug_urls", + "type": "string[]", + "description": "List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc://localhost:11011.", + "default": [] + }, + { + "name": "gated_grpc", + "type": "boolean", + "description": " Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url.
If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.", + "default": false + } + ], + "inputs": [ + { + "name": "input", + "description": "Input tensor, non-Reference type.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": 9 + } + ] + }, + { + "name": "DebugNumericSummary", + "summary": "Debug Numeric Summary Op.", + "description": "Provide a basic summary of numeric value types, range and distribution.\n\noutput: A double tensor of shape [14 + nDimensions], where nDimensions is the\n number of dimensions of the tensor's shape. The elements of output are:\n [0]: is initialized (1.0) or not (0.0).\n [1]: total number of elements\n [2]: NaN element count\n [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by\n default.\n [4]: negative element count (excluding -inf), if lower_bound is the default\n -inf. Otherwise, this is the count of elements > lower_bound and < 0.\n [5]: zero element count\n [6]: positive element count (excluding +inf), if upper_bound is the default\n +inf. Otherwise, this is the count of elements < upper_bound and > 0.\n [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by\n default.\nOutput elements [1:8] are all zero, if the tensor is uninitialized.\n [8]: minimum of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: +inf.\n [9]: maximum of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: -inf.\n [10]: mean of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: NaN.\n [11]: variance of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: NaN.\n [12]: Data type of the tensor encoded as an enum integer. See the DataType\n proto for more details.\n [13]: Number of dimensions of the tensor (ndims).\n [14+]: Sizes of the dimensions.\n", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "device_name", + "type": "string", + "default": "" + }, + { + "name": "tensor_name", + "type": "string", + "description": "Name of the input tensor.", + "default": "" + }, + { + "name": "debug_urls", + "type": "string[]", + "description": "List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc://localhost:11011.", + "default": [] + }, + { + "name": "lower_bound", + "type": "float32", + "description": "(float) The lower bound <= which values will be included in the\n generalized -inf count. Default: -inf.", + "default": "-NaN" + }, + { + "name": "upper_bound", + "type": "float32", + "description": "(float) The upper bound >= which values will be included in the\n generalized +inf count. Default: +inf.", + "default": "NaN" + }, + { + "name": "mute_if_healthy", + "type": "boolean", + "description": "(bool) Do not send data to the debug URLs unless at least one\n of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and\n inf counts) is non-zero.", + "default": false + }, + { + "name": "gated_grpc", + "type": "boolean", + "description": "Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. 
If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.", + "default": false + } + ], + "inputs": [ + { + "name": "input", + "description": "Input tensor, non-Reference type.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": 2 + } + ] + }, + { + "name": "DebugNumericSummaryV2", + "summary": "Debug Numeric Summary V2 Op.", + "description": "Computes a numeric summary of the input tensor. The shape of the output\ndepends on the tensor_debug_mode attribute.\nThis op is used internally by TensorFlow Debugger (tfdbg) v2.", + "attributes": [ + { + "name": "output_dtype", + "type": "type", + "description": "Optional. The type of the output. Can be float32 or float64 (default: float32). Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "T", + "type": "type" + }, + { + "name": "tensor_debug_mode", + "type": "int64", + "description": "Tensor debug mode: the mode in which the input tensor is summarized\n by the op. See the TensorDebugMode enum in\n tensorflow/core/protobuf/debug_event.proto for details.\n\nSupported values:\n 2 (CURT_HEALTH): Output a float32/64 tensor of shape [2]. The 1st\n element is the tensor_id, if provided, and -1 otherwise. The 2nd\n element is a bit which is set to 1 if the input tensor has an\n infinity or nan value, or zero otherwise.\n\n 3 (CONCISE_HEALTH): Output a float32/64 tensor of shape [5]. The 1st\n element is the tensor_id, if provided, and -1 otherwise. The\n remaining four slots are the total number of elements, -infs,\n +infs, and nans in the input tensor respectively.\n\n 4 (FULL_HEALTH): Output a float32/64 tensor of shape [11]. The 1st\n element is the tensor_id, if provided, and -1 otherwise. The 2nd\n element is the device_id, if provided, and -1 otherwise. The 3rd\n element holds the datatype value of the input tensor as according\n to the enumerated type in tensorflow/core/framework/types.proto.\n The remaining elements hold the total number of elements, -infs,\n +infs, nans, negative finite numbers, zeros, and positive finite\n numbers in the input tensor respectively.\n\n 5 (SHAPE): Output a float32/64 tensor of shape [10]. The 1st\n element is the tensor_id, if provided, and -1 otherwise. The 2nd\n element holds the datatype value of the input tensor as according\n to the enumerated type in tensorflow/core/framework/types.proto.\n The 3rd element holds the rank of the tensor. The 4th element holds\n the number of elements within the tensor. Finally the remaining 6\n elements hold the shape of the tensor. If the rank of the tensor\n is lower than 6, the shape is right padded with zeros. If the rank\n is greater than 6, the head of the shape is truncated.\n\n 6 (FULL_NUMERICS): Output a float32/64 tensor of shape [22]. The 1st\n element is the tensor_id, if provided, and -1 otherwise. The 2nd\n element is the device_id, if provided, and -1 otherwise. The 3rd\n element holds the datatype value of the input tensor as according\n to the enumerated type in tensorflow/core/framework/types.proto.\n The 4th element holds the rank of the tensor. The 5th to 11th\n elements hold the shape of the tensor. If the rank of the tensor\n is lower than 6, the shape is right padded with zeros. If the rank\n is greater than 6, the head of the shape is truncated. 
The 12th to\n 18th elements hold the number of elements, -infs, +infs, nans,\n denormal floats, negative finite numbers, zeros, and positive\n finite numbers in the input tensor respectively. The final four\n elements hold the min value, max value, mean, and variance of the\n input tensor.\n\n 8 (REDUCE_INF_NAN_THREE_SLOTS): Output a float32/64 tensor of shape\n [3]. The 1st element is -inf if any element of the input tensor\n is -inf, or zero otherwise. The 2nd element is +inf if any element\n of the input tensor is +inf, or zero otherwise. The 3rd element is\n nan if any element of the input tensor is nan, or zero otherwise.", + "default": -1 + }, + { + "name": "tensor_id", + "type": "int64", + "description": "Optional. An integer identifier for the tensor being summarized by this op.", + "default": -1 + } + ], + "inputs": [ + { + "name": "input", + "description": "Input tensor, to be summarized by the op.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "output_dtype" + } + ] + }, + { + "name": "DecodeAndCropJpeg", + "summary": "Decode and Crop a JPEG-encoded image to a uint8 tensor.", + "description": "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the JPEG-encoded image.\n* 1: output a grayscale image.\n* 3: output an RGB image.\n\nIf needed, the JPEG-encoded image is transformed to match the requested number\nof color channels.\n\nThe attr `ratio` allows downscaling the image by an integer factor during\ndecoding. Allowed values are: 1, 2, 4, and 8. This is much faster than\ndownscaling the image later.\n\n\nIt is equivalent to a combination of decode and crop, but much faster by only\ndecoding partial jpeg image.", + "attributes": [ + { + "name": "channels", + "type": "int64", + "description": "Number of color channels for the decoded image.", + "default": 0 + }, + { + "name": "ratio", + "type": "int64", + "description": "Downscaling ratio.", + "default": 1 + }, + { + "name": "fancy_upscaling", + "type": "boolean", + "description": "If true use a slower but nicer upscaling of the\nchroma planes (yuv420/422 only).", + "default": true + }, + { + "name": "try_recover_truncated", + "type": "boolean", + "description": "If true try to recover an image from truncated input.", + "default": false + }, + { + "name": "acceptable_fraction", + "type": "float32", + "description": "The minimum required fraction of lines before a truncated\ninput is accepted.", + "default": 1.0 + }, + { + "name": "dct_method", + "type": "string", + "description": "string specifying a hint about the algorithm used for\ndecompression. Defaults to \"\" which maps to a system-specific\ndefault. Currently valid values are [\"INTEGER_FAST\",\n\"INTEGER_ACCURATE\"]. The hint may be ignored (e.g., the internal\njpeg library changes to a version that does not have that specific\noption.)", + "default": "" + } + ], + "inputs": [ + { + "name": "contents", + "description": "0-D. The JPEG-encoded image.", + "type": 7 + }, + { + "name": "crop_window", + "description": "1-D. The crop window: [crop_y, crop_x, crop_height, crop_width].", + "type": 3 + } + ], + "outputs": [ + { + "name": "image", + "description": "3-D with shape `[height, width, channels]`.", + "type": 4 + } + ] + }, + { + "name": "DecodeBase64", + "summary": "Decode web-safe base64-encoded strings.", + "description": "Input may or may not have padding at the end. 
See\n[EncodeBase64](https://www.tensorflow.org/api_docs/python/tf/io/encode_base64)\nfor padding. Web-safe means that input must use - and _ instead of + and /.", + "inputs": [ + { + "name": "input", + "description": "Base64 strings to decode.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "Decoded strings.", + "type": 7 + } + ] + }, + { + "name": "DecodeBmp", + "summary": "Decode the first frame of a BMP-encoded image to a uint8 tensor.", + "description": "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the BMP-encoded image.\n* 3: output an RGB image.\n* 4: output an RGBA image.", + "attributes": [ + { + "name": "channels", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "contents", + "description": "0-D. The BMP-encoded image.", + "type": 7 + } + ], + "outputs": [ + { + "name": "image", + "description": "3-D with shape `[height, width, channels]`. RGB order", + "type": 4 + } + ] + }, + { + "name": "DecodeCSV", + "summary": "Convert CSV records to tensors. Each column maps to one tensor.", + "description": "RFC 4180 format is expected for the CSV records.\n(https://tools.ietf.org/html/rfc4180)\nNote that we allow leading and trailing spaces with int or float field.", + "attributes": [ + { + "name": "OUT_TYPE", + "type": "type[]", + "minimum": 1, + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`, `string`." + }, + { + "name": "field_delim", + "type": "string", + "description": "char delimiter to separate fields in a record.", + "default": "," + }, + { + "name": "use_quote_delim", + "type": "boolean", + "description": "If false, treats double quotation marks as regular\ncharacters inside of the string fields (ignoring RFC 4180, Section 2,\nBullet 5).", + "default": true + }, + { + "name": "na_value", + "type": "string", + "description": "Additional string to recognize as NA/NaN.", + "default": "" + }, + { + "name": "select_cols", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "records", + "description": "Each string is a record/row in the csv and all records should have\nthe same format.", + "type": 7 + }, + { + "name": "record_defaults", + "description": "One tensor per column of the input record, with either a\nscalar default value for that column or an empty vector if the column is\nrequired.", + "typeListAttr": "OUT_TYPE" + } + ], + "outputs": [ + { + "name": "output", + "description": "Each tensor will have the same shape as records.", + "typeListAttr": "OUT_TYPE" + } + ] + }, + { + "name": "DecodeCompressed", + "summary": "Decompress strings.", + "description": "This op decompresses each element of the `bytes` input `Tensor`, which\nis assumed to be compressed using the given `compression_type`.\n\nThe `output` is a string `Tensor` of the same shape as `bytes`,\neach element containing the decompressed data from the corresponding\nelement in `bytes`.", + "attributes": [ + { + "name": "compression_type", + "type": "string", + "description": "A scalar containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\".", + "default": "" + } + ], + "inputs": [ + { + "name": "bytes", + "description": "A Tensor of string which is compressed.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "A Tensor with the same shape as input `bytes`, uncompressed\nfrom bytes.", + "type": 7 + } + ] + }, + { + "name": "DecodeGif", + 
"summary": "Decode the frame(s) of a GIF-encoded image to a uint8 tensor.", + "description": "GIF images with frame or transparency compression are not supported.\nOn Linux and MacOS systems, convert animated GIFs from compressed to\nuncompressed by running:\n\n convert $src.gif -coalesce $dst.gif\n\nThis op also supports decoding JPEGs and PNGs, though it is cleaner to use\n`tf.io.decode_image`.", + "inputs": [ + { + "name": "contents", + "description": "0-D. The GIF-encoded image.", + "type": 7 + } + ], + "outputs": [ + { + "name": "image", + "description": "4-D with shape `[num_frames, height, width, 3]`. RGB channel order.", + "type": 4 + } + ] + }, + { + "name": "DecodeImage", + "summary": "Function for decode_bmp, decode_gif, decode_jpeg, and decode_png.", + "description": "Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the\nappropriate operation to convert the input bytes string into a Tensor of type\ndtype.\n\n*NOTE*: decode_gif returns a 4-D array [num_frames, height, width, 3], as\nopposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays\n[height, width, num_channels]. Make sure to take this into account when\nconstructing your graph if you are intermixing GIF files with BMP, JPEG, and/or\nPNG files. Alternately, set the expand_animations argument of this function to\nFalse, in which case the op will return 3-dimensional tensors and will truncate\nanimated GIF files to the first frame.\n\n*NOTE*: If the first frame of an animated GIF does not occupy the entire\ncanvas (maximum frame width x maximum frame height), then it fills the\nunoccupied areas (in the first frame) with zeros (black). For frames after the\nfirst frame that does not occupy the entire canvas, it uses the previous\nframe to fill the unoccupied areas.", + "attributes": [ + { + "name": "channels", + "type": "int64", + "description": "Number of color channels for the decoded image.", + "default": 0 + }, + { + "name": "dtype", + "type": "type", + "description": "The desired DType of the returned Tensor. Must be one of the following: `uint8`, `uint16`, `float32`.", + "default": { + "type": "type", + "value": 4 + } + }, + { + "name": "expand_animations", + "type": "boolean", + "description": "Controls the output shape of the returned op. If True, the returned op will\nproduce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all\nGIFs, whether animated or not. If, False, the returned op will produce a 3-D\ntensor for all file types and will truncate animated GIFs to the first frame.", + "default": true + } + ], + "inputs": [ + { + "name": "contents", + "description": "0-D. 
The encoded image bytes.", + "type": 7 + } + ], + "outputs": [ + { + "name": "image", + "description": "3-D with shape `[height, width, channels]` or 4-D with shape\n`[frame, height, width, channels]`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "DecodeJSONExample", + "summary": "Convert JSON-encoded Example records to binary protocol buffer strings.", + "description": "\nNote: This is **not** a general purpose JSON parsing op.\n\nThis op converts JSON-serialized\n`tf.train.Example` (created with `json_format.MessageToJson`, following the\n[standard JSON mapping](https://developers.google.com/protocol-buffers/docs/proto3#json))\nto a binary-serialized `tf.train.Example` (equivalent to\n`Example.SerializeToString()`) suitable for conversion to tensors with\n`tf.io.parse_example`.", + "inputs": [ + { + "name": "json_examples", + "description": "Each string is a JSON object serialized according to the JSON\nmapping of the Example proto.", + "type": 7 + } + ], + "outputs": [ + { + "name": "binary_examples", + "description": "Each string is a binary Example protocol buffer corresponding\nto the respective element of `json_examples`.", + "type": 7 + } + ] + }, + { + "name": "DecodeJpeg", + "summary": "Decode a JPEG-encoded image to a uint8 tensor.", + "description": "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the JPEG-encoded image.\n* 1: output a grayscale image.\n* 3: output an RGB image.\n\nIf needed, the JPEG-encoded image is transformed to match the requested number\nof color channels.\n\nThe attr `ratio` allows downscaling the image by an integer factor during\ndecoding. Allowed values are: 1, 2, 4, and 8. This is much faster than\ndownscaling the image later.\n\n\nThis op also supports decoding PNGs and non-animated GIFs since the interface is\nthe same, though it is cleaner to use `tf.io.decode_image`.", + "attributes": [ + { + "name": "channels", + "type": "int64", + "description": "Number of color channels for the decoded image.", + "default": 0 + }, + { + "name": "ratio", + "type": "int64", + "description": "Downscaling ratio.", + "default": 1 + }, + { + "name": "fancy_upscaling", + "type": "boolean", + "description": "If true use a slower but nicer upscaling of the\nchroma planes (yuv420/422 only).", + "default": true + }, + { + "name": "try_recover_truncated", + "type": "boolean", + "description": "If true try to recover an image from truncated input.", + "default": false + }, + { + "name": "acceptable_fraction", + "type": "float32", + "description": "The minimum required fraction of lines before a truncated\ninput is accepted.", + "default": 1.0 + }, + { + "name": "dct_method", + "type": "string", + "description": "string specifying a hint about the algorithm used for\ndecompression. Defaults to \"\" which maps to a system-specific\ndefault. Currently valid values are [\"INTEGER_FAST\",\n\"INTEGER_ACCURATE\"]. The hint may be ignored (e.g., the internal\njpeg library changes to a version that does not have that specific\noption.)", + "default": "" + } + ], + "inputs": [ + { + "name": "contents", + "description": "0-D. 
The JPEG-encoded image.", + "type": 7 + } + ], + "outputs": [ + { + "name": "image", + "description": "3-D with shape `[height, width, channels]`.", + "type": 4 + } + ] + }, + { + "name": "DecodePaddedRaw", + "summary": "Reinterpret the bytes of a string as a vector of numbers.", + "attributes": [ + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`." + }, + { + "name": "little_endian", + "type": "boolean", + "description": "Whether the input `input_bytes` is in little-endian order. Ignored for\n`out_type` values that are stored in a single byte, like `uint8`", + "default": true + } + ], + "inputs": [ + { + "name": "input_bytes", + "description": "Tensor of string to be decoded.", + "type": 7 + }, + { + "name": "fixed_length", + "description": "Length in bytes for each element of the decoded output. Must be a multiple\nof the size of the output type.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A Tensor with one more dimension than the input `bytes`. The added dimension\nwill have size equal to the length of the elements of `bytes` divided by the\nnumber of bytes to represent `out_type`.", + "typeAttr": "out_type" + } + ] + }, + { + "name": "DecodePng", + "summary": "Decode a PNG-encoded image to a uint8 or uint16 tensor.", + "description": "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the PNG-encoded image.\n* 1: output a grayscale image.\n* 3: output an RGB image.\n* 4: output an RGBA image.\n\nIf needed, the PNG-encoded image is transformed to match the requested number\nof color channels.\n\nThis op also supports decoding JPEGs and non-animated GIFs since the interface\nis the same, though it is cleaner to use `tf.io.decode_image`.", + "attributes": [ + { + "name": "channels", + "type": "int64", + "description": "Number of color channels for the decoded image.", + "default": 0 + }, + { + "name": "dtype", + "type": "type", + "description": "Must be one of the following: `uint8`, `uint16`.", + "default": { + "type": "type", + "value": 4 + } + } + ], + "inputs": [ + { + "name": "contents", + "description": "0-D. The PNG-encoded image.", + "type": 7 + } + ], + "outputs": [ + { + "name": "image", + "description": "3-D with shape `[height, width, channels]`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "DecodeProtoV2", + "summary": "The op extracts fields from a serialized protocol buffers message into tensors.", + "description": "Note: This API is designed for orthogonality rather than human-friendliness. It\ncan be used to parse input protos by hand, but it is intended for use in\ngenerated code.\n\nThe `decode_proto` op extracts fields from a serialized protocol buffers\nmessage into tensors. The fields in `field_names` are decoded and converted\nto the corresponding `output_types` if possible.\n\nA `message_type` name must be provided to give context for the field names.\nThe actual message descriptor can be looked up either in the linked-in\ndescriptor pool or a filename provided by the caller using the\n`descriptor_source` attribute.\n\nEach output tensor is a dense tensor. This means that it is padded to hold\nthe largest number of repeated elements seen in the input minibatch. (The\nshape is also padded by one to prevent zero-sized dimensions). 
The actual\nrepeat counts for each example in the minibatch can be found in the `sizes`\noutput. In many cases the output of `decode_proto` is fed immediately into\ntf.squeeze if missing values are not a concern. When using tf.squeeze, always\npass the squeeze dimension explicitly to avoid surprises.\n\nFor the most part, the mapping between Proto field types and TensorFlow dtypes\nis straightforward. However, there are a few special cases:\n\n- A proto field that contains a submessage or group can only be converted\nto `DT_STRING` (the serialized submessage). This is to reduce the complexity\nof the API. The resulting string can be used as input to another instance of\nthe decode_proto op.\n\n- TensorFlow lacks support for unsigned integers. The ops represent uint64\ntypes as a `DT_INT64` with the same twos-complement bit pattern (the obvious\nway). Unsigned int32 values can be represented exactly by specifying type\n`DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in\nthe `output_types` attribute.\n\n- `map` fields are not directly decoded. They are treated as `repeated` fields,\nof the appropriate entry type. The proto-compiler defines entry types for each\nmap field. The type-name is the field name, converted to \"CamelCase\" with\n\"Entry\" appended. The `tf.train.Features.FeatureEntry` message is an example of\none of these implicit `Entry` types.\n\n- `enum` fields should be read as int32.\n\nBoth binary and text proto serializations are supported, and can be\nchosen using the `format` attribute.\n\nThe `descriptor_source` attribute selects the source of protocol\ndescriptors to consult when looking up `message_type`. This may be:\n\n- An empty string or \"local://\", in which case protocol descriptors are\ncreated for C++ (not Python) proto definitions linked to the binary.\n\n- A file, in which case protocol descriptors are created from the file,\nwhich is expected to contain a `FileDescriptorSet` serialized as a string.\nNOTE: You can build a `descriptor_source` file using the `--descriptor_set_out`\nand `--include_imports` options to the protocol compiler `protoc`.\n\n- A \"bytes://<bytes>\", in which protocol descriptors are created from `<bytes>`,\nwhich is expected to be a `FileDescriptorSet` serialized as a string.", + "attributes": [ + { + "name": "message_type", + "type": "string", + "description": "Name of the proto message type to decode." + }, + { + "name": "field_names", + "type": "string[]", + "description": "List of strings containing proto field names. An extension field can be decoded\nby using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME." 
+ }, + { + "name": "output_types", + "type": "type[]", + "description": "List of TF types to use for the respective field in field_names.", + "minimum": 0 + }, + { + "name": "descriptor_source", + "type": "string", + "description": "Either the special value `local://` or a path to a file containing\na serialized `FileDescriptorSet`.", + "default": "local://" + }, + { + "name": "message_format", + "type": "string", + "description": "Either `binary` or `text`.", + "default": "binary" + }, + { + "name": "sanitize", + "type": "boolean", + "description": "Whether to sanitize the result or not.", + "default": false + } + ], + "inputs": [ + { + "name": "bytes", + "description": "Tensor of serialized protos with shape `batch_shape`.", + "type": 7 + } + ], + "outputs": [ + { + "name": "sizes", + "description": "Tensor of int32 with shape `[batch_shape, len(field_names)]`.\nEach entry is the number of values found for the corresponding field.\nOptional fields may have 0 or 1 values.", + "type": 3 + }, + { + "name": "values", + "description": "List of tensors containing values for the corresponding field.\n`values[i]` has datatype `output_types[i]`\nand shape `[batch_shape, max(sizes[...,i])]`.", + "typeListAttr": "output_types" + } + ] + }, + { + "name": "DecodeRaw", + "summary": "Reinterpret the bytes of a string as a vector of numbers.", + "attributes": [ + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`, `complex64`, `complex128`, `bool`, `bfloat16`." + }, + { + "name": "little_endian", + "type": "boolean", + "description": "Whether the input `bytes` are in little-endian order.\nIgnored for `out_type` values that are stored in a single byte like\n`uint8`.", + "default": true + } + ], + "inputs": [ + { + "name": "bytes", + "description": "All the elements must have the same length.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "A Tensor with one more dimension than the input `bytes`. The\nadded dimension will have size equal to the length of the elements\nof `bytes` divided by the number of bytes to represent `out_type`.", + "typeAttr": "out_type" + } + ] + }, + { + "name": "DecodeWav", + "summary": "Decode a 16-bit PCM WAV file to a float tensor.", + "description": "The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.\n\nWhen desired_channels is set, if the input contains fewer channels than this\nthen the last channel will be duplicated to give the requested number, else if\nthe input has more channels than requested then the additional channels will be\nignored.\n\nIf desired_samples is set, then the audio will be cropped or padded with zeroes\nto the requested length.\n\nThe first output contains a Tensor with the content of the audio samples. The\nlowest dimension will be the number of channels, and the second will be the\nnumber of samples. 
For example, a ten-sample-long stereo WAV file should give an\noutput shape of [10, 2].", + "attributes": [ + { + "name": "desired_channels", + "type": "int64", + "description": "Number of sample channels wanted.", + "default": -1 + }, + { + "name": "desired_samples", + "type": "int64", + "description": "Length of audio requested.", + "default": -1 + } + ], + "inputs": [ + { + "name": "contents", + "description": "The WAV-encoded audio, usually from a file.", + "type": 7 + } + ], + "outputs": [ + { + "name": "audio", + "description": "2-D with shape `[length, channels]`.", + "type": 1 + }, + { + "name": "sample_rate", + "description": "Scalar holding the sample rate found in the WAV header.", + "type": 3 + } + ] + }, + { + "name": "DeepCopy", + "summary": "Makes a copy of `x`.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "description": "The source tensor of type `T`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "description": " y: A `Tensor` of type `T`. A copy of `x`. Guaranteed that `y`\n is not an alias of `x`.", + "typeAttr": "T" + } + ] + }, + { + "name": "DeleteIterator", + "summary": "A container for an iterator resource.", + "inputs": [ + { + "name": "handle", + "description": "A handle to the iterator to delete.", + "type": 20 + }, + { + "name": "deleter", + "description": "A variant deleter.", + "type": 21 + } + ] + }, + { + "name": "DeleteMemoryCache", + "inputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + }, + { + "name": "DeleteMultiDeviceIterator", + "summary": "A container for an iterator resource.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "multi_device_iterator", + "description": "A handle to the multi device iterator to delete.", + "type": 20 + }, + { + "name": "iterators", + "description": "A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted.", + "numberAttr": "N", + "type": 20 + }, + { + "name": "deleter", + "description": "A variant deleter.", + "type": 21 + } + ] + }, + { + "name": "DeleteRandomSeedGenerator", + "inputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + }, + { + "name": "DeleteSeedGenerator", + "inputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + }, + { + "name": "DeleteSessionTensor", + "summary": "Delete the tensor specified by its handle in the session.", + "inputs": [ + { + "name": "handle", + "description": "The handle for a tensor stored in the session state.", + "type": 7 + } + ] + }, + { + "name": "DenseBincount", + "summary": "Counts the number of occurrences of each value in an integer array.", + "description": "Outputs a vector with length `size` and the same dtype as `weights`. If\n`weights` are empty, then index `i` stores the number of times the value `i` is\ncounted in `arr`. If `weights` are non-empty, then index `i` stores the sum of\nthe value in `weights` at each index where the corresponding value in `arr` is\n`i`.\n\nValues in `arr` outside of the range [0, size) are ignored.", + "attributes": [ + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
+ }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`." + }, + { + "name": "binary_output", + "type": "boolean", + "description": "bool; Whether the kernel should count the appearance or number of occurrences.", + "default": false + } + ], + "inputs": [ + { + "name": "input", + "description": "1D or 2D int `Tensor`.", + "typeAttr": "Tidx" + }, + { + "name": "size", + "description": "non-negative int scalar `Tensor`.", + "typeAttr": "Tidx" + }, + { + "name": "weights", + "description": "is an int32, int64, float32, or float64 `Tensor` with the same\nshape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights\nequal to 1.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].\nThe counts or summed weights for each value in the range [0, size).", + "typeAttr": "T" + } + ] + }, + { + "name": "DenseCountSparseOutput", + "summary": "Performs sparse-output bin counting for a tf.tensor input.", + "description": " Counts the number of times each value occurs in the input.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Dtype of the input values tensor. Must be one of the following: `int32`, `int64`." + }, + { + "name": "minlength", + "type": "int64", + "description": "Minimum value to count. Can be set to -1 for no minimum.", + "minimum": -1, + "default": -1 + }, + { + "name": "maxlength", + "type": "int64", + "description": "Maximum value to count. Can be set to -1 for no maximum.", + "minimum": -1, + "default": -1 + }, + { + "name": "binary_output", + "type": "boolean", + "description": "Whether to output the number of occurrences of each value or 1." + }, + { + "name": "output_type", + "type": "type", + "description": "Dtype of the output values tensor. Must be one of the following: `int32`, `int64`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "values", + "description": "Tensor containing data to count.", + "typeAttr": "T" + }, + { + "name": "weights", + "description": "A Tensor of the same shape as indices containing per-index weight values. May\nalso be the empty tensor if no weights are used.", + "typeAttr": "output_type" + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "Indices tensor for the resulting sparse tensor object.", + "type": 9 + }, + { + "name": "output_values", + "description": "Values tensor for the resulting sparse tensor object.", + "typeAttr": "output_type" + }, + { + "name": "output_dense_shape", + "description": "Shape tensor for the resulting sparse tensor object.", + "type": 9 + } + ] + }, + { + "name": "DenseToCSRSparseMatrix", + "summary": "Converts a dense tensor to a (possibly batched) CSRSparseMatrix.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." 
+ } + ], + "inputs": [ + { + "name": "dense_input", + "description": "A Dense tensor.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "Indices of nonzero elements.", + "type": 9 + } + ], + "outputs": [ + { + "name": "sparse_output", + "description": "A (possibly batched) CSRSparseMatrix.", + "type": 21 + } + ] + }, + { + "name": "DenseToDenseSetOperation", + "summary": "Applies set operation along last dimension of 2 `Tensor` inputs.", + "description": "See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\nOutput `result` is a `SparseTensor` represented by `result_indices`,\n`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\nhas rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`\ndimension contains the result of `set_operation` applied to the corresponding\n`[0...n-1]` dimension of `set`.", + "attributes": [ + { + "name": "set_operation", + "type": "string" + }, + { + "name": "validate_indices", + "type": "boolean", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`." + } + ], + "inputs": [ + { + "name": "set1", + "description": "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\nDimension `n` contains values in a set, duplicates are allowed but ignored.", + "typeAttr": "T" + }, + { + "name": "set2", + "description": "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.\nDimension `n` contains values in a set, duplicates are allowed but ignored.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "result_indices", + "description": "2D indices of a `SparseTensor`.", + "type": 9 + }, + { + "name": "result_values", + "description": "1D values of a `SparseTensor`.", + "typeAttr": "T" + }, + { + "name": "result_shape", + "description": "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions.", + "type": 9 + } + ] + }, + { + "name": "DenseToSparseBatchDataset", + "summary": "Creates a dataset that batches input elements into a SparseTensor.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A handle to an input dataset. Must have a single component.", + "type": 21 + }, + { + "name": "batch_size", + "description": "A scalar representing the number of elements to accumulate in a\nbatch.", + "type": 9 + }, + { + "name": "row_shape", + "description": "A vector representing the dense shape of each row in the produced\nSparseTensor. The shape may be partially specified, using `-1` to indicate\nthat a particular dimension should use the maximum size of all batch elements.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "DenseToSparseSetOperation", + "summary": "Applies set operation along last dimension of `Tensor` and `SparseTensor`.", + "description": "See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\nInput `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,\nand `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same\nas `set1`. 
Dimension `n` contains values in a set, duplicates are allowed but\nignored.\n\nIf `validate_indices` is `True`, this op validates the order and range of `set2`\nindices.\n\nOutput `result` is a `SparseTensor` represented by `result_indices`,\n`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\nhas rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`\ndimension contains the result of `set_operation` applied to the corresponding\n`[0...n-1]` dimension of `set`.", + "attributes": [ + { + "name": "set_operation", + "type": "string" + }, + { + "name": "validate_indices", + "type": "boolean", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`." + } + ], + "inputs": [ + { + "name": "set1", + "description": "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\nDimension `n` contains values in a set, duplicates are allowed but ignored.", + "typeAttr": "T" + }, + { + "name": "set2_indices", + "description": "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder.", + "type": 9 + }, + { + "name": "set2_values", + "description": "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder.", + "typeAttr": "T" + }, + { + "name": "set2_shape", + "description": "1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\nbe the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the\nmax set size across `n-1` dimensions.", + "type": 9 + } + ], + "outputs": [ + { + "name": "result_indices", + "description": "2D indices of a `SparseTensor`.", + "type": 9 + }, + { + "name": "result_values", + "description": "1D values of a `SparseTensor`.", + "typeAttr": "T" + }, + { + "name": "result_shape", + "description": "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions.", + "type": 9 + } + ] + }, + { + "name": "DepthToSpace", + "summary": "DepthToSpace for tensors of type T.", + "description": "Rearranges data from depth into blocks of spatial data.\nThis is the reverse transformation of SpaceToDepth. More specifically,\nthis op outputs a copy of the input tensor where values from the `depth`\ndimension are moved in spatial blocks to the `height` and `width` dimensions.\nThe attr `block_size` indicates the input block size and how the data is moved.\n\n * Chunks of data of size `block_size * block_size` from depth are rearranged\n into non-overlapping blocks of size `block_size x block_size`\n * The width of the output tensor is `input_depth * block_size`, whereas the\n height is `input_height * block_size`.\n * The Y, X coordinates within each block of the output image are determined\n by the high order component of the input channel index.\n * The depth of the input tensor must be divisible by\n `block_size * block_size`.\n\nThe `data_format` attr specifies the layout of the input and output tensors\nwith the following options:\n \"NHWC\": `[ batch, height, width, channels ]`\n \"NCHW\": `[ batch, channels, height, width ]`\n \"NCHW_VECT_C\":\n `qint8 [ batch, channels / 4, height, width, 4 ]`\n\nIt is useful to consider the operation as transforming a 6-D Tensor.\ne.g. 
for data_format = NHWC,\n Each element in the input tensor can be specified via 6 coordinates,\n ordered by decreasing memory layout significance as:\n n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates\n within the input image, bX, bY means coordinates\n within the output block, oC means output channels).\n The output would be the input transposed to the following layout:\n n,iY,bY,iX,bX,oC\n\nThis operation is useful for resizing the activations between convolutions\n(but keeping all data), e.g. instead of pooling. It is also useful for training\npurely convolutional models.\n\nFor example, given an input of shape `[1, 1, 1, 4]`, data_format = \"NHWC\" and\nblock_size = 2:\n\n```\nx = [[[[1, 2, 3, 4]]]]\n\n```\n\nThis operation will output a tensor of shape `[1, 2, 2, 1]`:\n\n```\n [[[[1], [2]],\n [[3], [4]]]]\n```\n\nHere, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,\nthe corresponding output will have 2x2 elements and will have a depth of\n1 channel (1 = `4 / (block_size * block_size)`).\nThe output element shape is `[2, 2, 1]`.\n\nFor an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.\n\n```\nx = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]\n```\n\nThis operation, for block size of 2, will return the following tensor of shape\n`[1, 2, 2, 3]`\n\n```\n [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n\n```\n\nSimilarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:\n\n```\nx = [[[[1, 2, 3, 4],\n [5, 6, 7, 8]],\n [[9, 10, 11, 12],\n [13, 14, 15, 16]]]]\n```\n\nthe operator will return the following tensor of shape `[1 4 4 1]`:\n\n```\nx = [[[ [1], [2], [5], [6]],\n [ [3], [4], [7], [8]],\n [ [9], [10], [13], [14]],\n [ [11], [12], [15], [16]]]]\n\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "block_size", + "type": "int64", + "description": "The size of the spatial block, same as in Space2Depth.", + "minimum": 2 + }, + { + "name": "data_format", + "type": "string", + "description": "Must be one of the following: `NHWC`, `NCHW`, `NCHW_VECT_C`.", + "default": "NHWC" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "DepthwiseConv2dNative", + "category": "Layer", + "summary": "Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.", + "description": "Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\nand a filter / kernel tensor of shape\n`[filter_height, filter_width, in_channels, channel_multiplier]`, containing\n`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies\na different filter to each input channel (expanding from 1 channel to\n`channel_multiplier` channels for each), then concatenates the results\ntogether. Thus, the output has `in_channels * channel_multiplier` channels.\n\n```\nfor k in 0..in_channels-1\n for q in 0..channel_multiplier-1\n output[b, i, j, k * channel_multiplier + q] =\n sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *\n filter[di, dj, k, q]\n```\n\nMust have `strides[0] = strides[3] = 1`. For the most common case of the same\nhorizontal and vertical strides, `strides = [1, stride, stride, 1]`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D of length 4. 
The stride of the sliding window for each dimension\nof `input`." + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`." + }, + { + "name": "explicit_paddings", + "type": "int64[]", + "default": [] + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. The dimension order is determined by the value of\n`data_format`, see above for details. Dilations in the batch and depth\ndimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "filter", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "DepthwiseConv2dNativeBackpropFilter", + "summary": "Computes the gradients of depthwise convolution with respect to the filter.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the input\nof the convolution." + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`." + }, + { + "name": "explicit_paddings", + "type": "int64[]", + "default": [] + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. The dimension order is determined by the value of\n`data_format`, see above for details. Dilations in the batch and depth\ndimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape based on `data_format`. 
For example, if\n`data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,\nin_width, in_channels]` tensor.", + "typeAttr": "T" + }, + { + "name": "filter_sizes", + "description": "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.", + "type": 3 + }, + { + "name": "out_backprop", + "description": "4-D with shape based on `data_format`.\nFor example, if `data_format` is 'NHWC' then\nout_backprop shape is `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.\nthe `filter` input of the convolution.", + "typeAttr": "T" + } + ] + }, + { + "name": "DepthwiseConv2dNativeBackpropInput", + "summary": "Computes the gradients of depthwise convolution with respect to the input.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the input\nof the convolution." + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`." + }, + { + "name": "explicit_paddings", + "type": "int64[]", + "default": [] + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. The dimension order is determined by the value of\n`data_format`, see above for details. Dilations in the batch and depth\ndimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input_sizes", + "description": "An integer vector representing the shape of `input`, based\non `data_format`. For example, if `data_format` is 'NHWC' then\n `input` is a 4-D `[batch, height, width, channels]` tensor.", + "type": 3 + }, + { + "name": "filter", + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, depthwise_multiplier]`.", + "typeAttr": "T" + }, + { + "name": "out_backprop", + "description": "4-D with shape based on `data_format`.\nFor example, if `data_format` is 'NHWC' then\nout_backprop shape is `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with shape according to `data_format`. For example, if\n`data_format` is 'NHWC', output shape is `[batch, in_height,\nin_width, in_channels]`. Gradient w.r.t. 
the input of the\nconvolution.", + "typeAttr": "T" + } + ] + }, + { + "name": "Dequantize", + "category": "Tensor", + "summary": "Dequantize the 'input' tensor into a float or bfloat16 Tensor.", + "description": "[min_range, max_range] are scalar floats that specify the range for\nthe output. The 'mode' attribute controls exactly which calculations are\nused to convert the float values to their quantized equivalents.\n\nIn 'MIN_COMBINED' mode, each value of the tensor will undergo the following:\n\n```\nif T == qint8: in[i] += (range(T) + 1)/ 2.0\nout[i] = min_range + (in[i]* (max_range - min_range) / range(T))\n```\nhere `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`\n\n*MIN_COMBINED Mode Example*\n\nIf the input comes from a QuantizedRelu6, the output type is\nquint8 (range of 0-255) but the possible range of QuantizedRelu6 is\n0-6. The min_range and max_range values are therefore 0.0 and 6.0.\nDequantize on quint8 will take each value, cast to float, and multiply\nby 6 / 255.\nNote that if quantizedtype is qint8, the operation will additionally add\n128 to each value prior to casting.\n\nIf the mode is 'MIN_FIRST', then this approach is used:\n\n```c++\nnum_discrete_values = 1 << (# of bits in T)\nrange_adjust = num_discrete_values / (num_discrete_values - 1)\nrange = (range_max - range_min) * range_adjust\nrange_scale = range / num_discrete_values\nconst double offset_input = static_cast<double>(input) - lowest_quantized;\nresult = range_min + ((input - numeric_limits<T>::min()) * range_scale)\n```\n\nIf the mode is `SCALED`, dequantization is performed by multiplying each\ninput value by a scaling_factor. (Thus an input of 0 always maps to 0.0).\n\nThe scaling_factor is determined from `min_range`, `max_range`, and\n`narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}`\nand `QuantizeV2`, using the following algorithm:\n\n```c++\n\n const int min_expected_T = std::numeric_limits<T>::min() +\n (narrow_range ? 1 : 0);\n const int max_expected_T = std::numeric_limits<T>::max();\n const float max_expected_T = std::numeric_limits<float>::max();\n\n const float scale_factor =\n (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T)\n : std::max(min_range / min_expected_T,\n max_range / max_expected_T);\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "mode", + "type": "string", + "description": "Must be one of the following: `MIN_COMBINED`, `MIN_FIRST`, `SCALED`.", + "default": "MIN_COMBINED" + }, + { + "name": "narrow_range", + "type": "boolean", + "default": false + }, + { + "name": "axis", + "type": "int64", + "default": -1 + }, + { + "name": "dtype", + "type": "type", + "description": "Type of the output tensor. Currently Dequantize supports float and bfloat16.\nIf 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. 
Must be one of the following: `bfloat16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "min_range", + "description": "The minimum scalar value possibly produced for the input.", + "type": 1 + }, + { + "name": "max_range", + "description": "The maximum scalar value possibly produced for the input.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ] + }, + { + "name": "DeserializeIterator", + "summary": "Converts the given variant tensor to an iterator and stores it in the given resource.", + "inputs": [ + { + "name": "resource_handle", + "description": "A handle to an iterator resource.", + "type": 20 + }, + { + "name": "serialized", + "description": "A variant tensor storing the state of the iterator contained in the\nresource.", + "type": 21 + } + ] + }, + { + "name": "DeserializeManySparse", + "summary": "Deserialize and concatenate `SparseTensors` from a serialized minibatch.", + "description": "The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where\n`N` is the minibatch size and the rows correspond to packed outputs of\n`SerializeSparse`. The ranks of the original `SparseTensor` objects\nmust all match. When the final `SparseTensor` is created, it has rank one\nhigher than the ranks of the incoming `SparseTensor` objects\n(they have been concatenated along a new row dimension).\n\nThe output `SparseTensor` object's shape values for all dimensions but the\nfirst are the max across the input `SparseTensor` objects' shape values\nfor the corresponding dimensions. Its first shape value is `N`, the minibatch\nsize.\n\nThe input `SparseTensor` objects' indices are assumed ordered in\nstandard lexicographic order. If this is not the case, after this\nstep run `SparseReorder` to restore index ordering.\n\nFor example, if the serialized input is a `[2 x 3]` matrix representing two\noriginal `SparseTensor` objects:\n\n index = [ 0]\n [10]\n [20]\n values = [1, 2, 3]\n shape = [50]\n\nand\n\n index = [ 2]\n [10]\n values = [4, 5]\n shape = [30]\n\nthen the final deserialized `SparseTensor` will be:\n\n index = [0 0]\n [0 10]\n [0 20]\n [1 2]\n [1 10]\n values = [1, 2, 3, 4, 5]\n shape = [2 50]", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The `dtype` of the serialized `SparseTensor` objects." + } + ], + "inputs": [ + { + "name": "serialized_sparse", + "description": "2-D, The `N` serialized `SparseTensor` objects.\nMust have 3 columns.", + "type": 7 + } + ], + "outputs": [ + { + "name": "sparse_indices", + "type": 9 + }, + { + "name": "sparse_values", + "typeAttr": "dtype" + }, + { + "name": "sparse_shape", + "type": 9 + } + ] + }, + { + "name": "DeserializeSparse", + "summary": "Deserialize `SparseTensor` objects.", + "description": "The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where\nthe last dimension stores serialized `SparseTensor` objects and the other N\ndimensions (N >= 0) correspond to a batch. The ranks of the original\n`SparseTensor` objects must all match. When the final `SparseTensor` is\ncreated, its rank is the rank of the incoming `SparseTensor` objects plus N;\nthe sparse tensors have been concatenated along new dimensions, one for each\nbatch.\n\nThe output `SparseTensor` object's shape values for the original dimensions\nare the max across the input `SparseTensor` objects' shape values for the\ncorresponding dimensions. 
The new dimensions match the size of the batch.\n\nThe input `SparseTensor` objects' indices are assumed ordered in\nstandard lexicographic order. If this is not the case, after this\nstep run `SparseReorder` to restore index ordering.\n\nFor example, if the serialized input is a `[2 x 3]` matrix representing two\noriginal `SparseTensor` objects:\n\n index = [ 0]\n [10]\n [20]\n values = [1, 2, 3]\n shape = [50]\n\nand\n\n index = [ 2]\n [10]\n values = [4, 5]\n shape = [30]\n\nthen the final deserialized `SparseTensor` will be:\n\n index = [0 0]\n [0 10]\n [0 20]\n [1 2]\n [1 10]\n values = [1, 2, 3, 4, 5]\n shape = [2 50]", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The `dtype` of the serialized `SparseTensor` objects." + }, + { + "name": "Tserialized", + "type": "type", + "description": "Must be one of the following: `string`, `variant`.", + "default": { + "type": "type", + "value": 7 + } + } + ], + "inputs": [ + { + "name": "serialized_sparse", + "description": "The serialized `SparseTensor` objects. The last dimension\nmust have 3 columns.", + "typeAttr": "Tserialized" + } + ], + "outputs": [ + { + "name": "sparse_indices", + "type": 9 + }, + { + "name": "sparse_values", + "typeAttr": "dtype" + }, + { + "name": "sparse_shape", + "type": 9 + } + ] + }, + { + "name": "DestroyResourceOp", + "summary": "Deletes the resource specified by the handle.", + "description": "All subsequent operations using the resource will result in a NotFound\nerror status.", + "attributes": [ + { + "name": "ignore_lookup_error", + "type": "boolean", + "description": "whether to ignore the error when the resource\ndoesn't exist.", + "default": true + } + ], + "inputs": [ + { + "name": "resource", + "description": "handle to the resource to delete.", + "type": 20 + } + ] + }, + { + "name": "DestroyTemporaryVariable", + "summary": "Destroys the temporary variable and returns its final value.", + "description": "Sets output to the value of the Tensor pointed to by 'ref', then destroys\nthe temporary variable called 'var_name'.\nAll other uses of 'ref' *must* have executed before this op.\nThis is typically achieved by chaining the ref through each assign op, or by\nusing control dependencies.\n\nOutputs the final value of the tensor pointed to by 'ref'.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "var_name", + "type": "string", + "description": "Name of the temporary variable, usually the name of the matching\n'TemporaryVariable' op." + } + ], + "inputs": [ + { + "name": "ref", + "description": "A reference to the temporary variable tensor.", + "typeAttr": "T", + "isRef": true + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "T" + } + ] + }, + { + "name": "DeviceIndex", + "summary": "Return the index of the device the op runs on.", + "description": "Given a list of device names, this operation returns the index of the device\nthis op runs on. The length of the list is returned in two cases:\n(1) The device does not exist in the given device list.\n(2) It is in XLA compilation.", + "attributes": [ + { + "name": "device_names", + "type": "string[]" + } + ], + "outputs": [ + { + "name": "index", + "type": 3 + } + ] + }, + { + "name": "Diag", + "summary": "Returns a diagonal tensor with given diagonal values.", + "description": "Given a `diagonal`, this operation returns a tensor with the `diagonal` and\neverything else padded with zeros. 
The diagonal is computed as follows:\n\nAssume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of\nrank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:\n\n`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.\n\nFor example:\n\n```\n# 'diagonal' is [1, 2, 3, 4]\ntf.diag(diagonal) ==> [[1, 0, 0, 0]\n [0, 2, 0, 0]\n [0, 0, 3, 0]\n [0, 0, 0, 4]]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "diagonal", + "description": "Rank k tensor where k is at most 1.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "DiagPart", + "summary": "Returns the diagonal part of the tensor.", + "description": "This operation returns a tensor with the `diagonal` part\nof the `input`. The `diagonal` part is computed as follows:\n\nAssume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a\ntensor of rank `k` with dimensions `[D1,..., Dk]` where:\n\n`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.\n\nFor example:\n\n```\n# 'input' is [[1, 0, 0, 0]\n [0, 2, 0, 0]\n [0, 0, 3, 0]\n [0, 0, 0, 4]]\n\ntf.diag_part(input) ==> [1, 2, 3, 4]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "Rank k tensor where k is even and not zero.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "diagonal", + "description": "The extracted diagonal.", + "typeAttr": "T" + } + ] + }, + { + "name": "Digamma", + "summary": "Computes Psi, the derivative of Lgamma (the log of the absolute value of", + "description": "`Gamma(x)`), element-wise.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Dilation2D", + "summary": "Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.", + "description": "The `input` tensor has shape `[batch, in_height, in_width, depth]` and the\n`filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each\ninput channel is processed independently of the others with its own structuring\nfunction. The `output` tensor has shape\n`[batch, out_height, out_width, depth]`. The spatial dimensions of the output\ntensor depend on the `padding` algorithm. 
We currently only support the default\n\"NHWC\" `data_format`.\n\nIn detail, the grayscale morphological 2-D dilation is the max-sum correlation\n(for consistency with `conv2d`, we use unmirrored filters):\n\n output[b, y, x, c] =\n max_{dy, dx} input[b,\n strides[1] * y + rates[1] * dy,\n strides[2] * x + rates[2] * dx,\n c] +\n filter[dy, dx, c]\n\nMax-pooling is a special case when the filter has size equal to the pooling\nkernel size and contains all zeros.\n\nNote on duality: The dilation of `input` by the `filter` is equal to the\nnegation of the erosion of `-input` by the reflected `filter`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the input\ntensor. Must be: `[1, stride_height, stride_width, 1]`.", + "minimum": 4 + }, + { + "name": "rates", + "type": "int64[]", + "description": "The input stride for atrous morphological dilation. Must be:\n`[1, rate_height, rate_width, 1]`.", + "minimum": 4 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape `[batch, in_height, in_width, depth]`.", + "typeAttr": "T" + }, + { + "name": "filter", + "description": "3-D with shape `[filter_height, filter_width, depth]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with shape `[batch, out_height, out_width, depth]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Dilation2DBackpropFilter", + "summary": "Computes the gradient of morphological 2-D dilation with respect to the filter.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`.", + "minimum": 4 + }, + { + "name": "rates", + "type": "int64[]", + "description": "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`.", + "minimum": 4 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." 
+ } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape `[batch, in_height, in_width, depth]`.", + "typeAttr": "T" + }, + { + "name": "filter", + "description": "3-D with shape `[filter_height, filter_width, depth]`.", + "typeAttr": "T" + }, + { + "name": "out_backprop", + "description": "4-D with shape `[batch, out_height, out_width, depth]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "filter_backprop", + "description": "3-D with shape `[filter_height, filter_width, depth]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Dilation2DBackpropInput", + "summary": "Computes the gradient of morphological 2-D dilation with respect to the input.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`.", + "minimum": 4 + }, + { + "name": "rates", + "type": "int64[]", + "description": "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`.", + "minimum": 4 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape `[batch, in_height, in_width, depth]`.", + "typeAttr": "T" + }, + { + "name": "filter", + "description": "3-D with shape `[filter_height, filter_width, depth]`.", + "typeAttr": "T" + }, + { + "name": "out_backprop", + "description": "4-D with shape `[batch, out_height, out_width, depth]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "in_backprop", + "description": "4-D with shape `[batch, in_height, in_width, depth]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "DirectedInterleaveDataset", + "summary": "A substitute for `InterleaveDataset` on a fixed list of `N` datasets.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "stop_on_empty_dataset", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "selector_input_dataset", + "description": "A dataset of scalar `DT_INT64` elements that determines which of the\n`N` data inputs should produce the next output element.", + "type": 21 + }, + { + "name": "data_input_datasets", + "description": "`N` datasets with the same type that will be interleaved according to\nthe values of `selector_input_dataset`.", + "numberAttr": "N", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "DisableCopyOnRead", + "summary": "Turns off the copy-on-read mode.", + "description": "Turns off the copy-on-read mode of a resource variable. If the variable is not in copy-on-read mode, this op has no effect. 
", + "inputs": [ + { + "name": "resource", + "description": "The resource handle of the resource variable.", + "type": 20 + } + ] + }, + { + "name": "DistributedSave", + "attributes": [ + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "dataset", + "type": 21 + }, + { + "name": "directory", + "type": 7 + }, + { + "name": "address", + "type": 7 + } + ] + }, + { + "name": "Div", + "summary": "Returns x / y element-wise.", + "description": "*NOTE*: `Div` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "DivNoNan", + "summary": "Returns 0 if the denominator is zero.", + "description": "\n*NOTE*: `DivNoNan` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `bfloat16`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "DrawBoundingBoxes", + "summary": "Draw bounding boxes on a batch of images.", + "description": "Outputs a copy of `images` but draws on top of the pixels zero or more bounding\nboxes specified by the locations in `boxes`. The coordinates of the each\nbounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nheight of the underlying image.\n\nFor example, if an image is 100 x 200 pixels (height x width) and the bounding\nbox is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of\nthe bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).\n\nParts of the bounding box may fall outside the image.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float16`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "images", + "description": "4-D with shape `[batch, height, width, depth]`. A batch of images.", + "typeAttr": "T" + }, + { + "name": "boxes", + "description": "3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding\nboxes.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with the same shape as `images`. The batch of input images with\nbounding boxes drawn on the images.", + "typeAttr": "T" + } + ] + }, + { + "name": "DrawBoundingBoxesV2", + "summary": "Draw bounding boxes on a batch of images.", + "description": "Outputs a copy of `images` but draws on top of the pixels zero or more bounding\nboxes specified by the locations in `boxes`. The coordinates of the each\nbounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. 
The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nheight of the underlying image.\n\nFor example, if an image is 100 x 200 pixels (height x width) and the bounding\nbox is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of\nthe bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).\n\nParts of the bounding box may fall outside the image.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float16`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "images", + "description": "4-D with shape `[batch, height, width, depth]`. A batch of images.", + "typeAttr": "T" + }, + { + "name": "boxes", + "description": "3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding\nboxes.", + "type": 1 + }, + { + "name": "colors", + "description": "2-D. A list of RGBA colors to cycle through for the boxes.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with the same shape as `images`. The batch of input images with\nbounding boxes drawn on the images.", + "typeAttr": "T" + } + ] + }, + { + "name": "DummyIterationCounter", + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + }, + { + "name": "DummyMemoryCache", + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + }, + { + "name": "DummySeedGenerator", + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + }, + { + "name": "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch", + "summary": "Eases the porting of code that uses tf.nn.embedding_lookup_sparse().", + "description": "embedding_indices[i] and aggregation_weights[i] correspond\nto the ith feature.\n\nThe tensors at corresponding positions in the three input lists (sample_indices,\nembedding_indices and aggregation_weights) must have the same shape, i.e. rank 1\nwith dim_size() equal to the total number of lookups into the table described by\nthe corresponding feature.", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T3", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "combiners", + "type": "string[]", + "description": "A list of string scalars, one for each embedding table that specify\nhow to normalize the embedding activations after weighted summation.\nSupported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have\nthe sum of the weights be 0 for 'mean' or the sum of the squared weights be\n0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for\nall tables.", + "default": [] + } + ], + "inputs": [ + { + "name": "sample_indices_or_row_splits", + "description": "A list of rank 2 Tensors specifying the training example to which the\ncorresponding embedding_indices and aggregation_weights values belong.\nIf the size of its first dimension is 0, we assume each embedding_indices\nbelongs to a different sample. 
Both int32 and int64 are allowed and will\nbe converted to int32 internally.\n\nOr a list of rank 1 Tensors specifying the row splits for splitting\nembedding_indices and aggregation_weights into rows. It corresponds to\nids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When\nenqueuing an N-D ragged tensor, only the last dimension is allowed to be ragged.\nThe row splits is a 1-D dense tensor. When empty, we assume a dense tensor is\npassed to the op. Both int32 and int64 are allowed and will be converted to\nint32 internally.", + "numberAttr": "N", + "typeAttr": "T1" + }, + { + "name": "embedding_indices", + "description": "A list of rank 1 Tensors, indices into the embedding\ntables. Both int32 and int64 are allowed and will be converted to\nint32 internally.", + "numberAttr": "N", + "typeAttr": "T2" + }, + { + "name": "aggregation_weights", + "description": "A list of rank 1 Tensors containing per training\nexample aggregation weights. Both float32 and float64 are allowed and will\nbe converted to float32 internally.", + "numberAttr": "N", + "typeAttr": "T3" + }, + { + "name": "mode_override", + "description": "A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',\n'training', 'backward_pass_only'}. When set to 'unspecified', the mode set\nin TPUEmbeddingConfiguration is used, otherwise mode_override is used.", + "type": 7 + }, + { + "name": "device_ordinal", + "description": "The TPU device to use. Should be >= 0 and less than the number\nof TPU cores in the task on which the node is placed.", + "type": 3 + } + ] + }, + { + "name": "DynamicEnqueueTPUEmbeddingRaggedTensorBatch", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T3", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "combiners", + "type": "string[]", + "default": [] + }, + { + "name": "table_ids", + "type": "int64[]" + }, + { + "name": "max_sequence_lengths", + "type": "int64[]", + "default": [] + }, + { + "name": "num_features", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "sample_splits", + "numberAttr": "N", + "typeAttr": "T1" + }, + { + "name": "embedding_indices", + "numberAttr": "N", + "typeAttr": "T2" + }, + { + "name": "aggregation_weights", + "numberAttr": "N", + "typeAttr": "T3" + }, + { + "name": "mode_override", + "type": 7 + }, + { + "name": "device_ordinal", + "type": 3 + } + ] + }, + { + "name": "DynamicPartition", + "summary": "Partitions `data` into `num_partitions` tensors using indices from `partitions`.", + "description": "For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`\nbecomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`\nare placed in `outputs[i]` in lexicographic order of `js`, and the first\ndimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.\nIn detail,\n\n```python\n outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]\n\n outputs[i] = pack([data[js, ...] 
for js if partitions[js] == i])\n```\n\n`data.shape` must start with `partitions.shape`.\n\nFor example:\n\n```python\n # Scalar partitions.\n partitions = 1\n num_partitions = 2\n data = [10, 20]\n outputs[0] = [] # Empty with shape [0, 2]\n outputs[1] = [[10, 20]]\n\n # Vector partitions.\n partitions = [0, 0, 1, 1, 0]\n num_partitions = 2\n data = [10, 20, 30, 40, 50]\n outputs[0] = [10, 20, 50]\n outputs[1] = [30, 40]\n```\n\nSee `dynamic_stitch` for an example on how to merge partitions back.\n\n
Raises:\n * `InvalidArgumentError` in the following cases:\n - If partitions is not in range `[0, num_partitions)`\n - If `partitions.shape` does not match the prefix of `data.shape`.\n", + "attributes": [ + { + "name": "num_partitions", + "type": "int64", + "description": "The number of partitions to output.", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "partitions", + "description": "Any shape. Indices in the range `[0, num_partitions)`.", + "type": 3 + } + ], + "outputs": [ + { + "name": "outputs", + "numberAttr": "num_partitions", + "typeAttr": "T" + } + ] + }, + { + "name": "DynamicStitch", + "summary": "Interleave the values from the `data` tensors into a single tensor.", + "description": "Builds a merged tensor such that\n\n```python\n merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]\n```\n\nFor example, if each `indices[m]` is scalar or vector, we have\n\n```python\n # Scalar indices:\n merged[indices[m], ...] = data[m][...]\n\n # Vector indices:\n merged[indices[m][i], ...] = data[m][i, ...]\n```\n\nEach `data[i].shape` must start with the corresponding `indices[i].shape`,\nand the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we\nmust have `data[i].shape = indices[i].shape + constant`. In terms of this\n`constant`, the output shape is\n\n merged.shape = [max(indices) + 1] + constant\n\nValues are merged in order, so if an index appears in both `indices[m][i]` and\n`indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the\nmerged result. If you do not need this guarantee, ParallelDynamicStitch might\nperform better on some devices.\n\nFor example:\n\n```python\n indices[0] = 6\n indices[1] = [4, 1]\n indices[2] = [[5, 2], [0, 3]]\n data[0] = [61, 62]\n data[1] = [[41, 42], [11, 12]]\n data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]\n merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],\n [51, 52], [61, 62]]\n```\n\nThis method can be used to merge partitions created by `dynamic_partition`\nas illustrated in the following example:\n\n```python\n # Apply function (increments x_i) on elements for which a certain condition\n # applies (x_i != -1 in this example).\n x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])\n condition_mask=tf.not_equal(x,tf.constant(-1.))\n partitioned_data = tf.dynamic_partition(\n x, tf.cast(condition_mask, tf.int32) , 2)\n partitioned_data[1] = partitioned_data[1] + 1.0\n condition_indices = tf.dynamic_partition(\n tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)\n x = tf.dynamic_stitch(condition_indices, partitioned_data)\n # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain\n # unchanged.\n```
", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "indices", + "numberAttr": "N", + "type": 3 + }, + { + "name": "data", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "merged", + "typeAttr": "T" + } + ] + }, + { + "name": "EagerPyFunc", + "summary": "Eagerly executes a python function to compute func(input)->output. The", + "description": "semantics of the input, output, and attributes are the same as those for\nPyFunc.", + "attributes": [ + { + "name": "token", + "type": "string" + }, + { + "name": "is_async", + "type": "boolean", + "default": false + }, + { + "name": "Tin", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Tout", + "type": "type[]", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "input", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "EditDistance", + "summary": "Computes the (possibly normalized) Levenshtein Edit Distance.", + "description": "The inputs are variable-length sequences provided by SparseTensors\n (hypothesis_indices, hypothesis_values, hypothesis_shape)\nand\n (truth_indices, truth_values, truth_shape).\n\nThe inputs are:", + "attributes": [ + { + "name": "normalize", + "type": "boolean", + "description": "boolean (if true, edit distances are normalized by length of truth).\n\nThe output is:", + "default": true + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "hypothesis_indices", + "description": "The indices of the hypothesis list SparseTensor.\nThis is an N x R int64 matrix.", + "type": 9 + }, + { + "name": "hypothesis_values", + "description": "The values of the hypothesis list SparseTensor.\nThis is an N-length vector.", + "typeAttr": "T" + }, + { + "name": "hypothesis_shape", + "description": "The shape of the hypothesis list SparseTensor.\nThis is an R-length vector.", + "type": 9 + }, + { + "name": "truth_indices", + "description": "The indices of the truth list SparseTensor.\nThis is an M x R int64 matrix.", + "type": 9 + }, + { + "name": "truth_values", + "description": "The values of the truth list SparseTensor.\nThis is an M-length vector.", + "typeAttr": "T" + }, + { + "name": "truth_shape", + "description": "truth indices, vector.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output", + "description": "A dense float tensor with rank R - 1.\n\nFor the example input:\n\n // hypothesis represents a 2x1 matrix with variable-length values:\n // (0,0) = [\"a\"]\n // (1,0) = [\"b\"]\n hypothesis_indices = [[0, 0, 0],\n [1, 0, 0]]\n hypothesis_values = [\"a\", \"b\"]\n hypothesis_shape = [2, 1, 1]\n\n // truth represents a 2x2 matrix with variable-length values:\n // (0,0) = []\n // (0,1) = [\"a\"]\n // (1,0) = [\"b\", \"c\"]\n // (1,1) = [\"a\"]\n truth_indices = [[0, 1, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0]]\n truth_values = [\"a\", \"b\", \"c\", \"a\"]\n truth_shape = [2, 2, 2]\n normalize = true\n\nThe output will be:\n\n // output is a 2x2 matrix with edit distances normalized by truth lengths.\n output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis\n [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis", + "type": 1 + } + ] + }, + { + "name": "Eig", + "summary": "Computes the eigen decomposition of one or more square matrices.", + "description": "Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in\n`input` such that `input[..., :, :] = v[..., :, :] 
* diag(e[..., :])`. The eigenvalues\nare sorted in non-decreasing order.\n\n```python\n# a is a tensor.\n# e is a tensor of eigenvalues.\n# v is a tensor of eigenvectors.\ne, v = eig(a)\ne = eig(a, compute_v=False)\n```", + "attributes": [ + { + "name": "compute_v", + "type": "boolean", + "description": "If `True` then eigenvectors will be computed and returned in `v`.\nOtherwise, only the eigenvalues will be computed.", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." + }, + { + "name": "Tout", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "`Tensor` input of shape `[N, N]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "e", + "description": "Eigenvalues. Shape is `[N]`.", + "typeAttr": "Tout" + }, + { + "name": "v", + "description": "Eigenvectors. Shape is `[N, N]`.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "Einsum", + "summary": "Tensor contraction according to Einstein summation convention.", + "description": "Implements generalized Tensor contraction and reduction. Each input Tensor must\nhave a corresponding input subscript appearing in the comma-separated left-hand\nside of the equation. The right-hand side of the equation consists of the\noutput subscript. The input subscripts and the output subscript should consist\nof zero or more named axis labels and at most one ellipsis (`...`).\n\nThe named axis labels may be any single character other than those having\nspecial meaning, namely `,.->`. The behavior of this Op is undefined if it\nreceives an ill-formatted equation; since the validation is done at\ngraph-building time, we omit format validation checks at runtime.\n\nNote: This Op is *not* intended to be called by the user; instead users should\ncall `tf.einsum` directly. It is a hidden Op used by `tf.einsum`.\n\nOperations are applied to the input(s) according to the following rules:\n\n (a) Generalized Diagonals: For input dimensions corresponding to axis labels\n appearing more than once in the same input subscript, we take the\n generalized (`k`-dimensional) diagonal.\n For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the\n generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`,\n `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`.\n\n (b) Reduction: Axes corresponding to labels appearing only in one input\n subscript but not in the output subscript are summed over prior to Tensor\n contraction.\n For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are\n the reduction axis labels.\n\n (c) Batch Dimensions: Axes corresponding to labels appearing in each of the\n input subscripts and also in the output subscript make up the batch\n dimensions in Tensor contraction. 
Unnamed axis labels corresponding to\n ellipsis (`...`) also correspond to batch dimensions.\n For example, for the equation denoting batch matrix multiplication,\n `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension.\n\n (d) Contraction: In case of binary einsum, axes corresponding to labels\n appearing in two different inputs (and not in the output) are contracted\n against each other.\n Considering the batch matrix multiplication equation again\n (`bij,bjk->bik`), the contracted axis label is `j`.\n\n (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis\n labels, the opposite operation of (a) is applied. For example, in the\n equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]`\n is all zeros, except for the (generalized) diagonal which is populated\n with values from the input.\n Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is\n provided to enable computing the symbolic gradient of `tf.einsum`.\n\nThe output subscripts must contain only labels appearing in at least one of the\ninput subscripts. Furthermore, all dimensions mapping to the same axis label\nmust be equal.\n\nAny of the input and output subscripts may contain at most a single ellipsis\n(`...`). These ellipses are mapped against dimensions not corresponding to any\nnamed axis label. If two inputs contain ellipsis, then they are broadcasted\naccording to standard NumPy broadcasting\n[rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).\n\nThe broadcasted dimensions are placed in the corresponding location of the\nellipsis in the output subscript. If the broadcasted dimensions are non-empty\nand the output subscripts do not contain ellipsis, then an InvalidArgument error\nis raised.\n\n@compatibility(numpy)\nSimilar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html).\n\nComparison with `numpy.einsum`:\n\n * This Op only supports unary and binary forms of `numpy.einsum`.\n * This Op does not support implicit form. (i.e. equations without `->`).\n * This Op also supports repeated indices in the output subscript, which is not\n supported by `numpy.einsum`.\n@end_compatibility\n", + "attributes": [ + { + "name": "equation", + "type": "string", + "description": "String describing the Einstein Summation operation; in the format of np.einsum." + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "inputs", + "description": "List of 1 or 2 Tensors.", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Output Tensor with shape depending upon `equation`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Elu", + "category": "Activation", + "summary": "Computes the exponential linear function.", + "description": "The ELU function is defined as:\n\n * $ e ^ x - 1 $ if $ x < 0 $\n * $ x $ if $ x >= 0 $\n\nExamples:\n\n>>> tf.nn.elu(1.0)\n<tf.Tensor: shape=(), dtype=float32, numpy=1.0>\n>>> tf.nn.elu(0.0)\n<tf.Tensor: shape=(), dtype=float32, numpy=0.0>\n>>> tf.nn.elu(-1000.0)\n<tf.Tensor: shape=(), dtype=float32, numpy=-1.0>\n\nSee [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)\n](http://arxiv.org/abs/1511.07289)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." 
+ } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ] + }, + { + "name": "EluGrad", + "summary": "Computes gradients for the exponential linear (Elu) operation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "gradients", + "description": "The backpropagated gradients to the corresponding Elu operation.", + "typeAttr": "T" + }, + { + "name": "outputs", + "description": "The outputs of the corresponding Elu operation.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "backprops", + "description": "The gradients: `gradients * (outputs + 1)` if outputs < 0,\n`gradients` otherwise.", + "typeAttr": "T" + } + ] + }, + { + "name": "Empty", + "summary": "Creates a tensor with the given shape.\n\nThis operation creates a tensor of `shape` and `dtype`.", + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "name": "init", + "type": "boolean", + "description": "If True, initialize the returned tensor with the default value of dtype. Otherwise, the implementation is free not to initialize the tensor's content.", + "default": false + } + ], + "inputs": [ + { + "name": "shape", + "description": "1-D. Represents the shape of the output tensor.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A `Tensor` of type `dtype`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "EmptyTensorList", + "summary": "Creates and returns an empty tensor list.", + "description": "All list elements must be tensors of dtype element_dtype and shape compatible\nwith element_shape.\n\nhandle: an empty tensor list.\nelement_dtype: the type of elements in the list.\nelement_shape: a shape compatible with that of elements in the list.", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "name": "shape_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "element_shape", + "typeAttr": "shape_type" + }, + { + "name": "max_num_elements", + "type": 3 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "EmptyTensorMap", + "summary": "Creates and returns an empty tensor map.", + "description": "handle: an empty tensor map", + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "EncodeBase64", + "summary": "Encode strings into web-safe base64 format.", + "description": "Refer to [this article](https://en.wikipedia.org/wiki/Base64) for more information on\nbase64 format. Base64 strings may have padding with '=' at the\nend so that the encoded data has a length that is a multiple of 4. See Padding section of the\nlink above.\n\nWeb-safe means that the encoder uses - and _ instead of + and /.", + "attributes": [ + { + "name": "pad", + "type": "boolean", + "description": "Bool whether padding is applied at the ends.", + "default": false + } + ], + "inputs": [ + { + "name": "input", + "description": "Strings to be encoded.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "Input strings encoded in base64.", + "type": 7 + } + ] + }, + { + "name": "EncodeJpeg", + "summary": "JPEG-encode an image.", + "description": "`image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.\n\nThe attr `format` can be used to override the color format of the encoded\noutput. 
Values can be:\n\n* `''`: Use a default format based on the number of channels in the image.\n* `grayscale`: Output a grayscale JPEG image. The `channels` dimension\n of `image` must be 1.\n* `rgb`: Output an RGB JPEG image. The `channels` dimension\n of `image` must be 3.\n\nIf `format` is not specified or is the empty string, a default format is picked\nin function of the number of channels in `image`:\n\n* 1: Output a grayscale image.\n* 3: Output an RGB image.", + "attributes": [ + { + "name": "format", + "type": "string", + "description": "Per pixel image format. Must be one of the following: ``, `grayscale`, `rgb`.", + "default": "" + }, + { + "name": "quality", + "type": "int64", + "description": "Quality of the compression from 0 to 100 (higher is better and slower).", + "default": 95 + }, + { + "name": "progressive", + "type": "boolean", + "description": "If True, create a JPEG that loads progressively (coarse to fine).", + "default": false + }, + { + "name": "optimize_size", + "type": "boolean", + "description": "If True, spend CPU/RAM to reduce size with no quality change.", + "default": false + }, + { + "name": "chroma_downsampling", + "type": "boolean", + "description": "See http://en.wikipedia.org/wiki/Chroma_subsampling.", + "default": true + }, + { + "name": "density_unit", + "type": "string", + "description": "Unit used to specify `x_density` and `y_density`:\npixels per inch (`'in'`) or centimeter (`'cm'`). Must be one of the following: `in`, `cm`.", + "default": "in" + }, + { + "name": "x_density", + "type": "int64", + "description": "Horizontal pixels per density unit.", + "default": 300 + }, + { + "name": "y_density", + "type": "int64", + "description": "Vertical pixels per density unit.", + "default": 300 + }, + { + "name": "xmp_metadata", + "type": "string", + "description": "If not empty, embed this XMP metadata in the image header.", + "default": "" + } + ], + "inputs": [ + { + "name": "image", + "description": "3-D with shape `[height, width, channels]`.", + "type": 4 + } + ], + "outputs": [ + { + "name": "contents", + "description": "0-D. JPEG-encoded image.", + "type": 7 + } + ] + }, + { + "name": "EncodeJpegVariableQuality", + "summary": "JPEG encode input image with provided compression quality.", + "description": "`image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.\n`quality` is an int32 jpeg compression quality value between 0 and 100.\n", + "inputs": [ + { + "name": "images", + "description": "Images to adjust. At least 3-D.", + "type": 4 + }, + { + "name": "quality", + "description": "An int quality to encode to.", + "type": 3 + } + ], + "outputs": [ + { + "name": "contents", + "description": "0-D. JPEG-encoded image.", + "type": 7 + } + ] + }, + { + "name": "EncodePng", + "summary": "PNG-encode an image.", + "description": "`image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`\nwhere `channels` is:\n\n* 1: for grayscale.\n* 2: for grayscale + alpha.\n* 3: for RGB.\n* 4: for RGBA.\n\nThe ZLIB compression level, `compression`, can be -1 for the PNG-encoder\ndefault or a value from 0 to 9. 
9 is the highest compression level, generating\nthe smallest output, but is slower.", + "attributes": [ + { + "name": "compression", + "type": "int64", + "description": "Compression level.", + "default": -1 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `uint16`.", + "default": { + "type": "type", + "value": 4 + } + } + ], + "inputs": [ + { + "name": "image", + "description": "3-D with shape `[height, width, channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "contents", + "description": "0-D. PNG-encoded image.", + "type": 7 + } + ] + }, + { + "name": "EncodeProto", + "summary": "The op serializes protobuf messages provided in the input tensors.", + "description": "The types of the tensors in `values` must match the schema for the fields\nspecified in `field_names`. All the tensors in `values` must have a common\nshape prefix, *batch_shape*.\n\nThe `sizes` tensor specifies repeat counts for each field. The repeat count\n(last dimension) of each tensor in `values` must be greater than or equal\nto the corresponding repeat count in `sizes`.\n\nA `message_type` name must be provided to give context for the field names.\nThe actual message descriptor can be looked up either in the linked-in\ndescriptor pool or a filename provided by the caller using the\n`descriptor_source` attribute.\n\nFor the most part, the mapping between Proto field types and TensorFlow dtypes\nis straightforward. However, there are a few special cases:\n\n- A proto field that contains a submessage or group can only be converted\nto `DT_STRING` (the serialized submessage). This is to reduce the complexity\nof the API. The resulting string can be used as input to another instance of\nthe decode_proto op.\n\n- TensorFlow lacks support for unsigned integers. The ops represent uint64\ntypes as a `DT_INT64` with the same twos-complement bit pattern (the obvious\nway). Unsigned int32 values can be represented exactly by specifying type\n`DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in\nthe `output_types` attribute.\n\nThe `descriptor_source` attribute selects the source of protocol\ndescriptors to consult when looking up `message_type`. This may be:\n\n- An empty string or \"local://\", in which case protocol descriptors are\ncreated for C++ (not Python) proto definitions linked to the binary.\n\n- A file, in which case protocol descriptors are created from the file,\nwhich is expected to contain a `FileDescriptorSet` serialized as a string.\nNOTE: You can build a `descriptor_source` file using the `--descriptor_set_out`\nand `--include_imports` options to the protocol compiler `protoc`.\n\n- A \"bytes://<bytes>\", in which protocol descriptors are created from `<bytes>`,\nwhich is expected to be a `FileDescriptorSet` serialized as a string.", + "attributes": [ + { + "name": "field_names", + "type": "string[]", + "description": "List of strings containing proto field names." + }, + { + "name": "message_type", + "type": "string", + "description": "Name of the proto message type to encode." 
+ }, + { + "name": "descriptor_source", + "type": "string", + "default": "local://" + }, + { + "name": "Tinput_types", + "type": "type[]", + "description": "The input types.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "sizes", + "description": "Tensor of int32 with shape `[batch_shape, len(field_names)]`.", + "type": 3 + }, + { + "name": "values", + "description": "List of tensors containing values for the corresponding field.", + "typeListAttr": "Tinput_types" + } + ], + "outputs": [ + { + "name": "bytes", + "description": "Tensor of serialized protos with shape `batch_shape`.", + "type": 7 + } + ] + }, + { + "name": "EncodeWav", + "summary": "Encode audio data using the WAV file format.", + "description": "This operation will generate a string suitable to be saved out to create a .wav\naudio file. It will be encoded in the 16-bit PCM format. It takes in float\nvalues in the range -1.0f to 1.0f, and any values outside that range will be clamped to\nthat range.\n\n`audio` is a 2-D float Tensor of shape `[length, channels]`.\n`sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).", + "inputs": [ + { + "name": "audio", + "description": "2-D with shape `[length, channels]`.", + "type": 1 + }, + { + "name": "sample_rate", + "description": "Scalar containing the sample frequency.", + "type": 3 + } + ], + "outputs": [ + { + "name": "contents", + "description": "0-D. WAV-encoded file contents.", + "type": 7 + } + ] + }, + { + "name": "EnqueueTPUEmbeddingArbitraryTensorBatch", + "summary": "Eases the porting of code that uses tf.nn.embedding_lookup_sparse().", + "description": "embedding_indices[i] and aggregation_weights[i] correspond\nto the ith feature.\n\nThe tensors at corresponding positions in the three input lists (sample_indices,\nembedding_indices and aggregation_weights) must have the same shape, i.e. rank 1\nwith dim_size() equal to the total number of lookups into the table described by\nthe corresponding feature.", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T3", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "device_ordinal", + "type": "int64", + "description": "The TPU device to use. Should be >= 0 and less than the number\nof TPU cores in the task on which the node is placed.", + "default": -1 + }, + { + "name": "combiners", + "type": "string[]", + "description": "A list of string scalars, one for each embedding table that specify\nhow to normalize the embedding activations after weighted summation.\nSupported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have\nthe sum of the weights be 0 for 'mean' or the sum of the squared weights be\n0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for\nall tables.", + "default": [] + } + ], + "inputs": [ + { + "name": "sample_indices_or_row_splits", + "description": "A list of rank 2 Tensors specifying the training example to which the\ncorresponding embedding_indices and aggregation_weights values belong.\nIf the size of its first dimension is 0, we assume each embedding_indices\nbelongs to a different sample. 
Both int32 and int64 are allowed and will\nbe converted to int32 internally.\n\nOr a list of rank 1 Tensors specifying the row splits for splitting\nembedding_indices and aggregation_weights into rows. It corresponds to\nids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When\nenqueuing an N-D ragged tensor, only the last dimension is allowed to be ragged.\nThe row splits is a 1-D dense tensor. When empty, we assume a dense tensor is\npassed to the op. Both int32 and int64 are allowed and will be converted to\nint32 internally.", + "numberAttr": "N", + "typeAttr": "T1" + }, + { + "name": "embedding_indices", + "description": "A list of rank 1 Tensors, indices into the embedding\ntables. Both int32 and int64 are allowed and will be converted to\nint32 internally.", + "numberAttr": "N", + "typeAttr": "T2" + }, + { + "name": "aggregation_weights", + "description": "A list of rank 1 Tensors containing per training\nexample aggregation weights. Both float32 and float64 are allowed and will\nbe converted to float32 internally.", + "numberAttr": "N", + "typeAttr": "T3" + }, + { + "name": "mode_override", + "description": "A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',\n'training', 'backward_pass_only'}. When set to 'unspecified', the mode set\nin TPUEmbeddingConfiguration is used, otherwise mode_override is used.", + "type": 7 + } + ] + }, + { + "name": "EnqueueTPUEmbeddingBatch", + "summary": "An op that enqueues a list of input batch tensors to TPUEmbedding.", + "description": "An op that enqueues a list of input batch tensors to TPUEmbedding.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "device_ordinal", + "type": "int64", + "description": "The TPU device to use. This should be -1 when the Op\nis running on a TPU device, and >= 0 when the Op is running on the CPU\ndevice.", + "default": -1 + }, + { + "name": "combiners", + "type": "string[]", + "description": "A list of string scalars, one for each embedding table that specify\nhow to normalize the embedding activations after weighted summation.\nSupported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have\nthe sum of the weights be 0 for 'mean' or the sum of the squared weights be\n0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for\nall tables.", + "default": [] + } + ], + "inputs": [ + { + "name": "batch", + "description": "A list of 1D tensors, one for each embedding table, containing the\nbatch inputs encoded as dist_belief.SparseFeatures protos. If the weight\nfield in the SparseFeatures proto is not populated for an ID, a weight of\n1.0 is assumed.", + "numberAttr": "N", + "type": 7 + }, + { + "name": "mode_override", + "description": "A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',\n'training', 'backward_pass_only'}. When set to 'unspecified', the mode set\nin TPUEmbeddingConfiguration is used, otherwise mode_override is used.", + "type": 7 + } + ] + }, + { + "name": "EnqueueTPUEmbeddingIntegerBatch", + "summary": "An op that enqueues a list of input batch tensors to TPUEmbedding.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "device_ordinal", + "type": "int64", + "description": "The TPU device to use. 
Should be >= 0 and less than the number\nof TPU cores in the task on which the node is placed.", + "default": -1 + } + ], + "inputs": [ + { + "name": "batch", + "description": "A list of 1D tensors, one for each embedding table, containing the\nindices into the tables.", + "numberAttr": "N", + "type": 3 + }, + { + "name": "mode_override", + "description": "A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',\n'training', 'backward_pass_only'}. When set to 'unspecified', the mode set\nin TPUEmbeddingConfiguration is used, otherwise mode_override is used.", + "type": 7 + } + ] + }, + { + "name": "EnqueueTPUEmbeddingRaggedTensorBatch", + "summary": "Eases the porting of code that uses tf.nn.embedding_lookup().", + "description": "sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond\nto the ith feature. table_ids[i] indicates which embedding table to look up the ith\nfeature.\n\nThe tensors at corresponding positions in two of the input lists,\nembedding_indices and aggregation_weights, must have the same shape, i.e. rank 1\nwith dim_size() equal to the total number of lookups into the table described by\nthe corresponding feature.", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T3", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "device_ordinal", + "type": "int64", + "description": "The TPU device to use. Should be >= 0 and less than the number\nof TPU cores in the task on which the node is placed.", + "default": -1 + }, + { + "name": "combiners", + "type": "string[]", + "description": "A list of string scalars, one for each embedding table that specify\nhow to normalize the embedding activations after weighted summation.\nSupported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have\nthe sum of the weights be 0 for 'mean' or the sum of the squared weights be\n0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for\nall tables.", + "default": [] + }, + { + "name": "table_ids", + "type": "int64[]", + "description": "A list of integers specifying the identifier of the embedding table\n(offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the\ncorresponding input. The ith input is looked up using table_ids[i]. The size\nof the table_ids list must be equal to that of sample_indices,\nembedding_indices and aggregation_weights." 
+ }, + { + "name": "max_sequence_lengths", + "type": "int64[]", + "default": [] + }, + { + "name": "num_features", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "sample_splits", + "description": "A list of rank 1 Tensors specifying the break points for splitting\nembedding_indices and aggregation_weights into rows.\nIt corresponds to ids.row_splits in embedding_lookup(), when ids is a\nRaggedTensor.", + "numberAttr": "N", + "typeAttr": "T1" + }, + { + "name": "embedding_indices", + "description": "A list of rank 1 Tensors, indices into the embedding tables.\nIt corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor.", + "numberAttr": "N", + "typeAttr": "T2" + }, + { + "name": "aggregation_weights", + "description": "A list of rank 1 Tensors containing per training example\naggregation weights. It corresponds to the values field of a RaggedTensor\nwith the same row_splits as ids in embedding_lookup(), when ids is a\nRaggedTensor.", + "numberAttr": "N", + "typeAttr": "T3" + }, + { + "name": "mode_override", + "description": "A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',\n'training', 'backward_pass_only'}. When set to 'unspecified', the mode set\nin TPUEmbeddingConfiguration is used, otherwise mode_override is used.", + "type": 7 + } + ] + }, + { + "name": "EnqueueTPUEmbeddingSparseBatch", + "summary": "An op that enqueues TPUEmbedding input indices from a SparseTensor.", + "description": "This Op eases the porting of code that uses embedding_lookup_sparse(),\nalthough some Python preprocessing of the SparseTensor arguments to\nembedding_lookup_sparse() is required to produce the arguments to this Op,\nsince only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training\nstep.\n\nThe tensors at corresponding positions in the three input lists\nmust have the same shape, i.e. rank 1 with dim_size() equal to the total\nnumber of lookups into the table described by the corresponding table_id.", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T3", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "device_ordinal", + "type": "int64", + "description": "The TPU device to use. Should be >= 0 and less than the number\nof TPU cores in the task on which the node is placed.", + "default": -1 + }, + { + "name": "combiners", + "type": "string[]", + "description": "A list of string scalars, one for each embedding table that specify\nhow to normalize the embedding activations after weighted summation.\nSupported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have\nthe sum of the weights be 0 for 'mean' or the sum of the squared weights be\n0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for\nall tables.", + "default": [] + } + ], + "inputs": [ + { + "name": "sample_indices", + "description": "A list of rank 1 Tensors specifying the training example and\nfeature to which the corresponding embedding_indices and aggregation_weights\nvalues belong. 
sample_indices[i] must equal b * nf + f, where nf is the\nnumber of features from the corresponding table, f is in [0, nf), and\nb is in [0, batch size).", + "numberAttr": "N", + "typeAttr": "T1" + }, + { + "name": "embedding_indices", + "description": "A list of rank 1 Tensors, indices into the embedding tables.", + "numberAttr": "N", + "typeAttr": "T2" + }, + { + "name": "aggregation_weights", + "description": "A list of rank 1 Tensors containing per sample -- i.e. per\n(training example, feature) -- aggregation weights.", + "numberAttr": "N", + "typeAttr": "T3" + }, + { + "name": "mode_override", + "description": "A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',\n'training', 'backward_pass_only'}. When set to 'unspecified', the mode set\nin TPUEmbeddingConfiguration is used, otherwise mode_override is used.", + "type": 7 + } + ] + }, + { + "name": "EnqueueTPUEmbeddingSparseTensorBatch", + "summary": "Eases the porting of code that uses tf.nn.embedding_lookup_sparse().", + "description": "sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond\nto the ith feature. table_ids[i] indicates which embedding table to use when looking\nup the ith feature.\n\nThe tensors at corresponding positions in the three input lists (sample_indices,\nembedding_indices and aggregation_weights) must have the same shape, i.e. rank 1\nwith dim_size() equal to the total number of lookups into the table described by\nthe corresponding feature.", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T3", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "device_ordinal", + "type": "int64", + "description": "The TPU device to use. Should be >= 0 and less than the number\nof TPU cores in the task on which the node is placed.", + "default": -1 + }, + { + "name": "combiners", + "type": "string[]", + "description": "A list of string scalars, one for each embedding table that specify\nhow to normalize the embedding activations after weighted summation.\nSupported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have\nthe sum of the weights be 0 for 'mean' or the sum of the squared weights be\n0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for\nall tables.", + "default": [] + }, + { + "name": "table_ids", + "type": "int64[]", + "description": "A list of integers specifying the identifier of the embedding table\n(offset of TableDescriptor in the TPUEmbeddingConfiguration) to look up the\ncorresponding input. The ith input is looked up using table_ids[i]. The size\nof the table_ids list must be equal to that of sample_indices,\nembedding_indices and aggregation_weights." + }, + { + "name": "max_sequence_lengths", + "type": "int64[]", + "default": [] + }, + { + "name": "num_features", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "sample_indices", + "description": "A list of rank 1 Tensors specifying the training example to\nwhich the corresponding embedding_indices and aggregation_weights values\nbelong. 
It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse().", + "numberAttr": "N", + "typeAttr": "T1" + }, + { + "name": "embedding_indices", + "description": "A list of rank 1 Tensors, indices into the embedding tables.\nIt corresponds to sp_ids.values in embedding_lookup_sparse().", + "numberAttr": "N", + "typeAttr": "T2" + }, + { + "name": "aggregation_weights", + "description": "A list of rank 1 Tensors containing per training example\naggregation weights. It corresponds to sp_weights.values in\nembedding_lookup_sparse().", + "numberAttr": "N", + "typeAttr": "T3" + }, + { + "name": "mode_override", + "description": "A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',\n'training', 'backward_pass_only'}. When set to 'unspecified', the mode set\nin TPUEmbeddingConfiguration is used, otherwise mode_override is used.", + "type": 7 + } + ] + }, + { + "name": "EnsureShape", + "summary": "Ensures that the tensor's shape matches the expected shape.", + "description": "Raises an error if the input tensor's shape does not match the specified shape.\nReturns the input tensor otherwise.", + "attributes": [ + { + "name": "shape", + "type": "shape", + "description": "The expected (possibly partially specified) shape of the input tensor." + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "description": "A tensor, whose shape is to be validated.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor with the same shape and contents as the input tensor or value.", + "typeAttr": "T" + } + ] + }, + { + "name": "Enter", + "summary": "Creates or finds a child frame, and makes `data` available to the child frame.", + "description": "This op is used together with `Exit` to create loops in the graph.\nThe unique `frame_name` is used by the `Executor` to identify frames. If\n`is_constant` is true, `output` is a constant in the child frame; otherwise\nit may be changed in the child frame. At most `parallel_iterations` iterations\nare run in parallel in the child frame.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "frame_name", + "type": "string", + "description": "The name of the child frame." + }, + { + "name": "is_constant", + "type": "boolean", + "description": "If true, the output is constant within the child frame.", + "default": false + }, + { + "name": "parallel_iterations", + "type": "int64", + "description": "The number of iterations allowed to run in parallel.", + "default": 10 + } + ], + "inputs": [ + { + "name": "data", + "description": "The tensor to be made available to the child frame.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The same tensor as `data`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Equal", + "summary": "Returns the truth value of (x == y) element-wise.", + "description": "*NOTE*: `Equal` supports broadcasting. 
More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n```python\nx = tf.constant([2, 4])\ny = tf.constant(2)\ntf.math.equal(x, y) ==> array([True, False])\n\nx = tf.constant([2, 4])\ny = tf.constant([2, 4])\ntf.math.equal(x, y) ==> array([True, True])\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "incompatible_shape_error", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "Erf", + "summary": "Computes the [Gauss error function](https://en.wikipedia.org/wiki/Error_function) of `x` element-wise. In statistics, for non-negative values of $x$, the error function has the following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and variance $1/2$, $erf(x)$ is the probability that $Y$ falls in the range $[\u2212x, x]$.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Erfc", + "summary": "Computes the complementary error function of `x` element-wise.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Erfinv", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "EuclideanNorm", + "summary": "Computes the Euclidean norm of elements across dimensions of a tensor.", + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If true, retain reduced dimensions with length 1.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "The tensor to reduce.", + "typeAttr": "T" + }, + { + "name": "reduction_indices", + "description": "The dimensions to reduce. 
Must be in the range\n`[-rank(input), rank(input))`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "The reduced tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "ExecuteTPUEmbeddingPartitioner", + "summary": "An op that executes the TPUEmbedding partitioner on the central configuration device.", + "description": "Computes the HBM size (in bytes) required for TPUEmbedding operation.", + "attributes": [ + { + "name": "config", + "type": "string", + "description": "A TPUEmbeddingConfiguration proto serialized to a string,\ndescribing the desired TPUEmbedding configuration." + } + ], + "outputs": [ + { + "name": "common_config", + "description": "A string-encoded common configuration proto\ncontaining metadata about the TPUEmbedding partitioner output and\nthe HBM size (in bytes) required for operation.", + "type": 7 + } + ] + }, + { + "name": "Exit", + "summary": "Exits the current frame to its parent frame.", + "description": "Exit makes its input `data` available to the parent frame.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "data", + "description": "The tensor to be made available to the parent frame.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The same tensor as `data`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Exp", + "summary": "Computes exponential of x element-wise. \\\\(y = e^x\\\\).", + "description": " This function computes the exponential of every element in the input tensor.\n i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor.\n `e` denotes Euler's number and is approximately equal to 2.718281.\n Output is positive for any real input.\n\n ```python\n x = tf.constant(2.0)\n tf.math.exp(x) ==> 7.389056\n\n x = tf.constant([2.0, 8.0])\n tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32)\n ```\n\n For complex numbers, the exponential value is calculated as follows:\n\n ```\n e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y)\n ```\n\n Let's consider complex number 1+1j as an example.\n e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j)\n\n ```python\n x = tf.constant(1 + 1j)\n tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j\n ```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "ExpandDims", + "summary": "Inserts a dimension of 1 into a tensor's shape.", + "description": "Given a tensor `input`, this operation inserts a dimension of 1 at the\ndimension index `dim` of `input`'s shape. The dimension index `dim` starts at\nzero; if you specify a negative number for `dim` it is counted backward from\nthe end.\n\nThis operation is useful if you want to add a batch dimension to a single\nelement. 
For example, if you have a single image of shape `[height, width,\nchannels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,\nwhich will make the shape `[1, height, width, channels]`.\n\nOther examples:\n\n```\n# 't' is a tensor of shape [2]\nshape(expand_dims(t, 0)) ==> [1, 2]\nshape(expand_dims(t, 1)) ==> [2, 1]\nshape(expand_dims(t, -1)) ==> [2, 1]\n\n# 't2' is a tensor of shape [2, 3, 5]\nshape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]\nshape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]\nshape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]\n```\n\nThis operation requires that:\n\n`-1-input.dims() <= dim <= input.dims()`\n\nThis operation is related to `squeeze()`, which removes dimensions of\nsize 1.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tdim", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "dim", + "description": "0-D (scalar). Specifies the dimension index at which to\nexpand the shape of `input`. Must be in the range\n`[-rank(input) - 1, rank(input)]`.", + "typeAttr": "Tdim" + } + ], + "outputs": [ + { + "name": "output", + "description": "Contains the same data as `input`, but its shape has an additional\ndimension of size 1 added.", + "typeAttr": "T" + } + ] + }, + { + "name": "ExperimentalAssertNextDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "transformations", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalAutoShardDataset", + "summary": "Creates a dataset that shards the input dataset.", + "description": "Creates a dataset that shards the input dataset by num_workers, returning a\nsharded dataset for the index-th worker. This attempts to automatically shard\na dataset by examining the Dataset graph and inserting a shard op before the\ninputs to a reader Dataset (e.g. 
CSVDataset, TFRecordDataset).\n\nThis dataset will throw a NotFound error if we cannot shard the dataset\nautomatically.", + "attributes": [ + { + "name": "auto_shard_policy", + "type": "int64", + "default": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "num_workers", + "description": "A scalar representing the number of workers to distribute this dataset across.", + "type": 9 + }, + { + "name": "index", + "description": "A scalar representing the index of the current worker out of num_workers.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalBytesProducedStatsDataset", + "summary": "Records the bytes size of each element of `input_dataset` in a StatsAggregator.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "tag", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalCSVDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1, + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`, `string`." + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "filenames", + "type": 7 + }, + { + "name": "compression_type", + "type": 7 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "header", + "type": 10 + }, + { + "name": "field_delim", + "type": 7 + }, + { + "name": "use_quote_delim", + "type": 10 + }, + { + "name": "na_value", + "type": 7 + }, + { + "name": "select_cols", + "type": 9 + }, + { + "name": "record_defaults", + "typeListAttr": "output_types" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalChooseFastestDataset", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 2 + }, + { + "name": "num_experiments", + "type": "int64" + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_datasets", + "numberAttr": "N", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalDatasetCardinality", + "summary": "Returns the cardinality of `input_dataset`.", + "description": "Returns the cardinality of `input_dataset`.", + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the dataset to return cardinality for.", + "type": 21 + } + ], + "outputs": [ + { + "name": "cardinality", + "description": "The cardinality of `input_dataset`. 
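As an illustration, a minimal sketch via the Python wrapper `tf.data.experimental.cardinality` (the stable API exposing the same information; this op itself is the experimental form):\n\n```python\nimport tensorflow as tf\n\nds = tf.data.Dataset.range(5)\nprint(tf.data.experimental.cardinality(ds).numpy())  # 5\nprint(tf.data.experimental.cardinality(ds.repeat()).numpy())  # -1 (INFINITE_CARDINALITY)\n```\n\n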
Named constants are used to represent\ninfinite and unknown cardinality.", + "type": 9 + } + ] + }, + { + "name": "ExperimentalDatasetToTFRecord", + "summary": "Writes the given dataset to the given file using the TFRecord format.", + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the dataset to write.", + "type": 21 + }, + { + "name": "filename", + "description": "A scalar string tensor representing the filename to use.", + "type": 7 + }, + { + "name": "compression_type", + "description": "A scalar string tensor containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\".", + "type": 7 + } + ] + }, + { + "name": "ExperimentalDenseToSparseBatchDataset", + "summary": "Creates a dataset that batches input elements into a SparseTensor.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A handle to an input dataset. Must have a single component.", + "type": 21 + }, + { + "name": "batch_size", + "description": "A scalar representing the number of elements to accumulate in a\nbatch.", + "type": 9 + }, + { + "name": "row_shape", + "description": "A vector representing the dense shape of each row in the produced\nSparseTensor. The shape may be partially specified, using `-1` to indicate\nthat a particular dimension should use the maximum size of all batch elements.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalDirectedInterleaveDataset", + "summary": "A substitute for `InterleaveDataset` on a fixed list of `N` datasets.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "selector_input_dataset", + "description": "A dataset of scalar `DT_INT64` elements that determines which of the\n`N` data inputs should produce the next output element.", + "type": 21 + }, + { + "name": "data_input_datasets", + "description": "`N` datasets with the same type that will be interleaved according to\nthe values of `selector_input_dataset`.", + "numberAttr": "N", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalGroupByReducerDataset", + "summary": "Creates a dataset that computes a group-by on `input_dataset`.", + "description": "Creates a dataset that computes a group-by on `input_dataset`.", + "attributes": [ + { + "name": "key_func", + "type": "function", + "description": "A function mapping an element of `input_dataset`, concatenated\nwith `key_func_other_arguments` to a scalar value of type DT_INT64." + }, + { + "name": "init_func", + "type": "function", + "description": "A function mapping a key of type DT_INT64, concatenated with\n`init_func_other_arguments` to the initial reducer state." + }, + { + "name": "reduce_func", + "type": "function", + "description": "A function mapping the current reducer state and an element of `input_dataset`,\nconcatenated with `reduce_func_other_arguments` to a new reducer state." + }, + { + "name": "finalize_func", + "type": "function", + "description": "A function mapping the final reducer state to an output element." 
+ }, + { + "name": "Tkey_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Tinit_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Treduce_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Tfinalize_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "key_func_other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `key_func`.", + "typeListAttr": "Tkey_func_other_arguments" + }, + { + "name": "init_func_other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `init_func`.", + "typeListAttr": "Tinit_func_other_arguments" + }, + { + "name": "reduce_func_other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `reduce_func`.", + "typeListAttr": "Treduce_func_other_arguments" + }, + { + "name": "finalize_func_other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `finalize_func`.", + "typeListAttr": "Tfinalize_func_other_arguments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalGroupByWindowDataset", + "summary": "Creates a dataset that computes a windowed group-by on `input_dataset`.", + "description": "// TODO(mrry): Support non-int64 keys.", + "attributes": [ + { + "name": "key_func", + "type": "function", + "description": "A function mapping an element of `input_dataset`, concatenated\nwith `key_func_other_arguments` to a scalar value of type DT_INT64." 
+ }, + { + "name": "reduce_func", + "type": "function" + }, + { + "name": "window_size_func", + "type": "function" + }, + { + "name": "Tkey_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Treduce_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Twindow_size_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "key_func_other_arguments", + "typeListAttr": "Tkey_func_other_arguments" + }, + { + "name": "reduce_func_other_arguments", + "typeListAttr": "Treduce_func_other_arguments" + }, + { + "name": "window_size_func_other_arguments", + "typeListAttr": "Twindow_size_func_other_arguments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalIgnoreErrorsDataset", + "summary": "Creates a dataset that contains the elements of `input_dataset` ignoring errors.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "log_warning", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalIteratorGetDevice", + "summary": "Returns the name of the device on which `resource` has been placed.", + "inputs": [ + { + "name": "resource", + "type": 20 + } + ], + "outputs": [ + { + "name": "device", + "type": 7 + } + ] + }, + { + "name": "ExperimentalLMDBDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "filenames", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalLatencyStatsDataset", + "summary": "Records the latency of producing `input_dataset` elements in a StatsAggregator.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "tag", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalMapAndBatchDataset", + "summary": "Creates a dataset that fuses mapping with batching.", + "description": "Creates a dataset that applies `f` to the outputs of `input_dataset` and then\nbatches `batch_size` of them.\n\nUnlike a \"MapDataset\", which applies `f` sequentially, this dataset invokes up\nto `batch_size * num_parallel_batches` copies of `f` in parallel.", + "attributes": [ + { + "name": "f", + "type": "function", + "description": "A function to apply to the outputs of `input_dataset`." 
+ }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "preserve_cardinality", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "other_arguments", + "description": "A list of tensors, typically values that were captured when building a closure\nfor `f`.", + "typeListAttr": "Targuments" + }, + { + "name": "batch_size", + "description": "A scalar representing the number of elements to accumulate in a\nbatch. It determines the number of concurrent invocations of `f` that process\nelements from `input_dataset` in parallel.", + "type": 9 + }, + { + "name": "num_parallel_calls", + "description": "A scalar representing the maximum number of parallel invocations of the `map_fn`\nfunction. Applying the `map_fn` on consecutive input elements in parallel has\nthe potential to improve input pipeline throughput.", + "type": 9 + }, + { + "name": "drop_remainder", + "description": "A scalar representing whether the last batch should be dropped in case its size\nis smaller than desired.", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalMapDataset", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "use_inter_op_parallelism", + "type": "boolean", + "default": true + }, + { + "name": "preserve_cardinality", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalMatchingFilesDataset", + "inputs": [ + { + "name": "patterns", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalMaxIntraOpParallelismDataset", + "summary": "Creates a dataset that overrides the maximum intra-op parallelism.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "max_intra_op_parallelism", + "description": "Identifies the maximum intra-op parallelism to use.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalNonSerializableDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalParallelInterleaveDataset", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "description": "The resulting dataset is similar to the `InterleaveDataset`, with the exception\nthat if retrieving the next value from a dataset would 
cause the requester to\nblock, it will skip that input dataset. This dataset is especially useful\nwhen loading data from variable-latency datastores (e.g. HDFS, GCS), as it\nallows the training step to proceed so long as some data is available.\n\n!! WARNING !! This dataset is not deterministic!", + "attributes": [ + { + "name": "f", + "type": "function", + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`." + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "name": "cycle_length", + "type": 9 + }, + { + "name": "block_length", + "type": 9 + }, + { + "name": "sloppy", + "type": 10 + }, + { + "name": "buffer_output_elements", + "type": 9 + }, + { + "name": "prefetch_input_elements", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalParseExampleDataset", + "summary": "Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.", + "attributes": [ + { + "name": "sparse_keys", + "type": "string[]", + "description": "A list of string keys in the examples features.\nThe results for these keys will be returned as `SparseTensor` objects.", + "minimum": 0 + }, + { + "name": "dense_keys", + "type": "string[]", + "description": "A list of Ndense string Tensors (scalars).\nThe keys expected in the Examples features associated with dense values.", + "minimum": 0 + }, + { + "name": "sparse_types", + "type": "type[]", + "description": "A list of `DTypes` of the same length as `sparse_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported. Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0 + }, + { + "name": "Tdense", + "type": "type[]", + "description": "A list of DTypes of the same length as `dense_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported.\n Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0 + }, + { + "name": "dense_shapes", + "type": "shape[]", + "description": "List of tuples with the same length as `dense_keys`.\nThe shape of the data for each dense feature referenced by `dense_keys`.\nRequired for any input tensors identified by `dense_keys`. Must be\neither fully defined, or may contain an unknown first dimension.\nAn unknown first dimension means the feature is treated as having\na variable number of blocks, and the output shape along this dimension\nis considered unknown at graph build time. 
Padding is applied for\nminibatch elements smaller than the maximum number of blocks for the\ngiven feature along this dimension.", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "description": "The type list for the return values.", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "description": "The list of shapes being produced.", + "minimum": 1 + }, + { + "name": "sloppy", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "num_parallel_calls", + "type": 9 + }, + { + "name": "dense_defaults", + "description": "A dict mapping string keys to `Tensor`s.\nThe keys of the dict must match the dense_keys of the feature.", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalPrivateThreadPoolDataset", + "summary": "Creates a dataset that uses a custom thread pool to compute `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "num_threads", + "description": "Identifies the number of threads to use for the private threadpool.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalRandomDataset", + "summary": "Creates a Dataset that returns pseudorandom numbers.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "seed", + "description": "A scalar seed for the random number generator. If either seed or\nseed2 is set to be non-zero, the random number generator is seeded\nby the given seed. Otherwise, a random seed is used.", + "type": 9 + }, + { + "name": "seed2", + "description": "A second scalar seed to avoid seed collision.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalRebatchDataset", + "summary": "Creates a dataset that changes the batch size.", + "description": "Creates a dataset that changes the batch size of the dataset to current batch\nsize // num_replicas.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "use_fallback", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "num_replicas", + "description": "A scalar representing the number of replicas to distribute this batch across. 
As\na result of this transformation the current batch size would end up being\ndivided by this parameter.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalScanDataset", + "summary": "Creates a dataset that successively reduces `f` over the elements of `input_dataset`.", + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "name": "Tstate", + "type": "type[]", + "minimum": 1 + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "preserve_cardinality", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "initial_state", + "typeListAttr": "Tstate" + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalSetStatsAggregatorDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "stats_aggregator", + "type": 20 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "counter_prefix", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalSleepDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "sleep_microseconds", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalSlidingWindowDataset", + "summary": "Creates a dataset that passes a sliding window over `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "window_size", + "description": "A scalar representing the number of elements in the\nsliding window.", + "type": 9 + }, + { + "name": "window_shift", + "description": "A scalar representing the steps moving the sliding window\nforward in one iteration. It must be positive.", + "type": 9 + }, + { + "name": "window_stride", + "description": "A scalar representing the stride of the input elements of the sliding window.\nIt must be positive.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalSqlDataset", + "summary": "Creates a dataset that executes a SQL query and emits rows of the result set.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "driver_name", + "description": "The database type. 
Currently, the only supported type is 'sqlite'.", + "type": 7 + }, + { + "name": "data_source_name", + "description": "A connection string to connect to the database.", + "type": 7 + }, + { + "name": "query", + "description": "A SQL query to execute.", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalStatsAggregatorHandle", + "summary": "Creates a statistics manager resource.", + "attributes": [ + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + }, + { + "name": "ExperimentalStatsAggregatorSummary", + "summary": "Produces a summary of any statistics recorded by the given statistics manager.", + "inputs": [ + { + "name": "iterator", + "type": 20 + } + ], + "outputs": [ + { + "name": "summary", + "type": 7 + } + ] + }, + { + "name": "ExperimentalTakeWhileDataset", + "summary": "Creates a dataset that stops iteration when `predicate` is false.", + "description": "The `predicate` function must return a scalar boolean and accept the\nfollowing arguments:\n\n* One tensor for each component of an element of `input_dataset`.\n* One tensor for each value in `other_arguments`.", + "attributes": [ + { + "name": "predicate", + "type": "function", + "description": "A function returning a scalar boolean." + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `predicate`.", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalThreadPoolDataset", + "summary": "Creates a dataset that uses a custom thread pool to compute `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "thread_pool", + "description": "A resource produced by the ThreadPoolHandle op.", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalThreadPoolHandle", + "summary": "Creates a custom thread pool resource that can be used to compute `input_dataset`.", + "attributes": [ + { + "name": "num_threads", + "type": "int64", + "description": "The number of threads in the thread pool." + }, + { + "name": "max_intra_op_parallelism", + "type": "int64", + "description": "The maximum degree of parallelism to use within operations that execute on this\nthreadpool.", + "default": 1 + }, + { + "name": "display_name", + "type": "string", + "description": "A human-readable name for the threads that may be visible in some\nvisualizations."
+ }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "A resource that can be consumed by one or more ExperimentalThreadPoolDataset\nops.", + "type": 20 + } + ] + }, + { + "name": "ExperimentalUnbatchDataset", + "summary": "A dataset that splits the elements of its input into multiple elements.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ExperimentalUniqueDataset", + "summary": "Creates a dataset that contains the unique elements of `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Expint", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Expm1", + "summary": "Computes `exp(x) - 1` element-wise.", + "description": " i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor.\n `e` denotes Euler's number and is approximately equal to 2.718281.\n\n ```python\n x = tf.constant(2.0)\n tf.math.expm1(x) ==> 6.389056\n\n x = tf.constant([2.0, 8.0])\n tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32)\n\n x = tf.constant(1 + 1j)\n tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j)\n ```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "ExtractGlimpse", + "summary": "Extracts a glimpse from the input tensor.", + "description": "Returns a set of windows called glimpses extracted at location\n`offsets` from the input tensor. If the windows only partially\noverlaps the inputs, the non overlapping areas will be filled with\nrandom noise.\n\nThe result is a 4-D tensor of shape `[batch_size, glimpse_height,\nglimpse_width, channels]`. The channels and batch dimensions are the\nsame as that of the input tensor. The height and width of the output\nwindows are specified in the `size` parameter.\n\nThe argument `normalized` and `centered` controls how the windows are built:\n\n* If the coordinates are normalized but not centered, 0.0 and 1.0\n correspond to the minimum and maximum of each height and width\n dimension.\n* If the coordinates are both normalized and centered, they range from\n -1.0 to 1.0. 
The coordinates (-1.0, -1.0) correspond to the upper\n left corner, the lower right corner is located at (1.0, 1.0) and the\n center is at (0, 0).\n* If the coordinates are not normalized they are interpreted as\n numbers of pixels.", + "attributes": [ + { + "name": "centered", + "type": "boolean", + "description": "indicates if the offset coordinates are centered relative to\nthe image, in which case the (0, 0) offset is relative to the center\nof the input images. If false, the (0,0) offset corresponds to the\nupper left corner of the input images.", + "default": true + }, + { + "name": "normalized", + "type": "boolean", + "description": "indicates if the offset coordinates are normalized.", + "default": true + }, + { + "name": "uniform_noise", + "type": "boolean", + "description": "indicates if the noise should be generated using a\nuniform distribution or a Gaussian distribution.", + "default": true + }, + { + "name": "noise", + "type": "string", + "description": "indicates if the noise should be `uniform`, `gaussian`, or\n`zero`. The default is `uniform` which means the noise type\nwill be decided by `uniform_noise`.", + "default": "uniform" + } + ], + "inputs": [ + { + "name": "input", + "description": "A 4-D float tensor of shape `[batch_size, height, width, channels]`.", + "type": 1 + }, + { + "name": "size", + "description": "A 1-D tensor of 2 elements containing the size of the glimpses\nto extract. The glimpse height must be specified first, followed\nby the glimpse width.", + "type": 3 + }, + { + "name": "offsets", + "description": "A 2-D integer tensor of shape `[batch_size, 2]` containing\nthe y, x locations of the center of each window.", + "type": 1 + } + ], + "outputs": [ + { + "name": "glimpse", + "description": "A tensor representing the glimpses `[batch_size,\nglimpse_height, glimpse_width, channels]`.", + "type": 1 + } + ] + }, + { + "name": "ExtractGlimpseV2", + "summary": "Extracts a glimpse from the input tensor.", + "description": "Returns a set of windows called glimpses extracted at location\n`offsets` from the input tensor. If the windows only partially\noverlap the inputs, the non-overlapping areas will be filled with\nrandom noise.\n\nThe result is a 4-D tensor of shape `[batch_size, glimpse_height,\nglimpse_width, channels]`. The channels and batch dimensions are the\nsame as that of the input tensor. The height and width of the output\nwindows are specified in the `size` parameter.\n\nThe arguments `normalized` and `centered` control how the windows are built:\n\n* If the coordinates are normalized but not centered, 0.0 and 1.0\n correspond to the minimum and maximum of each height and width\n dimension.\n* If the coordinates are both normalized and centered, they range from\n -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper\n left corner, the lower right corner is located at (1.0, 1.0) and the\n center is at (0, 0).\n* If the coordinates are not normalized they are interpreted as\n numbers of pixels.", + "attributes": [ + { + "name": "centered", + "type": "boolean", + "description": "indicates if the offset coordinates are centered relative to\nthe image, in which case the (0, 0) offset is relative to the center\nof the input images. 
If false, the (0,0) offset corresponds to the\nupper left corner of the input images.", + "default": true + }, + { + "name": "normalized", + "type": "boolean", + "description": "indicates if the offset coordinates are normalized.", + "default": true + }, + { + "name": "uniform_noise", + "type": "boolean", + "description": "indicates if the noise should be generated using a\nuniform distribution or a Gaussian distribution.", + "default": true + }, + { + "name": "noise", + "type": "string", + "description": "indicates if the noise should be `uniform`, `gaussian`, or\n`zero`. The default is `uniform` which means the noise type\nwill be decided by `uniform_noise`.", + "default": "uniform" + } + ], + "inputs": [ + { + "name": "input", + "description": "A 4-D float tensor of shape `[batch_size, height, width, channels]`.", + "type": 1 + }, + { + "name": "size", + "description": "A 1-D tensor of 2 elements containing the size of the glimpses\nto extract. The glimpse height must be specified first, followed\nby the glimpse width.", + "type": 3 + }, + { + "name": "offsets", + "description": "A 2-D integer tensor of shape `[batch_size, 2]` containing\nthe y, x locations of the center of each window.", + "type": 1 + } + ], + "outputs": [ + { + "name": "glimpse", + "description": "A tensor representing the glimpses `[batch_size,\nglimpse_height, glimpse_width, channels]`.", + "type": 1 + } + ] + }, + { + "name": "ExtractImagePatches", + "summary": "Extract `patches` from `images` and put them in the \"depth\" output dimension.", + "attributes": [ + { + "name": "ksizes", + "type": "int64[]", + "description": "The size of the sliding window for each dimension of `images`.", + "minimum": 4 + }, + { + "name": "strides", + "type": "int64[]", + "description": "How far the centers of two consecutive patches are in\nthe images. Must be: `[1, stride_rows, stride_cols, 1]`.", + "minimum": 4 + }, + { + "name": "rates", + "type": "int64[]", + "description": "Must be: `[1, rate_rows, rate_cols, 1]`. This is the\ninput stride, specifying how far two consecutive patch samples are in the\ninput. Equivalent to extracting patches with\n`patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by\nsubsampling them spatially by a factor of `rates`. This is equivalent to\n`rate` in dilated (a.k.a. Atrous) convolutions.", + "minimum": 4 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `complex64`, `complex128`, `bool`." + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + } + ], + "inputs": [ + { + "name": "images", + "description": "4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "patches", + "description": "4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *\nksize_cols * depth]` containing image patches with size\n`ksize_rows x ksize_cols x depth` vectorized in the \"depth\" dimension. 
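For illustration, a minimal sketch via the Python wrapper `tf.image.extract_patches` (assumed here to lower to this op):\n\n```python\nimport tensorflow as tf\n\n# One 10x10 single-channel image; take 3x3 patches every 5 pixels.\nimages = tf.reshape(tf.range(100.0), [1, 10, 10, 1])\npatches = tf.image.extract_patches(images, sizes=[1, 3, 3, 1],\n    strides=[1, 5, 5, 1], rates=[1, 1, 1, 1], padding='VALID')\nprint(patches.shape)  # (1, 2, 2, 9)\n```\n\n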
Note\n`out_rows` and `out_cols` are the dimensions of the output patches.", + "typeAttr": "T" + } + ] + }, + { + "name": "ExtractJpegShape", + "summary": "Extract the shape information of a JPEG-encoded image.", + "description": "This op only parses the image header, so it is much faster than DecodeJpeg.", + "attributes": [ + { + "name": "output_type", + "type": "type", + "description": "(Optional) The output type of the operation (int32 or int64).\nDefaults to int32. Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "contents", + "description": "0-D. The JPEG-encoded image.", + "type": 7 + } + ], + "outputs": [ + { + "name": "image_shape", + "description": "1-D. The image shape with format [height, width, channels].", + "typeAttr": "output_type" + } + ] + }, + { + "name": "ExtractVolumePatches", + "summary": "Extract `patches` from `input` and put them in the `\"depth\"` output dimension. 3D extension of `extract_image_patches`.", + "attributes": [ + { + "name": "ksizes", + "type": "int64[]", + "description": "The size of the sliding window for each dimension of `input`.", + "minimum": 5 + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D of length 5. How far the centers of two consecutive patches are in\n`input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.", + "minimum": 5 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use.\n\nThe size-related attributes are specified as follows:\n\n```python\nksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]\nstrides = [1, stride_planes, strides_rows, strides_cols, 1]\n``` Must be one of the following: `SAME`, `VALID`." + } + ], + "inputs": [ + { + "name": "input", + "description": "5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "patches", + "description": "5-D Tensor with shape `[batch, out_planes, out_rows, out_cols,\nksize_planes * ksize_rows * ksize_cols * depth]` containing patches\nwith size `ksize_planes x ksize_rows x ksize_cols x depth` vectorized\nin the \"depth\" dimension. Note `out_planes`, `out_rows` and `out_cols`\nare the dimensions of the output patches.", + "typeAttr": "T" + } + ] + }, + { + "name": "FFT", + "summary": "Fast Fourier transform.", + "description": "Computes the 1-dimensional discrete Fourier transform over the inner-most\ndimension of `input`.", + "attributes": [ + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex tensor of the same shape as `input`. 
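For example, via the Python wrapper `tf.signal.fft` (a minimal sketch):\n\n```python\nimport tensorflow as tf\n\nx = tf.constant([1 + 0j, 2 + 0j, 3 + 0j, 4 + 0j], dtype=tf.complex64)\nprint(tf.signal.fft(x).numpy())  # [10.+0.j -2.+2.j -2.+0.j -2.-2.j]\n```\n\n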
The inner-most\n dimension of `input` is replaced with its 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fft\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "FFT2D", + "summary": "2D fast Fourier transform.", + "description": "Computes the 2-dimensional discrete Fourier transform over the inner-most\n2 dimensions of `input`.", + "attributes": [ + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex tensor of the same shape as `input`. The inner-most 2\n dimensions of `input` are replaced with their 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fft2\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "FFT3D", + "summary": "3D fast Fourier transform.", + "description": "Computes the 3-dimensional discrete Fourier transform over the inner-most 3\ndimensions of `input`.", + "attributes": [ + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex tensor of the same shape as `input`. The inner-most 3\n dimensions of `input` are replaced with their 3D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fftn with 3 dimensions.\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "FFTND", + "summary": "ND fast Fourier transform.", + "description": "Computes the n-dimensional discrete Fourier transform over\ndesignated dimensions of `input`. The designated dimensions of\n`input` are assumed to be the result of `FFTND`.\n\nIf fft_length[i] < shape(input)[i], the input is cropped. If\nfft_length[i] > shape(input)[i], the input is padded with zeros. If fft_length\nis not given, the default shape(input) is used.\n\nAxes are the dimensions to perform the transform on. Default is to perform on\nall axes.", + "attributes": [ + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + }, + { + "name": "fft_length", + "description": "An int32 tensor. The FFT length for each dimension.", + "type": 3 + }, + { + "name": "axes", + "description": "An int32 tensor with the same shape as fft_length. Axes to perform the transform.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex tensor of the same shape as `input`. The designated\ndimensions of `input` are replaced with their Fourier transforms.\n\n@compatibility(numpy)\nEquivalent to np.fft.fftn.\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "FIFOQueue", + "summary": "A queue that produces elements in first-in first-out order.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a value.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shape of each component in a value. 
The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0, + "default": [] + }, + { + "name": "capacity", + "type": "int64", + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "default": -1 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the queue.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "FIFOQueueV2", + "summary": "A queue that produces elements in first-in first-out order.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a value.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0, + "default": [] + }, + { + "name": "capacity", + "type": "int64", + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "default": -1 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the queue.", + "type": 20 + } + ] + }, + { + "name": "Fact", + "summary": "Output a fact about factorials.", + "outputs": [ + { + "name": "fact", + "type": 7 + } + ] + }, + { + "name": "FakeParam", + "summary": " This op is used as a placeholder in If branch functions. It doesn't provide a\n valid output when run, so must either be removed (e.g. replaced with a\n function input) or guaranteed not to be used (e.g. if mirroring an\n intermediate output needed for the gradient computation of the other branch).", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output." + }, + { + "name": "shape", + "type": "shape", + "description": " The purported shape of the output. This is only used for shape inference;\n the output will not necessarily have this shape. Can be a partial shape." + } + ], + "outputs": [ + { + "name": "output", + "description": " \\\"Fake\\\" output value. 
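A minimal sketch of the first-in first-out contract described above, assuming TensorFlow 2.x eager execution; `tf.queue.FIFOQueue` is the Python wrapper backed by `FIFOQueueV2`:

```python
import tensorflow as tf

# Scalar float32 elements, at most 3 queued at once.
q = tf.queue.FIFOQueue(capacity=3, dtypes=tf.float32, shapes=[[]])

q.enqueue(1.0)
q.enqueue_many(tf.constant([2.0, 3.0]))

# Elements come back in first-in first-out order.
print(q.dequeue().numpy())        # 1.0
print(q.dequeue_many(2).numpy())  # [2. 3.]
```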
This should not be consumed by another op.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "FakeQuantWithMinMaxArgs", + "summary": "Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same shape and type.", + "description": "\n Quantization is called fake since the output is still in floating point.\n The API converts inputs into values within the range `[min; max]` and returns\n them as output.\n\nAttributes\n\n* `[min; max]` define the clamping range for the `inputs` data.\n* `inputs` values are quantized into the quantization range (\n`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`\nwhen it is true) and then de-quantized and output as floats in `[min; max]`\ninterval.\n* `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.\n\nBefore quantization, `min` and `max` values are adjusted with the following\nlogic.\nIt is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,\nthe behavior can be unexpected:\n\n* If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.\n* If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.\n* If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,\n`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.\n\n\nExamples\n\n```python\n\ninp = tf.constant([10.03, -10.23, 3])\nout = tf.quantization.fake_quant_with_min_max_args(inp, min=-5, max=5,\n num_bits=16)\nprint(out)\n\n# Output:\n# tf.Tensor([ 4.9999237 -5.0000763 3.0000763], shape=(3,), dtype=float32)\n```\n\nRaises:\n * InvalidArgumentError:\n - If `num_bits` is outside of the range [2, 16].\n - If min >= max.\n * ValueError: If `inputs` is of any type other than float32.\n", + "attributes": [ + { + "name": "min", + "type": "float32", + "default": -6.0 + }, + { + "name": "max", + "type": "float32", + "default": 6.0 + }, + { + "name": "num_bits", + "type": "int64", + "default": 8 + }, + { + "name": "narrow_range", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "inputs", + "type": 1 + } + ], + "outputs": [ + { + "name": "outputs", + "type": 1 + } + ] + }, + { + "name": "FakeQuantWithMinMaxArgsGradient", + "summary": "Compute gradients for a FakeQuantWithMinMaxArgs operation.", + "attributes": [ + { + "name": "min", + "type": "float32", + "default": -6.0 + }, + { + "name": "max", + "type": "float32", + "default": 6.0 + }, + { + "name": "num_bits", + "type": "int64", + "default": 8 + }, + { + "name": "narrow_range", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "gradients", + "description": "Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.", + "type": 1 + }, + { + "name": "inputs", + "description": "Values passed as inputs to the FakeQuantWithMinMaxArgs operation.", + "type": 1 + } + ], + "outputs": [ + { + "name": "backprops", + "description": "Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:\n`gradients * (inputs >= min && inputs <= max)`.", + "type": 1 + } + ] + }, + { + "name": "FakeQuantWithMinMaxVars", + "summary": "Fake-quantize the 'inputs' tensor of type float via global float scalars", + "description": "Fake-quantize the `inputs` tensor of type float via global float scalars\n`min` and `max` to `outputs` tensor of same shape as `inputs`.\n\nAttributes\n\n* `[min; max]` define the clamping range for the `inputs` data.\n* `inputs` values are quantized into the quantization range (\n`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`\nwhen it is true)
and then de-quantized and output as floats in `[min; max]`\ninterval.\n* `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.\n\nBefore quantization, `min` and `max` values are adjusted with the following\nlogic.\nIt is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,\nthe behavior can be unexpected:\n\n* If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.\n* If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.\n* If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,\n`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.\n\nThis operation has a gradient and thus allows for training `min` and `max`\nvalues.", + "attributes": [ + { + "name": "num_bits", + "type": "int64", + "default": 8 + }, + { + "name": "narrow_range", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "inputs", + "type": 1 + }, + { + "name": "min", + "type": 1 + }, + { + "name": "max", + "type": 1 + } + ], + "outputs": [ + { + "name": "outputs", + "type": 1 + } + ] + }, + { + "name": "FakeQuantWithMinMaxVarsGradient", + "summary": "Compute gradients for a FakeQuantWithMinMaxVars operation.", + "attributes": [ + { + "name": "num_bits", + "type": "int64", + "description": "The bitwidth of the quantization; between 2 and 8, inclusive.", + "default": 8 + }, + { + "name": "narrow_range", + "type": "boolean", + "description": "Whether to quantize into 2^num_bits - 1 distinct values.", + "default": false + } + ], + "inputs": [ + { + "name": "gradients", + "description": "Backpropagated gradients above the FakeQuantWithMinMaxVars operation.", + "type": 1 + }, + { + "name": "inputs", + "description": "Values passed as inputs to the FakeQuantWithMinMaxVars operation.\nmin, max: Quantization interval, scalar floats.", + "type": 1 + }, + { + "name": "min", + "type": 1 + }, + { + "name": "max", + "type": 1 + } + ], + "outputs": [ + { + "name": "backprops_wrt_input", + "description": "Backpropagated gradients w.r.t. inputs:\n`gradients * (inputs >= min && inputs <= max)`.", + "type": 1 + }, + { + "name": "backprop_wrt_min", + "description": "Backpropagated gradients w.r.t. min parameter:\n`sum(gradients * (inputs < min))`.", + "type": 1 + }, + { + "name": "backprop_wrt_max", + "description": "Backpropagated gradients w.r.t. max parameter:\n`sum(gradients * (inputs > max))`.", + "type": 1 + } + ] + }, + { + "name": "FakeQuantWithMinMaxVarsPerChannel", + "summary": "Fake-quantize the 'inputs' tensor of type float via per-channel floats", + "description": "Fake-quantize the `inputs` tensor of type float per-channel and one of the\nshapes: `[d]`, `[b, d]`, `[b, h, w, d]` via per-channel floats `min` and `max`\nof shape `[d]` to `outputs` tensor of same shape as `inputs`.\n\nAttributes\n\n* `[min; max]` define the clamping range for the `inputs` data.\n* `inputs` values are quantized into the quantization range (\n`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`\nwhen it is true) and then de-quantized and output as floats in `[min; max]`\ninterval.\n* `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.\n\nBefore quantization, `min` and `max` values are adjusted with the following\nlogic.\nIt is suggested to have `min <= 0 <= max`.
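The `min`/`max` adjustment rules repeated in these FakeQuant descriptions can be restated in plain Python. A sketch of the three nudging rules exactly as written above (my own helper, not the kernel's actual code):

```python
def adjust_range(min_val, max_val, num_bits=8):
    """Nudge [min_val, max_val] following the three rules above."""
    if 0 < min_val < max_val:   # range entirely positive: shift to start at 0
        return 0.0, max_val - min_val
    if min_val < max_val < 0:   # range entirely negative: shift to end at 0
        return min_val - max_val, 0.0
    # min_val <= 0 <= max_val: snap min to a multiple of the scale.
    scale = (max_val - min_val) / (2 ** num_bits - 1)
    min_adj = scale * round(min_val / scale)
    return min_adj, max_val + min_adj - min_val

# Reproduces the adjusted range behind the fake_quant example above.
print(adjust_range(-5.0, 5.0, num_bits=16))  # ~(-5.0000763, 4.9999237)
```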
If `0` is not in the range of values,\nthe behavior can be unexpected:\n\n* If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.\n* If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.\n* If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,\n`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.\n\nThis operation has a gradient and thus allows for training `min` and `max`\nvalues.", + "attributes": [ + { + "name": "num_bits", + "type": "int64", + "default": 8 + }, + { + "name": "narrow_range", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "inputs", + "type": 1 + }, + { + "name": "min", + "type": 1 + }, + { + "name": "max", + "type": 1 + } + ], + "outputs": [ + { + "name": "outputs", + "type": 1 + } + ] + }, + { + "name": "FakeQuantWithMinMaxVarsPerChannelGradient", + "summary": "Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.", + "attributes": [ + { + "name": "num_bits", + "type": "int64", + "description": "The bitwidth of the quantization; between 2 and 16, inclusive.", + "default": 8 + }, + { + "name": "narrow_range", + "type": "boolean", + "description": "Whether to quantize into 2^num_bits - 1 distinct values.", + "default": false + } + ], + "inputs": [ + { + "name": "gradients", + "description": "Backpropagated gradients above the FakeQuantWithMinMaxVars operation,\nshape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.", + "type": 1 + }, + { + "name": "inputs", + "description": "Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape\n same as `gradients`.\nmin, max: Quantization interval, floats of shape `[d]`.", + "type": 1 + }, + { + "name": "min", + "type": 1 + }, + { + "name": "max", + "type": 1 + } + ], + "outputs": [ + { + "name": "backprops_wrt_input", + "description": "Backpropagated gradients w.r.t. inputs, shape same as\n`inputs`:\n `gradients * (inputs >= min && inputs <= max)`.", + "type": 1 + }, + { + "name": "backprop_wrt_min", + "description": "Backpropagated gradients w.r.t. min parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs < min))`.", + "type": 1 + }, + { + "name": "backprop_wrt_max", + "description": "Backpropagated gradients w.r.t. max parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs > max))`.", + "type": 1 + } + ] + }, + { + "name": "FakeQueue", + "summary": "Deprecated. Do not use.", + "inputs": [ + { + "name": "resource", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "FileSystemSetConfiguration", + "summary": "Set configuration of the file system.", + "inputs": [ + { + "name": "scheme", + "description": "File system scheme.", + "type": 7 + }, + { + "name": "key", + "description": "The name of the configuration option.", + "type": 7 + }, + { + "name": "value", + "description": "The value of the configuration option.", + "type": 7 + } + ] + }, + { + "name": "Fill", + "summary": "Creates a tensor filled with a scalar value.", + "description": "This operation creates a tensor of shape `dims` and fills it with `value`.\n\nFor example:\n\n```\n# Output tensor has shape [2, 3].\nfill([2, 3], 9) ==> [[9, 9, 9]\n [9, 9, 9]]\n```\n\n`tf.fill` differs from `tf.constant` in a few ways:\n\n* `tf.fill` only supports scalar contents, whereas `tf.constant` supports\n Tensor values.\n* `tf.fill` creates an Op in the computation graph that constructs the actual\n Tensor value at runtime. 
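A short usage sketch for the per-channel variant, assuming TensorFlow 2.x; `tf.quantization.fake_quant_with_min_max_vars_per_channel` is the usual wrapper:

```python
import tensorflow as tf

# Last dimension d = 2, so one clamping range per channel.
inputs = tf.constant([[0.1, 7.0],
                      [-1.3, 2.5]])
min_per_channel = tf.constant([-1.0, 0.0])
max_per_channel = tf.constant([1.0, 6.0])

out = tf.quantization.fake_quant_with_min_max_vars_per_channel(
    inputs, min_per_channel, max_per_channel, num_bits=8)

# Each channel is clamped and snapped to its own quantization grid,
# e.g. 7.0 in channel 1 comes back clamped near 6.0.
print(out.numpy())
```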
This is in contrast to `tf.constant` which embeds\n the entire Tensor into the graph with a `Const` node.\n* Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes\n based on other runtime Tensors, unlike `tf.constant`.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "index_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "dims", + "description": "1-D. Represents the shape of the output tensor.", + "typeAttr": "index_type" + }, + { + "name": "value", + "description": "0-D (scalar). Value to fill the returned tensor.\n\n@compatibility(numpy)\nEquivalent to np.full\n@end_compatibility", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "FilterByLastComponentDataset", + "summary": "Creates a dataset containing the first component of each element of `input_dataset` whose last component is `true`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "output", + "type": 21 + } + ] + }, + { + "name": "FilterDataset", + "summary": "Creates a dataset containing elements of `input_dataset` matching `predicate`.", + "description": "The `predicate` function must return a scalar boolean and accept the\nfollowing arguments:\n\n* One tensor for each component of an element of `input_dataset`.\n* One tensor for each value in `other_arguments`.", + "attributes": [ + { + "name": "predicate", + "type": "function", + "description": "A function returning a scalar boolean."
+ }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `predicate`.", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "FinalizeDataset", + "summary": "Creates a dataset by applying `tf.data.Options` to `input_dataset`.", + "attributes": [ + { + "name": "has_captured_ref", + "type": "boolean", + "default": false + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "FinalizeTPUEmbedding", + "summary": "An op that finalizes the TPUEmbedding configuration.", + "inputs": [ + { + "name": "common_config", + "description": "A string-encoded common configuration proto containing metadata\nabout the TPUEmbedding partitioner output and the HBM size (in bytes) required\nfor operation.", + "type": 7 + }, + { + "name": "memory_config", + "description": "A string-encoded memory config proto containing metadata about\nthe memory allocations reserved for TPUEmbedding.", + "type": 7 + } + ] + }, + { + "name": "Fingerprint", + "summary": "Generates fingerprint values.", + "description": "Generates fingerprint values of `data`.\n\nFingerprint op considers the first dimension of `data` as the batch dimension,\nand `output[i]` contains the fingerprint value generated from contents in\n`data[i, ...]` for all `i`.\n\nFingerprint op writes fingerprint values as byte arrays. For example, the\ndefault method `farmhash64` generates a 64-bit fingerprint value at a time.\nThis 8-byte value is written out as an `uint8` array of size 8, in little-endian\norder.\n\nFor example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4),\nand that the fingerprint method is `farmhash64`. In this case, the output shape\nis (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of\neach fingerprint value in bytes. `output[0, :]` is generated from 12 integers in\n`data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers\nin `data[1, :, :]`.\n\nNote that this op fingerprints the raw underlying buffer, and it does not\nfingerprint Tensor's metadata such as data type and/or shape. For example, the\nfingerprint values are invariant under reshapes and bitcasts as long as the\nbatch dimension remain the same:\n\n```\nFingerprint(data) == Fingerprint(Reshape(data, ...))\nFingerprint(data) == Fingerprint(Bitcast(data, ...))\n```\n\nFor string data, one should expect `Fingerprint(data) !=\nFingerprint(ReduceJoin(data))` in general.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "This can be a POD-type or string type." + } + ], + "inputs": [ + { + "name": "data", + "description": "Must have rank 1 or higher.", + "typeAttr": "T" + }, + { + "name": "method", + "description": "Fingerprint method used by this op. 
The currently available method is\n`farmhash::fingerprint64`.", + "type": 7 + } + ], + "outputs": [ + { + "name": "fingerprint", + "description": "A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals to\n`data`'s first dimension, and the second dimension size depends on the\nfingerprint algorithm.", + "type": 4 + } + ] + }, + { + "name": "FixedLengthRecordDataset", + "summary": "Creates a dataset that emits the records from one or more binary files.", + "attributes": [ + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "filenames", + "description": "A scalar or a vector containing the name(s) of the file(s) to be\nread.", + "type": 7 + }, + { + "name": "header_bytes", + "description": "A scalar representing the number of bytes to skip at the\nbeginning of a file.", + "type": 9 + }, + { + "name": "record_bytes", + "description": "A scalar representing the number of bytes in each record.", + "type": 9 + }, + { + "name": "footer_bytes", + "description": "A scalar representing the number of bytes to skip at the end\nof a file.", + "type": 9 + }, + { + "name": "buffer_size", + "description": "A scalar representing the number of bytes to buffer. Must be > 0.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "FixedLengthRecordDatasetV2", + "attributes": [ + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "filenames", + "type": 7 + }, + { + "name": "header_bytes", + "type": 9 + }, + { + "name": "record_bytes", + "type": 9 + }, + { + "name": "footer_bytes", + "type": 9 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "compression_type", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "FixedLengthRecordReader", + "summary": "A Reader that outputs fixed-length records from a file.", + "attributes": [ + { + "name": "header_bytes", + "type": "int64", + "description": "Number of bytes in the header, defaults to 0.", + "default": 0 + }, + { + "name": "record_bytes", + "type": "int64", + "description": "Number of bytes in the record." + }, + { + "name": "footer_bytes", + "type": "int64", + "description": "Number of bytes in the footer, defaults to 0.", + "default": 0 + }, + { + "name": "hop_bytes", + "type": "int64", + "description": "Number of bytes to hop before each read. Default of 0 means using\nrecord_bytes.", + "default": 0 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "default": "" + } + ], + "outputs": [ + { + "name": "reader_handle", + "description": "The handle to reference the Reader.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "FixedLengthRecordReaderV2", + "summary": "A Reader that outputs fixed-length records from a file.", + "attributes": [ + { + "name": "header_bytes", + "type": "int64", + "description": "Number of bytes in the header, defaults to 0.", + "default": 0 + }, + { + "name": "record_bytes", + "type": "int64", + "description": "Number of bytes in the record."
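The batch-wise fingerprinting and reshape-invariance described above can be checked directly. A sketch assuming TensorFlow 2.x, where `tf.fingerprint` wraps this op with the default `farmhash64` method:

```python
import tensorflow as tf

data = tf.reshape(tf.range(24, dtype=tf.int32), [2, 3, 4])

fp = tf.fingerprint(data)  # method defaults to "farmhash64"
print(fp.shape)            # (2, 8): one 8-byte value per batch element

# Fingerprints cover the raw buffer, not metadata: reshaping below the
# batch dimension leaves them unchanged.
fp2 = tf.fingerprint(tf.reshape(data, [2, 12]))
print(bool(tf.reduce_all(fp == fp2)))  # True
```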
+ }, + { + "name": "footer_bytes", + "type": "int64", + "description": "Number of bytes in the footer, defaults to 0.", + "default": 0 + }, + { + "name": "hop_bytes", + "type": "int64", + "description": "Number of bytes to hop before each read. Default of 0 means using\nrecord_bytes.", + "default": 0 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "default": "" + }, + { + "name": "encoding", + "type": "string", + "description": "The type of encoding for the file. Currently ZLIB and GZIP\nare supported. Defaults to none.", + "default": "" + } + ], + "outputs": [ + { + "name": "reader_handle", + "description": "The handle to reference the Reader.", + "type": 20 + } + ] + }, + { + "name": "FixedUnigramCandidateSampler", + "summary": "Generates labels for candidate sampling with a learned unigram distribution.", + "description": "A unigram sampler could use a fixed unigram distribution read from a\nfile or passed in as an in-memory array instead of building up the distribution\nfrom data on the fly. There is also an option to skew the distribution by\napplying a distortion power to the weights.\n\nThe vocabulary file should be in CSV-like format, with the last field\nbeing the weight associated with the word.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.", + "attributes": [ + { + "name": "num_true", + "type": "int64", + "description": "Number of true labels per context.", + "minimum": 1 + }, + { + "name": "num_sampled", + "type": "int64", + "description": "Number of candidates to randomly sample.", + "minimum": 1 + }, + { + "name": "unique", + "type": "boolean", + "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." + }, + { + "name": "range_max", + "type": "int64", + "description": "The sampler will sample integers from the interval [0, range_max).", + "minimum": 1 + }, + { + "name": "vocab_file", + "type": "string", + "description": "Each valid line in this file (which should have a CSV-like format)\ncorresponds to a valid word ID. IDs are in sequential order, starting from\nnum_reserved_ids. The last entry in each line is expected to be a value\ncorresponding to the count or relative probability. Exactly one of vocab_file\nand unigrams needs to be passed to this op.", + "default": "" + }, + { + "name": "distortion", + "type": "float32", + "description": "The distortion is used to skew the unigram probability distribution.\nEach weight is first raised to the distortion's power before adding to the\ninternal unigram distribution. 
As a result, distortion = 1.0 gives regular\nunigram sampling (as defined by the vocab file), and distortion = 0.0 gives\na uniform distribution.", + "default": 1.0 + }, + { + "name": "num_reserved_ids", + "type": "int64", + "description": "Optionally some reserved IDs can be added in the range [0,\n..., num_reserved_ids) by the users. One use case is that a special unknown\nword token is used as ID 0. These IDs will have a sampling probability of 0.", + "default": 0 + }, + { + "name": "num_shards", + "type": "int64", + "description": "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with 'shard') indicates the number of partitions that are being\nused in the overall computation.", + "minimum": 1, + "default": 1 + }, + { + "name": "shard", + "type": "int64", + "description": "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with 'num_shards') indicates the particular partition number of a\nsampler op, when partitioning is being used.", + "minimum": 0, + "default": 0 + }, + { + "name": "unigrams", + "type": "float32[]", + "description": "A list of unigram counts or probabilities, one per ID in sequential\norder. Exactly one of vocab_file and unigrams should be passed to this op.", + "default": [] + }, + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + } + ], + "inputs": [ + { + "name": "true_classes", + "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.", + "type": 9 + } + ], + "outputs": [ + { + "name": "sampled_candidates", + "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.", + "type": 9 + }, + { + "name": "true_expected_count", + "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.", + "type": 1 + }, + { + "name": "sampled_expected_count", + "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability.", + "type": 1 + } + ] + }, + { + "name": "FlatMapDataset", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "description": "Unlike MapDataset, the `f` in FlatMapDataset is expected to return a\nDataset variant, and FlatMapDataset will flatten successive results\ninto a single Dataset.", + "attributes": [ + { + "name": "f", + "type": "function", + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`."
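A usage sketch for this sampler, assuming TensorFlow 2.x; `tf.random.fixed_unigram_candidate_sampler` is the wrapper, here passing `unigrams` in-memory instead of `vocab_file` (all values below are illustrative):

```python
import tensorflow as tf

# Batch of 2 examples, one true class each; vocabulary of 5 IDs whose
# relative weights are passed in-memory via `unigrams`.
true_classes = tf.constant([[0], [3]], dtype=tf.int64)

sampled, true_expected, sampled_expected = (
    tf.random.fixed_unigram_candidate_sampler(
        true_classes=true_classes,
        num_true=1,
        num_sampled=3,
        unique=True,
        range_max=5,
        unigrams=[10.0, 5.0, 2.0, 2.0, 1.0],
        distortion=1.0))

print(sampled.numpy())           # 3 distinct candidate IDs in [0, 5)
print(sampled_expected.numpy())  # probabilities, since unique=True
```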
+ }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Floor", + "summary": "Returns element-wise largest integer not greater than x.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "FloorDiv", + "summary": "Returns x // y element-wise.", + "description": "*NOTE*: `FloorDiv` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "FloorMod", + "summary": "Returns element-wise remainder of division.", + "description": "This follows Python semantics in that the\nresult here is consistent with a flooring divide. E.g.\n`floor(x / y) * y + floormod(x, y) = x`, regardless of the signs of x and y.\n\n*NOTE*: `FloorMod` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "FlushSummaryWriter", + "inputs": [ + { + "name": "writer", + "type": 20 + } + ] + }, + { + "name": "For", + "summary": "Applies a for loop.", + "description": " ```python\n output = input;\n for i in range(start, limit, delta)\n output = body(i, output);\n ```", + "attributes": [ + { + "name": "T", + "type": "type[]", + "description": "A list of dtypes.", + "minimum": 0 + }, + { + "name": "body", + "type": "function", + "description": " A function that takes a list of tensors (int32, T) and returns another\n list of tensors (T)." + } + ], + "inputs": [ + { + "name": "start", + "description": "The lower bound. An int32", + "type": 3 + }, + { + "name": "limit", + "description": "The upper bound. An int32", + "type": 3 + }, + { + "name": "delta", + "description": "The increment. 
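The flooring identity quoted for `FloorMod` is easy to verify with mixed signs. A sketch assuming TensorFlow 2.x, where `//` lowers to `FloorDiv` and `tf.math.floormod` to `FloorMod`:

```python
import tensorflow as tf

x = tf.constant([7, -7, 7, -7])
y = tf.constant([3, 3, -3, -3])

q = x // y                  # FloorDiv: [ 2, -3, -3,  2]
r = tf.math.floormod(x, y)  # FloorMod: [ 1,  2, -2, -1]

# floor(x / y) * y + floormod(x, y) == x, regardless of signs.
print(bool(tf.reduce_all(q * y + r == x)))  # True
```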
An int32", + "type": 3 + }, + { + "name": "input", + "description": "A list of input tensors whose types are T.", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of output tensors whose types are T.", + "typeListAttr": "T" + } + ] + }, + { + "name": "FractionalAvgPool", + "summary": "Performs fractional average pooling on the input.", + "description": "Fractional average pooling is similar to Fractional max pooling in the pooling\nregion generation step. The only difference is that after pooling regions are\ngenerated, a mean operation is performed instead of a max operation in each\npooling region.", + "attributes": [ + { + "name": "pooling_ratio", + "type": "float32[]", + "description": "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don't allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively.", + "minimum": 4 + }, + { + "name": "pseudo_random", + "type": "boolean", + "description": "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random.", + "default": false + }, + { + "name": "overlapping", + "type": "boolean", + "description": "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling.", + "default": false + }, + { + "name": "deterministic", + "type": "boolean", + "description": "When set to True, a fixed pooling region will be used when\niterating over a FractionalAvgPool node in the computation graph. Mainly used\nin unit test to make FractionalAvgPool deterministic.", + "default": false + }, + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "An second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "value", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "output tensor after fractional avg pooling.", + "typeAttr": "T" + }, + { + "name": "row_pooling_sequence", + "description": "row pooling sequence, needed to calculate gradient.", + "type": 9 + }, + { + "name": "col_pooling_sequence", + "description": "column pooling sequence, needed to calculate gradient.", + "type": 9 + } + ] + }, + { + "name": "FractionalAvgPoolGrad", + "summary": "Computes gradient of the FractionalAvgPool function.", + "description": "Unlike FractionalMaxPoolGrad, we don't need to find arg_max for\nFractionalAvgPoolGrad, we just need to evenly back-propagate each element of\nout_backprop to those indices that form the same pooling cell. 
Therefore, we\njust need to know the shape of the original input tensor, instead of the whole\ntensor.", + "attributes": [ + { + "name": "overlapping", + "type": "boolean", + "description": "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "orig_input_tensor_shape", + "description": "Original input tensor shape for `fractional_avg_pool`", + "type": 9 + }, + { + "name": "out_backprop", + "description": "4-D with shape `[batch, height, width, channels]`. Gradients\nw.r.t. the output of `fractional_avg_pool`.", + "typeAttr": "T" + }, + { + "name": "row_pooling_sequence", + "description": "row pooling sequence, form pooling region with\ncol_pooling_sequence.", + "type": 9 + }, + { + "name": "col_pooling_sequence", + "description": "column pooling sequence, form pooling region with\nrow_pooling sequence.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D. Gradients w.r.t. the input of `fractional_avg_pool`.", + "typeAttr": "T" + } + ] + }, + { + "name": "FractionalMaxPool", + "summary": "Performs fractional max pooling on the input.", + "description": "Fractional max pooling is slightly different than regular max pooling. In\nregular max pooling, you downsize an input set by taking the maximum value of\nsmaller N x N subsections of the set (often 2x2), and try to reduce the set by\na factor of N, where N is an integer. Fractional max pooling, as you might\nexpect from the word \"fractional\", means that the overall reduction ratio N\ndoes not have to be an integer.\n\nThe sizes of the pooling regions are generated randomly but are fairly uniform.\nFor example, let's look at the height dimension, and the constraints on the\nlist of rows that will be pool boundaries.\n\nFirst we define the following:\n\n1. input_row_length : the number of rows from the input set\n2. output_row_length : which will be smaller than the input\n3. alpha = input_row_length / output_row_length : our reduction ratio\n4. K = floor(alpha)\n5. row_pooling_sequence : this is the result list of pool boundary rows\n\nThen, row_pooling_sequence should satisfy:\n\n1. a[0] = 0 : the first value of the sequence is 0\n2. a[end] = input_row_length : the last value of the sequence is the size\n3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size\n4. length(row_pooling_sequence) = output_row_length+1\n\nFor more details on fractional max pooling, see this paper:\n[Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)", + "attributes": [ + { + "name": "pooling_ratio", + "type": "float32[]", + "description": "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don't allow pooling on batch and channels\ndimensions.
1.44 and 1.73 are the pooling ratios on the height and width dimensions,\nrespectively.", + "minimum": 4 + }, + { + "name": "pseudo_random", + "type": "boolean", + "description": "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random.", + "default": false + }, + { + "name": "overlapping", + "type": "boolean", + "description": "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling.", + "default": false + }, + { + "name": "deterministic", + "type": "boolean", + "description": "When set to True, a fixed pooling region will be used when\niterating over a FractionalMaxPool node in the computation graph. Mainly used\nin unit test to make FractionalMaxPool deterministic.", + "default": false + }, + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "value", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "output tensor after fractional max pooling.", + "typeAttr": "T" + }, + { + "name": "row_pooling_sequence", + "description": "row pooling sequence, needed to calculate gradient.", + "type": 9 + }, + { + "name": "col_pooling_sequence", + "description": "column pooling sequence, needed to calculate gradient.", + "type": 9 + } + ] + }, + { + "name": "FractionalMaxPoolGrad", + "summary": "Computes gradient of the FractionalMaxPool function.", + "attributes": [ + { + "name": "overlapping", + "type": "boolean", + "description": "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "orig_input", + "description": "Original input for `fractional_max_pool`", + "typeAttr": "T" + }, + { + "name": "orig_output", + "description": "Original output for `fractional_max_pool`", + "typeAttr": "T" + }, + { + "name": "out_backprop", + "description": "4-D with shape `[batch, height, width, channels]`. Gradients\nw.r.t.
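A runnable sketch of fractional max pooling, assuming TensorFlow 2.x; `tf.nn.fractional_max_pool` returns the output together with the row/column pooling sequences described above:

```python
import tensorflow as tf

# 4-D input: [batch, height, width, channels].
x = tf.random.uniform([1, 9, 9, 1])

# Non-integer reduction ratios on height and width; the batch and
# channel ratios must stay 1.0.
out, rows, cols = tf.nn.fractional_max_pool(
    x, pooling_ratio=[1.0, 1.44, 1.73, 1.0], pseudo_random=True, seed=1)

print(out.shape)     # height ~ 9/1.44, width ~ 9/1.73
print(rows.numpy())  # boundary sequence: starts at 0, ends at 9
```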
the output of `fractional_max_pool`.", + "typeAttr": "T" + }, + { + "name": "row_pooling_sequence", + "description": "row pooling sequence, form pooling region with\ncol_pooling_sequence.", + "type": 9 + }, + { + "name": "col_pooling_sequence", + "description": "column pooling sequence, form pooling region with\nrow_pooling sequence.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D. Gradients w.r.t. the input of `fractional_max_pool`.", + "typeAttr": "T" + } + ] + }, + { + "name": "FresnelCos", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "FresnelSin", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "FusedBatchNorm", + "category": "Normalization", + "summary": "Batch normalization.", + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float32`." + }, + { + "name": "epsilon", + "type": "float32", + "description": "A small float number added to the variance of x.", + "default": 9.999999747378752e-05 + }, + { + "name": "exponential_avg_factor", + "type": "float32", + "default": 1.0 + }, + { + "name": "data_format", + "type": "string", + "description": "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\". Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "is_training", + "type": "boolean", + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "default": true + } + ], + "inputs": [ + { + "name": "x", + "description": "A 4D Tensor for input data.", + "typeAttr": "T" + }, + { + "name": "scale", + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "typeAttr": "T" + }, + { + "name": "offset", + "description": "A 1D Tensor for offset, to shift to the normalized x.", + "typeAttr": "T" + }, + { + "name": "mean", + "description": "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training.", + "typeAttr": "T" + }, + { + "name": "variance", + "description": "A 1D Tensor for population variance. 
Used for inference only;\nmust be empty for training.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "description": "A 4D Tensor for output data.", + "typeAttr": "T" + }, + { + "name": "batch_mean", + "description": "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean.", + "typeAttr": "T" + }, + { + "name": "batch_variance", + "description": "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance.", + "typeAttr": "T" + }, + { + "name": "reserve_space_1", + "description": "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation.", + "typeAttr": "T" + }, + { + "name": "reserve_space_2", + "description": "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be reused in the gradient computation.", + "typeAttr": "T" + } + ] + }, + { + "name": "FusedBatchNormGrad", + "summary": "Gradient for batch normalization.", + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float32`." + }, + { + "name": "epsilon", + "type": "float32", + "description": "A small float number added to the variance of x.", + "default": 9.999999747378752e-05 + }, + { + "name": "data_format", + "type": "string", + "description": "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\". Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "is_training", + "type": "boolean", + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "default": true + } + ], + "inputs": [ + { + "name": "y_backprop", + "description": "A 4D Tensor for the gradient with respect to y.", + "typeAttr": "T" + }, + { + "name": "x", + "description": "A 4D Tensor for input data.", + "typeAttr": "T" + }, + { + "name": "scale", + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "typeAttr": "T" + }, + { + "name": "reserve_space_1", + "description": "When is_training is True, a 1D Tensor for the computed batch\nmean to be reused in gradient computation. When is_training is\nFalse, a 1D Tensor for the population mean to be reused in both\n1st and 2nd order gradient computation.", + "typeAttr": "T" + }, + { + "name": "reserve_space_2", + "description": "When is_training is True, a 1D Tensor for the computed batch\nvariance (inverted variance in the cuDNN case) to be reused in\ngradient computation. 
When is_training is False, a 1D Tensor\nfor the population variance to be reused in both 1st and 2nd\norder gradient computation.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "x_backprop", + "description": "A 4D Tensor for the gradient with respect to x.", + "typeAttr": "T" + }, + { + "name": "scale_backprop", + "description": "A 1D Tensor for the gradient with respect to scale.", + "typeAttr": "T" + }, + { + "name": "offset_backprop", + "description": "A 1D Tensor for the gradient with respect to offset.", + "typeAttr": "T" + }, + { + "name": "reserve_space_3", + "description": "Unused placeholder to match the mean input in FusedBatchNorm.", + "typeAttr": "T" + }, + { + "name": "reserve_space_4", + "description": "Unused placeholder to match the variance input\nin FusedBatchNorm.", + "typeAttr": "T" + } + ] + }, + { + "name": "FusedBatchNormGradV2", + "summary": "Gradient for batch normalization.", + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float16`, `bfloat16`, `float32`." + }, + { + "name": "U", + "type": "type", + "description": "The data type for the scale, offset, mean, and variance. Must be one of the following: `float32`." + }, + { + "name": "epsilon", + "type": "float32", + "description": "A small float number added to the variance of x.", + "default": 9.999999747378752e-05 + }, + { + "name": "data_format", + "type": "string", + "description": "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\". Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "is_training", + "type": "boolean", + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "default": true + } + ], + "inputs": [ + { + "name": "y_backprop", + "description": "A 4D Tensor for the gradient with respect to y.", + "typeAttr": "T" + }, + { + "name": "x", + "description": "A 4D Tensor for input data.", + "typeAttr": "T" + }, + { + "name": "scale", + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "type": 1 + }, + { + "name": "reserve_space_1", + "description": "When is_training is True, a 1D Tensor for the computed batch\nmean to be reused in gradient computation. When is_training is\nFalse, a 1D Tensor for the population mean to be reused in both\n1st and 2nd order gradient computation.", + "typeAttr": "U" + }, + { + "name": "reserve_space_2", + "description": "When is_training is True, a 1D Tensor for the computed batch\nvariance (inverted variance in the cuDNN case) to be reused in\ngradient computation. 
When is_training is False, a 1D Tensor\nfor the population variance to be reused in both 1st and 2nd\norder gradient computation.", + "typeAttr": "U" + } + ], + "outputs": [ + { + "name": "x_backprop", + "description": "A 4D Tensor for the gradient with respect to x.", + "typeAttr": "T" + }, + { + "name": "scale_backprop", + "description": "A 1D Tensor for the gradient with respect to scale.", + "typeAttr": "U" + }, + { + "name": "offset_backprop", + "description": "A 1D Tensor for the gradient with respect to offset.", + "typeAttr": "U" + }, + { + "name": "reserve_space_3", + "description": "Unused placeholder to match the mean input in FusedBatchNorm.", + "typeAttr": "U" + }, + { + "name": "reserve_space_4", + "description": "Unused placeholder to match the variance input\nin FusedBatchNorm.", + "typeAttr": "U" + } + ] + }, + { + "name": "FusedBatchNormGradV3", + "summary": "Gradient for batch normalization.", + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float16`, `bfloat16`, `float32`." + }, + { + "name": "U", + "type": "type", + "description": "The data type for the scale, offset, mean, and variance. Must be one of the following: `float32`." + }, + { + "name": "epsilon", + "type": "float32", + "description": "A small float number added to the variance of x.", + "default": 9.999999747378752e-05 + }, + { + "name": "data_format", + "type": "string", + "description": "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\". Must be one of the following: `NHWC`, `NCHW`, `NDHWC`, `NCDHW`.", + "default": "NHWC" + }, + { + "name": "is_training", + "type": "boolean", + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "default": true + } + ], + "inputs": [ + { + "name": "y_backprop", + "description": "A 4D Tensor for the gradient with respect to y.", + "typeAttr": "T" + }, + { + "name": "x", + "description": "A 4D Tensor for input data.", + "typeAttr": "T" + }, + { + "name": "scale", + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "type": 1 + }, + { + "name": "reserve_space_1", + "description": "When is_training is True, a 1D Tensor for the computed batch\nmean to be reused in gradient computation. When is_training is\nFalse, a 1D Tensor for the population mean to be reused in both\n1st and 2nd order gradient computation.", + "typeAttr": "U" + }, + { + "name": "reserve_space_2", + "description": "When is_training is True, a 1D Tensor for the computed batch\nvariance (inverted variance in the cuDNN case) to be reused in\ngradient computation. When is_training is False, a 1D Tensor\nfor the population variance to be reused in both 1st and 2nd\norder gradient computation.", + "typeAttr": "U" + }, + { + "name": "reserve_space_3", + "description": "When is_training is True, a 1D Tensor for some intermediate results to be reused\nin gradient computation. 
When is_training is False, a dummy empty Tensor will be\ncreated.", + "typeAttr": "U" + } + ], + "outputs": [ + { + "name": "x_backprop", + "description": "A 4D Tensor for the gradient with respect to x.", + "typeAttr": "T" + }, + { + "name": "scale_backprop", + "description": "A 1D Tensor for the gradient with respect to scale.", + "typeAttr": "U" + }, + { + "name": "offset_backprop", + "description": "A 1D Tensor for the gradient with respect to offset.", + "typeAttr": "U" + }, + { + "name": "reserve_space_4", + "description": "Unused placeholder to match the mean input in FusedBatchNorm.", + "typeAttr": "U" + }, + { + "name": "reserve_space_5", + "description": "Unused placeholder to match the variance input\nin FusedBatchNorm.", + "typeAttr": "U" + } + ] + }, + { + "name": "FusedBatchNormV2", + "category": "Normalization", + "summary": "Batch normalization.", + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float16`, `bfloat16`, `float32`." + }, + { + "name": "U", + "type": "type", + "description": "The data type for the scale, offset, mean, and variance. Must be one of the following: `float32`." + }, + { + "name": "epsilon", + "type": "float32", + "description": "A small float number added to the variance of x.", + "default": 9.999999747378752e-05 + }, + { + "name": "exponential_avg_factor", + "type": "float32", + "default": 1.0 + }, + { + "name": "data_format", + "type": "string", + "description": "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\". Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "is_training", + "type": "boolean", + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "default": true + } + ], + "inputs": [ + { + "name": "x", + "description": "A 4D Tensor for input data.", + "typeAttr": "T" + }, + { + "name": "scale", + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "typeAttr": "U" + }, + { + "name": "offset", + "description": "A 1D Tensor for offset, to shift to the normalized x.", + "typeAttr": "U" + }, + { + "name": "mean", + "description": "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training.", + "typeAttr": "U" + }, + { + "name": "variance", + "description": "A 1D Tensor for population variance. 
Used for inference only;\nmust be empty for training.", + "typeAttr": "U" + } + ], + "outputs": [ + { + "name": "y", + "description": "A 4D Tensor for output data.", + "typeAttr": "T" + }, + { + "name": "batch_mean", + "description": "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean.", + "typeAttr": "U" + }, + { + "name": "batch_variance", + "description": "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance.", + "typeAttr": "U" + }, + { + "name": "reserve_space_1", + "description": "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation.", + "typeAttr": "U" + }, + { + "name": "reserve_space_2", + "description": "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be reused in the gradient computation.", + "typeAttr": "U" + } + ] + }, + { + "name": "FusedBatchNormV3", + "category": "Normalization", + "summary": "Batch normalization.", + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float16`, `bfloat16`, `float32`." + }, + { + "name": "U", + "type": "type", + "description": "The data type for the scale, offset, mean, and variance. Must be one of the following: `bfloat16`, `float32`." + }, + { + "name": "epsilon", + "type": "float32", + "description": "A small float number added to the variance of x.", + "default": 9.999999747378752e-05 + }, + { + "name": "exponential_avg_factor", + "type": "float32", + "default": 1.0 + }, + { + "name": "data_format", + "type": "string", + "description": "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\". Must be one of the following: `NHWC`, `NCHW`, `NDHWC`, `NCDHW`.", + "default": "NHWC" + }, + { + "name": "is_training", + "type": "boolean", + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "default": true + } + ], + "inputs": [ + { + "name": "x", + "description": "A 4D Tensor for input data.", + "typeAttr": "T" + }, + { + "name": "scale", + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "typeAttr": "U" + }, + { + "name": "offset", + "description": "A 1D Tensor for offset, to shift to the normalized x.", + "typeAttr": "U" + }, + { + "name": "mean", + "description": "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training.", + "typeAttr": "U" + }, + { + "name": "variance", + "description": "A 1D Tensor for population variance. 
Used for inference only;\nmust be empty for training.", + "typeAttr": "U" + } + ], + "outputs": [ + { + "name": "y", + "description": "A 4D Tensor for output data.", + "typeAttr": "T" + }, + { + "name": "batch_mean", + "description": "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean.", + "typeAttr": "U" + }, + { + "name": "batch_variance", + "description": "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance.", + "typeAttr": "U" + }, + { + "name": "reserve_space_1", + "description": "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation.", + "typeAttr": "U" + }, + { + "name": "reserve_space_2", + "description": "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be reused in the gradient computation.", + "typeAttr": "U" + }, + { + "name": "reserve_space_3", + "description": "A 1D Tensor for some intermediate results, to be reused in the gradient\ncomputation for better efficiency.", + "typeAttr": "U" + } + ] + }, + { + "name": "FusedPadConv2D", + "summary": "Performs a padding as a preprocess during a convolution.", + "description": "Similar to FusedResizeAndPadConv2d, this op allows for an optimized\nimplementation where the spatial padding transformation stage is fused with the\nim2col lookup, but in this case without the bilinear filtering required for\nresizing. Fusing the padding prevents the need to write out the intermediate\nresults as whole tensors, reducing memory pressure, and we can get some latency\ngains by merging the transformation calculations.\nThe data_format attribute for Conv2D isn't supported by this op, and 'NHWC'\norder is used instead.\nInternally this op uses a single per-graph scratch buffer, which means that it\nwill block if multiple versions are being run in parallel. This is because this\noperator is primarily an optimization to minimize memory usage.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "mode", + "type": "string", + "description": "Must be one of the following: `REFLECT`, `SYMMETRIC`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D of length 4. The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format." + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape `[batch, in_height, in_width, in_channels]`.", + "typeAttr": "T" + }, + { + "name": "paddings", + "description": "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`.", + "type": 3 + }, + { + "name": "filter", + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "FusedResizeAndPadConv2D", + "summary": "Performs a resize and padding as a preprocess during a convolution.", + "description": "It's often possible to do spatial transformations more efficiently as part of\nthe packing stage of a convolution, so this op allows for an optimized\nimplementation where these stages are fused together. 
This prevents the need to\nwrite out the intermediate results as whole tensors, reducing memory pressure,\nand we can get some latency gains by merging the transformation calculations.\nThe data_format attribute for Conv2D isn't supported by this op, and defaults to\n'NHWC' order.\nInternally this op uses a single per-graph scratch buffer, which means that it\nwill block if multiple versions are being run in parallel. This is because this\noperator is primarily an optimization to minimize memory usage.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`." + }, + { + "name": "resize_align_corners", + "type": "boolean", + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. Defaults to false.", + "default": false + }, + { + "name": "mode", + "type": "string", + "description": "Must be one of the following: `REFLECT`, `SYMMETRIC`." + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D of length 4. The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format." + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape `[batch, in_height, in_width, in_channels]`.", + "typeAttr": "T" + }, + { + "name": "size", + "description": "A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images.", + "type": 3 + }, + { + "name": "paddings", + "description": "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`.", + "type": 3 + }, + { + "name": "filter", + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "GRUBlockCell", + "summary": "Computes the GRU cell forward propagation for 1 time step.", + "description": "Args\n x: Input to the GRU cell.\n h_prev: State input from the previous GRU cell.\n w_ru: Weight matrix for the reset and update gate.\n w_c: Weight matrix for the cell connection gate.\n b_ru: Bias vector for the reset and update gate.\n b_c: Bias vector for the cell connection gate.\n\nReturns\n r: Output of the reset gate.\n u: Output of the update gate.\n c: Output of the cell connection gate.\n h: Current state of the GRU cell.\n\nNote on notation of the variables:\n\nConcatenation of a and b is represented by a_b\nElement-wise dot product of a and b is represented by ab\nElement-wise dot product is represented by \\circ\nMatrix multiplication is represented by *\n\nBiases are initialized with :\n`b_ru` - constant_initializer(1.0)\n`b_c` - constant_initializer(0.0)\n\nThis kernel op implements the following mathematical equations:\n\n```\nx_h_prev = [x, h_prev]\n\n[r_bar u_bar] = x_h_prev * w_ru + b_ru\n\nr = sigmoid(r_bar)\nu = sigmoid(u_bar)\n\nh_prevr = h_prev \\circ r\n\nx_h_prevr = [x h_prevr]\n\nc_bar = x_h_prevr * w_c + b_c\nc = tanh(c_bar)\n\nh = (1-u) \\circ c + u \\circ h_prev\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`." 
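The GRUBlockCell forward equations above map directly onto array operations. A minimal NumPy sketch, not the kernel's actual implementation, with shapes assumed as `x` `[batch, input_size]`, `h_prev` `[batch, cell_size]`, `w_ru` `[input_size + cell_size, 2 * cell_size]`, `w_c` `[input_size + cell_size, cell_size]`:

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def gru_block_cell(x, h_prev, w_ru, w_c, b_ru, b_c):
    """Sketch of the GRUBlockCell equations above (outputs r, u, c, h)."""
    x_h_prev = np.concatenate([x, h_prev], axis=1)    # x_h_prev = [x, h_prev]
    r_bar, u_bar = np.split(x_h_prev @ w_ru + b_ru, 2, axis=1)
    r, u = sigmoid(r_bar), sigmoid(u_bar)             # reset and update gates
    h_prevr = h_prev * r                              # h_prevr = h_prev \circ r
    x_h_prevr = np.concatenate([x, h_prevr], axis=1)  # x_h_prevr = [x h_prevr]
    c = np.tanh(x_h_prevr @ w_c + b_c)                # cell connection gate
    h = (1.0 - u) * c + u * h_prev                    # new state
    return r, u, c, h
```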
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "h_prev", + "typeAttr": "T" + }, + { + "name": "w_ru", + "typeAttr": "T" + }, + { + "name": "w_c", + "typeAttr": "T" + }, + { + "name": "b_ru", + "typeAttr": "T" + }, + { + "name": "b_c", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "r", + "typeAttr": "T" + }, + { + "name": "u", + "typeAttr": "T" + }, + { + "name": "c", + "typeAttr": "T" + }, + { + "name": "h", + "typeAttr": "T" + } + ] + }, + { + "name": "GRUBlockCellGrad", + "summary": "Computes the GRU cell back-propagation for 1 time step.", + "description": "Args\n x: Input to the GRU cell.\n h_prev: State input from the previous GRU cell.\n w_ru: Weight matrix for the reset and update gate.\n w_c: Weight matrix for the cell connection gate.\n b_ru: Bias vector for the reset and update gate.\n b_c: Bias vector for the cell connection gate.\n r: Output of the reset gate.\n u: Output of the update gate.\n c: Output of the cell connection gate.\n d_h: Gradients of h_new wrt the objective function.\n\nReturns\n d_x: Gradients of x wrt the objective function.\n d_h_prev: Gradients of h wrt the objective function.\n d_c_bar: Gradients of c_bar wrt the objective function.\n d_r_bar_u_bar: Gradients of r_bar & u_bar wrt the objective function.\n\nThis kernel op implements the following mathematical equations:\n\nNote on notation of the variables:\n\nConcatenation of a and b is represented by a_b\nElement-wise dot product of a and b is represented by ab\nElement-wise dot product is represented by \\circ\nMatrix multiplication is represented by *\n\nAdditional notes for clarity:\n\n`w_ru` can be segmented into 4 different matrices.\n```\nw_ru = [w_r_x w_u_x\n w_r_h_prev w_u_h_prev]\n```\nSimilarly, `w_c` can be segmented into 2 different matrices.\n```\nw_c = [w_c_x w_c_h_prevr]\n```\nSame goes for biases.\n```\nb_ru = [b_ru_x b_ru_h]\nb_c = [b_c_x b_c_h]\n```\nAnother note on notation:\n```\nd_x = d_x_component_1 + d_x_component_2\n\nwhere d_x_component_1 = d_r_bar * w_r_x^T + d_u_bar * w_u_x^T\nand d_x_component_2 = d_c_bar * w_c_x^T\n\nd_h_prev = d_h_prev_component_1 + d_h_prevr \\circ r + d_h \\circ u\nwhere d_h_prev_component_1 = d_r_bar * w_r_h_prev^T + d_u_bar * w_u_h_prev^T\n```\n\nMathematics behind the Gradients below:\n```\nd_c_bar = d_h \\circ (1-u) \\circ (1-c \\circ c)\nd_u_bar = d_h \\circ (h_prev-c) \\circ u \\circ (1-u)\n\nd_r_bar_u_bar = [d_r_bar d_u_bar]\n\n[d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T\n\n[d_x_component_2 d_h_prevr] = d_c_bar * w_c^T\n\nd_x = d_x_component_1 + d_x_component_2\n\nd_h_prev = d_h_prev_component_1 + d_h_prevr \\circ r + d_h \\circ u\n```\nThe calculation below is performed in the Python wrapper for the gradients\n(not in the gradient kernel):\n```\nd_w_ru = x_h_prev^T * d_r_bar_u_bar\n\nd_w_c = x_h_prevr^T * d_c_bar\n\nd_b_ru = sum of d_r_bar_u_bar along axis = 0\n\nd_b_c = sum of d_c_bar along axis = 0\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`." 
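The gradient equations likewise translate one-to-one into array operations. A minimal NumPy sketch of the four kernel outputs, under the same assumed shapes as the forward sketch; the `d_r_bar` step is left implicit in the description above and is just the sigmoid chain rule through `h_prevr = h_prev \circ r`:

```python
import numpy as np

def gru_block_cell_grad(x, h_prev, w_ru, w_c, r, u, c, d_h):
    """Sketch of the GRUBlockCellGrad equations above (kernel outputs only)."""
    cell_size = h_prev.shape[1]
    d_c_bar = d_h * (1.0 - u) * (1.0 - c * c)         # d_h ∘ (1-u) ∘ (1-c∘c)
    d_u_bar = d_h * (h_prev - c) * u * (1.0 - u)      # d_h ∘ (h_prev-c) ∘ u ∘ (1-u)

    # [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T
    d_x_h_prevr = d_c_bar @ w_c.T
    d_x_component_2 = d_x_h_prevr[:, :-cell_size]
    d_h_prevr = d_x_h_prevr[:, -cell_size:]

    # d_r_bar via the chain rule through h_prevr = h_prev ∘ r and r = sigmoid(r_bar)
    d_r_bar = d_h_prevr * h_prev * r * (1.0 - r)
    d_r_bar_u_bar = np.concatenate([d_r_bar, d_u_bar], axis=1)

    # [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T
    d_x_h_prev = d_r_bar_u_bar @ w_ru.T
    d_x = d_x_h_prev[:, :-cell_size] + d_x_component_2
    d_h_prev = d_x_h_prev[:, -cell_size:] + d_h_prevr * r + d_h * u
    return d_x, d_h_prev, d_c_bar, d_r_bar_u_bar
```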
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "h_prev", + "typeAttr": "T" + }, + { + "name": "w_ru", + "typeAttr": "T" + }, + { + "name": "w_c", + "typeAttr": "T" + }, + { + "name": "b_ru", + "typeAttr": "T" + }, + { + "name": "b_c", + "typeAttr": "T" + }, + { + "name": "r", + "typeAttr": "T" + }, + { + "name": "u", + "typeAttr": "T" + }, + { + "name": "c", + "typeAttr": "T" + }, + { + "name": "d_h", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "d_x", + "typeAttr": "T" + }, + { + "name": "d_h_prev", + "typeAttr": "T" + }, + { + "name": "d_c_bar", + "typeAttr": "T" + }, + { + "name": "d_r_bar_u_bar", + "typeAttr": "T" + } + ] + }, + { + "name": "Gather", + "category": "Transform", + "summary": "Gather slices from `params` according to `indices`.", + "description": "`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).\nProduces an output tensor with shape `indices.shape + params.shape[1:]` where:\n\n```python\n # Scalar indices\n output[:, ..., :] = params[indices, :, ... :]\n\n # Vector indices\n output[i, :, ..., :] = params[indices[i], :, ... :]\n\n # Higher rank indices\n output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]\n```\n\nIf `indices` is a permutation and `len(indices) == params.shape[0]` then\nthis operation will permute `params` accordingly.\n\n`validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in\n`indices` are always validated to be within range. If assigned to GPU,\nout-of-bound indices result in safe but unspecified behavior, which may include\nraising an error.\n\n
", + "attributes": [ + { + "name": "validate_indices", + "type": "boolean", + "default": true + }, + { + "name": "Tparams", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "params", + "typeAttr": "Tparams" + }, + { + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "Tparams" + } + ] + }, + { + "name": "GatherNd", + "summary": "Gather slices from `params` into a Tensor with shape specified by `indices`.", + "description": "`indices` is a K-dimensional integer tensor, best thought of as a\n(K-1)-dimensional tensor of indices into `params`, where each element defines a\nslice of `params`:\n\n output[\\\\(i_0, ..., i_{K-2}\\\\)] = params[indices[\\\\(i_0, ..., i_{K-2}\\\\)]]\n\nWhereas in `tf.gather` `indices` defines slices into the `axis`\ndimension of `params`, in `tf.gather_nd`, `indices` defines slices into the\nfirst `N` dimensions of `params`, where `N = indices.shape[-1]`.\n\nThe last dimension of `indices` can be at most the rank of\n`params`:\n\n indices.shape[-1] <= params.rank\n\nThe last dimension of `indices` corresponds to elements\n(if `indices.shape[-1] == params.rank`) or slices\n(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`\nof `params`. The output tensor has shape\n\n indices.shape[:-1] + params.shape[indices.shape[-1]:]\n\nNote that on CPU, if an out of bound index is found, an error is returned.\nOn GPU, if an out of bound index is found, a 0 is stored in the\ncorresponding output value.\n\nSome examples below.\n\nSimple indexing into a matrix:\n\n```python\n indices = [[0, 0], [1, 1]]\n params = [['a', 'b'], ['c', 'd']]\n output = ['a', 'd']\n```\n\nSlice indexing into a matrix:\n\n```python\n indices = [[1], [0]]\n params = [['a', 'b'], ['c', 'd']]\n output = [['c', 'd'], ['a', 'b']]\n```\n\nIndexing into a 3-tensor:\n\n```python\n indices = [[1]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[['a1', 'b1'], ['c1', 'd1']]]\n\n\n indices = [[0, 1], [1, 0]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['c0', 'd0'], ['a1', 'b1']]\n\n\n indices = [[0, 0, 1], [1, 0, 1]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = ['b0', 'b1']\n```\n\nBatched indexing into a matrix:\n\n```python\n indices = [[[0, 0]], [[0, 1]]]\n params = [['a', 'b'], ['c', 'd']]\n output = [['a'], ['b']]\n```\n\nBatched slice indexing into a matrix:\n\n```python\n indices = [[[1]], [[0]]]\n params = [['a', 'b'], ['c', 'd']]\n output = [[['c', 'd']], [['a', 'b']]]\n```\n\nBatched indexing into a 3-tensor:\n\n```python\n indices = [[[1]], [[0]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[[['a1', 'b1'], ['c1', 'd1']]],\n [[['a0', 'b0'], ['c0', 'd0']]]]\n\n indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[['c0', 'd0'], ['a1', 'b1']],\n [['a0', 'b0'], ['c1', 'd1']]]\n\n\n indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['b0', 'b1'], ['d0', 'c1']]\n```\n\nSee also `tf.gather` and `tf.batch_gather`.", + "attributes": [ + { + "name": "Tparams", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int16`, 
`int32`, `int64`." + } + ], + "inputs": [ + { + "name": "params", + "description": "The tensor from which to gather values.", + "typeAttr": "Tparams" + }, + { + "name": "indices", + "description": "Index tensor.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "description": "Values from `params` gathered from indices given by `indices`, with\nshape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.", + "typeAttr": "Tparams" + } + ] + }, + { + "name": "GatherV2", + "summary": "Gather slices from `params` axis `axis` according to `indices`.", + "description": "`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).\nProduces an output tensor with shape `params.shape[:axis] +\nindices.shape[batch_dims:] + params.shape[axis + 1:]` where:\n\n```python\n # Scalar indices (output is rank(params) - 1).\n output[a_0, ..., a_n, b_0, ..., b_n] =\n params[a_0, ..., a_n, indices, b_0, ..., b_n]\n\n # Vector indices (output is rank(params)).\n output[a_0, ..., a_n, i, b_0, ..., b_n] =\n params[a_0, ..., a_n, indices[i], b_0, ..., b_n]\n\n # Higher rank indices (output is rank(params) + rank(indices) - 1).\n output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =\n params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]\n```\n\n
Note that on CPU, if an out of bound index is found, an error is returned.\nOn GPU, if an out of bound index is found, a 0 is stored in the\ncorresponding output value.\n\nSee also `tf.batch_gather` and `tf.gather_nd`.", + "attributes": [ + { + "name": "batch_dims", + "type": "int64", + "default": 0 + }, + { + "name": "Tparams", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int16`, `int32`, `int64`." + }, + { + "name": "Taxis", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "params", + "description": "The tensor from which to gather values. Must be at least rank\n`axis + 1`.", + "typeAttr": "Tparams" + }, + { + "name": "indices", + "description": "Index tensor. Must be in range `[0, params.shape[axis])`.", + "typeAttr": "Tindices" + }, + { + "name": "axis", + "description": "The axis in `params` to gather `indices` from. Defaults to the first\ndimension. Supports negative indexes.", + "typeAttr": "Taxis" + } + ], + "outputs": [ + { + "name": "output", + "description": "Values from `params` gathered from indices given by `indices`, with\nshape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.", + "typeAttr": "Tparams" + } + ] + }, + { + "name": "GenerateBoundingBoxProposals", + "summary": "This op produces Regions of Interest from given bounding boxes (bbox_deltas) encoded wrt anchors according to eq.2 in arXiv:1506.01497", + "description": " The op selects top `pre_nms_topn` scoring boxes, decodes them with respect to anchors,\n applies non-maximal suppression on overlapping boxes with higher than\n `nms_threshold` intersection-over-union (iou) value, discarding boxes where shorter\n side is less than `min_size`.\n Inputs:\n `scores`: A 4D tensor of shape [Batch, Height, Width, Num Anchors] containing the scores per anchor at the given position\n `bbox_deltas`: a tensor of shape [Batch, Height, Width, 4 x Num Anchors] of boxes encoded to each anchor\n `anchors`: A 1D tensor of shape [4 x Num Anchors], representing the anchors.\n Outputs:\n `rois`: output RoIs, a 3D tensor of shape [Batch, post_nms_topn, 4], padded by 0 if less than post_nms_topn candidates found.\n `roi_probabilities`: probability scores of each roi in 'rois', a 2D tensor of shape [Batch,post_nms_topn], padded with 0 if needed, sorted by scores.", + "attributes": [ + { + "name": "post_nms_topn", + "type": "int64", + "description": "An integer. Maximum number of rois in the output.", + "default": 300 + } + ], + "inputs": [ + { + "name": "scores", + "description": "A 4-D float tensor of shape `[num_images, height, width, num_anchors]` containing scores of the boxes for given anchors, can be unsorted.", + "type": 1 + }, + { + "name": "bbox_deltas", + "description": "A 4-D float tensor of shape `[num_images, height, width, 4 x num_anchors]` encoding boxes with respect to each anchor.\nCoordinates are given in the form [dy, dx, dh, dw].", + "type": 1 + }, + { + "name": "image_info", + "description": "A 2-D float tensor of shape `[num_images, 5]` containing image information Height, Width, Scale.", + "type": 1 + }, + { + "name": "anchors", + "description": "A 2-D float tensor of shape `[num_anchors, 4]` describing the anchor boxes. 
Boxes are formatted in the form [y1, x1, y2, x2].", + "type": 1 + }, + { + "name": "nms_threshold", + "description": "A scalar float tensor for non-maximal-suppression threshold.", + "type": 1 + }, + { + "name": "pre_nms_topn", + "description": "A scalar int tensor for the number of top scoring boxes to be used as input.", + "type": 3 + }, + { + "name": "min_size", + "description": "A scalar float tensor. Any box that has a smaller size than min_size will be discarded.", + "type": 1 + } + ], + "outputs": [ + { + "name": "rois", + "description": "A 3-D float tensor of shape `[num_images,post_nms_topn,4]` representing the selected\nregion of interest boxes. Sorted in descending order by scores.", + "type": 1 + }, + { + "name": "roi_probabilities", + "description": "A 2-D float tensor of shape `[num_images, post_nms_topn]` representing the score of the\nregion of interest box in the `rois` tensor at the same index.", + "type": 1 + } + ] + }, + { + "name": "GenerateVocabRemapping", + "summary": "Given a path to new and old vocabulary files, returns a remapping Tensor of", + "description": "length `num_new_vocab`, where `remapping[i]` contains the row number in the old\nvocabulary that corresponds to row `i` in the new vocabulary (starting at line\n`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`\nin the new vocabulary is not in the old vocabulary. The old vocabulary is\nconstrained to the first `old_vocab_size` entries if `old_vocab_size` is not the\ndefault value of -1.\n\n`new_vocab_offset` enables\nuse in the partitioned variable case, and should generally be set through\nexamining partitioning info. Each file should be a text file,\nwith each line containing a single entity within the vocabulary.\n\nFor example, with `new_vocab_file` a text file containing each of the following\nelements on a single line: `[f0, f1, f2, f3]`, `old_vocab_file = [f1, f0, f3]`,\n`num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be\n`[0, -1, 2]`.\n\nThe op also returns a count of how many entries in the new vocabulary\nwere present in the old vocabulary, which is used to calculate the number of\nvalues to initialize in a weight matrix remapping.\n\nThis functionality can be used to remap both row vocabularies (typically,\nfeatures) and column vocabularies (typically, classes) from TensorFlow\ncheckpoints. Note that the partitioning logic relies on contiguous vocabularies\ncorresponding to div-partitioned variables. Moreover, the underlying remapping\nuses an IndexTable (as opposed to an inexact CuckooTable), so client code should\nuse the corresponding index_table_from_file() as the FeatureColumn framework\ndoes (as opposed to tf.feature_to_id(), which uses a CuckooTable).", + "attributes": [ + { + "name": "new_vocab_offset", + "type": "int64", + "description": "How many entries into the new vocab file to start reading.", + "minimum": 0 + }, + { + "name": "num_new_vocab", + "type": "int64", + "description": "Number of entries in the new vocab file to remap.", + "minimum": 0 + }, + { + "name": "old_vocab_size", + "type": "int64", + "description": "Number of entries in the old vocab file to consider. 
If -1,\nuse the entire old vocabulary.", + "minimum": -1, + "default": -1 + } + ], + "inputs": [ + { + "name": "new_vocab_file", + "description": "Path to the new vocab file.", + "type": 7 + }, + { + "name": "old_vocab_file", + "description": "Path to the old vocab file.", + "type": 7 + } + ], + "outputs": [ + { + "name": "remapping", + "description": "A Tensor of length num_new_vocab where the element at index i\nis equal to the old ID that maps to the new ID i. This element is -1 for any\nnew ID that is not found in the old vocabulary.", + "type": 9 + }, + { + "name": "num_present", + "description": "Number of new vocab entries found in old vocab.", + "type": 3 + } + ] + }, + { + "name": "GeneratorDataset", + "summary": "Creates a dataset that invokes a function to generate elements.", + "attributes": [ + { + "name": "init_func", + "type": "function" + }, + { + "name": "next_func", + "type": "function" + }, + { + "name": "finalize_func", + "type": "function" + }, + { + "name": "Tinit_func_args", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Tnext_func_args", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Tfinalize_func_args", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "init_func_other_args", + "typeListAttr": "Tinit_func_args" + }, + { + "name": "next_func_other_args", + "typeListAttr": "Tnext_func_args" + }, + { + "name": "finalize_func_other_args", + "typeListAttr": "Tfinalize_func_args" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "GetElementAtIndex", + "summary": "Gets the element at the specified index in a dataset.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "dataset", + "type": 21 + }, + { + "name": "index", + "type": 9 + } + ], + "outputs": [ + { + "name": "components", + "typeListAttr": "output_types" + } + ] + }, + { + "name": "GetMinibatchSplitsWithPhysicalReplica", + "attributes": [ + { + "name": "sample_count", + "type": "int64", + "minimum": 1 + }, + { + "name": "num_replica", + "type": "int64", + "minimum": 1 + }, + { + "name": "table_vocab_size", + "type": "int64", + "minimum": 1 + }, + { + "name": "feature_width", + "type": "int64", + "minimum": 1 + }, + { + "name": "num_sc_per_chip", + "type": "int64", + "minimum": 1 + }, + { + "name": "table_name", + "type": "string" + }, + { + "name": "mini_batch_splits", + "type": "string" + } + ], + "inputs": [ + { + "name": "program_key", + "type": 7 + }, + { + "name": "row_ids", + "type": 3 + }, + { + "name": "col_ids", + "type": 3 + }, + { + "name": "gains", + "type": 1 + } + ], + "outputs": [ + { + "name": "sorted_row_ids", + "type": 3 + }, + { + "name": "sorted_col_ids", + "type": 3 + }, + { + "name": "sorted_gains", + "type": 1 + }, + { + "name": "splits", + "type": 9 + }, + { + "name": "id_counts", + "type": 3 + }, + { + "name": "max_ids", + "type": 3 + }, + { + "name": "max_uniques", + "type": 3 + } + ] + }, + { + "name": "GetMinibatchesInCsrWithPhysicalReplica", + "attributes": [ + { + "name": "sample_count", + "type": "int64", + "minimum": 1 + }, + { + "name": "num_replica", + "type": "int64", + "minimum": 1 + }, + { + "name": "max_minibatches_per_sc", + "type": "int64", + "minimum": 
1 + }, + { + "name": "max_ids_per_chip_per_sample", + "type": "int64", + "minimum": 1 + }, + { + "name": "table_vocab_size", + "type": "int64", + "minimum": 1 + }, + { + "name": "feature_width", + "type": "int64", + "minimum": 1 + }, + { + "name": "num_sc_per_chip", + "type": "int64", + "minimum": 1 + }, + { + "name": "table_name", + "type": "string" + }, + { + "name": "mini_batch_in_csr", + "type": "string" + } + ], + "inputs": [ + { + "name": "program_key", + "type": 7 + }, + { + "name": "row_ids", + "type": 3 + }, + { + "name": "col_ids", + "type": 3 + }, + { + "name": "gains", + "type": 1 + }, + { + "name": "splits", + "type": 9 + }, + { + "name": "id_counts", + "type": 3 + } + ], + "outputs": [ + { + "name": "row_pointers", + "type": 3 + }, + { + "name": "sorted_sample_ids", + "type": 3 + }, + { + "name": "sorted_token_ids", + "type": 3 + }, + { + "name": "sorted_gains", + "type": 1 + }, + { + "name": "row_pointers_unpadded_size", + "type": 3 + }, + { + "name": "ids_unpadded_size", + "type": 3 + }, + { + "name": "num_minibatches_per_physical_sparse_core", + "type": 3 + } + ] + }, + { + "name": "GetOptions", + "summary": "Returns the `tf.data.Options` attached to `input_dataset`.", + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + } + ], + "outputs": [ + { + "name": "serialized_options", + "type": 7 + } + ] + }, + { + "name": "GetSessionHandle", + "summary": "Store the input tensor in the state of the current session.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "value", + "description": "The tensor to be stored.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle for the tensor stored in the session state, represented\nas a string.", + "type": 7 + } + ] + }, + { + "name": "GetSessionHandleV2", + "summary": "Store the input tensor in the state of the current session.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "value", + "description": "The tensor to be stored.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle for the tensor stored in the session state, represented\nas a ResourceHandle object.", + "type": 20 + } + ] + }, + { + "name": "GetSessionTensor", + "summary": "Get the value of the tensor specified by its handle.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output value." + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle for a tensor stored in the session state.", + "type": 7 + } + ], + "outputs": [ + { + "name": "value", + "description": "The tensor for the given handle.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "GlobalIterId", + "outputs": [ + { + "name": "iter_id", + "type": 9 + } + ] + }, + { + "name": "Greater", + "summary": "Returns the truth value of (x > y) element-wise.", + "description": "*NOTE*: `Greater` supports broadcasting. 
More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\nExample:\n\n```python\nx = tf.constant([5, 4, 6])\ny = tf.constant([5, 2, 5])\ntf.math.greater(x, y) ==> [False, True, True]\n\nx = tf.constant([5, 4, 6])\ny = tf.constant([5])\ntf.math.greater(x, y) ==> [False, False, True]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "GreaterEqual", + "summary": "Returns the truth value of (x >= y) element-wise.", + "description": "*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\nExample:\n\n```python\nx = tf.constant([5, 4, 6, 7])\ny = tf.constant([5, 2, 5, 10])\ntf.math.greater_equal(x, y) ==> [True, True, True, False]\n\nx = tf.constant([5, 4, 6, 7])\ny = tf.constant([5])\ntf.math.greater_equal(x, y) ==> [True, False, True, True]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "GroupByReducerDataset", + "summary": "Creates a dataset that computes a group-by on `input_dataset`.", + "description": "Creates a dataset that computes a group-by on `input_dataset`.", + "attributes": [ + { + "name": "key_func", + "type": "function", + "description": "A function mapping an element of `input_dataset`, concatenated\nwith `key_func_other_arguments` to a scalar value of type DT_INT64." + }, + { + "name": "init_func", + "type": "function", + "description": "A function mapping a key of type DT_INT64, concatenated with\n`init_func_other_arguments` to the initial reducer state." + }, + { + "name": "reduce_func", + "type": "function", + "description": "A function mapping the current reducer state and an element of `input_dataset`,\nconcatenated with `reduce_func_other_arguments` to a new reducer state." + }, + { + "name": "finalize_func", + "type": "function", + "description": "A function mapping the final reducer state to an output element." 
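In the Python API, `GroupByReducerDataset` is typically reached through `tf.data.experimental.group_by_reducer`; a small usage sketch, assuming that experimental endpoint is available in your TensorFlow build:

```python
import tensorflow as tf

# Group elements by parity and sum each group. key_func yields the DT_INT64
# key; the Reducer bundles init_func / reduce_func / finalize_func.
reducer = tf.data.experimental.Reducer(
    init_func=lambda key: tf.constant(0, tf.int64),
    reduce_func=lambda state, elem: state + elem,
    finalize_func=lambda state: state)
ds = tf.data.Dataset.range(10).apply(
    tf.data.experimental.group_by_reducer(
        key_func=lambda elem: elem % 2, reducer=reducer))
print(list(ds.as_numpy_iterator()))  # e.g. [20, 25]: sums of evens and odds
```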
+ }, + { + "name": "Tkey_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Tinit_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Treduce_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Tfinalize_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "key_func_other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `key_func`.", + "typeListAttr": "Tkey_func_other_arguments" + }, + { + "name": "init_func_other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `init_func`.", + "typeListAttr": "Tinit_func_other_arguments" + }, + { + "name": "reduce_func_other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `reduce_func`.", + "typeListAttr": "Treduce_func_other_arguments" + }, + { + "name": "finalize_func_other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `finalize_func`.", + "typeListAttr": "Tfinalize_func_other_arguments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "GroupByWindowDataset", + "summary": "Creates a dataset that computes a windowed group-by on `input_dataset`.", + "description": "// TODO(mrry): Support non-int64 keys.", + "attributes": [ + { + "name": "key_func", + "type": "function", + "description": "A function mapping an element of `input_dataset`, concatenated\nwith `key_func_other_arguments` to a scalar value of type DT_INT64." 
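`GroupByWindowDataset` is the kernel behind `tf.data.Dataset.group_by_window` (formerly `tf.data.experimental.group_by_window`); a small usage sketch:

```python
import tensorflow as tf

# Window elements by parity, then batch each window of up to 3 elements.
ds = tf.data.Dataset.range(10).group_by_window(
    key_func=lambda x: x % 2,                         # DT_INT64 key per element
    reduce_func=lambda key, window: window.batch(3),  # dataset -> dataset per window
    window_size=3)
for batch in ds:
    print(batch.numpy())  # e.g. [0 2 4], [1 3 5], [6 8], [7 9]
```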
+ }, + { + "name": "reduce_func", + "type": "function" + }, + { + "name": "window_size_func", + "type": "function" + }, + { + "name": "Tkey_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Treduce_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Twindow_size_func_other_arguments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "key_func_other_arguments", + "typeListAttr": "Tkey_func_other_arguments" + }, + { + "name": "reduce_func_other_arguments", + "typeListAttr": "Treduce_func_other_arguments" + }, + { + "name": "window_size_func_other_arguments", + "typeListAttr": "Twindow_size_func_other_arguments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "GuaranteeConst", + "summary": "Gives a guarantee to the TF runtime that the input tensor is a constant.", + "description": "The runtime is then free to make optimizations based on this.\n\nOnly accepts value typed tensors as inputs and rejects resource variable handles\nas input.\n\nReturns the input tensor without modification.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "HSVToRGB", + "summary": "Convert one or more images from HSV to RGB.", + "description": "Outputs a tensor of the same shape as the `images` tensor, containing the RGB\nvalue of the pixels. The output is only well defined if the value in `images`\nare in `[0,1]`.\n\nSee `rgb_to_hsv` for a description of the HSV encoding.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "images", + "description": "1-D or higher rank. HSV data to convert. Last dimension must be size 3.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "`images` converted to RGB.", + "typeAttr": "T" + } + ] + }, + { + "name": "HashTable", + "summary": "Creates a non-initialized hash table.", + "description": "This op creates a hash table, specifying the type of its keys and values.\nBefore using the table you will have to initialize it. After initialization the\ntable will be immutable.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "default": "" + }, + { + "name": "use_node_name_sharing", + "type": "boolean", + "description": "If true and shared_name is empty, the table is shared\nusing the node name.", + "default": false + }, + { + "name": "key_dtype", + "type": "type", + "description": "Type of the table keys." + }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." 
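These hash-table kernels surface in the Python API as `tf.lookup.StaticHashTable` (backed by the resource-based `HashTableV2` below): create the table with an initializer, initialize it once, and it is immutable afterwards. A small usage sketch:

```python
import tensorflow as tf

# key_dtype=string, value_dtype=int64; unknown keys map to default_value.
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(
        keys=tf.constant(["a", "b", "c"]),
        values=tf.constant([0, 1, 2], tf.int64)),
    default_value=-1)
print(table.lookup(tf.constant(["b", "z"])).numpy())  # [ 1 -1]
```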
+ } + ], + "outputs": [ + { + "name": "table_handle", + "description": "Handle to a table.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "HashTableV2", + "summary": "Creates a non-initialized hash table.", + "description": "This op creates a hash table, specifying the type of its keys and values.\nBefore using the table you will have to initialize it. After initialization the\ntable will be immutable.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "default": "" + }, + { + "name": "use_node_name_sharing", + "type": "boolean", + "description": "If true and shared_name is empty, the table is shared\nusing the node name.", + "default": false + }, + { + "name": "key_dtype", + "type": "type", + "description": "Type of the table keys." + }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." + } + ], + "outputs": [ + { + "name": "table_handle", + "description": "Handle to a table.", + "type": 20 + } + ] + }, + { + "name": "HistogramFixedWidth", + "summary": "Return histogram of values.", + "description": "Given the tensor `values`, this operation returns a rank 1 histogram counting\nthe number of entries in `values` that fall into every bin. The bins are\nequal width and determined by the arguments `value_range` and `nbins`.\n\n```python\n# Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)\nnbins = 5\nvalue_range = [0.0, 5.0]\nnew_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]\n\nwith tf.get_default_session() as sess:\n hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)\n variables.global_variables_initializer().run()\n sess.run(hist) => [2, 1, 1, 0, 2]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`." + }, + { + "name": "dtype", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "values", + "description": "Numeric `Tensor`.", + "typeAttr": "T" + }, + { + "name": "value_range", + "description": "Shape [2] `Tensor` of same `dtype` as `values`.\nvalues <= value_range[0] will be mapped to hist[0],\nvalues >= value_range[1] will be mapped to hist[-1].", + "typeAttr": "T" + }, + { + "name": "nbins", + "description": "Scalar `int32 Tensor`. 
Number of histogram bins.", + "type": 3 + } + ], + "outputs": [ + { + "name": "out", + "description": "A 1-D `Tensor` holding histogram of values.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "HistogramSummary", + "summary": "Outputs a `Summary` protocol buffer with a histogram.", + "description": "The generated\n[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\nhas one summary value containing a histogram for `values`.\n\nThis op reports an `InvalidArgument` error if any value is not finite.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "tag", + "description": "Scalar. Tag to use for the `Summary.Value`.", + "type": 7 + }, + { + "name": "values", + "description": "Any shape. Values to use to build the histogram.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "summary", + "description": "Scalar. Serialized `Summary` protocol buffer.", + "type": 7 + } + ] + }, + { + "name": "HostConst", + "summary": "Returns a constant tensor on the host. Only for writing C++ tests.", + "attributes": [ + { + "name": "value", + "type": "tensor", + "description": "Attr `value` is the tensor to return." + }, + { + "name": "dtype", + "type": "type" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ] + }, + { + "name": "IFFT", + "summary": "Inverse fast Fourier transform.", + "description": "Computes the inverse 1-dimensional discrete Fourier transform over the\ninner-most dimension of `input`.", + "attributes": [ + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex tensor of the same shape as `input`. The inner-most\n dimension of `input` is replaced with its inverse 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifft\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "IFFT2D", + "summary": "Inverse 2D fast Fourier transform.", + "description": "Computes the inverse 2-dimensional discrete Fourier transform over the\ninner-most 2 dimensions of `input`.", + "attributes": [ + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex tensor of the same shape as `input`. 
The inner-most 2\n dimensions of `input` are replaced with their inverse 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifft2\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "IFFT3D", + "summary": "Inverse 3D fast Fourier transform.", + "description": "Computes the inverse 3-dimensional discrete Fourier transform over the\ninner-most 3 dimensions of `input`.", + "attributes": [ + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex tensor of the same shape as `input`. The inner-most 3\n dimensions of `input` are replaced with their inverse 3D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifftn with 3 dimensions.\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "IFFTND", + "summary": "ND inverse fast Fourier transform.", + "description": "Computes the n-dimensional inverse discrete Fourier transform over designated\ndimensions of `input`. The designated dimensions of `input` are assumed to be\nthe result of `FFTND`.\n\nIf fft_length[i] > shape(input)[i], the input is padded with zeros. If fft_length\nis not given, the default shape(input) is used.\n\nAxes are the dimensions to perform the transform on. Default is to perform on\nall axes.", + "attributes": [ + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + }, + { + "name": "fft_length", + "description": "An int32 tensor. The FFT length for each dimension.", + "type": 3 + }, + { + "name": "axes", + "description": "An int32 tensor with the same shape as fft_length. Axes to perform the transform.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex tensor of the same shape as `input`. The designated dimensions of\n`input` are replaced with their inverse Fourier\ntransforms.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifftn.\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "IRFFT", + "summary": "Inverse real-valued fast Fourier transform.", + "description": "Computes the inverse 1-dimensional discrete Fourier transform of a real-valued\nsignal over the inner-most dimension of `input`.\n\nThe inner-most dimension of `input` is assumed to be the result of `RFFT`: the\n`fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If\n`fft_length` is not provided, it is computed from the size of the inner-most\ndimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to\ncompute `input` is odd, it should be provided since it cannot be inferred\nproperly.\n\nAlong the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller\nthan the corresponding dimension of `input`, the dimension is cropped. 
If it is\nlarger, the dimension is padded with zeros.", + "attributes": [ + { + "name": "Treal", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + }, + { + "name": "fft_length", + "description": "An int32 tensor of shape [1]. The FFT length.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A float32 tensor of the same rank as `input`. The inner-most\n dimension of `input` is replaced with the `fft_length` samples of its inverse\n 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfft\n@end_compatibility", + "typeAttr": "Treal" + } + ] + }, + { + "name": "IRFFT2D", + "summary": "Inverse 2D real-valued fast Fourier transform.", + "description": "Computes the inverse 2-dimensional discrete Fourier transform of a real-valued\nsignal over the inner-most 2 dimensions of `input`.\n\nThe inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:\nThe inner-most dimension contains the `fft_length / 2 + 1` unique components of\nthe DFT of a real-valued signal. If `fft_length` is not provided, it is computed\nfrom the size of the inner-most 2 dimensions of `input`. If the FFT length used\nto compute `input` is odd, it should be provided since it cannot be inferred\nproperly.\n\nAlong each axis `IRFFT2D` is computed on, if `fft_length` (or\n`fft_length / 2 + 1` for the inner-most dimension) is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros.", + "attributes": [ + { + "name": "Treal", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + }, + { + "name": "fft_length", + "description": "An int32 tensor of shape [2]. The FFT length for each dimension.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A float32 tensor of the same rank as `input`. The inner-most 2\n dimensions of `input` are replaced with the `fft_length` samples of their\n inverse 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfft2\n@end_compatibility", + "typeAttr": "Treal" + } + ] + }, + { + "name": "IRFFT3D", + "summary": "Inverse 3D real-valued fast Fourier transform.", + "description": "Computes the inverse 3-dimensional discrete Fourier transform of a real-valued\nsignal over the inner-most 3 dimensions of `input`.\n\nThe inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:\nThe inner-most dimension contains the `fft_length / 2 + 1` unique components of\nthe DFT of a real-valued signal. If `fft_length` is not provided, it is computed\nfrom the size of the inner-most 3 dimensions of `input`. 
If the FFT length used\nto compute `input` is odd, it should be provided since it cannot be inferred\nproperly.\n\nAlong each axis `IRFFT3D` is computed on, if `fft_length` (or\n`fft_length / 2 + 1` for the inner-most dimension) is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros.", + "attributes": [ + { + "name": "Treal", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + }, + { + "name": "fft_length", + "description": "An int32 tensor of shape [3]. The FFT length for each dimension.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A float32 tensor of the same rank as `input`. The inner-most 3\n dimensions of `input` are replaced with the `fft_length` samples of their\n inverse 3D real Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfftn with 3 dimensions.\n@end_compatibility", + "typeAttr": "Treal" + } + ] + }, + { + "name": "IRFFTND", + "summary": "ND inverse real fast Fourier transform.", + "description": "Computes the n-dimensional inverse real discrete Fourier transform over\ndesignated dimensions of `input`. The designated dimensions of `input` are\nassumed to be the result of `RFFTND`. The inner-most dimension contains the\n`fft_length / 2 + 1` unique components of the DFT of a real-valued signal.\n\nIf fft_length[i] > shape(input)[i], the input is padded with zeros. If fft_length\nis not given, the default shape(input) is used.\n\nAxes are the dimensions to perform the transform on. Default is to perform on\nall axes.", + "attributes": [ + { + "name": "Treal", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A complex tensor.", + "typeAttr": "Tcomplex" + }, + { + "name": "fft_length", + "description": "An int32 tensor. The FFT length for each dimension.", + "type": 3 + }, + { + "name": "axes", + "description": "An int32 tensor with the same shape as fft_length. Axes to perform the transform.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A real tensor of the same shape as `input`. 
The designated dimensions of\n`input` are replaced with their inverse real Fourier transforms.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfftn.\n@end_compatibility", + "typeAttr": "Treal" + } + ] + }, + { + "name": "Identity", + "category": "Control", + "summary": "Return a tensor with the same shape and contents as the input tensor or value.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "IdentityN", + "summary": "Returns a list of tensors with the same shapes and contents as the input", + "description": "tensors.\n\nThis op can be used to override the gradient for complicated functions. For\nexample, suppose y = f(x) and we wish to apply a custom function g for backprop\nsuch that dx = g(dy). In Python,\n\n```python\nwith tf.get_default_graph().gradient_override_map(\n {'IdentityN': 'OverrideGradientWithG'}):\n y, _ = identity_n([f(x), x])\n\n@tf.RegisterGradient('OverrideGradientWithG')\ndef ApplyG(op, dy, _):\n return [None, g(dy)] # Do not backprop to f(x).\n```", + "attributes": [ + { + "name": "T", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeListAttr": "T" + } + ] + }, + { + "name": "IdentityReader", + "summary": "A Reader that outputs the queued work as both the key and value.", + "description": "To use, enqueue strings in a Queue. ReaderRead will take the front\nwork string and output (work, work).", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "default": "" + } + ], + "outputs": [ + { + "name": "reader_handle", + "description": "The handle to reference the Reader.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "IdentityReaderV2", + "summary": "A Reader that outputs the queued work as both the key and value.", + "description": "To use, enqueue strings in a Queue. ReaderRead will take the front\nwork string and output (work, work).", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "default": "" + } + ], + "outputs": [ + { + "name": "reader_handle", + "description": "The handle to reference the Reader.", + "type": 20 + } + ] + }, + { + "name": "If", + "summary": "output = cond ? then_branch(input) : else_branch(input)", + "attributes": [ + { + "name": "Tcond", + "type": "type" + }, + { + "name": "Tin", + "type": "type[]", + "description": "A list of input types.", + "minimum": 0 + }, + { + "name": "Tout", + "type": "type[]", + "description": "A list of output types.", + "minimum": 0 + }, + { + "name": "then_branch", + "type": "function", + "description": " A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what else_branch returns." 
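The `If` op is what `tf.cond` lowers to when traced inside a `tf.function`: the predicate becomes `cond` and the traced branch functions become `then_branch` / `else_branch`. A small sketch:

```python
import tensorflow as tf

@tf.function
def clipped_double(x):
    # In the traced graph this becomes an If (or StatelessIf) node.
    return tf.cond(x > 10.0,
                   lambda: tf.constant(10.0),  # then_branch
                   lambda: x * 2.0)            # else_branch

print(clipped_double(tf.constant(7.0)).numpy())   # 14.0
print(clipped_double(tf.constant(12.0)).numpy())  # 10.0
```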
+ }, + { + "name": "else_branch", + "type": "function", + "description": " A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what then_branch returns." + }, + { + "name": "output_shapes", + "type": "shape[]", + "default": [] + } + ], + "inputs": [ + { + "name": "cond", + "description": " A Tensor. If the tensor is a scalar of non-boolean type, the\n scalar is converted to a boolean according to the\n following rule: if the scalar is a numerical value, non-zero means\n `True` and zero means `False`; if the scalar is a string, non-empty\n means `True` and empty means `False`. If the tensor is not a scalar,\n being empty means `False` and being non-empty means `True`.", + "typeAttr": "Tcond" + }, + { + "name": "input", + "description": "A list of input tensors.", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of return values.", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "Igamma", + "summary": "Compute the lower regularized incomplete Gamma function `P(a, x)`.", + "description": "The lower regularized incomplete Gamma function is defined as:\n\n\\\\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\\\)\n\nwhere\n\n\\\\(gamma(a, x) = \\\\int_{0}^{x} t^{a-1} exp(-t) dt\\\\)\n\nis the lower incomplete Gamma function.\n\nNote, above `Q(a, x)` (`Igammac`) is the upper regularized incomplete\nGamma function.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "IgammaGradA", + "summary": "Computes the gradient of `igamma(a, x)` wrt `a`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "Igammac", + "summary": "Compute the upper regularized incomplete Gamma function `Q(a, x)`.", + "description": "The upper regularized incomplete Gamma function is defined as:\n\n\\\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\\\)\n\nwhere\n\n\\\\(Gamma(a, x) = \\\\int_{x}^{\\\\infty} t^{a-1} exp(-t) dt\\\\)\n\nis the upper incomplete Gamma function.\n\nNote, above `P(a, x)` (`Igamma`) is the lower regularized incomplete\nGamma function.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
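Since `P(a, x)` and `Q(a, x)` are complementary, `Igamma` and `Igammac` should sum to 1 elementwise; a quick check through the public API:

```python
import tensorflow as tf

a = tf.constant([0.5, 1.0, 2.0])
x = tf.constant([1.0, 2.0, 3.0])
p = tf.math.igamma(a, x)   # lower regularized incomplete Gamma, P(a, x)
q = tf.math.igammac(a, x)  # upper regularized incomplete Gamma, Q(a, x)
print((p + q).numpy())     # ~[1. 1. 1.]
```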
+ } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "IgnoreErrorsDataset", + "summary": "Creates a dataset that contains the elements of `input_dataset` ignoring errors.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "log_warning", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Imag", + "summary": "Returns the imaginary part of a complex number.", + "description": "Given a tensor `input` of complex numbers, this operation returns a tensor of\ntype `float` that is the imaginary part of each element in `input`. All\nelements in `input` must be complex numbers of the form \\\\(a + bj\\\\), where *a*\nis the real part and *b* is the imaginary part returned by this operation.\n\nFor example:\n\n```\n# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.imag(input) ==> [4.75, 5.75]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + }, + { + "name": "Tout", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "Tout" + } + ] + }, + { + "name": "ImageProjectiveTransformV2", + "summary": "Applies the given transform to each of the images.", + "description": "If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps\nthe *output* point `(x, y)` to a transformed *input* point\n`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where\n`k = c0 x + c1 y + 1`. If the transformed point lays outside of the input\nimage, the output pixel is set to 0.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "Input dtype. Must be one of the following: `uint8`, `int32`, `int64`, `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "interpolation", + "type": "string", + "description": "Interpolation method, \"NEAREST\" or \"BILINEAR\"." + }, + { + "name": "fill_mode", + "type": "string", + "description": "Fill mode, \"REFLECT\", \"WRAP\", or \"CONSTANT\".", + "default": "CONSTANT" + } + ], + "inputs": [ + { + "name": "images", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "dtype" + }, + { + "name": "transforms", + "description": "2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3\nprojective transformation matrix, with the last entry assumed to be 1. 
If there\nis one row, the same transformation will be applied to all images.", + "type": 1 + }, + { + "name": "output_shape", + "description": "1-D Tensor [new_height, new_width].", + "type": 3 + } + ], + "outputs": [ + { + "name": "transformed_images", + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ImageProjectiveTransformV3", + "summary": "Applies the given transform to each of the images.", + "description": "If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps\nthe *output* point `(x, y)` to a transformed *input* point\n`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where\n`k = c0 x + c1 y + 1`. If the transformed point lays outside of the input\nimage, the output pixel is set to fill_value.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "Input dtype. Must be one of the following: `uint8`, `int32`, `int64`, `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "interpolation", + "type": "string", + "description": "Interpolation method, \"NEAREST\" or \"BILINEAR\"." + }, + { + "name": "fill_mode", + "type": "string", + "description": "Fill mode, \"REFLECT\", \"WRAP\", \"CONSTANT\", or \"NEAREST\".", + "default": "CONSTANT" + } + ], + "inputs": [ + { + "name": "images", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "dtype" + }, + { + "name": "transforms", + "description": "2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3\nprojective transformation matrix, with the last entry assumed to be 1. If there\nis one row, the same transformation will be applied to all images.", + "type": 1 + }, + { + "name": "output_shape", + "description": "1-D Tensor [new_height, new_width].", + "type": 3 + }, + { + "name": "fill_value", + "description": "float, the value to be filled when fill_mode is constant\".", + "type": 1 + } + ], + "outputs": [ + { + "name": "transformed_images", + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ImageSummary", + "summary": "Outputs a `Summary` protocol buffer with images.", + "description": "The summary has up to `max_images` summary values containing images. The\nimages are built from `tensor` which must be 4-D with shape `[batch_size,\nheight, width, channels]` and where `channels` can be:\n\n* 1: `tensor` is interpreted as Grayscale.\n* 3: `tensor` is interpreted as RGB.\n* 4: `tensor` is interpreted as RGBA.\n\nThe images have the same number of channels as the input tensor. For float\ninput, the values are normalized one image at a time to fit in the range\n`[0, 255]`. `uint8` values are unchanged. The op uses two different\nnormalization algorithms:\n\n* If the input values are all positive, they are rescaled so the largest one\n is 255.\n\n* If any input value is negative, the values are shifted so input value 0.0\n is at 127. They are then rescaled so that either the smallest value is 0,\n or the largest one is 255.\n\nThe `tag` argument is a scalar `Tensor` of type `string`. It is used to\nbuild the `tag` of the summary values:\n\n* If `max_images` is 1, the summary value tag is '*tag*/image'.\n* If `max_images` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.\n\nThe `bad_color` argument is the color to use in the generated images for\nnon-finite input values. 
It is a `uint8` 1-D tensor of length `channels`.\nEach element must be in the range `[0, 255]` (It represents the value of a\npixel in the output image). Non-finite values in the input tensor are\nreplaced by this tensor in the output image. The default value is the color\nred.", + "attributes": [ + { + "name": "max_images", + "type": "int64", + "description": "Max number of batch elements to generate images for.", + "minimum": 1, + "default": 3 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `float32`, `float16`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "bad_color", + "type": "tensor", + "description": "Color to use for pixels with non-finite values.", + "default": { + "type": "tensor", + "value": "?" + } + } + ], + "inputs": [ + { + "name": "tag", + "description": "Scalar. Used to build the `tag` attribute of the summary values.", + "type": 7 + }, + { + "name": "tensor", + "description": "4-D of shape `[batch_size, height, width, channels]` where\n`channels` is 1, 3, or 4.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "summary", + "description": "Scalar. Serialized `Summary` protocol buffer.", + "type": 7 + } + ] + }, + { + "name": "ImmutableConst", + "summary": "Returns immutable tensor from memory region.", + "description": "The current implementation memmaps the tensor from a file.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "Type of the returned tensor." + }, + { + "name": "shape", + "type": "shape", + "description": "Shape of the returned tensor." + }, + { + "name": "memory_region_name", + "type": "string", + "description": "Name of readonly memory region used by the tensor, see\nNewReadOnlyMemoryRegionFromFile in tensorflow::Env." + } + ], + "outputs": [ + { + "name": "tensor", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ImportEvent", + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "event", + "type": 7 + } + ] + }, + { + "name": "InTopK", + "summary": "Says whether the targets are in the top `K` predictions.", + "description": "This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the\nprediction for the target class is among the top `k` predictions among\nall predictions for example `i`. Note that the behavior of `InTopK` differs\nfrom the `TopK` op in its handling of ties; if multiple classes have the\nsame prediction value and straddle the top-`k` boundary, all of those\nclasses are considered to be in the top `k`.\n\nMore formally, let\n\n \\\\(predictions_i\\\\) be the predictions for all classes for example `i`,\n \\\\(targets_i\\\\) be the target class for example `i`,\n \\\\(out_i\\\\) be the output for example `i`,\n\n$$out_i = predictions_{i, targets_i} \\in TopKIncludingTies(predictions_i)$$", + "attributes": [ + { + "name": "k", + "type": "int64", + "description": "Number of top elements to look at for computing precision." 
+ }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "predictions", + "description": "A `batch_size` x `classes` tensor.", + "type": 1 + }, + { + "name": "targets", + "description": "A `batch_size` vector of class ids.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "precision", + "description": "Computed Precision at `k` as a `bool Tensor`.", + "type": 10 + } + ] + }, + { + "name": "InTopKV2", + "summary": "Says whether the targets are in the top `K` predictions.", + "description": "This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the\nprediction for the target class is among the top `k` predictions among\nall predictions for example `i`. Note that the behavior of `InTopK` differs\nfrom the `TopK` op in its handling of ties; if multiple classes have the\nsame prediction value and straddle the top-`k` boundary, all of those\nclasses are considered to be in the top `k`.\n\nMore formally, let\n\n \\\\(predictions_i\\\\) be the predictions for all classes for example `i`,\n \\\\(targets_i\\\\) be the target class for example `i`,\n \\\\(out_i\\\\) be the output for example `i`,\n\n$$out_i = predictions_{i, targets_i} \\in TopKIncludingTies(predictions_i)$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "predictions", + "description": "A `batch_size` x `classes` tensor.", + "type": 1 + }, + { + "name": "targets", + "description": "A `batch_size` vector of class ids.", + "typeAttr": "T" + }, + { + "name": "k", + "description": "Number of top elements to look at for computing precision.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "precision", + "description": "Computed precision at `k` as a `bool Tensor`.", + "type": 10 + } + ] + }, + { + "name": "InfeedDequeue", + "summary": "A placeholder op for a value that will be fed into the computation.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of elements in the tensor." + }, + { + "name": "shape", + "type": "shape", + "description": "The shape of the tensor." + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor that will be provided using the infeed mechanism.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "InfeedDequeueTuple", + "summary": "Fetches multiple values from infeed as an XLA tuple.", + "attributes": [ + { + "name": "dtypes", + "type": "type[]", + "description": "The element types of each element in `outputs`.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shapes of each tensor in `outputs`." + } + ], + "outputs": [ + { + "name": "outputs", + "description": "A list of tensors that will be provided using the infeed mechanism.", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "InfeedEnqueue", + "summary": "An op which feeds a single Tensor value into the computation.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of elements in the tensor." + }, + { + "name": "shape", + "type": "shape", + "description": "The shape of the tensor.", + "default": { + "type": "shape", + "value": "?" 
+ } + }, + { + "name": "layout", + "type": "int64[]", + "description": "A vector holding the requested layout in minor-to-major sequence.\nIf a layout attribute is passed, but its values are all -1, the layout will\nbe computed by the infeed operation.", + "default": [] + }, + { + "name": "device_ordinal", + "type": "int64", + "description": "The TPU device to use. This should be -1 when the Op\nis running on a TPU device, and >= 0 when the Op is running on the CPU\ndevice.", + "default": -1 + } + ], + "inputs": [ + { + "name": "input", + "description": "A tensor that will be provided using the infeed mechanism.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "InfeedEnqueuePrelinearizedBuffer", + "summary": "An op which enqueues prelinearized buffer into TPU infeed.", + "attributes": [ + { + "name": "device_ordinal", + "type": "int64", + "description": "The TPU device to use. This should be -1 when the Op is running on a TPU device\nand = 0 when the Op is running on the CPU device.", + "default": -1 + } + ], + "inputs": [ + { + "name": "input", + "description": "A variant tensor representing linearized output.", + "type": 21 + } + ] + }, + { + "name": "InfeedEnqueueTuple", + "summary": "Feeds multiple Tensor values into the computation as an XLA tuple.", + "attributes": [ + { + "name": "dtypes", + "type": "type[]", + "description": "The element types of each element in `inputs`.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shapes of each tensor in `inputs`." + }, + { + "name": "layouts", + "type": "int64[]", + "description": "A vector holding the requested layout in minor-to-major sequence for\nall the tuple shapes, in the order the shapes appear in the \"shapes\" input.\nThe layout elements for a sub-shape can be set to -1, in which case the\ncorresponding layout will be computed by the infeed operation.", + "default": [] + }, + { + "name": "device_ordinal", + "type": "int64", + "description": "The TPU device to use. 
This should be -1 when the Op\nis running on a TPU device, and >= 0 when the Op is running on the CPU\ndevice.", + "default": -1 + } + ], + "inputs": [ + { + "name": "inputs", + "description": "A list of tensors that will be provided using the infeed mechanism.", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "InitializeTable", + "summary": "Table initializer that takes two tensors for keys and values respectively.", + "attributes": [ + { + "name": "Tkey", + "type": "type" + }, + { + "name": "Tval", + "type": "type" + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to a table which will be initialized.", + "type": 7, + "isRef": true + }, + { + "name": "keys", + "description": "Keys of type Tkey.", + "typeAttr": "Tkey" + }, + { + "name": "values", + "description": "Values of type Tval.", + "typeAttr": "Tval" + } + ] + }, + { + "name": "InitializeTableFromDataset", + "inputs": [ + { + "name": "table_handle", + "type": 20 + }, + { + "name": "dataset", + "type": 21 + } + ] + }, + { + "name": "InitializeTableFromTextFile", + "summary": "Initializes a table from a text file.", + "description": "It inserts one key-value pair into the table for each line of the file.\nThe key and value are extracted from the whole line content, from elements of the\nsplit line based on `delimiter`, or from the line number (starting from zero).\nWhere to extract the key and value from a line is specified by `key_index` and\n`value_index`.\n\n- A value of -1 means use the line number (starting from zero); expects `int64`.\n- A value of -2 means use the whole line content; expects `string`.\n- A value >= 0 means use the index (starting at zero) of the split line based\n on `delimiter`.", + "attributes": [ + { + "name": "key_index", + "type": "int64", + "description": "Column index in a line to get the table `key` values from.", + "minimum": -2 + }, + { + "name": "value_index", + "type": "int64", + "description": "Column index that represents information of a line to get the table\n`value` values from.", + "minimum": -2 + }, + { + "name": "vocab_size", + "type": "int64", + "description": "Number of elements of the file, use -1 if unknown.", + "minimum": -1, + "default": -1 + }, + { + "name": "delimiter", + "type": "string", + "description": "Delimiter to separate fields in a line.", + "default": "\t" + }, + { + "name": "offset", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to a table which will be initialized.", + "type": 7, + "isRef": true + }, + { + "name": "filename", + "description": "Filename of a vocabulary text file.", + "type": 7 + } + ] + }, + { + "name": "InitializeTableFromTextFileV2", + "summary": "Initializes a table from a text file.", + "description": "It inserts one key-value pair into the table for each line of the file.\nThe key and value are extracted from the whole line content, from elements of the\nsplit line based on `delimiter`, or from the line number (starting from zero).\nWhere to extract the key and value from a line is specified by `key_index` and\n`value_index`.\n\n- A value of -1 means use the line number (starting from zero); expects `int64`.\n- A value of -2 means use the whole line content; expects `string`.\n- A value >= 0 means use the index (starting at zero) of the split line based\n on `delimiter`.", + "attributes": [ + { + "name": "key_index", + "type": "int64", + "description": "Column index in a line to get the table `key` values from.", + "minimum": -2 + }, + { + "name": "value_index", + "type": 
"int64", + "description": "Column index that represents information of a line to get the table\n`value` values from.", + "minimum": -2 + }, + { + "name": "vocab_size", + "type": "int64", + "description": "Number of elements of the file, use -1 if unknown.", + "minimum": -1, + "default": -1 + }, + { + "name": "delimiter", + "type": "string", + "description": "Delimiter to separate fields in a line.", + "default": "\t" + }, + { + "name": "offset", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to a table which will be initialized.", + "type": 20 + }, + { + "name": "filename", + "description": "Filename of a vocabulary text file.", + "type": 7 + } + ] + }, + { + "name": "InitializeTableV2", + "summary": "Table initializer that takes two tensors for keys and values respectively.", + "attributes": [ + { + "name": "Tkey", + "type": "type" + }, + { + "name": "Tval", + "type": "type" + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to a table which will be initialized.", + "type": 20 + }, + { + "name": "keys", + "description": "Keys of type Tkey.", + "typeAttr": "Tkey" + }, + { + "name": "values", + "description": "Values of type Tval.", + "typeAttr": "Tval" + } + ] + }, + { + "name": "InplaceAdd", + "summary": "Adds v into specified rows of x.", + "description": " Computes y = x; y[i, :] += v; return y.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "description": "A `Tensor` of type T.", + "typeAttr": "T" + }, + { + "name": "i", + "description": "A vector. Indices into the left-most dimension of `x`.", + "type": 3 + }, + { + "name": "v", + "description": "A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "description": "A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.", + "typeAttr": "T" + } + ] + }, + { + "name": "InplaceSub", + "summary": " Subtracts `v` into specified rows of `x`.\n\n Computes y = x; y[i, :] -= v; return y.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "description": "A `Tensor` of type T.", + "typeAttr": "T" + }, + { + "name": "i", + "description": "A vector. Indices into the left-most dimension of `x`.", + "type": 3 + }, + { + "name": "v", + "description": "A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "description": "A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.", + "typeAttr": "T" + } + ] + }, + { + "name": "InplaceUpdate", + "summary": "Updates specified rows 'i' with values 'v'.", + "description": "Computes `x[i, :] = v; return x`.\n\nOriginally this function is mutative however for compilation we make this\noperation create / operate on a copy of `x`.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "description": "A tensor of type `T`.", + "typeAttr": "T" + }, + { + "name": "i", + "description": "A vector. Indices into the left-most dimension of `x`.", + "type": 3 + }, + { + "name": "v", + "description": "A `Tensor` of type T. 
Same dimension sizes as x except the first dimension, which must be the same as i's size.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "description": "A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.", + "typeAttr": "T" + } + ] + }, + { + "name": "InterleaveDataset", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "description": "Unlike MapDataset, the `f` in InterleaveDataset is expected to return\na Dataset variant, and InterleaveDataset will flatten successive\nresults into a single Dataset. Unlike FlatMapDataset,\nInterleaveDataset will interleave sequences of up to `block_length`\nconsecutive elements from `cycle_length` input elements.", + "attributes": [ + { + "name": "f", + "type": "function", + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`." + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "name": "cycle_length", + "type": 9 + }, + { + "name": "block_length", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Inv", + "summary": "Computes the reciprocal of x element-wise.", + "description": "I.e., \\\\(y = 1 / x\\\\).", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "InvGrad", + "summary": "Computes the gradient for the inverse of `x` wrt its input.", + "description": "Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`\nis the corresponding input gradient.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "Invert", + "summary": "Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010.", + "description": "Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101.\nThis operation is performed on each element of the tensor argument `x`.\n\nExample:\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\n\n# flip 2 (00000010) to -3 (11111101)\ntf.assert_equal(-3, bitwise_ops.invert(2))\n\ndtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,\n tf.uint8, tf.uint16, tf.uint32, tf.uint64]\n\ninputs = [0, 5, 3, 14]\nfor dtype in dtype_list:\n # Because of issues with negative numbers, let's test this indirectly.\n # 1. invert(a) and a = 0\n # 2. 
invert(a) or a = invert(0)\n input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)\n not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and(\n input_tensor, bitwise_ops.invert(input_tensor)),\n bitwise_ops.bitwise_or(\n input_tensor, bitwise_ops.invert(input_tensor)),\n bitwise_ops.invert(\n tf.constant(0, dtype=dtype))]\n\n expected = tf.constant([0, 0, 0, 0], dtype=tf.float32)\n tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected)\n\n expected = tf.cast([not_0] * 4, tf.float32)\n tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected)\n\n # For unsigned dtypes let's also check the result directly.\n if dtype.is_unsigned:\n inverted = bitwise_ops.invert(input_tensor)\n expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32)\n tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32))\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "InvertPermutation", + "summary": "Computes the inverse permutation of a tensor.", + "description": "This operation computes the inverse of an index permutation. It takes a 1-D\ninteger tensor `x`, which represents the indices of a zero-based array, and\nswaps each value with its index position. In other words, for an output tensor\n`y` and an input tensor `x`, this operation computes the following:\n\n`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`\n\nThe values must include 0. There can be no duplicate values or negative values.\n\nFor example:\n\n```\n# tensor `x` is [3, 4, 0, 2, 1]\ninvert_permutation(x) ==> [2, 4, 3, 0, 1]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "description": "1-D.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "description": "1-D.", + "typeAttr": "T" + } + ] + }, + { + "name": "IsBoostedTreesEnsembleInitialized", + "summary": "Checks whether a tree ensemble has been initialized.", + "inputs": [ + { + "name": "tree_ensemble_handle", + "description": "Handle to the tree ensemble resource.", + "type": 20 + } + ], + "outputs": [ + { + "name": "is_initialized", + "description": "output boolean on whether it is initialized or not.", + "type": 10 + } + ] + }, + { + "name": "IsBoostedTreesQuantileStreamResourceInitialized", + "summary": "Checks whether a quantile stream has been initialized.", + "description": "An Op that checks if quantile stream resource is initialized.", + "inputs": [ + { + "name": "quantile_stream_resource_handle", + "description": "resource; The reference to quantile stream resource handle.", + "type": 20 + } + ], + "outputs": [ + { + "name": "is_initialized", + "description": "bool; True if the resource is initialized, False otherwise.", + "type": 10 + } + ] + }, + { + "name": "IsFinite", + "summary": "Returns which elements of x are finite.", + "description": "@compatibility(numpy)\nEquivalent to np.isfinite\n@end_compatibility\n\nExample:\n\n```python\nx = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan])\ntf.math.is_finite(x) ==> [True, True, True, False, False]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, 
`float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "type": 10 + } + ] + }, + { + "name": "IsInf", + "summary": "Returns which elements of x are Inf.", + "description": "@compatibility(numpy)\nEquivalent to np.isinf\n@end_compatibility\n\nExample:\n\n```python\nx = tf.constant([5.0, np.inf, 6.8, np.inf])\ntf.math.is_inf(x) ==> [False, True, False, True]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "type": 10 + } + ] + }, + { + "name": "IsNan", + "summary": "Returns which elements of x are NaN.", + "description": "@compatibility(numpy)\nEquivalent to np.isnan\n@end_compatibility\n\nExample:\n\n```python\nx = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf])\ntf.math.is_nan(x) ==> [False, True, False, True, False]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "type": 10 + } + ] + }, + { + "name": "IsTPUEmbeddingInitialized", + "summary": "Whether TPU Embedding is initialized in a distributed TPU system.", + "attributes": [ + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "is_tpu_embedding_initialized", + "type": 10 + } + ] + }, + { + "name": "IsVariableInitialized", + "summary": "Checks whether a tensor has been initialized.", + "description": "Outputs boolean scalar indicating whether the tensor has been initialized.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of elements in the variable tensor." + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a `Variable` node. May be uninitialized.", + "typeAttr": "dtype", + "isRef": true + } + ], + "outputs": [ + { + "name": "is_initialized", + "type": 10 + } + ] + }, + { + "name": "IsotonicRegression", + "summary": "Solves a batch of isotonic regression problems.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "output_dtype", + "type": "type", + "description": "Dtype of output. 
Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A (batch_size, dim)-tensor holding a batch of inputs.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A (batch_size, dim)-tensor holding the per-batch element solutions.", + "typeAttr": "output_dtype" + }, + { + "name": "segments", + "description": "An int32 (batch_size, dim)-tensor with the segments.", + "type": 3 + } + ] + }, + { + "name": "Iterator", + "summary": "A container for an iterator resource.", + "attributes": [ + { + "name": "shared_name", + "type": "string" + }, + { + "name": "container", + "type": "string" + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "outputs": [ + { + "name": "handle", + "description": "A handle to the iterator that can be passed to a \"MakeIterator\"\nor \"IteratorGetNext\" op.", + "type": 20 + } + ] + }, + { + "name": "IteratorFromStringHandle", + "summary": "Converts the given string representing a handle to an iterator to a resource.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "description": "If specified, defines the type of each tuple component in an\nelement produced by the resulting iterator.", + "minimum": 0, + "default": [] + }, + { + "name": "output_shapes", + "type": "shape[]", + "description": "If specified, defines the shape of each tuple component in an\nelement produced by the resulting iterator.", + "minimum": 0, + "default": [] + } + ], + "inputs": [ + { + "name": "string_handle", + "description": "A string representation of the given handle.", + "type": 7 + } + ], + "outputs": [ + { + "name": "resource_handle", + "description": "A handle to an iterator resource.", + "type": 20 + } + ] + }, + { + "name": "IteratorFromStringHandleV2", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 0, + "default": [] + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 0, + "default": [] + } + ], + "inputs": [ + { + "name": "string_handle", + "type": 7 + } + ], + "outputs": [ + { + "name": "resource_handle", + "type": 20 + } + ] + }, + { + "name": "IteratorGetDevice", + "summary": "Returns the name of the device on which `resource` has been placed.", + "inputs": [ + { + "name": "resource", + "type": 20 + } + ], + "outputs": [ + { + "name": "device", + "type": 7 + } + ] + }, + { + "name": "IteratorGetNext", + "summary": "Gets the next output from the given iterator.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "iterator", + "type": 20 + } + ], + "outputs": [ + { + "name": "components", + "typeListAttr": "output_types" + } + ] + }, + { + "name": "IteratorGetNextAsOptional", + "summary": "Gets the next output from the given iterator as an Optional variant.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "iterator", + "type": 20 + } + ], + "outputs": [ + { + "name": "optional", + "type": 21 + } + ] + }, + { + "name": "IteratorGetNextSync", + "summary": "Gets the next output from the given iterator.", + "description": "This operation is a synchronous version of 
IteratorGetNext. It should only be used\nin situations where the iterator does not block the calling thread, or where\nthe calling thread is not a member of the thread pool used to execute parallel\noperations (e.g. in eager mode).", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "iterator", + "type": 20 + } + ], + "outputs": [ + { + "name": "components", + "typeListAttr": "output_types" + } + ] + }, + { + "name": "IteratorToStringHandle", + "summary": "Converts the given `resource_handle` representing an iterator to a string.", + "inputs": [ + { + "name": "resource_handle", + "description": "A handle to an iterator resource.", + "type": 20 + } + ], + "outputs": [ + { + "name": "string_handle", + "description": "A string representation of the given handle.", + "type": 7 + } + ] + }, + { + "name": "IteratorV2", + "attributes": [ + { + "name": "shared_name", + "type": "string" + }, + { + "name": "container", + "type": "string" + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + }, + { + "name": "KMC2ChainInitialization", + "summary": "Returns the index of a data point that should be added to the seed set.", + "description": "Entries in distances are assumed to be squared distances of candidate points to\nthe already sampled centers in the seed set. The op constructs one Markov chain\nof the k-MC^2 algorithm and returns the index of one candidate point to be added\nas an additional cluster center.", + "inputs": [ + { + "name": "distances", + "description": "Vector with squared distances to the closest previously sampled cluster center\nfor each candidate point.", + "type": 1 + }, + { + "name": "seed", + "description": "Scalar. Seed for initializing the random number generator.", + "type": 9 + } + ], + "outputs": [ + { + "name": "index", + "description": "Scalar with the index of the sampled point.", + "type": 9 + } + ] + }, + { + "name": "KmeansPlusPlusInitialization", + "summary": "Selects num_to_sample rows of input using the KMeans++ criterion.", + "description": "Rows of points are assumed to be input points. One row is selected at random.\nSubsequent rows are sampled with probability proportional to the squared L2\ndistance from the nearest row selected thus far till num_to_sample rows have\nbeen sampled.", + "inputs": [ + { + "name": "points", + "description": "Matrix of shape (n, d). Rows are assumed to be input points.", + "type": 1 + }, + { + "name": "num_to_sample", + "description": "Scalar. The number of rows to sample. This value must not be larger than n.", + "type": 9 + }, + { + "name": "seed", + "description": "Scalar. Seed for initializing the random number generator.", + "type": 9 + }, + { + "name": "num_retries_per_sample", + "description": "Scalar. For each row that is sampled, this parameter\nspecifies the number of additional points to draw from the current\ndistribution before selecting the best. If a negative value is specified, a\nheuristic is used to sample O(log(num_to_sample)) additional points.", + "type": 9 + } + ], + "outputs": [ + { + "name": "samples", + "description": "Matrix of shape (num_to_sample, d). The sampled rows.", + "type": 1 + } + ] + }, + { + "name": "KthOrderStatistic", + "summary": "Computes the Kth order statistic of a data set. 
The current", + "description": "implementation uses a binary search requiring exactly 32 passes over\nthe input data. The running time is linear with respect to input\nsize. The median-of-medians algorithm is probably faster, but is\ndifficult to implement efficiently in XLA. The implementation imposes\na total ordering on floats. The ordering is consistent with the usual\npartial order. Positive NaNs are greater than positive\ninfinity. Negative NaNs are less than negative infinity. NaNs with\ndistinct payloads are treated as distinct. Subnormal numbers are\npreserved (not flushed to zero). Positive infinity is greater than all\nnumbers. Negative infinity is less than all numbers. Positive is\ngreater than negative zero. There are less than k values greater than\nthe kth order statistic. There are at least k values greater than or\nequal to the Kth order statistic. The semantics are not the same as\ntop_k_unique.", + "attributes": [ + { + "name": "k", + "type": "int64" + } + ], + "inputs": [ + { + "name": "input", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "type": 1 + } + ] + }, + { + "name": "L2Loss", + "summary": "L2 Loss.", + "description": "Computes half the L2 norm of a tensor without the `sqrt`:\n\n output = sum(t ** 2) / 2", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "t", + "description": "Typically 2-D, but may have any dimensions.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "0-D.", + "typeAttr": "T" + } + ] + }, + { + "name": "LMDBDataset", + "summary": "Creates a dataset that emits the key-value pairs in one or more LMDB files.", + "description": "The Lightning Memory-Mapped Database Manager, or LMDB, is an embedded binary\nkey-value database. This dataset can read the contents of LMDB database files,\nthe names of which generally have the `.mdb` suffix.\n\nEach output element consists of a key-value pair represented as a pair of\nscalar string `Tensor`s, where the first `Tensor` contains the key and the\nsecond `Tensor` contains the value.\n\nLMDB uses different file formats on big- and little-endian machines.\n`LMDBDataset` can only read files in the format of the host machine.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "filenames", + "description": "A scalar or a vector containing the name(s) of the binary file(s) to be\nread.", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "LMDBReader", + "summary": "A Reader that outputs the records from a LMDB file.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. 
Otherwise, the node name is used instead.", + "default": "" + } + ], + "outputs": [ + { + "name": "reader_handle", + "description": "The handle to reference the Reader.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "LRN", + "category": "Normalization", + "summary": "Local Response Normalization.", + "description": "The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last\ndimension), and each vector is normalized independently. Within a given vector,\neach component is divided by the weighted, squared sum of inputs within\n`depth_radius`. In detail,\n\n sqr_sum[a, b, c, d] =\n sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)\n output = input / (bias + alpha * sqr_sum) ** beta\n\nFor details, see [Krizhevsky et al., ImageNet classification with deep\nconvolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).", + "attributes": [ + { + "name": "depth_radius", + "type": "int64", + "description": "0-D. Half-width of the 1-D normalization window.", + "default": 5 + }, + { + "name": "bias", + "type": "float32", + "description": "An offset (usually positive to avoid dividing by 0).", + "default": 1.0 + }, + { + "name": "alpha", + "type": "float32", + "description": "A scale factor, usually positive.", + "default": 1.0 + }, + { + "name": "beta", + "type": "float32", + "description": "An exponent.", + "default": 0.5 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "LRNGrad", + "summary": "Gradients for Local Response Normalization.", + "attributes": [ + { + "name": "depth_radius", + "type": "int64", + "description": "A depth radius.", + "default": 5 + }, + { + "name": "bias", + "type": "float32", + "description": "An offset (usually > 0 to avoid dividing by 0).", + "default": 1.0 + }, + { + "name": "alpha", + "type": "float32", + "description": "A scale factor, usually positive.", + "default": 1.0 + }, + { + "name": "beta", + "type": "float32", + "description": "An exponent.", + "default": 0.5 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "input_grads", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "input_image", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "output_image", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The gradients for LRN.", + "typeAttr": "T" + } + ] + }, + { + "name": "LSTMBlockCell", + "category": "Layer", + "summary": "Computes the LSTM cell forward propagation for 1 time step.", + "description": "This implementation uses 1 weight matrix and 1 bias vector, and there's an\noptional peephole connection.\n\nThis kernel op implements the following mathematical equations:\n\n```python\nxh = [x, h_prev]\n[i, f, ci, o] = xh * w + b\nf = f + forget_bias\n\nif not use_peephole:\n wci = wcf = wco = 0\n\ni = sigmoid(cs_prev * wci + i)\nf = sigmoid(cs_prev * wcf 
+ f)\nci = tanh(ci)\n\ncs = ci .* i + cs_prev .* f\ncs = clip(cs, cell_clip)\n\no = sigmoid(cs * wco + o)\nco = tanh(cs)\nh = co .* o\n```", + "attributes": [ + { + "name": "forget_bias", + "type": "float32", + "description": "The forget gate bias.", + "default": 1.0 + }, + { + "name": "cell_clip", + "type": "float32", + "description": "Value to clip the 'cs' value to.", + "default": 3.0 + }, + { + "name": "use_peephole", + "type": "boolean", + "description": "Whether to use peephole weights.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`." + } + ], + "inputs": [ + { + "name": "x", + "description": "The input to the LSTM cell, shape (batch_size, num_inputs).", + "typeAttr": "T" + }, + { + "name": "cs_prev", + "description": "Value of the cell state at previous time step.", + "typeAttr": "T" + }, + { + "name": "h_prev", + "description": "Output of the previous cell at previous time step.", + "typeAttr": "T" + }, + { + "name": "w", + "description": "The weight matrix.", + "typeAttr": "T" + }, + { + "name": "wci", + "description": "The weight matrix for input gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wcf", + "description": "The weight matrix for forget gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wco", + "description": "The weight matrix for output gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "b", + "description": "The bias vector.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "i", + "description": "The input gate.", + "typeAttr": "T" + }, + { + "name": "cs", + "description": "The cell state before the tanh.", + "typeAttr": "T" + }, + { + "name": "f", + "description": "The forget gate.", + "typeAttr": "T" + }, + { + "name": "o", + "description": "The output gate.", + "typeAttr": "T" + }, + { + "name": "ci", + "description": "The cell input.", + "typeAttr": "T" + }, + { + "name": "co", + "description": "The cell after the tanh.", + "typeAttr": "T" + }, + { + "name": "h", + "description": "The output h vector.", + "typeAttr": "T" + } + ] + }, + { + "name": "LSTMBlockCellGrad", + "summary": "Computes the LSTM cell backward propagation for 1 timestep.", + "description": "This implementation is to be used in conjunction with LSTMBlockCell.", + "attributes": [ + { + "name": "use_peephole", + "type": "boolean", + "description": "Whether the cell uses peephole connections." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`." 
+ } + ], + "inputs": [ + { + "name": "x", + "description": "The input to the LSTM cell, shape (batch_size, num_inputs).", + "typeAttr": "T" + }, + { + "name": "cs_prev", + "description": "The previous cell state.", + "typeAttr": "T" + }, + { + "name": "h_prev", + "description": "The previous h state.", + "typeAttr": "T" + }, + { + "name": "w", + "description": "The weight matrix.", + "typeAttr": "T" + }, + { + "name": "wci", + "description": "The weight matrix for input gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wcf", + "description": "The weight matrix for forget gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "wco", + "description": "The weight matrix for output gate peephole connection.", + "typeAttr": "T" + }, + { + "name": "b", + "description": "The bias vector.", + "typeAttr": "T" + }, + { + "name": "i", + "description": "The input gate.", + "typeAttr": "T" + }, + { + "name": "cs", + "description": "The cell state before the tanh.", + "typeAttr": "T" + }, + { + "name": "f", + "description": "The forget gate.", + "typeAttr": "T" + }, + { + "name": "o", + "description": "The output gate.", + "typeAttr": "T" + }, + { + "name": "ci", + "description": "The cell input.", + "typeAttr": "T" + }, + { + "name": "co", + "description": "The cell after the tanh.", + "typeAttr": "T" + }, + { + "name": "cs_grad", + "description": "The current gradient of cs.", + "typeAttr": "T" + }, + { + "name": "h_grad", + "description": "The gradient of h vector.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "cs_prev_grad", + "description": "The gradient of cs to be back-propped.", + "typeAttr": "T" + }, + { + "name": "dicfo", + "description": "The derivative wrt to [i, cs, f, o].", + "typeAttr": "T" + }, + { + "name": "wci_grad", + "description": "The gradient for wci to be back-propped.", + "typeAttr": "T" + }, + { + "name": "wcf_grad", + "description": "The gradient for wcf to be back-propped.", + "typeAttr": "T" + }, + { + "name": "wco_grad", + "description": "The gradient for wco to be back-propped.", + "typeAttr": "T" + } + ] + }, + { + "name": "LatencyStatsDataset", + "summary": "Records the latency of producing `input_dataset` elements in a StatsAggregator.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "tag", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "LeakyRelu", + "category": "Activation", + "summary": "Computes rectified linear: `max(features, features * alpha)`.", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "default": 0.20000000298023224 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ] + }, + { + "name": "LeakyReluGrad", + "summary": "Computes rectified linear gradients for a LeakyRelu operation.", + "attributes": [ + { + "name": "alpha", + "type": "float32", + "default": 0.20000000298023224 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { 
+ "name": "gradients", + "description": "The backpropagated gradients to the corresponding LeakyRelu operation.", + "typeAttr": "T" + }, + { + "name": "features", + "description": "The features passed as input to the corresponding LeakyRelu operation,\nOR the outputs of that operation (both work equivalently).", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "backprops", + "description": "`gradients * (features > 0) + alpha * gradients * (features <= 0)`.", + "typeAttr": "T" + } + ] + }, + { + "name": "LearnedUnigramCandidateSampler", + "summary": "Generates labels for candidate sampling with a learned unigram distribution.", + "description": "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.", + "attributes": [ + { + "name": "num_true", + "type": "int64", + "description": "Number of true labels per context.", + "minimum": 1 + }, + { + "name": "num_sampled", + "type": "int64", + "description": "Number of candidates to randomly sample.", + "minimum": 1 + }, + { + "name": "unique", + "type": "boolean", + "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." + }, + { + "name": "range_max", + "type": "int64", + "description": "The sampler will sample integers from the interval [0, range_max).", + "minimum": 1 + }, + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "An second seed to avoid seed collision.", + "default": 0 + } + ], + "inputs": [ + { + "name": "true_classes", + "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.", + "type": 9 + } + ], + "outputs": [ + { + "name": "sampled_candidates", + "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.", + "type": 9 + }, + { + "name": "true_expected_count", + "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.", + "type": 1 + }, + { + "name": "sampled_expected_count", + "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. 
If unique=true, then this is a\nprobability.", + "type": 1 + } + ] + }, + { + "name": "LeftShift", + "summary": "Elementwise computes the bitwise left-shift of `x` and `y`.", + "description": "If `y` is negative, or greater than or equal to the width of `x` in bits, the\nresult is implementation defined.\n\nExample:\n\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\nimport numpy as np\ndtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]\n\nfor dtype in dtype_list:\n lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)\n rhs = tf.constant([5, 0, 7, 11], dtype=dtype)\n\n left_shift_result = bitwise_ops.left_shift(lhs, rhs)\n\n print(left_shift_result)\n\n# This will print:\n# tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8)\n# tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16)\n# tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32)\n# tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64)\n\nlhs = np.array([-2, 64, 101, 32], dtype=np.int8)\nrhs = np.array([-1, -5, -3, -14], dtype=np.int8)\nbitwise_ops.left_shift(lhs, rhs)\n# \n```\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "LegacyParallelInterleaveDatasetV2", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "description": "The resulting dataset is similar to the `InterleaveDataset`, with the exception\nthat if retrieving the next value from a dataset would cause the requester to\nblock, it will skip that input dataset. This dataset is especially useful\nwhen loading data from variable-latency datastores (e.g. HDFS, GCS), as it\nallows the training step to proceed so long as some data is available.\n\n!! WARNING !! This dataset is not deterministic!", + "attributes": [ + { + "name": "f", + "type": "function", + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`." + }, + { + "name": "deterministic", + "type": "string", + "default": "default" + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "name": "cycle_length", + "type": 9 + }, + { + "name": "block_length", + "type": 9 + }, + { + "name": "buffer_output_elements", + "type": 9 + }, + { + "name": "prefetch_input_elements", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Less", + "summary": "Returns the truth value of (x < y) element-wise.", + "description": "*NOTE*: `Less` supports broadcasting. 
More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\nExample:\n\n```python\nx = tf.constant([5, 4, 6])\ny = tf.constant([5])\ntf.math.less(x, y) ==> [False, True, False]\n\nx = tf.constant([5, 4, 6])\ny = tf.constant([5, 6, 7])\ntf.math.less(x, y) ==> [False, True, True]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "LessEqual", + "summary": "Returns the truth value of (x <= y) element-wise.", + "description": "*NOTE*: `LessEqual` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\nExample:\n\n```python\nx = tf.constant([5, 4, 6])\ny = tf.constant([5])\ntf.math.less_equal(x, y) ==> [True, True, False]\n\nx = tf.constant([5, 4, 6])\ny = tf.constant([5, 6, 6])\ntf.math.less_equal(x, y) ==> [True, True, True]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "Lgamma", + "summary": "Computes the log of the absolute value of `Gamma(x)` element-wise.", + "description": " For positive numbers, this function computes log((input - 1)!) for every element in the tensor.\n `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539`\n\nExample:\n\n```python\nx = tf.constant([0, 0.5, 1, 4.5, -4, -5.6])\ntf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "LinSpace", + "summary": "Generates values in an interval.", + "description": "A sequence of `num` evenly-spaced values is generated beginning at `start`.\nIf `num > 1`, the values in the sequence increase by\n`(stop - start) / (num - 1)`, so that the last one is exactly `stop`.\n\nFor example:\n\n```\ntf.linspace(10.0, 12.0, 3, name=\"linspace\") => [ 10.0 11.0 12.0]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "start", + "description": "0-D tensor. First entry in the range.", + "typeAttr": "T" + }, + { + "name": "stop", + "description": "0-D tensor. Last entry in the range.", + "typeAttr": "T" + }, + { + "name": "num", + "description": "0-D tensor. Number of values to generate.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "1-D. 
The generated values.", + "typeAttr": "T" + } + ] + }, + { + "name": "ListDataset", + "summary": "Creates a dataset that emits each of `tensors` once.", + "attributes": [ + { + "name": "Tinput_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "tensors", + "typeListAttr": "Tinput_types" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ListDiff", + "summary": "Computes the difference between two lists of numbers or strings.", + "description": "Given a list `x` and a list `y`, this operation returns a list `out` that\nrepresents all values that are in `x` but not in `y`. The returned list `out`\nis sorted in the same order that the numbers appear in `x` (duplicates are\npreserved). This operation also returns a list `idx` that represents the\nposition of each `out` element in `x`. In other words:\n\n`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`\n\nFor example, given this input:\n\n```\nx = [1, 2, 3, 4, 5, 6]\ny = [1, 3, 5]\n```\n\nThis operation would return:\n\n```\nout ==> [2, 4, 6]\nidx ==> [1, 3, 5]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "out_idx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "description": "1-D. Values to keep.", + "typeAttr": "T" + }, + { + "name": "y", + "description": "1-D. Values to remove.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "1-D. Values present in `x` but not in `y`.", + "typeAttr": "T" + }, + { + "name": "idx", + "description": "1-D. Positions of `x` values preserved in `out`.", + "typeAttr": "out_idx" + } + ] + }, + { + "name": "ListSnapshotChunksDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "snapshot_path", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "LoadAllTPUEmbeddingParameters", + "summary": "An op that loads optimization parameters into embedding memory.", + "description": "An op that loads optimization parameters into embedding memory. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding\ntable configuration. For example, this op is used to install parameters that are\nloaded from a checkpoint before a training loop is executed. For Adagrad,\nauxiliary1 should be the accumulators. For SGD, all of the auxiliary* values\nshould be empty. For FTRL, auxiliary1 should be the accumulators and auxiliary2\nshould be the linear terms. For ADAM, auxiliary1 should be the momenta and\nauxiliary2 should be the velocities.", + "attributes": [ + { + "name": "NumTables", + "type": "int64", + "description": "The number of embedding tables.", + "minimum": 1 + }, + { + "name": "config", + "type": "string", + "description": "A TPUEmbeddingConfiguration proto describing the\ntable parameters being loaded, serialized to a string." + }, + { + "name": "num_shards", + "type": "int64", + "description": "Number of shards into which the embedding tables are divided." 
+ }, + { + "name": "shard_id", + "type": "int64", + "description": "Identifier of shard for this operation." + } + ], + "inputs": [ + { + "name": "parameters", + "description": "A list of tensors, one for each embedding table,\ncontaining the initial embedding table parameters to use in embedding\nlookups.", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary1", + "description": "A list of tensors, one for each embedding table, containing the\ninitial values of the first auxiliary optimization parameter to use in embedding\ntraining loop updates. The shape of each entry is ignored (and thus can be\nempty) for those tables whose optimization algorithms do not have at least one\nauxiliary parameter.", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary2", + "description": "A list of tensors, one for each embedding table, containing the\ninitial values of the second auxiliary optimization parameter to use in\nembedding training loop updates. The shape of each entry is ignored (and thus\ncan be empty) for those tables whose optimization algorithms do not have at\nleast two auxiliary", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary3", + "description": "A list of tensors, one for each embedding table, containing the\ninitial values of the third auxiliary optimization parameter to use in embedding\ntraining loop updates. The shape of each entry is ignored (and thus can be\nempty) for those tables whose optimization algorithms do not have three\nauxiliary parameters.", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary4", + "description": "A list of tensors, one for each embedding table, containing the\ninitial values of the second auxiliary optimization parameter to use in\nembedding training loop updates. The shape of each entry is ignored (and thus\ncan be empty) for those tables whose optimization algorithms do not have at\nleast four auxiliary", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary5", + "description": "A list of tensors, one for each embedding table, containing the\ninitial values of the third auxiliary optimization parameter to use in embedding\ntraining loop updates. The shape of each entry is ignored (and thus can be\nempty) for those tables whose optimization algorithms do not have five\nauxiliary parameters.", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary6", + "description": "A list of tensors, one for each embedding table, containing the\ninitial values of the second auxiliary optimization parameter to use in\nembedding training loop updates. The shape of each entry is ignored (and thus\ncan be empty) for those tables whose optimization algorithms do not have at\nleast six auxiliary", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary7", + "description": "A list of tensors, one for each embedding table, containing the\ninitial values of the third auxiliary optimization parameter to use in embedding\ntraining loop updates. 
The shape of each entry is ignored (and thus can be\nempty) for those tables whose optimization algorithms do not have seven\nauxiliary parameters.", + "numberAttr": "NumTables", + "type": 1 + } + ] + }, + { + "name": "LoadAndRemapMatrix", + "summary": "Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint", + "description": "at `ckpt_path` and potentially reorders its rows and columns using the\nspecified remappings.\n\nMost users should use one of the wrapper initializers (such as\n`tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this\nfunction directly.\n\nThe remappings are 1-D tensors with the following properties:\n\n* `row_remapping` must have exactly `num_rows` entries. Row `i` of the output\n matrix will be initialized from the row corresponding to index\n `row_remapping[i]` in the old `Tensor` from the checkpoint.\n* `col_remapping` must have either 0 entries (indicating that no column\n reordering is needed) or `num_cols` entries. If specified, column `j` of the\n output matrix will be initialized from the column corresponding to index\n `col_remapping[j]` in the old `Tensor` from the checkpoint.\n* A value of -1 in either of the remappings signifies a \"missing\" entry. In that\n case, values from the `initializing_values` tensor will be used to fill that\n missing row or column. If `row_remapping` has `r` missing entries and\n `col_remapping` has `c` missing entries, then the following condition must be\n true:\n\n`(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`\n\nThe remapping tensors can be generated using the GenerateVocabRemapping op.\n\nAs an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],\ninitializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing\nthe value from row i, column j of the old tensor in the checkpoint, the output\nmatrix will look like the following:\n\n[[w(1, 0), w(1, 2), 0.5],\n [w(0, 0), w(0, 2), -0.5],\n [0.25, -0.25, 42]]", + "attributes": [ + { + "name": "num_rows", + "type": "int64", + "description": "Number of rows (length of the 1st dimension) in the output matrix.", + "minimum": 0 + }, + { + "name": "num_cols", + "type": "int64", + "description": "Number of columns (length of the 2nd dimension) in the output matrix.", + "minimum": 1 + }, + { + "name": "max_rows_in_memory", + "type": "int64", + "description": "The maximum number of rows to load from the checkpoint at\nonce. If less than or equal to 0, the entire matrix will be loaded into\nmemory. Setting this arg trades increased disk reads for lower memory usage.", + "default": -1 + } + ], + "inputs": [ + { + "name": "ckpt_path", + "description": "Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from\nwhich the old matrix `Tensor` will be loaded.", + "type": 7 + }, + { + "name": "old_tensor_name", + "description": "Name of the 2-D `Tensor` to load from checkpoint.", + "type": 7 + }, + { + "name": "row_remapping", + "description": "An int `Tensor` of row remappings (generally created by\n`generate_vocab_remapping`). Even if no row remapping is needed, this must\nstill be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted\nindex-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).", + "type": 9 + }, + { + "name": "col_remapping", + "description": "An int `Tensor` of column remappings (generally created by\n`generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping\nis to be done (e.g. 
column ordering is the same).", + "type": 9 + }, + { + "name": "initializing_values", + "description": "A float `Tensor` containing values to fill in for cells\nin the output matrix that are not loaded from the checkpoint. Length must be\nexactly the same as the number of missing / new cells.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output_matrix", + "description": "Output matrix containing existing values loaded from the\ncheckpoint, and with any missing values filled in from initializing_values.", + "type": 1 + } + ] + }, + { + "name": "LoadDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "compression", + "type": "string", + "default": "" + }, + { + "name": "reader_func", + "type": "function" + }, + { + "name": "Treader_func_args", + "type": "type[]", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "path", + "type": 7 + }, + { + "name": "reader_func_other_args", + "typeListAttr": "Treader_func_args" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "LoadTPUEmbeddingADAMParameters", + "summary": "Load ADAM embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the ADAM optimization algorithm.", + "type": 1 + }, + { + "name": "momenta", + "description": "Value of momenta used in the ADAM optimization algorithm.", + "type": 1 + }, + { + "name": "velocities", + "description": "Value of velocities used in the ADAM optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingAdadeltaParameters", + "summary": "Load Adadelta embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. 
For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the Adadelta optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Value of accumulators used in the Adadelta optimization algorithm.", + "type": 1 + }, + { + "name": "updates", + "description": "Value of updates used in the Adadelta optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingAdagradMomentumParameters", + "summary": "Load Adagrad Momentum embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the Adagrad Momentum optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Value of accumulators used in the Adagrad Momentum optimization algorithm.", + "type": 1 + }, + { + "name": "momenta", + "description": "Value of momenta used in the Adagrad Momentum optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingAdagradParameters", + "summary": "Load Adagrad embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the Adagrad optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Value of accumulators used in the Adagrad optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingCenteredRMSPropParameters", + "summary": "Load centered RMSProp embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. 
For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the centered RMSProp optimization algorithm.", + "type": 1 + }, + { + "name": "ms", + "description": "Value of ms used in the centered RMSProp optimization algorithm.", + "type": 1 + }, + { + "name": "mom", + "description": "Value of mom used in the centered RMSProp optimization algorithm.", + "type": 1 + }, + { + "name": "mg", + "description": "Value of mg used in the centered RMSProp optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingFTRLParameters", + "summary": "Load FTRL embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the FTRL optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Value of accumulators used in the FTRL optimization algorithm.", + "type": 1 + }, + { + "name": "linears", + "description": "Value of linears used in the FTRL optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingFrequencyEstimatorParameters", + "summary": "Load frequency estimator embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the frequency estimator optimization algorithm.", + "type": 1 + }, + { + "name": "last_hit_step", + "description": "Value of last_hit_step used in the frequency estimator optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingMDLAdagradLightParameters", + "summary": "Load MDL Adagrad Light embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. 
For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the MDL Adagrad Light optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Value of accumulators used in the MDL Adagrad Light optimization algorithm.", + "type": 1 + }, + { + "name": "weights", + "description": "Value of weights used in the MDL Adagrad Light optimization algorithm.", + "type": 1 + }, + { + "name": "benefits", + "description": "Value of benefits used in the MDL Adagrad Light optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingMomentumParameters", + "summary": "Load Momentum embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the Momentum optimization algorithm.", + "type": 1 + }, + { + "name": "momenta", + "description": "Value of momenta used in the Momentum optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingProximalAdagradParameters", + "summary": "Load proximal Adagrad embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. 
For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the proximal Adagrad optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Value of accumulators used in the proximal Adagrad optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingProximalYogiParameters", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "type": 1 + }, + { + "name": "v", + "type": 1 + }, + { + "name": "m", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingRMSPropParameters", + "summary": "Load RMSProp embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the RMSProp optimization algorithm.", + "type": 1 + }, + { + "name": "ms", + "description": "Value of ms used in the RMSProp optimization algorithm.", + "type": 1 + }, + { + "name": "mom", + "description": "Value of mom used in the RMSProp optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "LoadTPUEmbeddingStochasticGradientDescentParameters", + "summary": "Load SGD embedding parameters.", + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "parameters", + "description": "Value of parameters used in the stochastic gradient descent optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "Log", + "summary": "Computes natural logarithm of x element-wise.", + "description": "I.e., \\\\(y = \\log_e x\\\\).\n\nExample:\n\n```python\nx = tf.constant([0, 0.5, 1, 5])\ntf.math.log(x) ==> [-inf, -0.6931472, 0. 
, 1.609438]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Log1p", + "summary": "Computes natural logarithm of (1 + x) element-wise.", + "description": "I.e., \\\\(y = \\log_e (1 + x)\\\\).\n\nExample:\n\n```python\nx = tf.constant([0, 0.5, 1, 5])\ntf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "LogMatrixDeterminant", + "summary": "Computes the sign and the log of the absolute value of the determinant of", + "description": "one or more square matrices.\n\nThe input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions\nform square matrices. The outputs are two tensors containing the signs and\nabsolute values of the log determinants for all N input submatrices\n`[..., :, :]` such that `determinant = sign*exp(log_abs_determinant)`.\nThe `log_abs_determinant` is computed as `det(P)*sum(log(diag(LU)))` where `LU`\nis the `LU` decomposition of the input and `P` is the corresponding\npermutation matrix.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape is `[N, M, M]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "sign", + "description": "The signs of the log determinants of the inputs. Shape is `[N]`.", + "typeAttr": "T" + }, + { + "name": "log_abs_determinant", + "description": "The logs of the absolute values of the determinants\nof the N input matrices. Shape is `[N]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "LogSoftmax", + "summary": "Computes log softmax activations.", + "description": "For each batch `i` and class `j` we have\n\n logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "logits", + "description": "2-D with shape `[batch_size, num_classes]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "logsoftmax", + "description": "Same shape as `logits`.", + "typeAttr": "T" + } + ] + }, + { + "name": "LogUniformCandidateSampler", + "summary": "Generates labels for candidate sampling with a log-uniform distribution.", + "description": "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. 
The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.", + "attributes": [ + { + "name": "num_true", + "type": "int64", + "description": "Number of true labels per context.", + "minimum": 1 + }, + { + "name": "num_sampled", + "type": "int64", + "description": "Number of candidates to randomly sample.", + "minimum": 1 + }, + { + "name": "unique", + "type": "boolean", + "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." + }, + { + "name": "range_max", + "type": "int64", + "description": "The sampler will sample integers from the interval [0, range_max).", + "minimum": 1 + }, + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + } + ], + "inputs": [ + { + "name": "true_classes", + "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.", + "type": 9 + } + ], + "outputs": [ + { + "name": "sampled_candidates", + "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.", + "type": 9 + }, + { + "name": "true_expected_count", + "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.", + "type": 1 + }, + { + "name": "sampled_expected_count", + "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability.", + "type": 1 + } + ] + }, + { + "name": "LogicalAnd", + "summary": "Returns the truth value of x AND y element-wise.", + "description": "*NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "type": 10 + }, + { + "name": "y", + "type": 10 + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "LogicalNot", + "summary": "Returns the truth value of `NOT x` element-wise.", + "inputs": [ + { + "name": "x", + "description": "A `Tensor` of type `bool`.", + "type": 10 + } + ], + "outputs": [ + { + "name": "y", + "description": "A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.", + "type": 10 + } + ] + }, + { + "name": "LogicalOr", + "summary": "Returns the truth value of x OR y element-wise.", + "description": "*NOTE*: `LogicalOr` supports broadcasting. 
More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "type": 10 + }, + { + "name": "y", + "type": 10 + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "LookupTableExport", + "summary": "Outputs all keys and values in the table.", + "attributes": [ + { + "name": "Tkeys", + "type": "type" + }, + { + "name": "Tvalues", + "type": "type" + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to the table.", + "type": 7, + "isRef": true + } + ], + "outputs": [ + { + "name": "keys", + "description": "Vector of all keys present in the table.", + "typeAttr": "Tkeys" + }, + { + "name": "values", + "description": "Tensor of all values in the table. Indexed in parallel with `keys`.", + "typeAttr": "Tvalues" + } + ] + }, + { + "name": "LookupTableExportV2", + "summary": "Outputs all keys and values in the table.", + "attributes": [ + { + "name": "Tkeys", + "type": "type" + }, + { + "name": "Tvalues", + "type": "type" + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to the table.", + "type": 20 + } + ], + "outputs": [ + { + "name": "keys", + "description": "Vector of all keys present in the table.", + "typeAttr": "Tkeys" + }, + { + "name": "values", + "description": "Tensor of all values in the table. Indexed in parallel with `keys`.", + "typeAttr": "Tvalues" + } + ] + }, + { + "name": "LookupTableFind", + "summary": "Looks up keys in a table, outputs the corresponding values.", + "description": "The tensor `keys` must be of the same type as the keys of the table.\nThe output `values` is of the type of the table values.\n\nThe scalar `default_value` is the value output for keys not present in the\ntable. It must also be of the same type as the table values.", + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to the table.", + "type": 7, + "isRef": true + }, + { + "name": "keys", + "description": "Any shape. Keys to look up.", + "typeAttr": "Tin" + }, + { + "name": "default_value", + "typeAttr": "Tout" + } + ], + "outputs": [ + { + "name": "values", + "description": "Same shape as `keys`. Values found in the table, or `default_values`\nfor missing keys.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "LookupTableFindV2", + "summary": "Looks up keys in a table, outputs the corresponding values.", + "description": "The tensor `keys` must be of the same type as the keys of the table.\nThe output `values` is of the type of the table values.\n\nThe scalar `default_value` is the value output for keys not present in the\ntable. It must also be of the same type as the table values.", + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to the table.", + "type": 20 + }, + { + "name": "keys", + "description": "Any shape. Keys to look up.", + "typeAttr": "Tin" + }, + { + "name": "default_value", + "typeAttr": "Tout" + } + ], + "outputs": [ + { + "name": "values", + "description": "Same shape as `keys`. 
Values found in the table, or `default_values`\nfor missing keys.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "LookupTableImport", + "summary": "Replaces the contents of the table with the specified keys and values.", + "description": "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values.", + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to the table.", + "type": 7, + "isRef": true + }, + { + "name": "keys", + "description": "Any shape. Keys to look up.", + "typeAttr": "Tin" + }, + { + "name": "values", + "description": "Values to associate with keys.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "LookupTableImportV2", + "summary": "Replaces the contents of the table with the specified keys and values.", + "description": "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values.", + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to the table.", + "type": 20 + }, + { + "name": "keys", + "description": "Any shape. Keys to look up.", + "typeAttr": "Tin" + }, + { + "name": "values", + "description": "Values to associate with keys.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "LookupTableInsert", + "summary": "Updates the table to associate keys with values.", + "description": "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values.", + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to the table.", + "type": 7, + "isRef": true + }, + { + "name": "keys", + "description": "Any shape. Keys to look up.", + "typeAttr": "Tin" + }, + { + "name": "values", + "description": "Values to associate with keys.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "LookupTableInsertV2", + "summary": "Updates the table to associate keys with values.", + "description": "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values.", + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to the table.", + "type": 20 + }, + { + "name": "keys", + "description": "Any shape. Keys to look up.", + "typeAttr": "Tin" + }, + { + "name": "values", + "description": "Values to associate with keys.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "LookupTableRemoveV2", + "summary": "Removes keys and their associated values from a table.", + "description": "The tensor `keys` must be of the same type as the keys of the table. Keys not\nalready in the table are silently ignored.", + "attributes": [ + { + "name": "Tin", + "type": "type" + } + ], + "inputs": [ + { + "name": "table_handle", + "description": "Handle to the table.", + "type": 20 + }, + { + "name": "keys", + "description": "Any shape. 
Keys of the elements to remove.", + "typeAttr": "Tin" + } + ] + }, + { + "name": "LookupTableSize", + "summary": "Computes the number of elements in the given table.", + "inputs": [ + { + "name": "table_handle", + "description": "Handle to the table.", + "type": 7, + "isRef": true + } + ], + "outputs": [ + { + "name": "size", + "description": "Scalar that contains number of elements in the table.", + "type": 9 + } + ] + }, + { + "name": "LookupTableSizeV2", + "summary": "Computes the number of elements in the given table.", + "inputs": [ + { + "name": "table_handle", + "description": "Handle to the table.", + "type": 20 + } + ], + "outputs": [ + { + "name": "size", + "description": "Scalar that contains number of elements in the table.", + "type": 9 + } + ] + }, + { + "name": "LoopCond", + "summary": "Forwards the input to the output.", + "description": "This operator represents the loop termination condition used by the\n\"pivot\" switches of a loop.", + "inputs": [ + { + "name": "input", + "description": "A boolean scalar, representing the branch predicate of the Switch op.", + "type": 10 + } + ], + "outputs": [ + { + "name": "output", + "description": "The same tensor as `input`.", + "type": 10 + } + ] + }, + { + "name": "LowerBound", + "summary": "Applies lower_bound(sorted_search_values, values) along each row.", + "description": "Each set of rows with the same index in (sorted_inputs, values) is treated\nindependently. The resulting row is the equivalent of calling\n`np.searchsorted(sorted_inputs, values, side='left')`.\n\nThe result is not a global index to the entire\n`Tensor`, but rather just the index in the last dimension.\n\nA 2-D example:\n sorted_sequence = [[0, 3, 9, 9, 10],\n [1, 2, 3, 4, 5]]\n values = [[2, 4, 9],\n [0, 2, 6]]\n\n result = LowerBound(sorted_sequence, values)\n\n result == [[1, 2, 2],\n [0, 1, 5]]", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "sorted_inputs", + "description": "2-D Tensor where each row is ordered.", + "typeAttr": "T" + }, + { + "name": "values", + "description": "2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains\nthe values that will be searched for in `sorted_search_values`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A `Tensor` with the same shape as `values`. It contains the first scalar index\ninto the last dimension where values can be inserted without changing the\nordered property.", + "typeAttr": "out_type" + } + ] + }, + { + "name": "Lu", + "summary": "Computes the LU decomposition of one or more square matrices.", + "description": "The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices.\n\nThe input has to be invertible.\n\nThe output consists of two tensors LU and P containing the LU decomposition\nof all input submatrices `[..., :, :]`. LU encodes the lower triangular and\nupper triangular factors.\n\nFor each input submatrix of shape `[M, M]`, L is a lower triangular matrix of\nshape `[M, M]` with unit diagonal whose entries correspond to the strictly lower\ntriangular part of LU. 
U is an upper triangular matrix of shape `[M, M]` whose\nentries correspond to the upper triangular part, including the diagonal, of LU.\n\nP represents a permutation matrix encoded as a list of indices each between `0`\nand `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to\nP, then L, U and P satisfy P_mat * input = L * U.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`." + }, + { + "name": "output_idx_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of\nsize `[M, M]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "lu", + "description": "A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the\nlower triangular factor `L` with unit diagonal, and whose upper triangular part\ndenotes the upper triangular factor `U`.", + "typeAttr": "T" + }, + { + "name": "p", + "description": "Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is\n`[..., M]`.\n@compatibility(scipy)\nSimilar to `scipy.linalg.lu`, except the triangular factors `L` and `U` are\npacked into a single tensor, the permutation is applied to `input` instead of\nthe right hand side and the permutation `P` is returned as a list of indices\ninstead of a permutation matrix.\n@end_compatibility", + "typeAttr": "output_idx_type" + } + ] + }, + { + "name": "MakeIterator", + "summary": "Makes a new iterator from the given `dataset` and stores it in `iterator`.", + "description": "This operation may be executed multiple times. Each execution will reset the\niterator in `iterator` to the first element of `dataset`.", + "inputs": [ + { + "name": "dataset", + "type": 21 + }, + { + "name": "iterator", + "type": 20 + } + ] + }, + { + "name": "MakeUnique", + "summary": "Make all elements in the non-Batch dimension unique, but \\\"close\\\" to", + "description": "their initial value. Never returns a sub-normal number. Never returns\nzero. The sign of each input element is always identical to the sign\nof the corresponding output element. Behavior for infinite elements is\nundefined. Behavior for subnormal elements is undefined.", + "inputs": [ + { + "name": "input", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "type": 1 + } + ] + }, + { + "name": "MapAndBatchDataset", + "summary": "Creates a dataset that fuses mapping with batching.", + "description": "Creates a dataset that applies `f` to the outputs of `input_dataset` and then\nbatches `batch_size` of them.\n\nUnlike a \"MapDataset\", which applies `f` sequentially, this dataset invokes up\nto `batch_size * num_parallel_batches` copies of `f` in parallel.", + "attributes": [ + { + "name": "f", + "type": "function", + "description": "A function to apply to the outputs of `input_dataset`." 
+ }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "preserve_cardinality", + "type": "boolean", + "default": false + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "other_arguments", + "description": "A list of tensors, typically values that were captured when building a closure\nfor `f`.", + "typeListAttr": "Targuments" + }, + { + "name": "batch_size", + "description": "A scalar representing the number of elements to accumulate in a\nbatch. It determines the number of concurrent invocations of `f` that process\nelements from `input_dataset` in parallel.", + "type": 9 + }, + { + "name": "num_parallel_calls", + "description": "A scalar representing the maximum number of parallel invocations of the `map_fn`\nfunction. Applying the `map_fn` on consecutive input elements in parallel has\nthe potential to improve input pipeline throughput.", + "type": 9 + }, + { + "name": "drop_remainder", + "description": "A scalar representing whether the last batch should be dropped in case its size\nis smaller than desired.", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "MapClear", + "summary": "Op removes all elements in the underlying container.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ] + }, + { + "name": "MapDataset", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "use_inter_op_parallelism", + "type": "boolean", + "default": true + }, + { + "name": "preserve_cardinality", + "type": "boolean", + "default": false + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "MapDefun", + "summary": " Maps a function on the list of tensors unpacked from arguments on dimension 0.\n The function given by `f` is assumed to be stateless, and is executed\n concurrently on all the slices; up to batch_size (i.e. the size of the 0th\n dimension of each argument) functions will be scheduled at once.\n\n The `max_intra_op_parallelism` attr, which defaults to 1, can be used to\n limit the intra op parallelism. 
To limit inter-op parallelism, a user can\n set a private threadpool on the dataset using `tf.data.Options`'s\n `ThreadingOptions`.\n\n Note that this op is not exposed to users directly, but is invoked in tf.data\n rewrites.", + "attributes": [ + { + "name": "Targuments", + "type": "type[]", + "description": "A list of types.", + "minimum": 1 + }, + { + "name": "Tcaptured", + "type": "type[]", + "description": "A list of types.", + "minimum": 0, + "default": [] + }, + { + "name": "output_types", + "type": "type[]", + "description": "A list of types.", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "description": "A list of shapes.", + "minimum": 1 + }, + { + "name": "f", + "type": "function" + }, + { + "name": "max_intra_op_parallelism", + "type": "int64", + "default": 1 + } + ], + "inputs": [ + { + "name": "arguments", + "description": " A list of tensors whose types are `Targuments`, corresponding to the inputs\n the function should be mapped over.", + "typeListAttr": "Targuments" + }, + { + "name": "captured_inputs", + "description": " A list of tensors whose types are `Tcaptured`, corresponding to the captured\n inputs of the defun.", + "typeListAttr": "Tcaptured" + } + ], + "outputs": [ + { + "name": "output", + "description": " A list of output tensors whose types are `output_types` and whose dimensions\n 0 are the same as the dimensions 0 of the tensors in `arguments`, and whose\n remaining dimensions correspond to those in `output_shapes`.", + "typeListAttr": "output_types" + } + ] + }, + { + "name": "MapIncompleteSize", + "summary": "Op returns the number of incomplete elements in the underlying container.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ] + }, + { + "name": "MapPeek", + "summary": "Op peeks at the values at the specified key. 
If the", + "description": "underlying container does not contain this key\nthis op will block until it does.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "MapSize", + "summary": "Op returns the number of elements in the underlying container.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ] + }, + { + "name": "MapStage", + "summary": "Stage (key, values) in the underlying container which behaves like a hashtable.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "description": "Maximum number of elements in the Staging Area. If > 0, inserts\non the container will block when the capacity is reached.", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "name": "fake_dtypes", + "type": "type[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "It is necessary to match this name to the matching Unstage Op.", + "default": "" + } + ], + "inputs": [ + { + "name": "key", + "description": "int64", + "type": 9 + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "values", + "description": "a list of tensors\ndtypes A list of data types that inserted values should adhere to.", + "typeListAttr": "fake_dtypes" + } + ] + }, + { + "name": "MapUnstage", + "summary": "Op removes and returns the values associated with the key", + "description": "from the underlying container. If the underlying container\ndoes not contain this key, the op will block until it does.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "MapUnstageNoKey", + "summary": "Op removes and returns a random (key, value)", + "description": "from the underlying container. 
If the underlying container\ndoes not contain elements, the op will block until it does.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "values", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "MatMul", + "summary": "Multiply the matrix \"a\" by the matrix \"b\".", + "description": "The inputs must be two-dimensional matrices and the inner dimension of\n\"a\" (after being transposed if transpose_a is true) must match the\nouter dimension of \"b\" (after being transposed if transpose_b is\ntrue).\n\n*Note*: The default kernel implementation for MatMul on GPUs uses\ncublas.", + "attributes": [ + { + "name": "transpose_a", + "type": "boolean", + "description": "If true, \"a\" is transposed before multiplication.", + "default": false + }, + { + "name": "transpose_b", + "type": "boolean", + "description": "If true, \"b\" is transposed before multiplication.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `complex64`, `complex128`." + }, + { + "name": "grad_a", + "type": "boolean", + "default": false + }, + { + "name": "grad_b", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "b", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "product", + "typeAttr": "T" + } + ] + }, + { + "name": "MatchingFiles", + "summary": "Returns the set of files matching one or more glob patterns.", + "description": "Note that this routine only supports wildcard characters in the\nbasename portion of the pattern, not in the directory portion.\nNote also that the order of filenames returned is deterministic.", + "inputs": [ + { + "name": "pattern", + "description": "Shell wildcard pattern(s). 
Scalar or vector of type string.", + "type": 7 + } + ], + "outputs": [ + { + "name": "filenames", + "description": "A vector of matching filenames.", + "type": 7 + } + ] + }, + { + "name": "MatchingFilesDataset", + "inputs": [ + { + "name": "patterns", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "MatrixBandPart", + "summary": "Copy a tensor setting everything outside a central band in each innermost matrix to zero.", + "description": "The `band` part is computed as follows:\nAssume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a\ntensor with the same shape where\n\n`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.\n\nThe indicator function\n\n`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&\n (num_upper < 0 || (n-m) <= num_upper)`.\n\nFor example:\n\n```\n# if 'input' is [[ 0, 1, 2, 3]\n# [-1, 0, 1, 2]\n# [-2, -1, 0, 1]\n# [-3, -2, -1, 0]],\n\ntf.linalg.band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]\n [-1, 0, 1, 2]\n [ 0, -1, 0, 1]\n [ 0, 0, -1, 0]],\n\ntf.linalg.band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]\n [-1, 0, 1, 0]\n [-2, -1, 0, 1]\n [ 0, -2, -1, 0]]\n```\n\nUseful special cases:\n\n```\n tf.linalg.band_part(input, 0, -1) ==> Upper triangular part.\n tf.linalg.band_part(input, -1, 0) ==> Lower triangular part.\n tf.linalg.band_part(input, 0, 0) ==> Diagonal.\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindex", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "Rank `k` tensor.", + "typeAttr": "T" + }, + { + "name": "num_lower", + "description": "0-D tensor. Number of subdiagonals to keep. If negative, keep entire\nlower triangle.", + "typeAttr": "Tindex" + }, + { + "name": "num_upper", + "description": "0-D tensor. Number of superdiagonals to keep. If negative, keep\nentire upper triangle.", + "typeAttr": "Tindex" + } + ], + "outputs": [ + { + "name": "band", + "description": "Rank `k` tensor of the same shape as input. The extracted banded tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixDeterminant", + "summary": "Computes the determinant of one or more square matrices.", + "description": "The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. The output is a tensor containing the determinants\nfor all input submatrices `[..., :, :]`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape is `[..., M, M]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Shape is `[...]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixDiag", + "summary": "Returns a batched diagonal tensor with given batched diagonal values.", + "description": "Given a `diagonal`, this operation returns a tensor with the `diagonal` and\neverything else padded with zeros. 
The diagonal is computed as follows:\n\nAssume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a\ntensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:\n\n`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.\n\nFor example:\n\n```\n# 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]\n\nand diagonal.shape = (2, 4)\n\ntf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]\n [0, 2, 0, 0]\n [0, 0, 3, 0]\n [0, 0, 0, 4]],\n [[5, 0, 0, 0]\n [0, 6, 0, 0]\n [0, 0, 7, 0]\n [0, 0, 0, 8]]]\n\nwhich has shape (2, 4, 4)\n```", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "diagonal", + "description": "Rank `k`, where `k >= 1`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixDiagPart", + "summary": "Returns the batched diagonal part of a batched tensor.", + "description": "This operation returns a tensor with the `diagonal` part\nof the batched `input`. The `diagonal` part is computed as follows:\n\nAssume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a\ntensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:\n\n`diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.\n\nThe input must be at least a matrix.\n\nFor example:\n\n```\n# 'input' is [[[1, 0, 0, 0]\n [0, 2, 0, 0]\n [0, 0, 3, 0]\n [0, 0, 0, 4]],\n [[5, 0, 0, 0]\n [0, 6, 0, 0]\n [0, 0, 7, 0]\n [0, 0, 0, 8]]]\n\nand input.shape = (2, 4, 4)\n\ntf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]\n\nwhich has shape (2, 4)\n```", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "description": "Rank `k` tensor where `k >= 2`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "diagonal", + "description": "The extracted diagonal(s) having shape\n`diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixDiagPartV2", + "summary": "Returns the batched diagonal part of a batched tensor.", + "description": "Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched\n`input`.\n\nAssume `input` has `r` dimensions `[I, J, ..., L, M, N]`.\nLet `max_diag_len` be the maximum length among all diagonals to be extracted,\n`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\nLet `num_diags` be the number of diagonals to extract,\n`num_diags = k[1] - k[0] + 1`.\n\nIf `num_diags == 1`, the output tensor is of rank `r - 1` with shape\n`[I, J, ..., L, max_diag_len]` and values:\n\n```\ndiagonal[i, j, ..., l, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,\n padding_value ; otherwise.\n```\nwhere `y = max(-k[1], 0)`, `x = max(k[1], 0)`.\n\nOtherwise, the output tensor has rank `r` with dimensions\n`[I, J, ..., L, num_diags, max_diag_len]` with values:\n\n```\ndiagonal[i, j, ..., l, m, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,\n padding_value ; otherwise.\n```\nwhere `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`.\n\nThe input must be at least a matrix.\n\nFor example:\n\n```\ninput = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)\n [5, 6, 7, 8],\n [9, 8, 7, 6]],\n [[5, 4, 3, 2],\n [1, 2, 3, 4],\n [5, 6, 7, 8]]])\n\n# A main diagonal from each batch.\ntf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)\n [5, 2, 7]]\n\n# A superdiagonal from each 
batch.\ntf.matrix_diag_part(input, k = 1)\n ==> [[2, 7, 6], # Output shape: (2, 3)\n [4, 3, 8]]\n\n# A tridiagonal band from each batch.\ntf.matrix_diag_part(input, k = (-1, 1))\n ==> [[[2, 7, 6], # Output shape: (2, 3, 3)\n [1, 6, 7],\n [5, 8, 0]],\n [[4, 3, 8],\n [5, 2, 7],\n [1, 6, 0]]]\n\n# Padding value = 9\ntf.matrix_diag_part(input, k = (1, 3), padding_value = 9)\n ==> [[[4, 9, 9], # Output shape: (2, 3, 3)\n [3, 8, 9],\n [2, 7, 6]],\n [[2, 9, 9],\n [3, 4, 9],\n [4, 3, 8]]]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "description": "Rank `r` tensor where `r >= 2`.", + "typeAttr": "T" + }, + { + "name": "k", + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. `k[0]` must not be larger than `k[1]`.", + "type": 3 + }, + { + "name": "padding_value", + "description": "The value to fill the area outside the specified diagonal band with.\nDefault is 0.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "diagonal", + "description": "The extracted diagonal(s).", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixDiagPartV3", + "summary": "Returns the batched diagonal part of a batched tensor.", + "description": "Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched\n`input`.\n\nAssume `input` has `r` dimensions `[I, J, ..., L, M, N]`.\nLet `max_diag_len` be the maximum length among all diagonals to be extracted,\n`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\nLet `num_diags` be the number of diagonals to extract,\n`num_diags = k[1] - k[0] + 1`.\n\nIf `num_diags == 1`, the output tensor is of rank `r - 1` with shape\n`[I, J, ..., L, max_diag_len]` and values:\n\n```\ndiagonal[i, j, ..., l, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,\n padding_value ; otherwise.\n```\nwhere `y = max(-k[1], 0)`, `x = max(k[1], 0)`.\n\nOtherwise, the output tensor has rank `r` with dimensions\n`[I, J, ..., L, num_diags, max_diag_len]` with values:\n\n```\ndiagonal[i, j, ..., l, m, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,\n padding_value ; otherwise.\n```\nwhere `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.\n\n`offset` is zero except when the alignment of the diagonal is to the right.\n```\noffset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n```\nwhere `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\nThe input must be at least a matrix.\n\nFor example:\n\n```\ninput = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)\n [5, 6, 7, 8],\n [9, 8, 7, 6]],\n [[5, 4, 3, 2],\n [1, 2, 3, 4],\n [5, 6, 7, 8]]])\n\n# A main diagonal from each batch.\ntf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)\n [5, 2, 7]]\n\n# A superdiagonal from each batch.\ntf.matrix_diag_part(input, k = 1)\n ==> [[2, 7, 6], # Output shape: (2, 3)\n [4, 3, 8]]\n\n# A band from each batch.\ntf.matrix_diag_part(input, k = (-1, 2))\n ==> [[[0, 3, 8], # Output shape: (2, 4, 3)\n [2, 7, 6],\n [1, 6, 7],\n [5, 8, 0]],\n [[0, 3, 4],\n [4, 3, 8],\n [5, 2, 7],\n [1, 6, 0]]]\n\n# LEFT_RIGHT alignment.\ntf.matrix_diag_part(input, k = (-1, 2), align=\"LEFT_RIGHT\")\n ==> [[[3, 8, 0], # Output shape: (2, 4, 3)\n 
[2, 7, 6],\n [1, 6, 7],\n [0, 5, 8]],\n [[3, 4, 0],\n [4, 3, 8],\n [5, 2, 7],\n [0, 1, 6]]]\n\n# max_diag_len can be shorter than the main diagonal.\ntf.matrix_diag_part(input, k = (-2, -1))\n ==> [[[5, 8],\n [9, 0]],\n [[1, 6],\n [5, 0]]]\n\n# padding_value = 9\ntf.matrix_diag_part(input, k = (1, 3), padding_value = 9)\n ==> [[[9, 9, 4], # Output shape: (2, 3, 3)\n [9, 3, 8],\n [2, 7, 6]],\n [[9, 9, 2],\n [9, 3, 4],\n [4, 3, 8]]]\n\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "align", + "type": "string", + "description": "Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is\na string specifying how superdiagonals and subdiagonals should be aligned,\nrespectively. There are four possible alignments: \"RIGHT_LEFT\" (default),\n\"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". \"RIGHT_LEFT\" aligns superdiagonals\nto the right (left-pads the row) and subdiagonals to the left (right-pads the\nrow). It is the packing format LAPACK uses. cuSPARSE uses \"LEFT_RIGHT\", which is\nthe opposite alignment. Must be one of the following: `LEFT_RIGHT`, `RIGHT_LEFT`, `LEFT_LEFT`, `RIGHT_RIGHT`.", + "default": "RIGHT_LEFT" + } + ], + "inputs": [ + { + "name": "input", + "description": "Rank `r` tensor where `r >= 2`.", + "typeAttr": "T" + }, + { + "name": "k", + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. `k[0]` must not be larger than `k[1]`.", + "type": 3 + }, + { + "name": "padding_value", + "description": "The value to fill the area outside the specified diagonal band with.\nDefault is 0.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "diagonal", + "description": "The extracted diagonal(s).", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixDiagV2", + "summary": "Returns a batched diagonal tensor with given batched diagonal values.", + "description": "Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th\ndiagonals of a matrix, with everything else padded with `padding`. `num_rows`\nand `num_cols` specify the dimension of the innermost matrix of the output. If\nboth are not specified, the op assumes the innermost matrix is square and infers\nits size from `k` and the innermost dimension of `diagonal`. If only one of them\nis specified, the op assumes the unspecified value is the smallest possible\nbased on other criteria.\n\nLet `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has\nrank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one\ndiagonal is given (`k` is an integer or `k[0] == k[1]`). 
Otherwise, it has rank\n`r` with shape `[I, J, ..., L, num_rows, num_cols]`.\n\nThe second innermost dimension of `diagonal` has double meaning.\nWhen `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size\n[I, J, ..., M], and the output tensor is:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper\n padding_value ; otherwise\n```\n\nOtherwise, `M` is treated as the number of diagonals for the matrix in the\nsame batch (`M = k[1]-k[0]+1`), and the output tensor is:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n padding_value ; otherwise\n```\nwhere `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.\n\nFor example:\n\n```\n# The main diagonal.\ndiagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)\n [5, 6, 7, 8]])\ntf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)\n [0, 2, 0, 0],\n [0, 0, 3, 0],\n [0, 0, 0, 4]],\n [[5, 0, 0, 0],\n [0, 6, 0, 0],\n [0, 0, 7, 0],\n [0, 0, 0, 8]]]\n\n# A superdiagonal (per batch).\ndiagonal = np.array([[1, 2, 3], # Input shape: (2, 3)\n [4, 5, 6]])\ntf.matrix_diag(diagonal, k = 1)\n ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)\n [0, 0, 2, 0],\n [0, 0, 0, 3],\n [0, 0, 0, 0]],\n [[0, 4, 0, 0],\n [0, 0, 5, 0],\n [0, 0, 0, 6],\n [0, 0, 0, 0]]]\n\n# A band of diagonals.\ndiagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3)\n [4, 5, 0]],\n [[6, 7, 9],\n [9, 1, 0]]])\ntf.matrix_diag(diagonals, k = (-1, 0))\n ==> [[[1, 0, 0], # Output shape: (2, 3, 3)\n [4, 2, 0],\n [0, 5, 3]],\n [[6, 0, 0],\n [9, 7, 0],\n [0, 1, 9]]]\n\n# Rectangular matrix.\ndiagonal = np.array([1, 2]) # Input shape: (2)\ntf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)\n ==> [[0, 0, 0, 0], # Output shape: (3, 4)\n [1, 0, 0, 0],\n [0, 2, 0, 0]]\n\n# Rectangular matrix with inferred num_cols and padding_value = 9.\ntf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)\n ==> [[9, 9], # Output shape: (3, 2)\n [1, 9],\n [9, 2]]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "diagonal", + "description": "Rank `r`, where `r >= 1`", + "typeAttr": "T" + }, + { + "name": "k", + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. `k[0]` must not be larger than `k[1]`.", + "type": 3 + }, + { + "name": "num_rows", + "description": "The number of rows of the output matrix. If it is not provided, the op assumes\nthe output matrix is a square matrix and infers the matrix size from k and the\ninnermost dimension of `diagonal`.", + "type": 3 + }, + { + "name": "num_cols", + "description": "The number of columns of the output matrix. 
If it is not provided, the op\nassumes the output matrix is a square matrix and infers the matrix size from\nk and the innermost dimension of `diagonal`.", + "type": 3 + }, + { + "name": "padding_value", + "description": "The number to fill the area outside the specified diagonal band with.\nDefault is 0.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixDiagV3", + "summary": "Returns a batched diagonal tensor with given batched diagonal values.", + "description": "Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th\ndiagonals of a matrix, with everything else padded with `padding`. `num_rows`\nand `num_cols` specify the dimension of the innermost matrix of the output. If\nboth are not specified, the op assumes the innermost matrix is square and infers\nits size from `k` and the innermost dimension of `diagonal`. If only one of them\nis specified, the op assumes the unspecified value is the smallest possible\nbased on other criteria.\n\nLet `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has\nrank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one\ndiagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank\n`r` with shape `[I, J, ..., L, num_rows, num_cols]`.\n\nThe second innermost dimension of `diagonal` has double meaning.\nWhen `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size\n[I, J, ..., M], and the output tensor is:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper\n padding_value ; otherwise\n```\n\nOtherwise, `M` is treated as the number of diagonals for the matrix in the\nsame batch (`M = k[1]-k[0]+1`), and the output tensor is:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n padding_value ; otherwise\n```\nwhere `d = n - m`, `diag_index = k[1] - d`, and\n`index_in_diag = n - max(d, 0) + offset`.\n\n`offset` is zero except when the alignment of the diagonal is to the right.\n```\noffset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n```\nwhere `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\nFor example:\n\n```\n# The main diagonal.\ndiagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)\n [5, 6, 7, 8]])\ntf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)\n [0, 2, 0, 0],\n [0, 0, 3, 0],\n [0, 0, 0, 4]],\n [[5, 0, 0, 0],\n [0, 6, 0, 0],\n [0, 0, 7, 0],\n [0, 0, 0, 8]]]\n\n# A superdiagonal (per batch).\ndiagonal = np.array([[1, 2, 3], # Input shape: (2, 3)\n [4, 5, 6]])\ntf.matrix_diag(diagonal, k = 1)\n ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)\n [0, 0, 2, 0],\n [0, 0, 0, 3],\n [0, 0, 0, 0]],\n [[0, 4, 0, 0],\n [0, 0, 5, 0],\n [0, 0, 0, 6],\n [0, 0, 0, 0]]]\n\n# A tridiagonal band (per batch).\ndiagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3)\n [1, 2, 3],\n [4, 5, 0]],\n [[0, 2, 3],\n [6, 7, 9],\n [9, 1, 0]]])\ntf.matrix_diag(diagonals, k = (-1, 1))\n ==> [[[1, 8, 0], # Output shape: (2, 3, 3)\n [4, 2, 9],\n [0, 5, 3]],\n [[6, 2, 0],\n [9, 7, 3],\n [0, 1, 9]]]\n\n# LEFT_RIGHT alignment.\ndiagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3)\n [1, 2, 3],\n [0, 4, 5]],\n [[2, 3, 0],\n [6, 7, 9],\n [0, 9, 1]]])\ntf.matrix_diag(diagonals, 
k = (-1, 1), align=\"LEFT_RIGHT\")\n ==> [[[1, 8, 0], # Output shape: (2, 3, 3)\n [4, 2, 9],\n [0, 5, 3]],\n [[6, 2, 0],\n [9, 7, 3],\n [0, 1, 9]]]\n\n# Rectangular matrix.\ndiagonal = np.array([1, 2]) # Input shape: (2)\ntf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)\n ==> [[0, 0, 0, 0], # Output shape: (3, 4)\n [1, 0, 0, 0],\n [0, 2, 0, 0]]\n\n# Rectangular matrix with inferred num_cols and padding_value = 9.\ntf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)\n ==> [[9, 9], # Output shape: (3, 2)\n [1, 9],\n [9, 2]]\n\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "align", + "type": "string", + "description": "Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is\na string specifying how superdiagonals and subdiagonals should be aligned,\nrespectively. There are four possible alignments: \"RIGHT_LEFT\" (default),\n\"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". \"RIGHT_LEFT\" aligns superdiagonals\nto the right (left-pads the row) and subdiagonals to the left (right-pads the\nrow). It is the packing format LAPACK uses. cuSPARSE uses \"LEFT_RIGHT\", which is\nthe opposite alignment. Must be one of the following: `LEFT_RIGHT`, `RIGHT_LEFT`, `LEFT_LEFT`, `RIGHT_RIGHT`.", + "default": "RIGHT_LEFT" + } + ], + "inputs": [ + { + "name": "diagonal", + "description": "Rank `r`, where `r >= 1`", + "typeAttr": "T" + }, + { + "name": "k", + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. `k[0]` must not be larger than `k[1]`.", + "type": 3 + }, + { + "name": "num_rows", + "description": "The number of rows of the output matrix. If it is not provided, the op assumes\nthe output matrix is a square matrix and infers the matrix size from k and the\ninnermost dimension of `diagonal`.", + "type": 3 + }, + { + "name": "num_cols", + "description": "The number of columns of the output matrix. If it is not provided, the op\nassumes the output matrix is a square matrix and infers the matrix size from\nk and the innermost dimension of `diagonal`.", + "type": 3 + }, + { + "name": "padding_value", + "description": "The number to fill the area outside the specified diagonal band with.\nDefault is 0.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixExponential", + "summary": "Deprecated, use python implementation tf.linalg.matrix_exponential.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixInverse", + "summary": "Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).", + "description": "\nThe input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. 
The output is a tensor of the same shape as the input\ncontaining the inverse for all input submatrices `[..., :, :]`.\n\nThe op uses LU decomposition with partial pivoting to compute the inverses.\n\nIf a matrix is not invertible there is no guarantee what the op does. It\nmay detect the condition and raise an exception or it may simply return a\ngarbage result.", + "attributes": [ + { + "name": "adjoint", + "type": "boolean", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape is `[..., M, M]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Shape is `[..., M, M]`.\n\n@compatibility(numpy)\nEquivalent to np.linalg.inv\n@end_compatibility", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixLogarithm", + "summary": "Computes the matrix logarithm of one or more square matrices:", + "description": "\n\\\\(log(exp(A)) = A\\\\)\n\nThis op is only defined for complex matrices. If A is positive-definite and\nreal, then casting to a complex matrix, taking the logarithm and casting back\nto a real matrix will give the correct result.\n\nThis function computes the matrix logarithm using the Schur-Parlett algorithm.\nDetails of the algorithm can be found in Section 11.6.2 of:\nNicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008.\nISBN 978-0-898716-46-7.\n\nThe input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. The output is a tensor of the same shape as the input\ncontaining the logarithm for all input submatrices `[..., :, :]`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape is `[..., M, M]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Shape is `[..., M, M]`.\n\n@compatibility(scipy)\nEquivalent to scipy.linalg.logm\n@end_compatibility", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixSetDiag", + "summary": "Returns a batched matrix tensor with new batched diagonal values.", + "description": "Given `input` and `diagonal`, this operation returns a tensor with the\nsame shape and values as `input`, except for the main diagonal of the\ninnermost matrices. These will be overwritten by the values in `diagonal`.\n\nThe output is computed as follows:\n\nAssume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has\n`k` dimensions `[I, J, K, ..., min(M, N)]`. 
Then the output is a\ntensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:\n\n * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.\n * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "description": "Rank `k+1`, where `k >= 1`.", + "typeAttr": "T" + }, + { + "name": "diagonal", + "description": "Rank `k`, where `k >= 1`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Rank `k+1`, with `output.shape = input.shape`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixSetDiagV2", + "summary": "Returns a batched matrix tensor with new batched diagonal values.", + "description": "Given `input` and `diagonal`, this operation returns a tensor with the\nsame shape and values as `input`, except for the specified diagonals of the\ninnermost matrices. These will be overwritten by the values in `diagonal`.\n\n`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or\n`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.\nOtherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.\n`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.\n`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,\n`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\n\nThe output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.\nIf `k` is scalar or `k[0] == k[1]`:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]\n input[i, j, ..., l, m, n] ; otherwise\n```\n\nOtherwise,\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n input[i, j, ..., l, m, n] ; otherwise\n```\nwhere `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.\n\nFor example:\n\n```\n# The main diagonal.\ninput = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)\n [7, 7, 7, 7],\n [7, 7, 7, 7]],\n [[7, 7, 7, 7],\n [7, 7, 7, 7],\n [7, 7, 7, 7]]])\ndiagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)\n [4, 5, 6]])\ntf.matrix_set_diag(input, diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)\n [7, 2, 7, 7],\n [7, 7, 3, 7]],\n [[4, 7, 7, 7],\n [7, 5, 7, 7],\n [7, 7, 6, 7]]]\n\n# A superdiagonal (per batch).\ntf.matrix_set_diag(input, diagonal, k = 1)\n ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)\n [7, 7, 2, 7],\n [7, 7, 7, 3]],\n [[7, 4, 7, 7],\n [7, 7, 5, 7],\n [7, 7, 7, 6]]]\n\n# A band of diagonals.\ndiagonals = np.array([[[1, 2, 3], # Diagonal shape: (2, 2, 3)\n [4, 5, 0]],\n [[6, 1, 2],\n [3, 4, 0]]])\ntf.matrix_set_diag(input, diagonals, k = (-1, 0))\n ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)\n [4, 2, 7, 7],\n [0, 5, 3, 7]],\n [[6, 7, 7, 7],\n [3, 1, 7, 7],\n [7, 4, 2, 7]]]\n\n```", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "description": "Rank `r+1`, where `r >= 1`.", + "typeAttr": "T" + }, + { + "name": "diagonal", + "description": "Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.\n`r >= 1`.", + "typeAttr": "T" + }, + { + "name": "k", + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. 
`k[0]` must not be larger than `k[1]`.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "Rank `r+1`, with `output.shape = input.shape`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixSetDiagV3", + "summary": "Returns a batched matrix tensor with new batched diagonal values.", + "description": "Given `input` and `diagonal`, this operation returns a tensor with the\nsame shape and values as `input`, except for the specified diagonals of the\ninnermost matrices. These will be overwritten by the values in `diagonal`.\n\n`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or\n`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.\nOtherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.\n`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.\n`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,\n`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\n\nThe output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.\nIf `k` is scalar or `k[0] == k[1]`:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]\n input[i, j, ..., l, m, n] ; otherwise\n```\n\nOtherwise,\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n input[i, j, ..., l, m, n] ; otherwise\n```\nwhere `d = n - m`, `diag_index = k[1] - d`, and\n`index_in_diag = n - max(d, 0) + offset`.\n\n`offset` is zero except when the alignment of the diagonal is to the right.\n```\noffset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n```\nwhere `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\nFor example:\n\n```\n# The main diagonal.\ninput = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)\n [7, 7, 7, 7],\n [7, 7, 7, 7]],\n [[7, 7, 7, 7],\n [7, 7, 7, 7],\n [7, 7, 7, 7]]])\ndiagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)\n [4, 5, 6]])\ntf.matrix_set_diag(input, diagonal)\n ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)\n [7, 2, 7, 7],\n [7, 7, 3, 7]],\n [[4, 7, 7, 7],\n [7, 5, 7, 7],\n [7, 7, 6, 7]]]\n\n# A superdiagonal (per batch).\ntf.matrix_set_diag(input, diagonal, k = 1)\n ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)\n [7, 7, 2, 7],\n [7, 7, 7, 3]],\n [[7, 4, 7, 7],\n [7, 7, 5, 7],\n [7, 7, 7, 6]]]\n\n# A band of diagonals.\ndiagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3)\n [6, 5, 8],\n [1, 2, 3],\n [4, 5, 0]],\n [[0, 1, 2],\n [5, 6, 4],\n [6, 1, 2],\n [3, 4, 0]]])\ntf.matrix_set_diag(input, diagonals, k = (-1, 2))\n ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)\n [4, 2, 5, 1],\n [7, 5, 3, 8]],\n [[6, 5, 1, 7],\n [3, 1, 6, 2],\n [7, 4, 2, 4]]]\n\n# LEFT_RIGHT alignment.\ndiagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3)\n [6, 5, 8],\n [1, 2, 3],\n [0, 4, 5]],\n [[1, 2, 0],\n [5, 6, 4],\n [6, 1, 2],\n [0, 3, 4]]])\ntf.matrix_set_diag(input, diagonals, k = (-1, 2), align=\"LEFT_RIGHT\")\n ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)\n [4, 2, 5, 1],\n [7, 5, 3, 8]],\n [[6, 5, 1, 7],\n [3, 1, 6, 2],\n [7, 4, 2, 4]]]\n\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "align", + "type": "string", + "description": "Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is\na string specifying how superdiagonals and subdiagonals should be aligned,\nrespectively. 
There are four possible alignments: \"RIGHT_LEFT\" (default),\n\"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". \"RIGHT_LEFT\" aligns superdiagonals\nto the right (left-pads the row) and subdiagonals to the left (right-pads the\nrow). It is the packing format LAPACK uses. cuSPARSE uses \"LEFT_RIGHT\", which is\nthe opposite alignment. Must be one of the following: `LEFT_RIGHT`, `RIGHT_LEFT`, `LEFT_LEFT`, `RIGHT_RIGHT`.", + "default": "RIGHT_LEFT" + } + ], + "inputs": [ + { + "name": "input", + "description": "Rank `r+1`, where `r >= 1`.", + "typeAttr": "T" + }, + { + "name": "diagonal", + "description": "Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.\n`r >= 1`.", + "typeAttr": "T" + }, + { + "name": "k", + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. `k[0]` must not be larger than `k[1]`.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "Rank `r+1`, with `output.shape = input.shape`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixSolve", + "summary": "Solves systems of linear equations.", + "description": "`Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is\na tensor of shape `[..., M, K]`. If `adjoint` is `False` then each output matrix\nsatisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.\nIf `adjoint` is `True` then each output matrix satisfies\n`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.", + "attributes": [ + { + "name": "adjoint", + "type": "boolean", + "description": "Boolean indicating whether to solve with `matrix` or its (block-wise)\nadjoint.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "matrix", + "description": "Shape is `[..., M, M]`.", + "typeAttr": "T" + }, + { + "name": "rhs", + "description": "Shape is `[..., M, K]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Shape is `[..., M, K]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixSolveLs", + "summary": "Solves one or more linear least-squares problems.", + "description": "`matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same\ntype as `matrix` and shape `[..., M, K]`.\nThe output is a tensor of shape `[..., N, K]` where each output matrix solves\neach of the equations\n`matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`\nin the least squares sense.\n\nWe use the following notation for (complex) matrix and right-hand sides\nin the batch:\n\n`matrix`=\\\\(A \\in \\mathbb{C}^{m \\times n}\\\\),\n`rhs`=\\\\(B \\in \\mathbb{C}^{m \\times k}\\\\),\n`output`=\\\\(X \\in \\mathbb{C}^{n \\times k}\\\\),\n`l2_regularizer`=\\\\(\\lambda \\in \\mathbb{R}\\\\).\n\nIf `fast` is `True`, then the solution is computed by solving the normal\nequations using Cholesky decomposition. 
Specifically, if \\\\(m \\ge n\\\\) then\n\\\\(X = (A^H A + \\lambda I)^{-1} A^H B\\\\), which solves the least-squares\nproblem \\\\(X = \\mathrm{argmin}_{Z \\in \\Re^{n \\times k} } ||A Z - B||_F^2 + \\lambda ||Z||_F^2\\\\).\nIf \\\\(m \\lt n\\\\) then `output` is computed as\n\\\\(X = A^H (A A^H + \\lambda I)^{-1} B\\\\), which (for \\\\(\\lambda = 0\\\\)) is the\nminimum-norm solution to the under-determined linear system, i.e.\n\\\\(X = \\mathrm{argmin}_{Z \\in \\mathbb{C}^{n \\times k} } ||Z||_F^2 \\\\),\nsubject to \\\\(A Z = B\\\\). Notice that the fast path is only numerically stable\nwhen \\\\(A\\\\) is numerically full rank and has a condition number\n\\\\(\\mathrm{cond}(A) \\lt \\frac{1}{\\sqrt{\\epsilon_{mach} } }\\\\) or \\\\(\\lambda\\\\) is\nsufficiently large.\n\nIf `fast` is `False` an algorithm based on the numerically robust complete\northogonal decomposition is used. This computes the minimum-norm\nleast-squares solution, even when \\\\(A\\\\) is rank deficient. This path is\ntypically 6-7 times slower than the fast path. If `fast` is `False` then\n`l2_regularizer` is ignored.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`." + }, + { + "name": "fast", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "matrix", + "description": "Shape is `[..., M, N]`.", + "typeAttr": "T" + }, + { + "name": "rhs", + "description": "Shape is `[..., M, K]`.", + "typeAttr": "T" + }, + { + "name": "l2_regularizer", + "description": "Scalar tensor.\n\n@compatibility(numpy)\nEquivalent to np.linalg.lstsq\n@end_compatibility", + "type": 2 + } + ], + "outputs": [ + { + "name": "output", + "description": "Shape is `[..., N, K]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixSquareRoot", + "summary": "Computes the matrix square root of one or more square matrices:", + "description": "matmul(sqrtm(A), sqrtm(A)) = A\n\nThe input matrix should be invertible. If the input matrix is real, it should\nhave no eigenvalues which are real and negative (pairs of complex conjugate\neigenvalues are allowed).\n\nThe matrix square root is computed by first reducing the matrix to\nquasi-triangular form with the real Schur decomposition. The square root\nof the quasi-triangular matrix is then computed directly. Details of\nthe algorithm can be found in: Nicholas J. Higham, \"Computing real\nsquare roots of a real matrix\", Linear Algebra Appl., 1987.\n\nThe input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. The output is a tensor of the same shape as the input\ncontaining the matrix square root for all input submatrices `[..., :, :]`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape is `[..., M, M]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Shape is `[..., M, M]`.\n\n@compatibility(scipy)\nEquivalent to scipy.linalg.sqrtm\n@end_compatibility", + "typeAttr": "T" + } + ] + }, + { + "name": "MatrixTriangularSolve", + "summary": "Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.", + "description": "\n`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form\nsquare matrices. 
If `lower` is `True` then the strictly upper triangular part\nof each inner-most matrix is assumed to be zero and not accessed.\nIf `lower` is `False` then the strictly lower triangular part of each inner-most\nmatrix is assumed to be zero and not accessed.\n`rhs` is a tensor of shape `[..., M, N]`.\n\nThe output is a tensor of shape `[..., M, N]`. If `adjoint` is\n`False` then the innermost matrices in `output` satisfy matrix equations\n`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.\nIf `adjoint` is `True` then the innermost matrices in\n`output` satisfy matrix equations\n`adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.\n\nNote, the batch shapes for the inputs only need to broadcast.\n\nExample:\n```python\n\na = tf.constant([[3, 0, 0, 0],\n [2, 1, 0, 0],\n [1, 0, 1, 0],\n [1, 1, 1, 1]], dtype=tf.float32)\n\nb = tf.constant([[4],\n [2],\n [4],\n [2]], dtype=tf.float32)\n\nx = tf.linalg.triangular_solve(a, b, lower=True)\nx\n# <tf.Tensor: shape=(4, 1), dtype=float32, numpy=\n# array([[ 1.3333334 ],\n# [-0.66666675],\n# [ 2.6666665 ],\n# [-1.3333331 ]], dtype=float32)>\n\n# in python3 one can use `a@x`\ntf.matmul(a, x)\n# <tf.Tensor: shape=(4, 1), dtype=float32, numpy=\n# array([[4. ],\n# [2.0000001],\n# [4. ],\n# [1.9999999]], dtype=float32)>\n```", + "attributes": [ + { + "name": "lower", + "type": "boolean", + "description": "Boolean indicating whether the innermost matrices in `matrix` are\nlower or upper triangular.", + "default": true + }, + { + "name": "adjoint", + "type": "boolean", + "description": "Boolean indicating whether to solve with `matrix` or its (block-wise)\n adjoint.\n\n@compatibility(numpy)\nEquivalent to scipy.linalg.solve_triangular\n@end_compatibility", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float64`, `float32`, `float16`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "matrix", + "description": "Shape is `[..., M, M]`.", + "typeAttr": "T" + }, + { + "name": "rhs", + "description": "Shape is `[..., M, K]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Shape is `[..., M, K]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Max", + "summary": "Computes the maximum of elements across dimensions of a tensor.", + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If true, retain reduced dimensions with length 1.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`, `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "The tensor to reduce.", + "typeAttr": "T" + }, + { + "name": "reduction_indices", + "description": "The dimensions to reduce. 
Must be in the range\n`[-rank(input), rank(input))`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "The reduced tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxIntraOpParallelismDataset", + "summary": "Creates a dataset that overrides the maximum intra-op parallelism.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "max_intra_op_parallelism", + "description": "Identifies the maximum intra-op parallelism to use.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "MaxPool", + "category": "Pool", + "summary": "Performs max pooling on the input.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "ksize", + "type": "int64[]", + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4 + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`." + }, + { + "name": "explicit_paddings", + "type": "int64[]", + "default": [] + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`, `NCHW_VECT_C`.", + "default": "NHWC" + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D input to pool over.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The max pooled output tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxPool3D", + "summary": "Performs 3D max pooling on the input.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`.", + "minimum": 5 + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. 
Must be one of the following: `NDHWC`, `NCDHW`.", + "default": "NDHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`." + } + ], + "inputs": [ + { + "name": "input", + "description": "Shape `[batch, depth, rows, cols, channels]` tensor to pool over.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The max pooled output tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxPool3DGrad", + "summary": "Computes gradients of 3D max pooling function.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`.", + "minimum": 5 + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "default": "NDHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "TInput", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "orig_input", + "description": "The original input tensor.", + "typeAttr": "TInput" + }, + { + "name": "orig_output", + "description": "The original output tensor.", + "typeAttr": "TInput" + }, + { + "name": "grad", + "description": "Output backprop of shape `[batch, depth, rows, cols, channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxPool3DGradGrad", + "summary": "Computes second-order gradients of the maxpooling function.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`.", + "minimum": 5 + }, + { + "name": "strides", + "type": "int64[]", + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. 
Must be one of the following: `NDHWC`, `NCDHW`.", + "default": "NDHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "orig_input", + "description": "The original input tensor.", + "typeAttr": "T" + }, + { + "name": "orig_output", + "description": "The original output tensor.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "Output backprop of shape `[batch, depth, rows, cols, channels]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Gradients of gradients w.r.t. the input to `max_pool`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxPoolGrad", + "summary": "Computes gradients of the maxpooling function.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4 + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`." + }, + { + "name": "explicit_paddings", + "type": "int64[]", + "default": [] + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "orig_input", + "description": "The original input tensor.", + "typeAttr": "T" + }, + { + "name": "orig_output", + "description": "The original output tensor.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "4-D. Gradients w.r.t. the output of `max_pool`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Gradients w.r.t. the input to `max_pool`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxPoolGradGrad", + "summary": "Computes second-order gradients of the maxpooling function.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4 + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. 
Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "orig_input", + "description": "The original input tensor.", + "typeAttr": "T" + }, + { + "name": "orig_output", + "description": "The original output tensor.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "4-D. Gradients of gradients w.r.t. the input of `max_pool`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Gradients of gradients w.r.t. the input to `max_pool`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxPoolGradGradV2", + "summary": "Computes second-order gradients of the maxpooling function.", + "attributes": [ + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "orig_input", + "description": "The original input tensor.", + "typeAttr": "T" + }, + { + "name": "orig_output", + "description": "The original output tensor.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "4-D. Gradients of gradients w.r.t. the input of `max_pool`.", + "typeAttr": "T" + }, + { + "name": "ksize", + "description": "The size of the window for each dimension of the input tensor.", + "type": 3 + }, + { + "name": "strides", + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "Gradients of gradients w.r.t. the input to `max_pool`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxPoolGradGradWithArgmax", + "summary": "Computes second-order gradients of the maxpooling function.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4 + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "include_batch_in_index", + "type": "boolean", + "description": "Whether to include batch dimension in flattened index of `argmax`.", + "default": false + }, + { + "name": "Targmax", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." 
+ } + ], + "inputs": [ + { + "name": "input", + "description": "The original input.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the\ninput of `max_pool`.", + "typeAttr": "T" + }, + { + "name": "argmax", + "description": "The indices of the maximum values chosen for each output of `max_pool`.", + "typeAttr": "Targmax" + } + ], + "outputs": [ + { + "name": "output", + "description": "Gradients of gradients w.r.t. the input of `max_pool`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxPoolGradV2", + "summary": "Computes gradients of the maxpooling function.", + "attributes": [ + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "orig_input", + "description": "The original input tensor.", + "typeAttr": "T" + }, + { + "name": "orig_output", + "description": "The original output tensor.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "4-D. Gradients w.r.t. the output of `max_pool`.", + "typeAttr": "T" + }, + { + "name": "ksize", + "description": "The size of the window for each dimension of the input tensor.", + "type": 3 + }, + { + "name": "strides", + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "Gradients w.r.t. the input to `max_pool`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxPoolGradWithArgmax", + "summary": "Computes gradients of the maxpooling function.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4 + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4 + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "include_batch_in_index", + "type": "boolean", + "description": "Whether to include batch dimension in flattened index of `argmax`.", + "default": false + }, + { + "name": "Targmax", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "input", + "description": "The original input.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. 
the\noutput of `max_pool`.", + "typeAttr": "T" + }, + { + "name": "argmax", + "description": "The indices of the maximum values chosen for each output of `max_pool`.", + "typeAttr": "Targmax" + } + ], + "outputs": [ + { + "name": "output", + "description": "Gradients w.r.t. the input of `max_pool`.", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxPoolV2", + "category": "Pool", + "summary": "Performs max pooling on the input.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "data_format", + "type": "string", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`, `NCHW_VECT_C`.", + "default": "NHWC" + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D input to pool over.", + "typeAttr": "T" + }, + { + "name": "ksize", + "description": "The size of the window for each dimension of the input tensor.", + "type": 3 + }, + { + "name": "strides", + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The max pooled output tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "MaxPoolWithArgmax", + "category": "Pool", + "summary": "Performs max pooling on the input and outputs both max values and indices.", + "description": "The indices in `argmax` are flattened, so that a maximum value at position\n`[b, y, x, c]` becomes flattened index:\n`(y * width + x) * channels + c` if `include_batch_in_index` is False;\n`((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True.\n\nThe indices returned are always in `[0, height) x [0, width)` before flattening,\neven if padding is involved and the mathematically correct answer is outside\n(either negative or too large). This is a bug, but fixing it is difficult to do\nin a safe backwards compatible way, especially due to flattening.", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4 + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4 + }, + { + "name": "Targmax", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." 
+ }, + { + "name": "include_batch_in_index", + "type": "boolean", + "description": "Whether to include batch dimension in flattened index of `argmax`.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape `[batch, height, width, channels]`. Input to pool over.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The max pooled output tensor.", + "typeAttr": "T" + }, + { + "name": "argmax", + "description": "4-D. The flattened indices of the max values chosen for each output.", + "typeAttr": "Targmax" + } + ] + }, + { + "name": "Maximum", + "summary": "Returns the max of x and y (i.e. x > y ? x : y) element-wise.", + "description": "*NOTE*: `Maximum` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `int64`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "Mean", + "summary": "Computes the mean of elements across dimensions of a tensor.", + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If true, retain reduced dimensions with length 1.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "The tensor to reduce.", + "typeAttr": "T" + }, + { + "name": "reduction_indices", + "description": "The dimensions to reduce. 
Must be in the range\n`[-rank(input), rank(input))`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "The reduced tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "Merge", + "summary": "Forwards the value of an available tensor from `inputs` to `output`.", + "description": "`Merge` waits for at least one of the tensors in `inputs` to become available.\nIt is usually combined with `Switch` to implement branching.\n\n`Merge` forwards the first tensor to become available to `output`, and sets\n`value_index` to its index in `inputs`.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "inputs", + "description": "The input tensors, exactly one of which will become available.", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Will be set to the available input tensor.", + "typeAttr": "T" + }, + { + "name": "value_index", + "description": "The index of the chosen input tensor in `inputs`.", + "type": 3 + } + ] + }, + { + "name": "MergeDedupData", + "summary": "An op that merges elements of integer and float tensors into deduplication data as\nan XLA tuple.", + "description": "This op merges outputs of SplitDedupDataOp, which gives two 1-D tensors, integer\nand floating point. With respect to tuple_mask, this op merges values of these\ntwo tensors into an XLA tuple, which should be the same as the input to\nSplitDedupDataOp.", + "attributes": [ + { + "name": "tuple_mask", + "type": "string", + "description": "A serialized TensorProto string of output tuple mask. This mask is a 2-D tensor,\nwith first column as tuple element type, and second column as span of this type.\nFor example, for an output tuple of (1, 2, 0.1, 3), the mask is [[0, 2], [1, 1], [0,\n1]]. We expect only two types of elements: integer(0) and float(1)." + }, + { + "name": "integer_type", + "type": "type", + "description": "integer_tensor type. Allowed types: {int32, int64, uint32, uint64}. Must be one of the following: `int32`, `int64`, `uint32`, `uint64`." + }, + { + "name": "float_type", + "type": "type", + "description": "float_tensor type. Allowed types: {half, bfloat16, float}. Must be one of the following: `float16`, `bfloat16`, `float32`." + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "integer_tensor", + "description": "A 1-D integer tensor containing the integer elements of the deduplication data tuple.", + "typeAttr": "integer_type" + }, + { + "name": "float_tensor", + "description": "A 1-D float tensor containing the float elements of the deduplication data tuple.", + "typeAttr": "float_type" + } + ], + "outputs": [ + { + "name": "output", + "description": "An XLA tuple merging integer and float elements as deduplication data tuple.", + "type": 21 + } + ] + }, + { + "name": "MergeSummary", + "summary": "Merges summaries.", + "description": "This op creates a\n[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\nprotocol buffer that contains the union of all the values in the input\nsummaries.\n\nWhen the Op is run, it reports an `InvalidArgument` error if multiple values\nin the summaries to merge use the same tag.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "inputs", + "description": "Can be of any shape. 
Each must contain serialized `Summary` protocol\nbuffers.", + "numberAttr": "N", + "type": 7 + } + ], + "outputs": [ + { + "name": "summary", + "description": "Scalar. Serialized `Summary` protocol buffer.", + "type": 7 + } + ] + }, + { + "name": "MergeV2Checkpoints", + "summary": "V2 format specific: merges the metadata files of sharded checkpoints. The", + "description": "result is one logical checkpoint, with one physical metadata file and renamed\ndata files.\n\nIntended for \"grouping\" multiple checkpoints in a sharded checkpoint setup.\n\nIf delete_old_dirs is true, attempts to delete recursively the dirname of each\npath in the input checkpoint_prefixes. This is useful when those paths are non\nuser-facing temporary locations.\n\nIf allow_missing_files is true, merges the checkpoint prefixes as long as\nat least one file exists. Otherwise, if no files exist, an error will be thrown.\nThe default value for allow_missing_files is false.", + "attributes": [ + { + "name": "delete_old_dirs", + "type": "boolean", + "description": "see above.", + "default": true + }, + { + "name": "allow_missing_files", + "type": "boolean", + "description": "see above.", + "default": false + } + ], + "inputs": [ + { + "name": "checkpoint_prefixes", + "description": "prefixes of V2 checkpoints to merge.", + "type": 7 + }, + { + "name": "destination_prefix", + "description": "scalar. The desired final prefix. Allowed to be the same\nas one of the checkpoint_prefixes.", + "type": 7 + } + ] + }, + { + "name": "Mfcc", + "summary": "Transforms a spectrogram into a form that's useful for speech recognition.", + "description": "Mel Frequency Cepstral Coefficients are a way of representing audio data that's\nbeen effective as an input feature for machine learning. They are created by\ntaking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the\nhigher frequencies that are less significant to the human ear. They have a long\nhistory in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum\nis a good resource to learn more.", + "attributes": [ + { + "name": "upper_frequency_limit", + "type": "float32", + "description": "The highest frequency to use when calculating the\ncepstrum.", + "default": 4000.0 + }, + { + "name": "lower_frequency_limit", + "type": "float32", + "description": "The lowest frequency to use when calculating the\ncepstrum.", + "default": 20.0 + }, + { + "name": "filterbank_channel_count", + "type": "int64", + "description": "Resolution of the Mel bank used internally.", + "default": 40 + }, + { + "name": "dct_coefficient_count", + "type": "int64", + "description": "How many output channels to produce per time slice.", + "default": 13 + } + ], + "inputs": [ + { + "name": "spectrogram", + "description": "Typically produced by the Spectrogram op, with magnitude_squared\nset to true.", + "type": 1 + }, + { + "name": "sample_rate", + "description": "How many samples per second the source audio used.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "type": 1 + } + ] + }, + { + "name": "Min", + "summary": "Computes the minimum of elements across dimensions of a tensor.", + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. 
If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If true, retain reduced dimensions with length 1.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`, `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "The tensor to reduce.", + "typeAttr": "T" + }, + { + "name": "reduction_indices", + "description": "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "The reduced tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "Minimum", + "summary": "Returns the min of x and y (i.e. x < y ? x : y) element-wise.", + "description": "*NOTE*: `Minimum` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `int64`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "MirrorPad", + "summary": "Pads a tensor with mirrored values.", + "description": "This operation pads an `input` with mirrored values according to the `paddings`\nyou specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is\nthe rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates\nhow many values to add before the contents of `input` in that dimension, and\n`paddings[D, 1]` indicates how many values to add after the contents of `input`\nin that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater\nthan `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true\n(if false, respectively).\n\nThe padded size of each dimension D of the output is:\n\n`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\n\nFor example:\n\n```\n# 't' is [[1, 2, 3], [4, 5, 6]].\n# 'paddings' is [[1, 1], [2, 2]].\n# 'mode' is SYMMETRIC.\n# rank of 't' is 2.\npad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]\n [2, 1, 1, 2, 3, 3, 2]\n [5, 4, 4, 5, 6, 6, 5]\n [5, 4, 4, 5, 6, 6, 5]]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tpaddings", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "mode", + "type": "string", + "description": "Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions\ndo not include the borders, while in symmetric mode the padded regions\ndo include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`\nis `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and\nit is `[1, 2, 3, 3, 2]` in symmetric mode. Must be one of the following: `REFLECT`, `SYMMETRIC`." 
+ } + ], + "inputs": [ + { + "name": "input", + "description": "The input tensor to be padded.", + "typeAttr": "T" + }, + { + "name": "paddings", + "description": "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`.", + "typeAttr": "Tpaddings" + } + ], + "outputs": [ + { + "name": "output", + "description": "The padded tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "MirrorPadGrad", + "summary": "Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.", + "description": "This operation folds the padded areas of `input` by `MirrorPad` according to the\n`paddings` you specify. `paddings` must be the same as the `paddings` argument\ngiven to the corresponding `MirrorPad` op.\n\nThe folded size of each dimension D of the output is:\n\n`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`\n\nFor example:\n\n```\n# 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].\n# 'paddings' is [[0, 1], [0, 1]].\n# 'mode' is SYMMETRIC.\n# rank of 't' is 2.\npad(t, paddings) ==> [[ 1, 5]\n [11, 28]]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tpaddings", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "mode", + "type": "string", + "description": "The mode used in the `MirrorPad` op. Must be one of the following: `REFLECT`, `SYMMETRIC`." + } + ], + "inputs": [ + { + "name": "input", + "description": "The input tensor to be folded.", + "typeAttr": "T" + }, + { + "name": "paddings", + "description": "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`.", + "typeAttr": "Tpaddings" + } + ], + "outputs": [ + { + "name": "output", + "description": "The folded tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "MlirPassthroughOp", + "summary": "Wraps an arbitrary MLIR computation expressed as a module with a main() function.", + "description": "This operation does not have an associated kernel and is not intended to be\nexecuted in a regular TensorFlow session. Instead it is intended to be used for\ntesting or for special cases where a user intends to pass custom MLIR computation\nthrough a TensorFlow graph with the intent of having custom tooling processing\nit downstream (when targeting a different environment, like TensorFlow lite for\nexample).\nThe MLIR module is expected to have a main() function that will be used as an\nentry point. 
The inputs to the operations will be passed as arguments to the\nmain() function and the returned values of the main function are mapped to the\noutputs.\nExample usage:\n\n```\nimport tensorflow as tf\nfrom tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op\n\nmlir_module = '''python\nfunc @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {\n %add = \"magic.op\"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>\n return %add : tensor<10x10xf32>\n}\n'''\n\n@tf.function\ndef foo(x, y):\n return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])\n\ngraph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()\n```", + "attributes": [ + { + "name": "mlir_module", + "type": "string" + }, + { + "name": "Tinputs", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Toutputs", + "type": "type[]", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "inputs", + "typeListAttr": "Tinputs" + } + ], + "outputs": [ + { + "name": "outputs", + "typeListAttr": "Toutputs" + } + ] + }, + { + "name": "Mod", + "summary": "Returns element-wise remainder of division. This emulates C semantics in that", + "description": "the result here is consistent with a truncating divide. E.g.\n`tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.\n\n*NOTE*: `Mod` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`, `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "ModelDataset", + "summary": "Identity transformation that models performance.", + "description": "Identity transformation that models performance.", + "attributes": [ + { + "name": "algorithm", + "type": "int64", + "default": 0 + }, + { + "name": "cpu_budget", + "type": "int64", + "default": 0 + }, + { + "name": "ram_budget", + "type": "int64", + "default": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Mul", + "summary": "Returns x * y element-wise.", + "description": "*NOTE*: `Mul` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "MulNoNan", + "summary": "Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.", + "description": "*NOTE*: `MulNoNan` supports broadcasting. 
More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "MultiDeviceIterator", + "summary": "Creates a MultiDeviceIterator resource.", + "attributes": [ + { + "name": "devices", + "type": "string[]", + "description": "A list of devices the iterator works across.", + "minimum": 1 + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this resource will be shared under the given name\nacross multiple sessions." + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this resource is placed in the given container.\nOtherwise, a default container is used." + }, + { + "name": "output_types", + "type": "type[]", + "description": "The type list for the return values.", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "description": "The list of shapes being produced.", + "minimum": 1 + } + ], + "outputs": [ + { + "name": "handle", + "description": "Handle to the resource created.", + "type": 20 + } + ] + }, + { + "name": "MultiDeviceIteratorFromStringHandle", + "summary": "Generates a MultiDeviceIterator resource from its provided string handle.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "description": "The type list for the return values.", + "minimum": 0, + "default": [] + }, + { + "name": "output_shapes", + "type": "shape[]", + "description": "The list of shapes being produced.", + "minimum": 0, + "default": [] + } + ], + "inputs": [ + { + "name": "string_handle", + "description": "String representing the resource.", + "type": 7 + } + ], + "outputs": [ + { + "name": "multi_device_iterator", + "description": "A MultiDeviceIterator resource.", + "type": 20 + } + ] + }, + { + "name": "MultiDeviceIteratorGetNextFromShard", + "summary": "Gets next element for the provided shard number.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "description": "The type list for the return values.", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "description": "The list of shapes being produced.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "multi_device_iterator", + "description": "A MultiDeviceIterator resource.", + "type": 20 + }, + { + "name": "shard_num", + "description": "Integer representing which shard to fetch data for.", + "type": 3 + }, + { + "name": "incarnation_id", + "description": "Which incarnation of the MultiDeviceIterator is running.", + "type": 9 + } + ], + "outputs": [ + { + "name": "components", + "description": "Result of the get_next on the dataset.", + "typeListAttr": "output_types" + } + ] + }, + { + "name": "MultiDeviceIteratorInit", + "summary": "Initializes the multi device iterator with the given dataset.", + "inputs": [ + { + "name": "dataset", + "description": "Dataset to be iterated upon.", + "type": 21 + }, + { + "name": "multi_device_iterator", + "description": "A MultiDeviceIteratorResource.", + "type": 20 + }, + { + "name": "max_buffer_size", + "description": "The maximum size of the host side per device buffer to keep.", + "type": 9 + } + ], + "outputs": [ + { + "name": "incarnation_id", + "description": 
"An int64 indicating which incarnation of the MultiDeviceIterator\nis running.", + "type": 9 + } + ] + }, + { + "name": "MultiDeviceIteratorToStringHandle", + "summary": "Produces a string handle for the given MultiDeviceIterator.", + "inputs": [ + { + "name": "multi_device_iterator", + "description": "A MultiDeviceIterator resource.", + "type": 20 + } + ], + "outputs": [ + { + "name": "string_handle", + "description": "A string representing the resource.", + "type": 7 + } + ] + }, + { + "name": "Multinomial", + "summary": "Draws samples from a multinomial distribution.", + "attributes": [ + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 is set to be non-zero, the internal random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "output_dtype", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "logits", + "description": "2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]`\nrepresents the unnormalized log probabilities for all classes.", + "typeAttr": "T" + }, + { + "name": "num_samples", + "description": "0-D. Number of independent samples to draw for each row slice.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]`\ncontains the drawn class labels with range `[0, num_classes)`.", + "typeAttr": "output_dtype" + } + ] + }, + { + "name": "MutableDenseHashTable", + "summary": "Creates an empty hash table that uses tensors as the backing store.", + "description": "It uses \"open addressing\" with quadratic reprobing to resolve\ncollisions.\n\nThis op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "default": "" + }, + { + "name": "use_node_name_sharing", + "type": "boolean", + "default": false + }, + { + "name": "key_dtype", + "type": "type", + "description": "Type of the table keys." + }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." + }, + { + "name": "value_shape", + "type": "shape", + "description": "The shape of each value.", + "default": { + "type": "shape", + "value": "?" + } + }, + { + "name": "initial_num_buckets", + "type": "int64", + "description": "The initial number of hash table buckets. Must be a power\nto 2.", + "default": 131072 + }, + { + "name": "max_load_factor", + "type": "float32", + "description": "The maximum ratio between number of entries and number of\nbuckets before growing the table. 
Must be between 0 and 1.", + "default": 0.800000011920929 + } + ], + "inputs": [ + { + "name": "empty_key", + "description": "The key used to represent empty key buckets internally. Must not\nbe used in insert or lookup operations.", + "typeAttr": "key_dtype" + } + ], + "outputs": [ + { + "name": "table_handle", + "description": "Handle to a table.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "MutableDenseHashTableV2", + "summary": "Creates an empty hash table that uses tensors as the backing store.", + "description": "It uses \"open addressing\" with quadratic reprobing to resolve\ncollisions.\n\nThis op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "default": "" + }, + { + "name": "use_node_name_sharing", + "type": "boolean", + "default": false + }, + { + "name": "key_dtype", + "type": "type", + "description": "Type of the table keys." + }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." + }, + { + "name": "value_shape", + "type": "shape", + "description": "The shape of each value.", + "default": { + "type": "shape", + "value": "?" + } + }, + { + "name": "initial_num_buckets", + "type": "int64", + "description": "The initial number of hash table buckets. Must be a power\nof 2.", + "default": 131072 + }, + { + "name": "max_load_factor", + "type": "float32", + "description": "The maximum ratio between number of entries and number of\nbuckets before growing the table. Must be between 0 and 1.", + "default": 0.800000011920929 + } + ], + "inputs": [ + { + "name": "empty_key", + "description": "The key used to represent empty key buckets internally. Must not\nbe used in insert or lookup operations.", + "typeAttr": "key_dtype" + }, + { + "name": "deleted_key", + "typeAttr": "key_dtype" + } + ], + "outputs": [ + { + "name": "table_handle", + "description": "Handle to a table.", + "type": 20 + } + ] + }, + { + "name": "MutableHashTable", + "summary": "Creates an empty hash table.", + "description": "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "default": "" + }, + { + "name": "use_node_name_sharing", + "type": "boolean", + "description": "If true and shared_name is empty, the table is shared\nusing the node name.", + "default": false + }, + { + "name": "key_dtype", + "type": "type", + "description": "Type of the table keys." + }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." 
+ } + ], + "outputs": [ + { + "name": "table_handle", + "description": "Handle to a table.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "MutableHashTableOfTensors", + "summary": "Creates an empty hash table.", + "description": "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a vector. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "default": "" + }, + { + "name": "use_node_name_sharing", + "type": "boolean", + "default": false + }, + { + "name": "key_dtype", + "type": "type", + "description": "Type of the table keys." + }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." + }, + { + "name": "value_shape", + "type": "shape", + "default": { + "type": "shape", + "value": "?" + } + } + ], + "outputs": [ + { + "name": "table_handle", + "description": "Handle to a table.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "MutableHashTableOfTensorsV2", + "summary": "Creates an empty hash table.", + "description": "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a vector. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "default": "" + }, + { + "name": "use_node_name_sharing", + "type": "boolean", + "default": false + }, + { + "name": "key_dtype", + "type": "type", + "description": "Type of the table keys." + }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." + }, + { + "name": "value_shape", + "type": "shape", + "default": { + "type": "shape", + "value": "?" + } + } + ], + "outputs": [ + { + "name": "table_handle", + "description": "Handle to a table.", + "type": 20 + } + ] + }, + { + "name": "MutableHashTableV2", + "summary": "Creates an empty hash table.", + "description": "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "default": "" + }, + { + "name": "use_node_name_sharing", + "type": "boolean", + "description": "If true and shared_name is empty, the table is shared\nusing the node name.", + "default": false + }, + { + "name": "key_dtype", + "type": "type", + "description": "Type of the table keys." 
+ }, + { + "name": "value_dtype", + "type": "type", + "description": "Type of the table values." + } + ], + "outputs": [ + { + "name": "table_handle", + "description": "Handle to a table.", + "type": 20 + } + ] + }, + { + "name": "MutexLock", + "summary": "Locks a mutex resource. The output is the lock. So long as the lock tensor", + "description": "is alive, any other request to use `MutexLock` with this mutex will wait.\n\nThis is particularly useful for creating a critical section when used in\nconjunction with `MutexLockIdentity`:\n\n```python\n\nmutex = mutex_v2(\n shared_name=handle_name, container=container, name=name)\n\ndef execute_in_critical_section(fn, *args, **kwargs):\n lock = gen_resource_variable_ops.mutex_lock(mutex)\n\n with ops.control_dependencies([lock]):\n r = fn(*args, **kwargs)\n\n with ops.control_dependencies(nest.flatten(r)):\n with ops.colocate_with(mutex):\n ensure_lock_exists = mutex_lock_identity(lock)\n\n # Make sure that if any element of r is accessed, all of\n # them are executed together.\n r = nest.map_structure(tf.identity, r)\n\n with ops.control_dependencies([ensure_lock_exists]):\n return nest.map_structure(tf.identity, r)\n```\n\nWhile `fn` is running in the critical section, no other functions which wish to\nuse this critical section may run.\n\nOften the use case is that two executions of the same graph, in parallel,\nwish to run `fn`; and we wish to ensure that only one of them executes\nat a time. This is especially important if `fn` modifies one or more\nvariables at a time.\n\nIt is also useful if two separate functions must share a resource, but we\nwish to ensure the usage is exclusive.", + "inputs": [ + { + "name": "mutex", + "description": "The mutex resource to lock.", + "type": 20 + } + ], + "outputs": [ + { + "name": "mutex_lock", + "description": "A tensor that keeps a shared pointer to a lock on the mutex;\nwhen the Tensor is destroyed, the use count on the shared pointer is decreased\nby 1. When it reaches 0, the lock is released.", + "type": 21 + } + ] + }, + { + "name": "MutexV2", + "summary": "Creates a Mutex resource that can be locked by `MutexLock`.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this variable is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this variable is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "default": "" + } + ], + "outputs": [ + { + "name": "resource", + "description": "The mutex resource.", + "type": 20 + } + ] + }, + { + "name": "NcclAllReduce", + "summary": "Outputs a tensor containing the reduction across all input tensors.", + "description": "Outputs a tensor containing the reduction across all input tensors passed to ops\nwithin the same `shared_name`.\n\nThe graph should be constructed so that if one op runs with shared_name value `c`,\nthen `num_devices` ops will run with shared_name value `c`. 
Failure to do so\nwill cause the graph execution to fail to complete.\n\ninput: the input to the reduction\ndata: the value of the reduction across all `num_devices` devices.\nreduction: the reduction operation to perform.\nnum_devices: The number of devices participating in this reduction.\nshared_name: Identifier that is shared between ops of the same reduction.", + "attributes": [ + { + "name": "reduction", + "type": "string", + "description": "Must be one of the following: `min`, `max`, `prod`, `sum`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`." + }, + { + "name": "num_devices", + "type": "int64" + }, + { + "name": "shared_name", + "type": "string" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "NcclBroadcast", + "summary": "Sends `input` to all devices that are connected to the output.", + "description": "Sends `input` to all devices that are connected to the output.\n\nThe graph should be constructed so that all ops connected to the output have a\nvalid device assignment, and the op itself is assigned one of these devices.\n\ninput: The input to the broadcast.\noutput: The same as input.\nshape: The shape of the input tensor.\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`." + }, + { + "name": "shape", + "type": "shape" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "NcclReduce", + "summary": "Reduces `input` from `num_devices` using `reduction` to a single device.", + "description": "Reduces `input` from `num_devices` using `reduction` to a single device.\n\nThe graph should be constructed so that all inputs have a valid device\nassignment, and the op itself is assigned one of these devices.\n\ninput: The input to the reduction.\ndata: the value of the reduction across all `num_devices` devices.\nreduction: the reduction operation to perform.", + "attributes": [ + { + "name": "reduction", + "type": "string", + "description": "Must be one of the following: `min`, `max`, `prod`, `sum`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`." + }, + { + "name": "num_devices", + "type": "int64", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input", + "numberAttr": "num_devices", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ] + }, + { + "name": "Ndtri", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "NearestNeighbors", + "summary": "Selects the k nearest centers for each point.", + "description": "Rows of points are assumed to be input points. Rows of centers are assumed to be\nthe list of candidate centers. For each point, the k centers that have the least L2\ndistance to it are computed.", + "inputs": [ + { + "name": "points", + "description": "Matrix of shape (n, d). Rows are assumed to be input points.", + "type": 1 + }, + { + "name": "centers", + "description": "Matrix of shape (m, d). 
Rows are assumed to be centers.", + "type": 1 + }, + { + "name": "k", + "description": "Number of nearest centers to return for each point. If k is larger than m, then\nonly m centers are returned.", + "type": 9 + } + ], + "outputs": [ + { + "name": "nearest_center_indices", + "description": "Matrix of shape (n, min(m, k)). Each row contains the indices of the centers\nclosest to the corresponding point, ordered by increasing distance.", + "type": 9 + }, + { + "name": "nearest_center_distances", + "description": "Matrix of shape (n, min(m, k)). Each row contains the squared L2 distance to the\ncorresponding center in nearest_center_indices.", + "type": 1 + } + ] + }, + { + "name": "Neg", + "summary": "Computes numerical negative value element-wise.", + "description": "I.e., \\\\(y = -x\\\\).", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "NegTrain", + "summary": "Training via negative sampling.", + "attributes": [ + { + "name": "vocab_count", + "type": "int64[]", + "description": "Count of words in the vocabulary." + }, + { + "name": "num_negative_samples", + "type": "int64", + "description": "Number of negative samples per example." + } + ], + "inputs": [ + { + "name": "w_in", + "description": "input word embedding.", + "type": 1, + "isRef": true + }, + { + "name": "w_out", + "description": "output word embedding.", + "type": 1, + "isRef": true + }, + { + "name": "examples", + "description": "A vector of word ids.", + "type": 3 + }, + { + "name": "labels", + "description": "A vector of word ids.", + "type": 3 + }, + { + "name": "lr", + "type": 1 + } + ] + }, + { + "name": "NextAfter", + "summary": "Returns the next representable value of `x1` in the direction of `x2`, element-wise.", + "description": "This operation returns the same result as the C++ std::nextafter function.\n\nIt can also return a subnormal number.\n\n@compatibility(cpp)\nEquivalent to C++ std::nextafter function.\n@end_compatibility", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "x1", + "typeAttr": "T" + }, + { + "name": "x2", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "NextIteration", + "summary": "Makes its input available to the next iteration.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "data", + "description": "The tensor to be made available to the next iteration.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The same tensor as `data`.", + "typeAttr": "T" + } + ] + }, + { + "name": "NoOp", + "summary": "Does nothing. Only useful as a placeholder for control edges." + }, + { + "name": "NonDeterministicInts", + "summary": "Non-deterministically generates some integers.", + "description": "This op may use some OS-provided source of non-determinism (e.g. 
an RNG), so each execution will give different results.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output.", + "default": { + "type": "type", + "value": 9 + } + }, + { + "name": "shape_dtype", + "type": "type", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "Non-deterministic integer values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "NonMaxSuppression", + "summary": "Greedily selects a subset of bounding boxes in descending order of score,", + "description": "pruning away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system. Note that this\nalgorithm is invariant to orthogonal transformations and translations\nof the coordinate system; thus translations or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather` operation. For example:\n selected_indices = tf.image.non_max_suppression(\n boxes, scores, max_output_size, iou_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)", + "attributes": [ + { + "name": "iou_threshold", + "type": "float32", + "description": "A float representing the threshold for deciding whether boxes\noverlap too much with respect to IOU.", + "default": 0.5 + } + ], + "inputs": [ + { + "name": "boxes", + "description": "A 2-D float tensor of shape `[num_boxes, 4]`.", + "type": 1 + }, + { + "name": "scores", + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "type": 1 + }, + { + "name": "max_output_size", + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "type": 3 + } + ], + "outputs": [ + { + "name": "selected_indices", + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "type": 3 + } + ] + }, + { + "name": "NonMaxSuppressionV2", + "summary": "Greedily selects a subset of bounding boxes in descending order of score,", + "description": "pruning away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system. 
Note that this\nalgorithm is invariant to orthogonal transformations and translations\nof the coordinate system; thus translations or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\n\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather` operation. For example:\n\n selected_indices = tf.image.non_max_suppression_v2(\n boxes, scores, max_output_size, iou_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "T_threshold", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "boxes", + "description": "A 2-D float tensor of shape `[num_boxes, 4]`.", + "typeAttr": "T" + }, + { + "name": "scores", + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "typeAttr": "T" + }, + { + "name": "max_output_size", + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "type": 3 + }, + { + "name": "iou_threshold", + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU.", + "typeAttr": "T_threshold" + } + ], + "outputs": [ + { + "name": "selected_indices", + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "type": 3 + } + ] + }, + { + "name": "NonMaxSuppressionV3", + "summary": "Greedily selects a subset of bounding boxes in descending order of score,", + "description": "pruning away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes with score less than\n`score_threshold` are removed. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system and more\ngenerally is invariant to orthogonal transformations and translations\nof the coordinate system; thus translations or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather` operation. 
For example:\n selected_indices = tf.image.non_max_suppression_v2(\n boxes, scores, max_output_size, iou_threshold, score_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "T_threshold", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "boxes", + "description": "A 2-D float tensor of shape `[num_boxes, 4]`.", + "typeAttr": "T" + }, + { + "name": "scores", + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "typeAttr": "T" + }, + { + "name": "max_output_size", + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "type": 3 + }, + { + "name": "iou_threshold", + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU.", + "typeAttr": "T_threshold" + }, + { + "name": "score_threshold", + "description": "A 0-D float tensor representing the threshold for deciding when to remove\nboxes based on score.", + "typeAttr": "T_threshold" + } + ], + "outputs": [ + { + "name": "selected_indices", + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "type": 3 + } + ] + }, + { + "name": "NonMaxSuppressionV4", + "summary": "Greedily selects a subset of bounding boxes in descending order of score,", + "description": "pruning away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes with score less than\n`score_threshold` are removed. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system and more\ngenerally is invariant to orthogonal transformations and translations\nof the coordinate system; thus translations or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather` operation. For example:\n selected_indices = tf.image.non_max_suppression_v2(\n boxes, scores, max_output_size, iou_threshold, score_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "T_threshold", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "pad_to_max_output_size", + "type": "boolean", + "description": "If true, the output `selected_indices` is padded to be of length\n`max_output_size`. 
Defaults to false.", + "default": false + } + ], + "inputs": [ + { + "name": "boxes", + "description": "A 2-D float tensor of shape `[num_boxes, 4]`.", + "typeAttr": "T" + }, + { + "name": "scores", + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "typeAttr": "T" + }, + { + "name": "max_output_size", + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "type": 3 + }, + { + "name": "iou_threshold", + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU.", + "typeAttr": "T_threshold" + }, + { + "name": "score_threshold", + "description": "A 0-D float tensor representing the threshold for deciding when to remove\nboxes based on score.", + "typeAttr": "T_threshold" + } + ], + "outputs": [ + { + "name": "selected_indices", + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "type": 3 + }, + { + "name": "valid_outputs", + "description": "A 0-D integer tensor representing the number of valid elements in\n`selected_indices`, with the valid elements appearing first.", + "type": 3 + } + ] + }, + { + "name": "NonMaxSuppressionV5", + "summary": "Greedily selects a subset of bounding boxes in descending order of score,", + "description": "pruning away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes with score less than\n`score_threshold` are removed. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system and more\ngenerally is invariant to orthogonal transformations and translations\nof the coordinate system; thus translations or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather` operation. For example:\n selected_indices = tf.image.non_max_suppression_v2(\n boxes, scores, max_output_size, iou_threshold, score_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)\nThis op also supports a Soft-NMS (with Gaussian weighting) mode (c.f.\nBodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score\nof other overlapping boxes instead of directly causing them to be pruned.\nTo enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be\nlarger than 0.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "pad_to_max_output_size", + "type": "boolean", + "description": "If true, the output `selected_indices` is padded to be of length\n`max_output_size`. 
Defaults to false.", + "default": false + } + ], + "inputs": [ + { + "name": "boxes", + "description": "A 2-D float tensor of shape `[num_boxes, 4]`.", + "typeAttr": "T" + }, + { + "name": "scores", + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "typeAttr": "T" + }, + { + "name": "max_output_size", + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "type": 3 + }, + { + "name": "iou_threshold", + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU.", + "typeAttr": "T" + }, + { + "name": "score_threshold", + "description": "A 0-D float tensor representing the threshold for deciding when to remove\nboxes based on score.", + "typeAttr": "T" + }, + { + "name": "soft_nms_sigma", + "description": "A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et\nal. (cf. https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which\nis the default), we fall back to standard (hard) NMS.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "selected_indices", + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "type": 3 + }, + { + "name": "selected_scores", + "description": "A 1-D float tensor of shape `[M]` representing the corresponding\nscores for each selected box, where `M <= max_output_size`. Scores only differ\nfrom corresponding input scores when using Soft NMS (i.e. when\n`soft_nms_sigma>0`).", + "typeAttr": "T" + }, + { + "name": "valid_outputs", + "description": "A 0-D integer tensor representing the number of valid elements in\n`selected_indices`, with the valid elements appearing first.", + "type": 3 + } + ] + }, + { + "name": "NonMaxSuppressionWithOverlaps", + "summary": "Greedily selects a subset of bounding boxes in descending order of score,", + "description": "pruning away boxes that have high overlaps\nwith previously selected boxes. Bounding boxes with score less than\n`score_threshold` are removed. N-by-n overlap values are supplied as a square matrix,\nwhich allows for defining a custom overlap criterion (e.g. intersection over union,\nintersection over area, etc.).\n\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather` operation. 
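For reference, the greedy loop that the NonMaxSuppressionV3-V5 entries above describe can be sketched outside the graph. The following is a minimal NumPy sketch, not TensorFlow's kernel: `_iou` and `non_max_suppression` are our own illustrative helper names, and the Gaussian decay only approximates the Soft-NMS rescoring of `NonMaxSuppressionV5`.

```python
# Minimal NumPy sketch of greedy NMS (hard mode) plus V5-style Soft-NMS.
import numpy as np

def _iou(a, b):
    # a: [4], b: [N, 4]; boxes are [y1, x1, y2, x2] with corners given as any
    # diagonal pair, so normalize with min/max first.
    ay1, ax1 = min(a[0], a[2]), min(a[1], a[3])
    ay2, ax2 = max(a[0], a[2]), max(a[1], a[3])
    by1, bx1 = np.minimum(b[:, 0], b[:, 2]), np.minimum(b[:, 1], b[:, 3])
    by2, bx2 = np.maximum(b[:, 0], b[:, 2]), np.maximum(b[:, 1], b[:, 3])
    ih = np.clip(np.minimum(ay2, by2) - np.maximum(ay1, by1), 0, None)
    iw = np.clip(np.minimum(ax2, bx2) - np.maximum(ax1, bx1), 0, None)
    inter = ih * iw
    union = (ay2 - ay1) * (ax2 - ax1) + (by2 - by1) * (bx2 - bx1) - inter
    return inter / np.maximum(union, 1e-10)

def non_max_suppression(boxes, scores, max_output_size,
                        iou_threshold, score_threshold, soft_nms_sigma=0.0):
    scores = scores.astype(np.float64).copy()
    selected = []
    while len(selected) < max_output_size:
        i = int(np.argmax(scores))
        if scores[i] == -np.inf or scores[i] < score_threshold:
            break                              # remaining boxes score too low
        selected.append(i)
        overlaps = _iou(boxes[i], boxes)
        if soft_nms_sigma > 0.0:               # Soft-NMS: decay overlapping scores
            scores *= np.exp(-overlaps**2 / soft_nms_sigma)
        else:                                  # hard NMS: prune overlapping boxes
            scores[overlaps > iou_threshold] = -np.inf
        scores[i] = -np.inf                    # never re-select the same box
    return np.array(selected, dtype=np.int32)

boxes = np.array([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, 2, 1, 3]], np.float32)
scores = np.array([0.9, 0.8, 0.7], np.float32)
keep = non_max_suppression(boxes, scores, 3, iou_threshold=0.5, score_threshold=0.0)
print(keep, boxes[keep])   # box 1 is suppressed (IoU with box 0 is ~0.82);
                           # boxes[keep] plays the role of tf.gather(boxes, keep)
```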
For example:\n\n selected_indices = tf.image.non_max_suppression_with_overlaps(\n overlaps, scores, max_output_size, overlap_threshold, score_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)", + "inputs": [ + { + "name": "overlaps", + "description": "A 2-D float tensor of shape `[num_boxes, num_boxes]` representing\nthe n-by-n box overlap values.", + "type": 1 + }, + { + "name": "scores", + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "type": 1 + }, + { + "name": "max_output_size", + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "type": 3 + }, + { + "name": "overlap_threshold", + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much.", + "type": 1 + }, + { + "name": "score_threshold", + "description": "A 0-D float tensor representing the threshold for deciding when to remove\nboxes based on score.", + "type": 1 + } + ], + "outputs": [ + { + "name": "selected_indices", + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "type": 3 + } + ] + }, + { + "name": "NonSerializableDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "NotEqual", + "summary": "Returns the truth value of (x != y) element-wise.", + "description": "*NOTE*: `NotEqual` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "incompatible_shape_error", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "NthElement", + "summary": "Finds values of the `n`-th order statistic for the last dimension.", + "description": "If the input is a vector (rank-1), finds the entry which is the nth-smallest\nvalue in the vector and outputs its value as a scalar tensor.\n\nFor matrices (resp. higher rank input), computes the entry which is the\nnth-smallest value in each row (resp. vector along the last dimension). Thus,\n\n values.shape = input.shape[:-1]", + "attributes": [ + { + "name": "reverse", + "type": "boolean", + "description": "When set to True, find the nth-largest value in the vector rather than the\nnth-smallest.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "input", + "description": "1-D or higher with last dimension at least `n+1`.", + "typeAttr": "T" + }, + { + "name": "n", + "description": "0-D. Position of sorted vector to select along the last dimension (along\neach row for matrices). 
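The NthElement contract above (`values.shape = input.shape[:-1]`, selection along the last dimension, `reverse` flipping to the nth-largest) maps directly onto a partial sort. A small NumPy sketch with our own helper name:

```python
# Illustrative NumPy equivalent of the NthElement contract described above.
import numpy as np

def nth_element(x, n, reverse=False):
    x = np.asarray(x)
    if not 0 <= n < x.shape[-1]:
        raise ValueError("valid range of n is [0, input.shape[-1])")
    k = x.shape[-1] - 1 - n if reverse else n   # nth-largest == mirrored index
    return np.partition(x, k, axis=-1)[..., k]  # result shape == x.shape[:-1]

print(nth_element([[5, 2, 9], [1, 7, 3]], n=1))                # [5 3]
print(nth_element([[5, 2, 9], [1, 7, 3]], n=0, reverse=True))  # [9 7]
```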
Valid range of n is `[0, input.shape[-1])`", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "description": "The `n`-th order statistic along each last dimensional slice.", + "typeAttr": "T" + } + ] + }, + { + "name": "OneHot", + "summary": "Returns a one-hot tensor.", + "description": "The locations represented by indices in `indices` take value `on_value`,\nwhile all other locations take value `off_value`.\n\nIf the input `indices` is rank `N`, the output will have rank `N+1`.\nThe new axis is created at dimension `axis` (default: the new axis is\nappended at the end).\n\nIf `indices` is a scalar the output shape will be a vector of length `depth`.\n\nIf `indices` is a vector of length `features`, the output shape will be:\n```\n features x depth if axis == -1\n depth x features if axis == 0\n```\n\nIf `indices` is a matrix (batch) with shape `[batch, features]`,\nthe output shape will be:\n```\n batch x features x depth if axis == -1\n batch x depth x features if axis == 1\n depth x batch x features if axis == 0\n```\n\n\nExamples\n=========\n\nSuppose that\n```\n indices = [0, 2, -1, 1]\n depth = 3\n on_value = 5.0\n off_value = 0.0\n axis = -1\n```\n\nThen output is `[4 x 3]`:\n```\noutput =\n [5.0 0.0 0.0] // one_hot(0)\n [0.0 0.0 5.0] // one_hot(2)\n [0.0 0.0 0.0] // one_hot(-1)\n [0.0 5.0 0.0] // one_hot(1)\n```\n\nSuppose that\n```\n indices = [0, 2, -1, 1]\n depth = 3\n on_value = 0.0\n off_value = 3.0\n axis = 0\n```\n\nThen output is `[3 x 4]`:\n```\noutput =\n [0.0 3.0 3.0 3.0]\n [3.0 3.0 3.0 0.0]\n [3.0 0.0 3.0 3.0]\n// ^ one_hot(0)\n// ^ one_hot(2)\n// ^ one_hot(-1)\n// ^ one_hot(1)\n```\n\nSuppose that\n```\n indices = [[0, 2], [1, -1]]\n depth = 3\n on_value = 1.0\n off_value = 0.0\n axis = -1\n```\n\nThen output is `[2 x 2 x 3]`:\n```\noutput =\n [\n [1.0, 0.0, 0.0] // one_hot(0)\n [0.0, 0.0, 1.0] // one_hot(2)\n ][\n [0.0, 1.0, 0.0] // one_hot(1)\n [0.0, 0.0, 0.0] // one_hot(-1)\n ]\n```", + "attributes": [ + { + "name": "axis", + "type": "int64", + "description": "The axis to fill (default: -1, a new inner-most axis).", + "default": -1 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "TI", + "type": "type", + "description": "Must be one of the following: `uint8`, `int8`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "indices", + "description": "A tensor of indices.", + "typeAttr": "TI" + }, + { + "name": "depth", + "description": "A scalar defining the depth of the one hot dimension.", + "type": 3 + }, + { + "name": "on_value", + "description": "A scalar defining the value to fill in output when `indices[j] = i`.", + "typeAttr": "T" + }, + { + "name": "off_value", + "description": "A scalar defining the value to fill in output when `indices[j] != i`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The one-hot tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "OneShotIterator", + "summary": "Makes a \"one-shot\" iterator that can be iterated only once.", + "description": "A one-shot iterator bundles the logic for defining the dataset and\nthe state of the iterator in a single op, which allows simple input\npipelines to be defined without an additional initialization\n(\"MakeIterator\") step.\n\nOne-shot iterators have the following limitations:\n\n* They do not support parameterization: all logic for creating the underlying\n dataset must be bundled in the `dataset_factory` function.\n* They are not resettable. 
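The OneHot placement rule above (rank N+1, new axis inserted at `axis`, out-of-range indices taking `off_value`) can be reproduced with a few lines of NumPy. A sketch with our own helper name, checked against the `[3 x 4]` example in the description:

```python
# Sketch of OneHot semantics: fill with off_value, scatter on_value at the
# valid indices along a trailing depth axis, then move that axis into place.
import numpy as np

def one_hot(indices, depth, on_value, off_value, axis=-1):
    indices = np.asarray(indices)
    out = np.full(indices.shape + (depth,), off_value, dtype=np.float32)
    valid = (indices >= 0) & (indices < depth)      # -1 etc. stay off_value
    out[np.nonzero(valid) + (indices[valid],)] = on_value
    return np.moveaxis(out, -1, axis)               # insert depth axis at `axis`

# Second example above: indices=[0, 2, -1, 1], depth=3, axis=0.
print(one_hot([0, 2, -1, 1], 3, on_value=0.0, off_value=3.0, axis=0))
# [[0. 3. 3. 3.]
#  [3. 3. 3. 0.]
#  [3. 0. 3. 3.]]   <- shape [3 x 4]
```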
Once a one-shot iterator reaches the end of its\n underlying dataset, subsequent \"IteratorGetNext\" operations on that\n iterator will always produce an `OutOfRange` error.\n\nFor greater flexibility, use \"Iterator\" and \"MakeIterator\" to define\nan iterator using an arbitrary subgraph, which may capture tensors\n(including fed values) as parameters, and which may be reset multiple\ntimes by rerunning \"MakeIterator\".", + "attributes": [ + { + "name": "dataset_factory", + "type": "function", + "description": "A function of type `() -> DT_VARIANT`, where the returned\nDT_VARIANT is a dataset." + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "A handle to the iterator that can be passed to an \"IteratorGetNext\"\nop.", + "type": 20 + } + ] + }, + { + "name": "OnesLike", + "summary": "Returns a tensor of ones with the same shape and type as x.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `int64`, `uint64`, `complex64`, `complex128`, `bool`." + } + ], + "inputs": [ + { + "name": "x", + "description": "a tensor of type T.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "description": "a tensor of the same shape and type as x but filled with ones.", + "typeAttr": "T" + } + ] + }, + { + "name": "OptimizeDataset", + "summary": "Creates a dataset by applying optimizations to `input_dataset`.", + "description": "Creates a dataset by applying optimizations to `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "optimization_configs", + "type": "string[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "optimizations", + "description": "A `tf.string` vector `tf.Tensor` identifying optimizations to use.", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "OptimizeDatasetV2", + "summary": "Creates a dataset by applying related optimizations to `input_dataset`.", + "description": "Creates a dataset by applying related optimizations to `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "optimization_configs", + "type": "string[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "optimizations_enabled", + "description": "A `tf.string` vector `tf.Tensor` identifying user enabled optimizations.", + "type": 7 + }, + { + "name": "optimizations_disabled", + "description": "A `tf.string` vector `tf.Tensor` identifying user disabled optimizations.", + "type": 7 + }, + { + "name": "optimizations_default", + "description": "A `tf.string` vector `tf.Tensor` identifying optimizations by default.", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + 
}, + { + "name": "OptionalFromValue", + "summary": "Constructs an Optional variant from a tuple of tensors.", + "attributes": [ + { + "name": "Toutput_types", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "components", + "typeListAttr": "Toutput_types" + } + ], + "outputs": [ + { + "name": "optional", + "type": 21 + } + ] + }, + { + "name": "OptionalGetValue", + "summary": "Returns the value stored in an Optional variant or raises an error if none exists.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "optional", + "type": 21 + } + ], + "outputs": [ + { + "name": "components", + "typeListAttr": "output_types" + } + ] + }, + { + "name": "OptionalHasValue", + "summary": "Returns true if and only if the given Optional variant has a value.", + "inputs": [ + { + "name": "optional", + "type": 21 + } + ], + "outputs": [ + { + "name": "has_value", + "type": 10 + } + ] + }, + { + "name": "OptionalNone", + "summary": "Creates an Optional variant with no value.", + "outputs": [ + { + "name": "optional", + "type": 21 + } + ] + }, + { + "name": "OptionsDataset", + "summary": "Creates a dataset by attaching tf.data.Options to `input_dataset`.", + "attributes": [ + { + "name": "serialized_options", + "type": "string", + "description": "A `tf.string` scalar `tf.Tensor` of serialized `tf.data.Options` protocol buffer." + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "OrderedMapClear", + "summary": "Op removes all elements in the underlying container.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ] + }, + { + "name": "OrderedMapIncompleteSize", + "summary": "Op returns the number of incomplete elements in the underlying container.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ] + }, + { + "name": "OrderedMapPeek", + "summary": "Op peeks at the values at the specified key. If the", + "description": "underlying container does not contain this key\nthis op will block until it does. 
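The OptionalFromValue / OptionalHasValue / OptionalGetValue / OptionalNone ops defined above surface in the Python API as `tf.experimental.Optional` (TF 2.x); a brief illustrative sketch, including the end-of-sequence use via `get_next_as_optional`:

```python
# Optional variants: a value-or-nothing wrapper over a tuple of tensors.
import tensorflow as tf

opt = tf.experimental.Optional.from_value((tf.constant(1), tf.constant("a")))
print(opt.has_value())        # tf.Tensor(True, ...)   (OptionalHasValue)
print(opt.get_value())        # the stored tensors     (OptionalGetValue)

empty = tf.experimental.Optional.empty(opt.element_spec)  # OptionalNone
print(empty.has_value())      # tf.Tensor(False, ...)

# Optionals let iterators signal end-of-sequence without raising OutOfRange:
it = iter(tf.data.Dataset.range(2))
nxt = it.get_next_as_optional()
while nxt.has_value():
    print(nxt.get_value().numpy())
    nxt = it.get_next_as_optional()
```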
This Op is optimized for\nperformance.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "OrderedMapSize", + "summary": "Op returns the number of elements in the underlying container.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ] + }, + { + "name": "OrderedMapStage", + "summary": "Stage (key, values) in the underlying container which behaves like an ordered", + "description": "associative container. Elements are ordered by key.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "description": "Maximum number of elements in the Staging Area. If > 0, inserts\non the container will block when the capacity is reached.", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "name": "fake_dtypes", + "type": "type[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "It is necessary to match this name to the matching Unstage Op.", + "default": "" + } + ], + "inputs": [ + { + "name": "key", + "description": "int64", + "type": 9 + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "values", + "description": "a list of tensors. `dtypes` is the list of data types that inserted values\nshould adhere to.", + "typeListAttr": "fake_dtypes" + } + ] + }, + { + "name": "OrderedMapUnstage", + "summary": "Op removes and returns the values associated with the key", + "description": "from the underlying container. If the underlying container\ndoes not contain this key, the op will block until it does.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "OrderedMapUnstageNoKey", + "summary": "Op removes and returns the (key, value) element with the smallest", + "description": "key from the underlying container. 
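The OrderedMap* family above describes a key-ordered staging area: values are staged under an int64 key, peek/unstage look up a key, and UnstageNoKey always pops the smallest key. A pure-Python sketch of those semantics (our own class; the real ops block on missing keys, which we elide):

```python
# Pure-Python model of the OrderedMap* staging semantics.
import heapq

class OrderedStagingArea:
    def __init__(self):
        self._data = {}
        self._keys = []                  # min-heap over staged keys

    def stage(self, key, values):        # OrderedMapStage
        self._data[key] = values
        heapq.heappush(self._keys, key)

    def peek(self, key):                 # OrderedMapPeek (non-blocking sketch)
        return self._data[key]

    def unstage(self, key):              # OrderedMapUnstage
        values = self._data.pop(key)
        self._keys.remove(key)
        heapq.heapify(self._keys)
        return values

    def unstage_no_key(self):            # OrderedMapUnstageNoKey: smallest key
        key = heapq.heappop(self._keys)
        return key, self._data.pop(key)

    def size(self):                      # OrderedMapSize
        return len(self._data)

area = OrderedStagingArea()
area.stage(7, "x"); area.stage(3, "y")
print(area.unstage_no_key())             # (3, 'y') -- smallest key first
```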
If the underlying container\ndoes not contain elements, the op will block until it does.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "values", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "OutfeedDequeue", + "summary": "Retrieves a single tensor from the computation outfeed.", + "description": "This operation will block indefinitely until data is available.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of elements in the tensor." + }, + { + "name": "shape", + "type": "shape", + "description": "The shape of the tensor." + }, + { + "name": "device_ordinal", + "type": "int64", + "description": "The TPU device to use. This should be -1 when the Op\nis running on a TPU device, and >= 0 when the Op is running on the CPU\ndevice.", + "default": -1 + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor that will be read from the device outfeed.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "OutfeedDequeueTuple", + "summary": "Retrieve multiple values from the computation outfeed.", + "description": "This operation will block indefinitely until data is available. Output `i`\ncorresponds to XLA tuple element `i`.", + "attributes": [ + { + "name": "dtypes", + "type": "type[]", + "description": "The element types of each element in `outputs`.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shapes of each tensor in `outputs`." + }, + { + "name": "device_ordinal", + "type": "int64", + "description": "The TPU device to use. This should be -1 when the Op\nis running on a TPU device, and >= 0 when the Op is running on the CPU\ndevice.", + "default": -1 + } + ], + "outputs": [ + { + "name": "outputs", + "description": "A list of tensors that will be read from the outfeed.", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "OutfeedDequeueTupleV2", + "summary": "Retrieve multiple values from the computation outfeed. Device ordinal is a\ntensor allowing dynamic outfeed.", + "description": "This operation will block indefinitely until data is available. Output `i`\ncorresponds to XLA tuple element `i`.", + "attributes": [ + { + "name": "dtypes", + "type": "type[]", + "description": "The element types of each element in `outputs`.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shapes of each tensor in `outputs`." + } + ], + "inputs": [ + { + "name": "device_ordinal", + "description": "An int scalar tensor, representing the TPU device to use. This should be -1 when\nthe Op is running on a TPU device, and >= 0 when the Op is running on the CPU\ndevice.", + "type": 3 + } + ], + "outputs": [ + { + "name": "outputs", + "description": "A list of tensors that will be read from the outfeed.", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "OutfeedDequeueV2", + "summary": "Retrieves a single tensor from the computation outfeed. 
Device ordinal is a\ntensor allowing dynamic outfeed.", + "description": "This operation will block indefinitely until data is available.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of elements in the tensor." + }, + { + "name": "shape", + "type": "shape", + "description": "The shape of the tensor." + } + ], + "inputs": [ + { + "name": "device_ordinal", + "description": "An int scalar tensor, representing the TPU device to use. This should be -1 when\nthe Op is running on a TPU device, and >= 0 when the Op is running on the CPU\ndevice.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor that will be read from the device outfeed.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "OutfeedEnqueue", + "summary": "Enqueue a Tensor on the computation outfeed.", + "attributes": [ + { + "name": "dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "description": "A tensor that will be inserted into the outfeed queue.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "OutfeedEnqueueTuple", + "summary": "Enqueue multiple Tensor values on the computation outfeed.", + "attributes": [ + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "inputs", + "description": "A list of tensors that will be inserted into the outfeed queue as an\nXLA tuple.", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "Pack", + "summary": "Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.", + "description": "Packs the `N` tensors in `values` into a tensor with rank one higher than each\ntensor in `values`, by packing them along the `axis` dimension.\nGiven a list of tensors of shape `(A, B, C)`;\n\nif `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.\nif `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.\nEtc.\n\nFor example:\n\n```\n# 'x' is [1, 4]\n# 'y' is [2, 5]\n# 'z' is [3, 6]\npack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.\npack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]\n```\n\nThis is the opposite of `unpack`.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "axis", + "type": "int64", + "description": "Dimension along which to pack. Negative values wrap around, so the\nvalid range is `[-(R+1), R+1)`.", + "default": 0 + } + ], + "inputs": [ + { + "name": "values", + "description": "Must be of same shape and type.", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The packed tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "Pad", + "category": "Tensor", + "summary": "Pads a tensor with zeros.", + "description": "This operation pads `input` with zeros according to the `paddings` you\nspecify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the\nrank of `input`. 
For each dimension D of `input`, `paddings[D, 0]` indicates\nhow many zeros to add before the contents of `input` in that dimension, and\n`paddings[D, 1]` indicates how many zeros to add after the contents of `input`\nin that dimension.\n\nThe padded size of each dimension D of the output is:\n\n`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\n\nFor example:\n\n```\n# 't' is [[1, 1], [2, 2]]\n# 'paddings' is [[1, 1], [2, 2]]\n# rank of 't' is 2\npad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]\n [0, 0, 1, 1, 0, 0]\n [0, 0, 2, 2, 0, 0]\n [0, 0, 0, 0, 0, 0]]\n```\n", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tpaddings", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "paddings", + "typeAttr": "Tpaddings" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "PadV2", + "summary": "Pads a tensor.", + "description": "This operation pads `input` according to the `paddings` and `constant_values`\nyou specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is\nthe rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates\nhow many padding values to add before the contents of `input` in that dimension,\nand `paddings[D, 1]` indicates how many padding values to add after the contents\nof `input` in that dimension. `constant_values` is a scalar tensor of the same\ntype as `input` that indicates the value to use for padding `input`.\n\nThe padded size of each dimension D of the output is:\n\n`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\n\nFor example:\n\n```\n# 't' is [[1, 1], [2, 2]]\n# 'paddings' is [[1, 1], [2, 2]]\n# 'constant_values' is 0\n# rank of 't' is 2\npad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]\n [0, 0, 1, 1, 0, 0]\n [0, 0, 2, 2, 0, 0]\n [0, 0, 0, 0, 0, 0]]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tpaddings", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "paddings", + "typeAttr": "Tpaddings" + }, + { + "name": "constant_values", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "PaddedBatchDataset", + "summary": "Creates a dataset that batches and pads `batch_size` elements from the input.", + "attributes": [ + { + "name": "Toutput_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "batch_size", + "description": "A scalar representing the number of elements to accumulate in a\nbatch.", + "type": 9 + }, + { + "name": "padded_shapes", + "description": "A list of int64 tensors representing the desired padded shapes\nof the corresponding output components. 
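The Pack, Pad, and PadV2 entries above correspond to `tf.stack` and `tf.pad` in the Python API; a short runnable sketch of both, including the per-dimension output-size rule `paddings[D][0] + input.shape[D] + paddings[D][1]`:

```python
import tensorflow as tf

# Pack: N rank-R tensors -> one rank-(R+1) tensor, new axis at `axis`.
x, y, z = tf.constant([1, 4]), tf.constant([2, 5]), tf.constant([3, 6])
print(tf.stack([x, y, z]).numpy())           # [[1 4] [2 5] [3 6]]
print(tf.stack([x, y, z], axis=1).numpy())   # [[1 2 3] [4 5 6]]
# tf.unstack is the inverse ("unpack" in the description).

# Pad / PadV2: paddings[D] = [before, after] for each dimension D, so the
# padded output is (1+2+1, 2+2+2) = (4, 6) here.
t = tf.constant([[1, 1], [2, 2]])
paddings = tf.constant([[1, 1], [2, 2]])
print(tf.pad(t, paddings).numpy())                     # zero fill (Pad)
print(tf.pad(t, paddings, constant_values=9).numpy())  # PadV2's constant fill
```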
These shapes may be partially\nspecified, using `-1` to indicate that a particular dimension should be\npadded to the maximum size of all batch elements.", + "numberAttr": "N", + "type": 9 + }, + { + "name": "padding_values", + "description": "A list of scalars containing the padding value to use for\neach of the outputs.", + "typeListAttr": "Toutput_types" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "PaddedBatchDatasetV2", + "summary": "Creates a dataset that batches and pads `batch_size` elements from the input.", + "attributes": [ + { + "name": "parallel_copy", + "type": "boolean", + "default": false + }, + { + "name": "Toutput_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "batch_size", + "description": "A scalar representing the number of elements to accumulate in a\nbatch.", + "type": 9 + }, + { + "name": "padded_shapes", + "description": "A list of int64 tensors representing the desired padded shapes\nof the corresponding output components. These shapes may be partially\nspecified, using `-1` to indicate that a particular dimension should be\npadded to the maximum size of all batch elements.", + "numberAttr": "N", + "type": 9 + }, + { + "name": "padding_values", + "description": "A list of scalars containing the padding value to use for\neach of the outputs.", + "typeListAttr": "Toutput_types" + }, + { + "name": "drop_remainder", + "description": "A scalar representing whether the last batch should be dropped in case its size\nis smaller than desired.", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "PaddingFIFOQueue", + "summary": "A queue that produces elements in first-in first-out order.", + "description": "Variable-size shapes are allowed by setting the corresponding shape dimensions\nto 0 in the shape attr. In this case DequeueMany will pad up to the maximum\nsize of any given element in the minibatch. See below for details.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a value.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1. 
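PaddedBatchDataset(V2) above is `Dataset.padded_batch` in Python: `-1` in `padded_shapes` (written `None` in the Python API) means "pad to the longest element in the batch". A small runnable sketch:

```python
import tensorflow as tf

# Three variable-length vectors: [1], [2, 2], [3, 3, 3].
ds = tf.data.Dataset.range(1, 4).map(lambda x: tf.fill([x], x))
batch = next(iter(ds.padded_batch(3, padded_shapes=[None])))  # pads with zeros
print(batch.numpy())
# [[1 0 0]
#  [2 2 0]
#  [3 3 3]]
# padding_values= overrides the zero fill; drop_remainder= drops short batches.
```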
In this case, the inputs' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time.", + "minimum": 0, + "default": [] + }, + { + "name": "capacity", + "type": "int64", + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "default": -1 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the queue.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "PaddingFIFOQueueV2", + "summary": "A queue that produces elements in first-in first-out order.", + "description": "Variable-size shapes are allowed by setting the corresponding shape dimensions\nto 0 in the shape attr. In this case DequeueMany will pad up to the maximum\nsize of any given element in the minibatch. See below for details.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a value.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1. 
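The same pad-on-dequeue behavior is exposed by `tf.queue.PaddingFIFOQueue`, where a `None` (i.e. `-1`) dimension in `shapes` marks the variable-size axis; a sketch, assuming a TF 2.x eager session where queue ops run directly:

```python
import tensorflow as tf

q = tf.queue.PaddingFIFOQueue(capacity=10, dtypes=[tf.int32],
                              shapes=[[None]])   # None == -1: variable length
q.enqueue(tf.constant([1], tf.int32))
q.enqueue(tf.constant([2, 2], tf.int32))
q.enqueue(tf.constant([3, 3, 3], tf.int32))
# dequeue_many pads the variable dimension with zeros up to the batch maximum:
print(q.dequeue_many(3).numpy())
# [[1 0 0]
#  [2 2 0]
#  [3 3 3]]
```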
In this case, the inputs' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time.", + "minimum": 0, + "default": [] + }, + { + "name": "capacity", + "type": "int64", + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "default": -1 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the queue.", + "type": 20 + } + ] + }, + { + "name": "ParallelBatchDataset", + "attributes": [ + { + "name": "parallel_copy", + "type": "boolean", + "default": false + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "deterministic", + "type": "string", + "default": "default" + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "batch_size", + "type": 9 + }, + { + "name": "num_parallel_calls", + "type": 9 + }, + { + "name": "drop_remainder", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ParallelConcat", + "summary": "Concatenates a list of `N` tensors along the first dimension.", + "description": "The input tensors are all required to have size 1 in the first dimension.\n\nFor example:\n\n```\n# 'x' is [[1, 4]]\n# 'y' is [[2, 5]]\n# 'z' is [[3, 6]]\nparallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.\n```\n\nThe difference between concat and parallel_concat is that concat requires all\nof the inputs be computed before the operation will begin but doesn't require\nthat the input shapes be known during graph construction. Parallel concat\nwill copy pieces of the input into the output as they become available, in\nsome situations this can provide a performance benefit.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "shape", + "type": "shape", + "description": "the final shape of the result; should be equal to the shapes of any input\nbut with the number of input values in the first dimension." + } + ], + "inputs": [ + { + "name": "values", + "description": "Tensors to be concatenated. All must have size 1 in the first dimension\nand same shape.", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The concatenated tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "ParallelDynamicStitch", + "summary": "Interleave the values from the `data` tensors into a single tensor.", + "description": "Builds a merged tensor such that\n\n```python\n merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]\n```\n\nFor example, if each `indices[m]` is scalar or vector, we have\n\n```python\n # Scalar indices:\n merged[indices[m], ...] = data[m][...]\n\n # Vector indices:\n merged[indices[m][i], ...] 
= data[m][i, ...]\n```\n\nEach `data[i].shape` must start with the corresponding `indices[i].shape`,\nand the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we\nmust have `data[i].shape = indices[i].shape + constant`. In terms of this\n`constant`, the output shape is\n\n merged.shape = [max(indices)] + constant\n\nValues may be merged in parallel, so if an index appears in both `indices[m][i]`\nand `indices[n][j]`, the result may be invalid. This differs from the normal\nDynamicStitch operator that defines the behavior in that case.\n\nFor example:\n\n```python\n indices[0] = 6\n indices[1] = [4, 1]\n indices[2] = [[5, 2], [0, 3]]\n data[0] = [61, 62]\n data[1] = [[41, 42], [11, 12]]\n data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]\n merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],\n [51, 52], [61, 62]]\n```\n\nThis method can be used to merge partitions created by `dynamic_partition`\nas illustrated in the following example:\n\n```python\n # Apply function (increments x_i) on elements for which a certain condition\n # applies (x_i != -1 in this example).\n x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])\n condition_mask=tf.not_equal(x,tf.constant(-1.))\n partitioned_data = tf.dynamic_partition(\n x, tf.cast(condition_mask, tf.int32) , 2)\n partitioned_data[1] = partitioned_data[1] + 1.0\n condition_indices = tf.dynamic_partition(\n tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)\n x = tf.dynamic_stitch(condition_indices, partitioned_data)\n # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain\n # unchanged.\n```
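The partition/stitch round trip in that description runs as-is with the public `tf.dynamic_partition` and `tf.dynamic_stitch` APIs (DynamicStitch has the same contract as ParallelDynamicStitch, but with defined behavior on overlapping indices); a compact runnable version:

```python
import tensorflow as tf

x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
mask = tf.cast(tf.not_equal(x, -1.), tf.int32)
parts = tf.dynamic_partition(x, mask, 2)              # parts[1]: x_i != -1
idx = tf.dynamic_partition(tf.range(tf.shape(x)[0]), mask, 2)
merged = tf.dynamic_stitch(idx, [parts[0], parts[1] + 1.0])
print(merged.numpy())   # [ 1.1 -1.   6.2  5.3 -1.   8.4]
```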
", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "indices", + "numberAttr": "N", + "type": 3 + }, + { + "name": "data", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "merged", + "typeAttr": "T" + } + ] + }, + { + "name": "ParallelFilterDataset", + "summary": "Creates a dataset containing elements of `input_dataset` matching `predicate`.", + "description": "The `predicate` function must return a scalar boolean and accept the\nfollowing arguments:\n\n* One tensor for each component of an element of `input_dataset`.\n* One tensor for each value in `other_arguments`.\n\nUnlike a \"FilterDataset\", which applies `predicate` sequentially, this dataset\ninvokes up to `num_parallel_calls` copies of `predicate` in parallel.\n ", + "attributes": [ + { + "name": "predicate", + "type": "function", + "description": "A function returning a scalar boolean." + }, + { + "name": "deterministic", + "type": "string", + "description": "A string indicating the op-level determinism to use. Deterministic controls\nwhether the interleave is allowed to return elements out of order if the next\nelement to be returned isn't available, but a later element is. Options are\n\"true\", \"false\", and \"default\". \"default\" indicates that determinism should be\ndecided by the `experimental_deterministic` parameter of `tf.data.Options`.", + "default": "default" + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `predicate`.", + "typeListAttr": "Targuments" + }, + { + "name": "num_parallel_calls", + "description": "The number of concurrent invocations of `predicate` that process\nelements from `input_dataset` in parallel.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ParallelInterleaveDataset", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "description": "The resulting dataset is similar to the `InterleaveDataset`, with the exception\nthat if retrieving the next value from a dataset would cause the requester to\nblock, it will skip that input dataset. This dataset is especially useful\nwhen loading data from a variable-latency datastores (e.g. HDFS, GCS), as it\nallows the training step to proceed so long as some data is available.\n\n!! WARNING !! If the `sloppy` parameter is set to `True`, the operation of this\ndataset will not be deterministic!\n\nThis dataset has been superseded by `ParallelInterleaveDatasetV2`. New code\nshould use `ParallelInterleaveDatasetV2`.\n\nThe Python API `tf.data.experimental.parallel_interleave` creates instances of\nthis op. `tf.data.experimental.parallel_interleave` is a deprecated API.", + "attributes": [ + { + "name": "f", + "type": "function", + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`." 
+ }, + { + "name": "Targuments", + "type": "type[]", + "description": "Types of the elements of `other_arguments`.", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "Dataset that produces a stream of arguments for the function `f`.", + "type": 21 + }, + { + "name": "other_arguments", + "description": "Additional arguments to pass to `f` beyond those produced by `input_dataset`.\nEvaluated once when the dataset is instantiated.", + "typeListAttr": "Targuments" + }, + { + "name": "cycle_length", + "description": "Number of datasets (each created by applying `f` to the elements of\n`input_dataset`) among which the `ParallelInterleaveDataset` will cycle in a\nround-robin fashion.", + "type": 9 + }, + { + "name": "block_length", + "description": "Number of elements at a time to produce from each interleaved invocation of a\ndataset returned by `f`.", + "type": 9 + }, + { + "name": "sloppy", + "description": "If `True`, return elements as they become available, even if that means returning\nthese elements in a non-deterministic order. Sloppy operation may result in better\nperformance in the presence of stragglers, but the dataset will still block if\nall of its open streams are blocked.\nIf `False`, always return elements in a deterministic order.", + "type": 10 + }, + { + "name": "buffer_output_elements", + "description": "The number of elements each iterator being interleaved should buffer (similar\nto the `.prefetch()` transformation for each interleaved iterator).", + "type": 9 + }, + { + "name": "prefetch_input_elements", + "description": "Determines the number of iterators to prefetch, allowing buffers to warm up and\ndata to be pre-fetched without blocking the main thread.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ParallelInterleaveDatasetV2", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "description": "The resulting dataset is similar to the `InterleaveDataset`, except that the\ndataset will fetch records from the interleaved datasets in parallel.\n\nThe `tf.data` Python API creates instances of this op from\n`Dataset.interleave()` when the `num_parallel_calls` parameter of that method\nis set to any value other than `None`.\n\nBy default, the output of this dataset will be deterministic, which may result\nin the dataset blocking if the next data item to be returned isn't available.\nIn order to avoid head-of-line blocking, one can set the\n`experimental_deterministic` parameter of `tf.data.Options` to `False`,\nwhich can improve performance at the expense of non-determinism.", + "attributes": [ + { + "name": "f", + "type": "function", + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`." 
+ }, + { + "name": "Targuments", + "type": "type[]", + "description": "Types of the elements of `other_arguments`.", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "sloppy", + "type": "boolean", + "default": false + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "Dataset that produces a stream of arguments for the function `f`.", + "type": 21 + }, + { + "name": "other_arguments", + "description": "Additional arguments to pass to `f` beyond those produced by `input_dataset`.\nEvaluated once when the dataset is instantiated.", + "typeListAttr": "Targuments" + }, + { + "name": "cycle_length", + "description": "Number of datasets (each created by applying `f` to the elements of\n`input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a\nround-robin fashion.", + "type": 9 + }, + { + "name": "block_length", + "description": "Number of elements at a time to produce from each interleaved invocation of a\ndataset returned by `f`.", + "type": 9 + }, + { + "name": "num_parallel_calls", + "description": "Determines the number of threads that should be used for fetching data from\ninput datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE`\nconstant can be used to indicate that the level of parallelism should be autotuned.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ParallelInterleaveDatasetV3", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "description": "The resulting dataset is similar to the `InterleaveDataset`, except that the\ndataset will fetch records from the interleaved datasets in parallel.\n\nThe `tf.data` Python API creates instances of this op from\n`Dataset.interleave()` when the `num_parallel_calls` parameter of that method\nis set to any value other than `None`.\n\nBy default, the output of this dataset will be deterministic, which may result\nin the dataset blocking if the next data item to be returned isn't available.\nIn order to avoid head-of-line blocking, one can either set the `deterministic`\nattribute to \"false\", or leave it as \"default\" and set the\n`experimental_deterministic` parameter of `tf.data.Options` to `False`.\nThis can improve performance at the expense of non-determinism.", + "attributes": [ + { + "name": "f", + "type": "function", + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`." + }, + { + "name": "deterministic", + "type": "string", + "description": "A string indicating the op-level determinism to use. Deterministic controls\nwhether the interleave is allowed to return elements out of order if the next\nelement to be returned isn't available, but a later element is. Options are\n\"true\", \"false\", and \"default\". 
\"default\" indicates that determinism should be\ndecided by the `experimental_deterministic` parameter of `tf.data.Options`.", + "default": "default" + }, + { + "name": "Targuments", + "type": "type[]", + "description": "Types of the elements of `other_arguments`.", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "Dataset that produces a stream of arguments for the function `f`.", + "type": 21 + }, + { + "name": "other_arguments", + "description": "Additional arguments to pass to `f` beyond those produced by `input_dataset`.\nEvaluated once when the dataset is instantiated.", + "typeListAttr": "Targuments" + }, + { + "name": "cycle_length", + "description": "Number of datasets (each created by applying `f` to the elements of\n`input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a\nround-robin fashion.", + "type": 9 + }, + { + "name": "block_length", + "description": "Number of elements at a time to produce from each interleaved invocation of a\ndataset returned by `f`.", + "type": 9 + }, + { + "name": "num_parallel_calls", + "description": "Determines the number of threads that should be used for fetching data from\ninput datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE`\nconstant can be used to indicate that the level of parallelism should be autotuned.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ParallelInterleaveDatasetV4", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "description": "The resulting dataset is similar to the `InterleaveDataset`, except that the\ndataset will fetch records from the interleaved datasets in parallel.\n\nThe `tf.data` Python API creates instances of this op from\n`Dataset.interleave()` when the `num_parallel_calls` parameter of that method\nis set to any value other than `None`.\n\nBy default, the output of this dataset will be deterministic, which may result\nin the dataset blocking if the next data item to be returned isn't available.\nIn order to avoid head-of-line blocking, one can either set the `deterministic`\nattribute to \"false\", or leave it as \"default\" and set the\n`experimental_deterministic` parameter of `tf.data.Options` to `False`.\nThis can improve performance at the expense of non-determinism.", + "attributes": [ + { + "name": "f", + "type": "function", + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`." + }, + { + "name": "deterministic", + "type": "string", + "description": "A string indicating the op-level determinism to use. Deterministic controls\nwhether the interleave is allowed to return elements out of order if the next\nelement to be returned isn't available, but a later element is. Options are\n\"true\", \"false\", and \"default\". 
\"default\" indicates that determinism should be\ndecided by the `experimental_deterministic` parameter of `tf.data.Options`.", + "default": "default" + }, + { + "name": "Targuments", + "type": "type[]", + "description": "Types of the elements of `other_arguments`.", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "Dataset that produces a stream of arguments for the function `f`.", + "type": 21 + }, + { + "name": "other_arguments", + "description": "Additional arguments to pass to `f` beyond those produced by `input_dataset`.\nEvaluated once when the dataset is instantiated.", + "typeListAttr": "Targuments" + }, + { + "name": "cycle_length", + "description": "Number of datasets (each created by applying `f` to the elements of\n`input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a\nround-robin fashion.", + "type": 9 + }, + { + "name": "block_length", + "description": "Number of elements at a time to produce from each interleaved invocation of a\ndataset returned by `f`.", + "type": 9 + }, + { + "name": "buffer_output_elements", + "description": "The number of elements each iterator being interleaved should buffer (similar\nto the `.prefetch()` transformation for each interleaved iterator).", + "type": 9 + }, + { + "name": "prefetch_input_elements", + "description": "Determines the number of iterators to prefetch, allowing buffers to warm up and\ndata to be pre-fetched without blocking the main thread.", + "type": 9 + }, + { + "name": "num_parallel_calls", + "description": "Determines the number of threads that should be used for fetching data from\ninput datasets in parallel. 
The Python API `tf.data.experimental.AUTOTUNE`\nconstant can be used to indicate that the level of parallelism should be autotuned.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ParallelMapDataset", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "description": "Unlike a \"MapDataset\", which applies `f` sequentially, this dataset invokes up\nto `num_parallel_calls` copies of `f` in parallel.", + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "use_inter_op_parallelism", + "type": "boolean", + "default": true + }, + { + "name": "sloppy", + "type": "boolean", + "default": false + }, + { + "name": "preserve_cardinality", + "type": "boolean", + "default": false + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "name": "num_parallel_calls", + "description": "The number of concurrent invocations of `f` that process\nelements from `input_dataset` in parallel.", + "type": 3 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ParallelMapDatasetV2", + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`.", + "description": "Unlike a \"MapDataset\", which applies `f` sequentially, this dataset invokes up\nto `num_parallel_calls` copies of `f` in parallel.", + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "use_inter_op_parallelism", + "type": "boolean", + "default": true + }, + { + "name": "deterministic", + "type": "string", + "default": "default" + }, + { + "name": "preserve_cardinality", + "type": "boolean", + "default": false + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "name": "num_parallel_calls", + "description": "The number of concurrent invocations of `f` that process\nelements from `input_dataset` in parallel.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ParameterizedTruncatedNormal", + "summary": "Outputs random values from a normal distribution. The parameters may each be a", + "description": "scalar which applies to the entire output, or a vector of length shape[0] which\nstores the parameters for each batch.", + "attributes": [ + { + "name": "seed", + "type": "int64", + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." 
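The ParameterizedTruncatedNormal semantics defined here (per-batch mean/stdev/minval/maxval vectors of length `shape[0]`) can be modeled with simple rejection sampling; a NumPy sketch with our own helper name (the real kernel handles extreme tail regions more cleverly):

```python
import numpy as np

def parameterized_truncated_normal(shape, means, stdevs, minvals, maxvals,
                                   rng=np.random.default_rng()):
    num_batches, samples = shape
    means = np.broadcast_to(means, (num_batches,))
    stdevs = np.broadcast_to(stdevs, (num_batches,))
    minvals = np.broadcast_to(minvals, (num_batches,))
    maxvals = np.broadcast_to(maxvals, (num_batches,))
    out = np.empty(shape)
    for i in range(num_batches):          # row i uses batch i's parameters
        n = 0
        while n < samples:
            draw = rng.normal(means[i], stdevs[i], size=samples)
            keep = draw[(draw >= minvals[i]) & (draw <= maxvals[i])]
            take = min(len(keep), samples - n)
            out[i, n:n + take] = keep[:take]
            n += take
    return out

s = parameterized_truncated_normal(
    (2, 10000), means=[0.0, 5.0], stdevs=[1.0, 2.0],
    minvals=[-1.0, 4.0], maxvals=[1.0, np.inf])   # maxval may be +infinity
print(s.min(axis=1), s.max(axis=1))               # respects per-batch cutoffs
```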
+ }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor. Batches are indexed by the 0th dimension.", + "typeAttr": "T" + }, + { + "name": "means", + "description": "The mean parameter of each batch.", + "typeAttr": "dtype" + }, + { + "name": "stdevs", + "description": "The standard deviation parameter of each batch. Must be greater than 0.", + "typeAttr": "dtype" + }, + { + "name": "minvals", + "description": "The minimum cutoff. May be -infinity.", + "typeAttr": "dtype" + }, + { + "name": "maxvals", + "description": "The maximum cutoff. May be +infinity, and must be more than the minval\nfor each batch.", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "A matrix of shape num_batches x samples_per_batch, filled with random\ntruncated normal values using the parameters for each row.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ParseExample", + "summary": "Transforms a vector of brain.Example protos (as strings) into typed tensors.", + "attributes": [ + { + "name": "Nsparse", + "type": "int64", + "minimum": 0 + }, + { + "name": "Ndense", + "type": "int64", + "minimum": 0 + }, + { + "name": "sparse_types", + "type": "type[]", + "description": "A list of Nsparse types; the data types of data in each Feature\ngiven in sparse_keys.\nCurrently the ParseExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0 + }, + { + "name": "Tdense", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `float32`, `int64`, `string`." + }, + { + "name": "dense_shapes", + "type": "shape[]", + "description": "A list of Ndense shapes; the shapes of data in each Feature\ngiven in dense_keys.\nThe number of elements in the Feature corresponding to dense_key[j]\nmust always equal dense_shapes[j].NumEntries().\nIf dense_shapes[j] == (D0, D1, ..., DN) then the shape of output\nTensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):\nThe dense outputs are just the inputs row-stacked by batch.\nThis works for dense_shapes[j] = (-1, D1, ..., DN). In this case\nthe shape of the output Tensor dense_values[j] will be\n(|serialized|, M, D1, .., DN), where M is the maximum number of blocks\nof elements of length D1 * .... * DN, across all minibatch entries\nin the input. Any minibatch entry with less than M blocks of elements of\nlength D1 * ... * DN will be padded with the corresponding default_value\nscalar element along the second dimension.", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "serialized", + "description": "A vector containing a batch of binary serialized Example protos.", + "type": 7 + }, + { + "name": "names", + "description": "A vector containing the names of the serialized protos.\nMay contain, for example, table key (descriptive) names for the\ncorresponding serialized protos. 
These are purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty vector if no names are available.\nIf non-empty, this vector must be the same length as \"serialized\".", + "type": 7 + }, + { + "name": "sparse_keys", + "description": "A list of Nsparse string Tensors (scalars).\nThe keys expected in the Examples' features associated with sparse values.", + "numberAttr": "Nsparse", + "type": 7 + }, + { + "name": "dense_keys", + "description": "A list of Ndense string Tensors (scalars).\nThe keys expected in the Examples' features associated with dense values.", + "numberAttr": "Ndense", + "type": 7 + }, + { + "name": "dense_defaults", + "description": "A list of Ndense Tensors (some may be empty).\ndense_defaults[j] provides default values\nwhen the example's feature_map lacks dense_key[j]. If an empty Tensor is\nprovided for dense_defaults[j], then the Feature dense_keys[j] is required.\nThe input type is inferred from dense_defaults[j], even when it's empty.\nIf dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,\nthen the shape of dense_defaults[j] must match that of dense_shapes[j].\nIf dense_shapes[j] has an undefined major dimension (variable strides dense\nfeature), dense_defaults[j] must contain a single element:\nthe padding element.", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "sparse_indices", + "numberAttr": "Nsparse", + "type": 9 + }, + { + "name": "sparse_values", + "typeListAttr": "sparse_types" + }, + { + "name": "sparse_shapes", + "numberAttr": "Nsparse", + "type": 9 + }, + { + "name": "dense_values", + "typeListAttr": "Tdense" + } + ] + }, + { + "name": "ParseExampleDataset", + "summary": "Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.", + "attributes": [ + { + "name": "sparse_keys", + "type": "string[]", + "description": "A list of string keys in the examples features.\nThe results for these keys will be returned as `SparseTensor` objects.", + "minimum": 0 + }, + { + "name": "dense_keys", + "type": "string[]", + "description": "A list of Ndense string Tensors (scalars).\nThe keys expected in the Examples features associated with dense values.", + "minimum": 0 + }, + { + "name": "sparse_types", + "type": "type[]", + "description": "A list of `DTypes` of the same length as `sparse_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported. Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0 + }, + { + "name": "Tdense", + "type": "type[]", + "description": "A list of DTypes of the same length as `dense_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported.\n Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0 + }, + { + "name": "dense_shapes", + "type": "shape[]", + "description": "List of tuples with the same length as `dense_keys`.\nThe shape of the data for each dense feature referenced by `dense_keys`.\nRequired for any input tensors identified by `dense_keys`. Must be\neither fully defined, or may contain an unknown first dimension.\nAn unknown first dimension means the feature is treated as having\na variable number of blocks, and the output shape along this dimension\nis considered unknown at graph build time. 
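As a usage sketch for the `ParseExample` schema above (assuming TF 2.x): `tf.io.parse_example` builds the sparse/dense key lists, `Tdense`, and `dense_shapes` from a feature-spec dict, so the raw op rarely needs to be called directly.

```python
import tensorflow as tf

# Build one serialized tf.train.Example to parse back.
ex = tf.train.Example(features=tf.train.Features(feature={
    "age":  tf.train.Feature(int64_list=tf.train.Int64List(value=[42])),
    "tags": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"a", b"b"])),
}))
serialized = tf.constant([ex.SerializeToString()])  # a batch of 1

parsed = tf.io.parse_example(serialized, features={
    # Dense feature -> dense_keys/Tdense/dense_shapes, with a default.
    "age":  tf.io.FixedLenFeature([1], tf.int64, default_value=[0]),
    # Variable-length feature -> sparse_keys/sparse_types.
    "tags": tf.io.VarLenFeature(tf.string),
})
print(parsed["age"])   # dense Tensor, shape [1, 1]
print(parsed["tags"])  # SparseTensor (indices/values/dense_shape outputs)
```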
Padding is applied for\nminibatch elements smaller than the maximum number of blocks for the\ngiven feature along this dimension.", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "description": "The type list for the return values.", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "description": "The list of shapes being produced.", + "minimum": 1 + }, + { + "name": "sloppy", + "type": "boolean", + "default": false + }, + { + "name": "ragged_keys", + "type": "string[]", + "minimum": 0, + "default": [] + }, + { + "name": "ragged_value_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "default": [] + }, + { + "name": "ragged_split_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `int32`, `int64`.", + "default": [] + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "num_parallel_calls", + "type": 9 + }, + { + "name": "dense_defaults", + "description": "A dict mapping string keys to `Tensor`s.\nThe keys of the dict must match the dense_keys of the feature.", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ParseExampleDatasetV2", + "summary": "Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.", + "attributes": [ + { + "name": "sparse_keys", + "type": "string[]", + "description": "A list of string keys in the examples features.\nThe results for these keys will be returned as `SparseTensor` objects.", + "minimum": 0 + }, + { + "name": "dense_keys", + "type": "string[]", + "description": "A list of Ndense string Tensors (scalars).\nThe keys expected in the Examples features associated with dense values.", + "minimum": 0 + }, + { + "name": "sparse_types", + "type": "type[]", + "description": "A list of `DTypes` of the same length as `sparse_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported. Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0 + }, + { + "name": "Tdense", + "type": "type[]", + "description": "A list of DTypes of the same length as `dense_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported.\n Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0 + }, + { + "name": "dense_shapes", + "type": "shape[]", + "description": "List of tuples with the same length as `dense_keys`.\nThe shape of the data for each dense feature referenced by `dense_keys`.\nRequired for any input tensors identified by `dense_keys`. Must be\neither fully defined, or may contain an unknown first dimension.\nAn unknown first dimension means the feature is treated as having\na variable number of blocks, and the output shape along this dimension\nis considered unknown at graph build time. 
Padding is applied for\nminibatch elements smaller than the maximum number of blocks for the\ngiven feature along this dimension.", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "description": "The type list for the return values.", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "description": "The list of shapes being produced.", + "minimum": 1 + }, + { + "name": "deterministic", + "type": "string", + "description": "A string indicating the op-level determinism to use. Deterministic controls\nwhether the dataset is allowed to return elements out of order if the next\nelement to be returned isn't available, but a later element is. Options are\n\"true\", \"false\", and \"default\". \"default\" indicates that determinism should be\ndecided by the `experimental_deterministic` parameter of `tf.data.Options`.", + "default": "default" + }, + { + "name": "ragged_keys", + "type": "string[]", + "minimum": 0, + "default": [] + }, + { + "name": "ragged_value_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "default": [] + }, + { + "name": "ragged_split_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `int32`, `int64`.", + "default": [] + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "num_parallel_calls", + "type": 9 + }, + { + "name": "dense_defaults", + "description": "A dict mapping string keys to `Tensor`s.\nThe keys of the dict must match the dense_keys of the feature.", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ParseExampleV2", + "summary": "Transforms a vector of tf.Example protos (as strings) into typed tensors.", + "attributes": [ + { + "name": "Tdense", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `float32`, `int64`, `string`." + }, + { + "name": "num_sparse", + "type": "int64", + "description": "The number of sparse keys.", + "minimum": 0 + }, + { + "name": "sparse_types", + "type": "type[]", + "description": "A list of `num_sparse` types; the data types of data in each Feature\ngiven in sparse_keys.\nCurrently the ParseExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0 + }, + { + "name": "ragged_value_types", + "type": "type[]", + "description": "A list of `num_ragged` types; the data types of data in each Feature\ngiven in ragged_keys (where `num_ragged = sparse_keys.size()`).\nCurrently the ParseExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0 + }, + { + "name": "ragged_split_types", + "type": "type[]", + "description": "A list of `num_ragged` types; the data types of row_splits in each Feature\ngiven in ragged_keys (where `num_ragged = sparse_keys.size()`).\nMay be DT_INT32 or DT_INT64. 
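The two `ParseExampleDataset` entries fuse Example parsing into a `tf.data` pipeline; batching first lets a single op parse a whole minibatch. A sketch, assuming TF 2.x and a `records` dataset of serialized protos (e.g. a `tf.data.TFRecordDataset`); I believe `tf.data.experimental.parse_example_dataset` accepts the `deterministic` argument in recent versions, but that is worth verifying:

```python
import tensorflow as tf

feature_spec = {
    "age":  tf.io.FixedLenFeature([1], tf.int64, default_value=[0]),
    "tags": tf.io.VarLenFeature(tf.string),
}

# `records` is assumed to be a dataset of serialized Example protos.
parsed_ds = (records
             .batch(32)
             .apply(tf.data.experimental.parse_example_dataset(
                 feature_spec,
                 num_parallel_calls=tf.data.AUTOTUNE,
                 deterministic=True)))  # V2 carries the "deterministic" attr
```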
Must be one of the following: `int32`, `int64`.", + "minimum": 0 + }, + { + "name": "dense_shapes", + "type": "shape[]", + "description": "A list of `num_dense` shapes; the shapes of data in each Feature\ngiven in dense_keys (where `num_dense = dense_keys.size()`).\nThe number of elements in the Feature corresponding to dense_key[j]\nmust always equal dense_shapes[j].NumEntries().\nIf dense_shapes[j] == (D0, D1, ..., DN) then the shape of output\nTensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):\nThe dense outputs are just the inputs row-stacked by batch.\nThis works for dense_shapes[j] = (-1, D1, ..., DN). In this case\nthe shape of the output Tensor dense_values[j] will be\n(|serialized|, M, D1, .., DN), where M is the maximum number of blocks\nof elements of length D1 * .... * DN, across all minibatch entries\nin the input. Any minibatch entry with less than M blocks of elements of\nlength D1 * ... * DN will be padded with the corresponding default_value\nscalar element along the second dimension.", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "serialized", + "description": "A scalar or vector containing binary serialized Example protos.", + "type": 7 + }, + { + "name": "names", + "description": "A tensor containing the names of the serialized protos.\nCorresponds 1:1 with the `serialized` tensor.\nMay contain, for example, table key (descriptive) names for the\ncorresponding serialized protos. These are purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty vector if no names are available.\nIf non-empty, this tensor must have the same shape as \"serialized\".", + "type": 7 + }, + { + "name": "sparse_keys", + "description": "Vector of strings.\nThe keys expected in the Examples' features associated with sparse values.", + "type": 7 + }, + { + "name": "dense_keys", + "description": "Vector of strings.\nThe keys expected in the Examples' features associated with dense values.", + "type": 7 + }, + { + "name": "ragged_keys", + "description": "Vector of strings.\nThe keys expected in the Examples' features associated with ragged values.", + "type": 7 + }, + { + "name": "dense_defaults", + "description": "A list of Tensors (some may be empty). Corresponds 1:1 with `dense_keys`.\ndense_defaults[j] provides default values\nwhen the example's feature_map lacks dense_key[j]. 
If an empty Tensor is\nprovided for dense_defaults[j], then the Feature dense_keys[j] is required.\nThe input type is inferred from dense_defaults[j], even when it's empty.\nIf dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,\nthen the shape of dense_defaults[j] must match that of dense_shapes[j].\nIf dense_shapes[j] has an undefined major dimension (variable strides dense\nfeature), dense_defaults[j] must contain a single element:\nthe padding element.", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "sparse_indices", + "numberAttr": "num_sparse", + "type": 9 + }, + { + "name": "sparse_values", + "typeListAttr": "sparse_types" + }, + { + "name": "sparse_shapes", + "numberAttr": "num_sparse", + "type": 9 + }, + { + "name": "dense_values", + "typeListAttr": "Tdense" + }, + { + "name": "ragged_values", + "typeListAttr": "ragged_value_types" + }, + { + "name": "ragged_row_splits", + "typeListAttr": "ragged_split_types" + } + ] + }, + { + "name": "ParseSequenceExample", + "summary": "Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors.", + "attributes": [ + { + "name": "feature_list_dense_missing_assumed_empty", + "type": "string[]", + "description": "A vector listing the\nFeatureList keys which may be missing from the SequenceExamples. If the\nassociated FeatureList is missing, it is treated as empty. By default,\nany FeatureList not listed in this vector must exist in the SequenceExamples.", + "minimum": 0 + }, + { + "name": "context_sparse_keys", + "type": "string[]", + "description": "A list of Ncontext_sparse string Tensors (scalars).\nThe keys expected in the Examples' features associated with context_sparse\nvalues.", + "minimum": 0 + }, + { + "name": "context_dense_keys", + "type": "string[]", + "description": "A list of Ncontext_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples' context features associated with\ndense values.", + "minimum": 0 + }, + { + "name": "feature_list_sparse_keys", + "type": "string[]", + "description": "A list of Nfeature_list_sparse string Tensors\n(scalars). The keys expected in the FeatureLists associated with sparse\nvalues.", + "minimum": 0 + }, + { + "name": "feature_list_dense_keys", + "type": "string[]", + "description": "A list of Nfeature_list_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples' feature_lists associated\nwith lists of dense values.", + "minimum": 0 + }, + { + "name": "Ncontext_sparse", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "Ncontext_dense", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "Nfeature_list_sparse", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "Nfeature_list_dense", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "context_sparse_types", + "type": "type[]", + "description": "A list of Ncontext_sparse types; the data types of data in\neach context Feature given in context_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). 
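`ParseExampleV2` adds the ragged outputs described above; from Python, a `tf.io.RaggedFeature` in the feature spec populates `ragged_keys`, `ragged_value_types`, and `ragged_split_types`. A minimal sketch (assuming TF 2.1+):

```python
import tensorflow as tf

ex = tf.train.Example(features=tf.train.Features(feature={
    "tokens": tf.train.Feature(int64_list=tf.train.Int64List(value=[3, 1, 4])),
}))
serialized = tf.constant([ex.SerializeToString(),
                          ex.SerializeToString()])

# RaggedFeature routes through the ragged_* attrs and comes back
# as a tf.RaggedTensor (values + row_splits outputs).
parsed = tf.io.parse_example(serialized, {
    "tokens": tf.io.RaggedFeature(tf.int64),
})
print(parsed["tokens"])  # <tf.RaggedTensor [[3, 1, 4], [3, 1, 4]]>
```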
Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "default": [] + }, + { + "name": "Tcontext_dense", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "default": [] + }, + { + "name": "feature_list_dense_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "default": [] + }, + { + "name": "context_dense_shapes", + "type": "shape[]", + "description": "A list of Ncontext_dense shapes; the shapes of data in\neach context Feature given in context_dense_keys.\nThe number of elements in the Feature corresponding to context_dense_key[j]\nmust always equal context_dense_shapes[j].NumEntries().\nThe shape of context_dense_values[j] will match context_dense_shapes[j].", + "minimum": 0, + "default": [] + }, + { + "name": "feature_list_sparse_types", + "type": "type[]", + "description": "A list of Nfeature_list_sparse types; the data types\nof data in each FeatureList given in feature_list_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "default": [] + }, + { + "name": "feature_list_dense_shapes", + "type": "shape[]", + "description": "A list of Nfeature_list_dense shapes; the shapes of\ndata in each FeatureList given in feature_list_dense_keys.\nThe shape of each Feature in the FeatureList corresponding to\nfeature_list_dense_key[j] must always equal\nfeature_list_dense_shapes[j].NumEntries().", + "minimum": 0, + "default": [] + } + ], + "inputs": [ + { + "name": "serialized", + "description": "A vector containing binary serialized SequenceExample protos.", + "type": 7 + }, + { + "name": "debug_name", + "description": "A vector containing the names of the serialized protos.\nMay contain, for example, table key (descriptive) name for the\ncorresponding serialized proto. This is purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty vector if no name is available.", + "type": 7 + }, + { + "name": "context_dense_defaults", + "description": "A list of Ncontext_dense Tensors (some may be empty).\ncontext_dense_defaults[j] provides default values\nwhen the SequenceExample's context map lacks context_dense_key[j].\nIf an empty Tensor is provided for context_dense_defaults[j],\nthen the Feature context_dense_keys[j] is required.\nThe input type is inferred from context_dense_defaults[j], even when it's\nempty. 
If context_dense_defaults[j] is not empty, its shape must match\ncontext_dense_shapes[j].", + "typeListAttr": "Tcontext_dense" + } + ], + "outputs": [ + { + "name": "context_sparse_indices", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_sparse_values", + "typeListAttr": "context_sparse_types" + }, + { + "name": "context_sparse_shapes", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_dense_values", + "typeListAttr": "Tcontext_dense" + }, + { + "name": "feature_list_sparse_indices", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_sparse_values", + "typeListAttr": "feature_list_sparse_types" + }, + { + "name": "feature_list_sparse_shapes", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_dense_values", + "typeListAttr": "feature_list_dense_types" + }, + { + "name": "feature_list_dense_lengths", + "numberAttr": "Nfeature_list_dense", + "type": 9 + } + ] + }, + { + "name": "ParseSequenceExampleV2", + "summary": "Transforms a vector of tf.io.SequenceExample protos (as strings) into\ntyped tensors.", + "attributes": [ + { + "name": "Ncontext_sparse", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "Tcontext_dense", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "default": [] + }, + { + "name": "context_sparse_types", + "type": "type[]", + "description": "A list of Ncontext_sparse types; the data types of data in\neach context Feature given in context_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "default": [] + }, + { + "name": "context_ragged_value_types", + "type": "type[]", + "description": "RaggedTensor.value dtypes for the ragged context features. Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "default": [] + }, + { + "name": "context_ragged_split_types", + "type": "type[]", + "description": "RaggedTensor.row_split dtypes for the ragged context features. Must be one of the following: `int32`, `int64`.", + "minimum": 0, + "default": [] + }, + { + "name": "context_dense_shapes", + "type": "shape[]", + "description": "A list of Ncontext_dense shapes; the shapes of data in\neach context Feature given in context_dense_keys.\nThe number of elements in the Feature corresponding to context_dense_key[j]\nmust always equal context_dense_shapes[j].NumEntries().\nThe shape of context_dense_values[j] will match context_dense_shapes[j].", + "minimum": 0, + "default": [] + }, + { + "name": "Nfeature_list_sparse", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "Nfeature_list_dense", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "feature_list_dense_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "default": [] + }, + { + "name": "feature_list_sparse_types", + "type": "type[]", + "description": "A list of Nfeature_list_sparse types; the data types\nof data in each FeatureList given in feature_list_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). 
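A usage sketch for the `ParseSequenceExample` schema above (assuming TF 2.x): `tf.io.parse_sequence_example` splits the feature spec into context features and per-timestep feature lists, and returns the dense feature-list lengths as a third result.

```python
import tensorflow as tf

seq = tf.train.SequenceExample(
    context=tf.train.Features(feature={
        "id": tf.train.Feature(int64_list=tf.train.Int64List(value=[7])),
    }),
    feature_lists=tf.train.FeatureLists(feature_list={
        "frames": tf.train.FeatureList(feature=[
            tf.train.Feature(float_list=tf.train.FloatList(value=[0.1, 0.2])),
            tf.train.Feature(float_list=tf.train.FloatList(value=[0.3, 0.4])),
        ]),
    }))
serialized = tf.constant([seq.SerializeToString()])  # batch of 1

context, sequences, lengths = tf.io.parse_sequence_example(
    serialized,
    context_features={"id": tf.io.FixedLenFeature([], tf.int64)},
    sequence_features={"frames": tf.io.FixedLenSequenceFeature([2], tf.float32)})
print(context["id"], sequences["frames"].shape, lengths["frames"])
```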
Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "default": [] + }, + { + "name": "feature_list_ragged_value_types", + "type": "type[]", + "description": "RaggedTensor.value dtypes for the ragged FeatureList features. Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "default": [] + }, + { + "name": "feature_list_ragged_split_types", + "type": "type[]", + "description": "RaggedTensor.row_split dtypes for the ragged FeatureList features. Must be one of the following: `int32`, `int64`.", + "minimum": 0, + "default": [] + }, + { + "name": "feature_list_dense_shapes", + "type": "shape[]", + "description": "A list of Nfeature_list_dense shapes; the shapes of\ndata in each FeatureList given in feature_list_dense_keys.\nThe shape of each Feature in the FeatureList corresponding to\nfeature_list_dense_key[j] must always equal\nfeature_list_dense_shapes[j].NumEntries().", + "minimum": 0, + "default": [] + } + ], + "inputs": [ + { + "name": "serialized", + "description": "A scalar or vector containing binary serialized SequenceExample protos.", + "type": 7 + }, + { + "name": "debug_name", + "description": "A scalar or vector containing the names of the serialized protos.\nMay contain, for example, table key (descriptive) name for the\ncorresponding serialized proto. This is purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty vector if no name is available.", + "type": 7 + }, + { + "name": "context_sparse_keys", + "description": "The keys expected in the Examples' features associated with context_sparse\nvalues.", + "type": 7 + }, + { + "name": "context_dense_keys", + "description": "The keys expected in the SequenceExamples' context features associated with\ndense values.", + "type": 7 + }, + { + "name": "context_ragged_keys", + "description": "The keys expected in the Examples' features associated with context_ragged\nvalues.", + "type": 7 + }, + { + "name": "feature_list_sparse_keys", + "description": "The keys expected in the FeatureLists associated with sparse values.", + "type": 7 + }, + { + "name": "feature_list_dense_keys", + "description": "The keys expected in the SequenceExamples' feature_lists associated\nwith lists of dense values.", + "type": 7 + }, + { + "name": "feature_list_ragged_keys", + "description": "The keys expected in the FeatureLists associated with ragged values.", + "type": 7 + }, + { + "name": "feature_list_dense_missing_assumed_empty", + "description": "A vector corresponding 1:1 with feature_list_dense_keys, indicating which\nfeatures may be missing from the SequenceExamples. If the associated\nFeatureList is missing, it is treated as empty.", + "type": 10 + }, + { + "name": "context_dense_defaults", + "description": "A list of Ncontext_dense Tensors (some may be empty).\ncontext_dense_defaults[j] provides default values\nwhen the SequenceExample's context map lacks context_dense_key[j].\nIf an empty Tensor is provided for context_dense_defaults[j],\nthen the Feature context_dense_keys[j] is required.\nThe input type is inferred from context_dense_defaults[j], even when it's\nempty. 
If context_dense_defaults[j] is not empty, its shape must match\ncontext_dense_shapes[j].", + "typeListAttr": "Tcontext_dense" + } + ], + "outputs": [ + { + "name": "context_sparse_indices", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_sparse_values", + "typeListAttr": "context_sparse_types" + }, + { + "name": "context_sparse_shapes", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_dense_values", + "typeListAttr": "Tcontext_dense" + }, + { + "name": "context_ragged_values", + "typeListAttr": "context_ragged_value_types" + }, + { + "name": "context_ragged_row_splits", + "typeListAttr": "context_ragged_split_types" + }, + { + "name": "feature_list_sparse_indices", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_sparse_values", + "typeListAttr": "feature_list_sparse_types" + }, + { + "name": "feature_list_sparse_shapes", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_dense_values", + "typeListAttr": "feature_list_dense_types" + }, + { + "name": "feature_list_dense_lengths", + "numberAttr": "Nfeature_list_dense", + "type": 9 + }, + { + "name": "feature_list_ragged_values", + "typeListAttr": "feature_list_ragged_value_types" + }, + { + "name": "feature_list_ragged_outer_splits", + "typeListAttr": "feature_list_ragged_split_types" + }, + { + "name": "feature_list_ragged_inner_splits", + "typeListAttr": "feature_list_ragged_split_types" + } + ] + }, + { + "name": "ParseSingleExample", + "summary": "Transforms a tf.Example proto (as a string) into typed tensors.", + "attributes": [ + { + "name": "num_sparse", + "type": "int64", + "description": "The number of sparse features to be parsed from the example. This\nmust match the lengths of `sparse_keys` and `sparse_types`.", + "minimum": 0 + }, + { + "name": "sparse_keys", + "type": "string[]", + "description": "A list of `num_sparse` strings.\nThe keys expected in the Examples' features associated with sparse values.", + "minimum": 0 + }, + { + "name": "dense_keys", + "type": "string[]", + "description": "The keys expected in the Examples' features associated with dense\nvalues.", + "minimum": 0 + }, + { + "name": "sparse_types", + "type": "type[]", + "description": "A list of `num_sparse` types; the data types of data in each\nFeature given in sparse_keys.\nCurrently the ParseSingleExample op supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0 + }, + { + "name": "Tdense", + "type": "type[]", + "description": "The data types of data in each Feature given in dense_keys.\nThe length of this list must match the length of `dense_keys`.\nCurrently the ParseSingleExample op supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0 + }, + { + "name": "dense_shapes", + "type": "shape[]", + "description": "The shapes of data in each Feature given in dense_keys.\nThe length of this list must match the length of `dense_keys`. The\nnumber of elements in the Feature corresponding to dense_key[j] must\nalways equal dense_shapes[j].NumEntries(). 
If dense_shapes[j] ==\n(D0, D1, ..., DN) then the shape of output Tensor dense_values[j]\nwill be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1,\n..., DN), the shape of the output Tensor dense_values[j] will be (M,\nD1, .., DN), where M is the number of blocks of elements of length\nD1 * .... * DN, in the input.", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "serialized", + "description": "A scalar containing a binary serialized Example proto.", + "type": 7 + }, + { + "name": "dense_defaults", + "description": "A list of Tensors (some may be empty), whose length matches\nthe length of `dense_keys`. dense_defaults[j] provides default values\nwhen the example's feature_map lacks dense_key[j]. If an empty Tensor is\nprovided for dense_defaults[j], then the Feature dense_keys[j] is required.\nThe input type is inferred from dense_defaults[j], even when it's empty.\nIf dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,\nthen the shape of dense_defaults[j] must match that of dense_shapes[j].\nIf dense_shapes[j] has an undefined major dimension (variable strides dense\nfeature), dense_defaults[j] must contain a single element:\nthe padding element.", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "sparse_indices", + "numberAttr": "num_sparse", + "type": 9 + }, + { + "name": "sparse_values", + "typeListAttr": "sparse_types" + }, + { + "name": "sparse_shapes", + "numberAttr": "num_sparse", + "type": 9 + }, + { + "name": "dense_values", + "typeListAttr": "Tdense" + } + ] + }, + { + "name": "ParseSingleSequenceExample", + "summary": "Transforms a scalar brain.SequenceExample proto (as a string) into typed tensors.", + "attributes": [ + { + "name": "Ncontext_sparse", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "Ncontext_dense", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "Nfeature_list_sparse", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "Nfeature_list_dense", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "context_sparse_types", + "type": "type[]", + "description": "A list of Ncontext_sparse types; the data types of data in\neach context Feature given in context_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList).
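For the `ParseSingleExample` schema above, the scalar-input counterpart of `ParseExample`, a sketch assuming TF 2.x:

```python
import tensorflow as tf

ex = tf.train.Example(features=tf.train.Features(feature={
    "label": tf.train.Feature(float_list=tf.train.FloatList(value=[1.0])),
}))

# Note the scalar input: unlike ParseExample there is no batch
# dimension, so dense results have shape dense_shapes[j] rather than
# [batch] + dense_shapes[j].
parsed = tf.io.parse_single_example(
    tf.constant(ex.SerializeToString()),
    features={"label": tf.io.FixedLenFeature([1], tf.float32)})
print(parsed["label"])  # shape [1]
```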
Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "default": [] + }, + { + "name": "Tcontext_dense", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "default": [] + }, + { + "name": "feature_list_dense_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "default": [] + }, + { + "name": "context_dense_shapes", + "type": "shape[]", + "description": "A list of Ncontext_dense shapes; the shapes of data in\neach context Feature given in context_dense_keys.\nThe number of elements in the Feature corresponding to context_dense_key[j]\nmust always equal context_dense_shapes[j].NumEntries().\nThe shape of context_dense_values[j] will match context_dense_shapes[j].", + "minimum": 0, + "default": [] + }, + { + "name": "feature_list_sparse_types", + "type": "type[]", + "description": "A list of Nfeature_list_sparse types; the data types\nof data in each FeatureList given in feature_list_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "default": [] + }, + { + "name": "feature_list_dense_shapes", + "type": "shape[]", + "description": "A list of Nfeature_list_dense shapes; the shapes of\ndata in each FeatureList given in feature_list_dense_keys.\nThe shape of each Feature in the FeatureList corresponding to\nfeature_list_dense_key[j] must always equal\nfeature_list_dense_shapes[j].NumEntries().", + "minimum": 0, + "default": [] + } + ], + "inputs": [ + { + "name": "serialized", + "description": "A scalar containing a binary serialized SequenceExample proto.", + "type": 7 + }, + { + "name": "feature_list_dense_missing_assumed_empty", + "description": "A vector listing the\nFeatureList keys which may be missing from the SequenceExample. If the\nassociated FeatureList is missing, it is treated as empty. By default,\nany FeatureList not listed in this vector must exist in the SequenceExample.", + "type": 7 + }, + { + "name": "context_sparse_keys", + "description": "A list of Ncontext_sparse string Tensors (scalars).\nThe keys expected in the Examples' features associated with context_sparse\nvalues.", + "numberAttr": "Ncontext_sparse", + "type": 7 + }, + { + "name": "context_dense_keys", + "description": "A list of Ncontext_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples' context features associated with\ndense values.", + "numberAttr": "Ncontext_dense", + "type": 7 + }, + { + "name": "feature_list_sparse_keys", + "description": "A list of Nfeature_list_sparse string Tensors\n(scalars). 
The keys expected in the FeatureLists associated with sparse\nvalues.", + "numberAttr": "Nfeature_list_sparse", + "type": 7 + }, + { + "name": "feature_list_dense_keys", + "description": "A list of Nfeature_list_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples' feature_lists associated\nwith lists of dense values.", + "numberAttr": "Nfeature_list_dense", + "type": 7 + }, + { + "name": "context_dense_defaults", + "description": "A list of Ncontext_dense Tensors (some may be empty).\ncontext_dense_defaults[j] provides default values\nwhen the SequenceExample's context map lacks context_dense_key[j].\nIf an empty Tensor is provided for context_dense_defaults[j],\nthen the Feature context_dense_keys[j] is required.\nThe input type is inferred from context_dense_defaults[j], even when it's\nempty. If context_dense_defaults[j] is not empty, its shape must match\ncontext_dense_shapes[j].", + "typeListAttr": "Tcontext_dense" + }, + { + "name": "debug_name", + "description": "A scalar containing the name of the serialized proto.\nMay contain, for example, table key (descriptive) name for the\ncorresponding serialized proto. This is purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty scalar if no name is available.", + "type": 7 + } + ], + "outputs": [ + { + "name": "context_sparse_indices", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_sparse_values", + "typeListAttr": "context_sparse_types" + }, + { + "name": "context_sparse_shapes", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_dense_values", + "typeListAttr": "Tcontext_dense" + }, + { + "name": "feature_list_sparse_indices", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_sparse_values", + "typeListAttr": "feature_list_sparse_types" + }, + { + "name": "feature_list_sparse_shapes", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_dense_values", + "typeListAttr": "feature_list_dense_types" + } + ] + }, + { + "name": "ParseTensor", + "summary": "Transforms a serialized tensorflow.TensorProto proto into a Tensor.", + "attributes": [ + { + "name": "out_type", + "type": "type", + "description": "The type of the serialized tensor. The provided type must match the\ntype of the serialized tensor and no implicit conversion will take place." + } + ], + "inputs": [ + { + "name": "serialized", + "description": "A scalar string containing a serialized TensorProto proto.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "A Tensor of type `out_type`.", + "typeAttr": "out_type" + } + ] + }, + { + "name": "PartitionedCall", + "summary": "returns `f(inputs)`, where `f`'s body is placed and partitioned.", + "description": "Asynchronously executes a function, potentially across multiple devices but\nwithin a single process. The kernel places and partitions a given function's\nunderlying graph, and executes each of the partitioned subgraphs as a function.", + "attributes": [ + { + "name": "Tin", + "type": "type[]", + "description": "A list of input types.", + "minimum": 0 + }, + { + "name": "Tout", + "type": "type[]", + "description": "A list of output types.", + "minimum": 0 + }, + { + "name": "f", + "type": "function", + "description": " A function that takes 'args', a list of tensors, and returns 'output',\n another list of tensors. Input and output types are specified by 'Tin'\n and 'Tout'. 
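The `ParseTensor` entry above is the inverse of `tf.io.serialize_tensor`; a round-trip sketch (assuming TF 2.x), noting that `out_type` must match the serialized dtype exactly since no implicit conversion takes place:

```python
import tensorflow as tf

t = tf.constant([[1.0, 2.0], [3.0, 4.0]])
blob = tf.io.serialize_tensor(t)                      # scalar DT_STRING TensorProto
back = tf.io.parse_tensor(blob, out_type=tf.float32)  # must match t's dtype
print(bool(tf.reduce_all(t == back)))                 # True
```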
The function body of f will be placed and partitioned across\n devices, setting this op apart from the regular Call op." + }, + { + "name": "config", + "type": "string", + "default": "" + }, + { + "name": "config_proto", + "type": "string", + "default": "" + }, + { + "name": "executor_type", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "args", + "description": "A list of input tensors.", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of return values.", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "Placeholder", + "summary": "A placeholder op for a value that will be fed into the computation.", + "description": "N.B. This operation will fail with an error if it is executed. It is\nintended as a way to represent a value that will always be fed, and to\nprovide attrs that enable the fed value to be checked at runtime.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of elements in the tensor." + }, + { + "name": "shape", + "type": "shape", + "description": "(Optional) The shape of the tensor. If the shape has 0 dimensions, the\nshape is unconstrained.", + "default": { + "type": "shape", + "value": "?" + } + } + ], + "outputs": [ + { + "name": "output", + "description": "A placeholder tensor that must be replaced using the feed mechanism.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "PlaceholderV2", + "summary": "A placeholder op for a value that will be fed into the computation.", + "description": "N.B. This operation will fail with an error if it is executed. It is\nintended as a way to represent a value that will always be fed, and to\nprovide attrs that enable the fed value to be checked at runtime.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of elements in the tensor." + }, + { + "name": "shape", + "type": "shape", + "description": "The shape of the tensor. The shape can be any partially-specified\nshape. To be unconstrained, pass in a shape with unknown rank." + } + ], + "outputs": [ + { + "name": "output", + "description": "A placeholder tensor that must be replaced using the feed mechanism.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "PlaceholderWithDefault", + "summary": "A placeholder op that passes through `input` when its output is not fed.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of elements in the tensor." + }, + { + "name": "shape", + "type": "shape", + "description": "The (possibly partial) shape of the tensor." + } + ], + "inputs": [ + { + "name": "input", + "description": "The default value to produce when `output` is not fed.", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "A placeholder tensor that defaults to `input` if it is not fed.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "Polygamma", + "summary": "Compute the polygamma function \\\\(\\psi^{(n)}(x)\\\\).", + "description": "The polygamma function is defined as:\n\n\n\\\\(\\psi^{(a)}(x) = \\frac{d^a}{dx^a} \\psi(x)\\\\)\n\nwhere \\\\(\\psi(x)\\\\) is the digamma function.\nThe polygamma function is defined only for non-negative integer orders \\\\(a\\\\).", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`."
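For the `PartitionedCall` schema above: in TF 2.x, calling one `tf.function` from another typically materializes as a `PartitionedCall`/`StatefulPartitionedCall` node in the outer graph. A sketch to observe that (the exact op list is version-dependent):

```python
import tensorflow as tf

@tf.function
def inner(x):
    return x * 2.0

@tf.function
def outer(x):
    # The nested call usually becomes a (Stateful)PartitionedCall node.
    return inner(x) + 1.0

cf = outer.get_concrete_function(tf.TensorSpec([None], tf.float32))
print([op.type for op in cf.graph.get_operations()])
# expect something like ['Placeholder', ..., 'PartitionedCall', ...]
```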
+ } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "PopulationCount", + "summary": "Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).", + "description": "For each entry in `x`, calculates the number of `1` (on) bits in the binary\nrepresentation of that entry.\n\n**NOTE**: It is more efficient to first `tf.bitcast` your tensors into\n`int32` or `int64` and perform the bitcount on the result, than to feed in\n8- or 16-bit inputs and then aggregate the resulting counts.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "type": 4 + } + ] + }, + { + "name": "Pow", + "summary": "Computes the power of one value to another.", + "description": "Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\ncorresponding elements in `x` and `y`. For example:\n\n```\n# tensor 'x' is [[2, 2]], [3, 3]]\n# tensor 'y' is [[8, 16], [2, 3]]\ntf.pow(x, y) ==> [[256, 65536], [9, 27]]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float32`, `float16`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "PrefetchDataset", + "summary": "Creates a dataset that asynchronously prefetches elements from `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "slack_period", + "type": "int64", + "default": 0 + }, + { + "name": "legacy_autotune", + "type": "boolean", + "default": true + }, + { + "name": "buffer_size_min", + "type": "int64", + "default": 0 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "buffer_size", + "description": "The maximum number of elements to buffer in an iterator over\nthis dataset.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Prelinearize", + "summary": "An op which linearizes one Tensor value to an opaque variant tensor.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of elements in the tensor." + }, + { + "name": "shape", + "type": "shape", + "description": "The shape of the tensor.", + "default": { + "type": "shape", + "value": "?" + } + }, + { + "name": "layout", + "type": "int64[]", + "description": "A vector holding the requested layout in minor-to-major sequence. 
If a layout\nattribute is passed but its values are all -1 the layout will be computed by\nthe infeed operation.", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "description": "A tensor that will be linearized.", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "type": 21 + } + ] + }, + { + "name": "PrelinearizeTuple", + "summary": "An op which linearizes multiple Tensor values to an opaque variant tensor.", + "attributes": [ + { + "name": "dtypes", + "type": "type[]", + "description": "The element types of each element in `inputs`.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shapes of each tensor in `inputs`." + }, + { + "name": "layouts", + "type": "int64[]", + "description": "A vector holding the requested layout in minor-to-major sequence for all the\ntuple shapes in the order the shapes appear in the \"shapes\" input. The layout\nelements for a sub-shape can be set to -1 in which case the corresponding layout\nwill be computed by the infeed operation.", + "default": [] + } + ], + "inputs": [ + { + "name": "inputs", + "description": "A list of tensors that will be provided using the infeed mechanism.", + "typeListAttr": "dtypes" + } + ], + "outputs": [ + { + "name": "output", + "type": 21 + } + ] + }, + { + "name": "PreventGradient", + "summary": "An identity op that triggers an error if a gradient is requested.", + "description": "When executed in a graph, this op outputs its input tensor as-is.\n\nWhen building ops to compute gradients, the TensorFlow gradient system\nwill return an error when trying to lookup the gradient of this op,\nbecause no gradient must ever be registered for this function. This\nop exists to prevent subtle bugs from silently returning unimplemented\ngradients in some corner cases.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "message", + "type": "string", + "description": "Will be printed in the error when anyone tries to differentiate\nthis operation.", + "default": "" + } + ], + "inputs": [ + { + "name": "input", + "description": "any tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "the same input tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "Print", + "summary": "Prints a list of tensors.", + "description": "Passes `input` through to `output` and prints `data` when evaluating.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "U", + "type": "type[]", + "minimum": 0 + }, + { + "name": "message", + "type": "string", + "description": "A string, prefix of the error message.", + "default": "" + }, + { + "name": "first_n", + "type": "int64", + "description": "Only log `first_n` number of times. 
-1 disables logging.", + "default": -1 + }, + { + "name": "summarize", + "type": "int64", + "description": "Only print this many entries of each tensor.", + "default": 3 + } + ], + "inputs": [ + { + "name": "input", + "description": "The tensor passed to `output`", + "typeAttr": "T" + }, + { + "name": "data", + "description": "A list of tensors to print out when op is evaluated.", + "typeListAttr": "U" + } + ], + "outputs": [ + { + "name": "output", + "description": "The unmodified `input` tensor", + "typeAttr": "T" + } + ] + }, + { + "name": "PrintV2", + "summary": "Prints a string scalar.", + "description": "Prints a string scalar to the desired output_stream.", + "attributes": [ + { + "name": "output_stream", + "type": "string", + "description": "A string specifying the output stream or logging level to print to.", + "default": "stderr" + }, + { + "name": "end", + "type": "string", + "default": "\n" + } + ], + "inputs": [ + { + "name": "input", + "description": "The string scalar to print.", + "type": 7 + } + ] + }, + { + "name": "PriorityQueue", + "summary": "A queue that produces elements sorted by the first component value.", + "description": "Note that the PriorityQueue requires the first component of any element\nto be a scalar int64, in addition to the other elements declared by\ncomponent_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue\nand DequeueMany) on a PriorityQueue will all require (resp. output) one extra\nentry in their input (resp. output) lists.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a value.", + "minimum": 0, + "default": [] + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0 + }, + { + "name": "capacity", + "type": "int64", + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "default": -1 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the queue.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "PriorityQueueV2", + "summary": "A queue that produces elements sorted by the first component value.", + "description": "Note that the PriorityQueue requires the first component of any element\nto be a scalar int64, in addition to the other elements declared by\ncomponent_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue\nand DequeueMany) on a PriorityQueue will all require (resp. output) one extra\nentry in their input (resp. output) lists.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a value.", + "minimum": 0, + "default": [] + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. 
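For the `PrintV2` schema above (assuming TF 2.x): `tf.print` is the high-level entry point, and its `output_stream` and `summarize` arguments map onto the attributes listed here.

```python
import sys
import tensorflow as tf

t = tf.range(10)
# tf.print lowers to PrintV2; summarize=3 keeps the first and last
# three entries of each printed tensor.
tf.print("t =", t, output_stream=sys.stderr, summarize=3)
# prints: t = [0 1 2 ... 7 8 9]
```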
If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0 + }, + { + "name": "capacity", + "type": "int64", + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "default": -1 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the queue.", + "type": 20 + } + ] + }, + { + "name": "PrivateThreadPoolDataset", + "summary": "Creates a dataset that uses a custom thread pool to compute `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "num_threads", + "description": "Identifies the number of threads to use for the private threadpool.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Prod", + "summary": "Computes the product of elements across dimensions of a tensor.", + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If true, retain reduced dimensions with length 1.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "The tensor to reduce.", + "typeAttr": "T" + }, + { + "name": "reduction_indices", + "description": "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "The reduced tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "PyFunc", + "summary": "Invokes a python function to compute func(input)->output.", + "description": "This operation is considered stateful. For a stateless version, see\nPyFuncStateless.", + "attributes": [ + { + "name": "token", + "type": "string", + "description": "A token representing a registered python function in this address space." 
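A usage sketch for the `Prod` reduction above (assuming TF 2.x), showing `keep_dims` (exposed in Python as `keepdims`) and the allowed negative `reduction_indices`:

```python
import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])
print(tf.reduce_prod(x, axis=1))                  # [6. 120.]
print(tf.reduce_prod(x, axis=-1, keepdims=True))  # shape [2, 1]; negative axis allowed
print(tf.reduce_prod(x))                          # 720.0, all dims reduced
```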
+ }, + { + "name": "Tin", + "type": "type[]", + "description": "Data types of the inputs to the op.", + "minimum": 0 + }, + { + "name": "Tout", + "type": "type[]", + "description": "Data types of the outputs from the op.\nThe length of the list specifies the number of outputs.", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "input", + "description": "List of Tensors that will provide input to the Op.", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "description": "The outputs from the Op.", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "PyFuncStateless", + "summary": "A stateless version of PyFunc.", + "attributes": [ + { + "name": "token", + "type": "string" + }, + { + "name": "Tin", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Tout", + "type": "type[]", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "input", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "Qr", + "summary": "Computes the QR decompositions of one or more matrices.", + "description": "Computes the QR decomposition of each inner matrix in `tensor` such that\n`tensor[..., :, :] = q[..., :, :] * r[..., :,:])`\n\nCurrently, the gradient for the QR decomposition is well-defined only when\nthe first `P` columns of the inner matrix are linearly independent, where\n`P` is the minimum of `M` and `N`, the 2 inner-most dimmensions of `tensor`.\n\n```python\n# a is a tensor.\n# q is a tensor of orthonormal matrices.\n# r is a tensor of upper triangular matrices.\nq, r = qr(a)\nq_full, r_full = qr(a, full_matrices=True)\n```", + "attributes": [ + { + "name": "full_matrices", + "type": "boolean", + "description": "If true, compute full-sized `q` and `r`. If false\n(the default), compute only the leading `P` columns of `q`.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "q", + "description": "Orthonormal basis for range of `a`. If `full_matrices` is `False` then\nshape is `[..., M, P]`; if `full_matrices` is `True` then shape is\n`[..., M, M]`.", + "typeAttr": "T" + }, + { + "name": "r", + "description": "Triangular factor. If `full_matrices` is `False` then shape is\n`[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "QuantizeAndDequantize", + "summary": "Use QuantizeAndDequantizeV2 instead.", + "attributes": [ + { + "name": "signed_input", + "type": "boolean", + "default": true + }, + { + "name": "num_bits", + "type": "int64", + "default": 8 + }, + { + "name": "range_given", + "type": "boolean", + "default": false + }, + { + "name": "input_min", + "type": "float32", + "default": 0.0 + }, + { + "name": "input_max", + "type": "float32", + "default": 0.0 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
+ } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "QuantizeAndDequantizeV2", + "summary": "Quantizes then dequantizes a tensor.", + "description": "This op simulates the precision loss from the quantized forward pass by:\n\n1. Quantizing the tensor to fixed point numbers, which should match the target\n quantization method when it is used in inference.\n2. Dequantizing it back to floating point numbers for the following ops, most\n likely matmul.\n\nThere are different ways to quantize. This version uses only scaling, so 0.0\nmaps to 0.\n\nFrom the specified 'num_bits' in the quantized output type, it determines\nminimum and maximum representable quantized values.\n\ne.g.\n\n* [-128, 127] for signed, num_bits = 8, or\n* [0, 255] for unsigned, num_bits = 8.\n\nIf range_given == False, the initial input_min, input_max will be determined\nautomatically as the minimum and maximum values in the input tensor, otherwise\nthe specified values of input_min, input_max are used.\n\nNote: If the input_min, input_max are specified, they do not need to equal the\nactual minimum and maximum values in the tensor. e.g. in some cases it may be\nbeneficial to specify these values such that the low probability extremes of the\ninput distribution are clipped.\n\nThis op determines the maximum scale_factor that would map the initial\n[input_min, input_max] range to a range that lies within the representable\nquantized range.\n\nIt determines the scale from one of input_min and input_max, then updates the\nother one to maximize the representable range.\n\ne.g.\n\n* if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,\n 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8 In this case, it\n would update input_max to be 127 / 12.8 = 9.921875\n* if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,\n 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7 In this case, it\n would update input_min to be 128.0 / 12.7 = -10.07874\n* if the output is unsigned, input_min is forced to be 0, and only the\n specified input_max is used.\n\nAfter determining the scale_factor and updating the input range, it applies the\nfollowing to each value in the 'input' tensor.\n\noutput = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.\n\nThe above round function rounds the value based on the given round_mode.\n", + "attributes": [ + { + "name": "signed_input", + "type": "boolean", + "description": "Whether the quantization is signed or unsigned. (actually this parameter should\nhave been called `signed_output`)", + "default": true + }, + { + "name": "num_bits", + "type": "int64", + "description": "The bitwidth of the quantization.", + "default": 8 + }, + { + "name": "range_given", + "type": "boolean", + "description": "Whether the range is given or should be determined from the `input` tensor.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "round_mode", + "type": "string", + "description": "The 'round_mode' attribute controls which rounding tie-breaking algorithm is\nused when rounding float values to their quantized equivalents. The following\nrounding modes are currently supported:\n\n* HALF_TO_EVEN: this is the default round_mode.\n* HALF_UP: round towards positive. 
In this mode 7.5 rounds up to 8 and -7.5\n rounds up to -7.\n Must be one of the following: `HALF_TO_EVEN`, `HALF_UP`.", + "default": "HALF_TO_EVEN" + }, + { + "name": "narrow_range", + "type": "boolean", + "description": "If True, then the absolute value of the quantized minimum value is the same as\nthe quantized maximum value, instead of 1 greater.\ni.e. for 8 bit quantization, the minimum value is -127 instead of -128.", + "default": false + }, + { + "name": "axis", + "type": "int64", + "description": "If specified, this axis is treated as a channel or slice axis, and a separate\nquantization range is used for each channel or slice along this axis.", + "default": -1 + } + ], + "inputs": [ + { + "name": "input", + "description": "Tensor to quantize and then dequantize.", + "typeAttr": "T" + }, + { + "name": "input_min", + "description": "If `range_given == True`, this specifies the minimum input value that needs to\nbe represented, otherwise it is determined from the min value of the `input`\ntensor.", + "typeAttr": "T" + }, + { + "name": "input_max", + "description": "If `range_given == True`, this specifies the maximum input value that needs to\nbe represented, otherwise it is determined from the max value of the `input`\ntensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "QuantizeAndDequantizeV3", + "summary": "Quantizes then dequantizes a tensor.", + "description": "This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a\ntensor, so its value can change during training.", + "attributes": [ + { + "name": "signed_input", + "type": "boolean", + "default": true + }, + { + "name": "range_given", + "type": "boolean", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "narrow_range", + "type": "boolean", + "default": false + }, + { + "name": "axis", + "type": "int64", + "default": -1 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_min", + "typeAttr": "T" + }, + { + "name": "input_max", + "typeAttr": "T" + }, + { + "name": "num_bits", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "QuantizeAndDequantizeV4", + "summary": "Quantizes then dequantizes a tensor.", + "description": "This is almost identical to QuantizeAndDequantizeV2, except that it returns a\ngradient of 1 for inputs that are within the quantization range, or 0 otherwise.", + "attributes": [ + { + "name": "signed_input", + "type": "boolean", + "description": "Whether the quantization is signed or unsigned. (actually this parameter should\nhave been called `signed_output`)", + "default": true + }, + { + "name": "num_bits", + "type": "int64", + "description": "The bitwidth of the quantization.", + "default": 8 + }, + { + "name": "range_given", + "type": "boolean", + "description": "Whether the range is given or should be determined from the `input` tensor.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "round_mode", + "type": "string", + "description": "The 'round_mode' attribute controls which rounding tie-breaking algorithm is\nused when rounding float values to their quantized equivalents. 
The following\nrounding modes are currently supported:\n\n* HALF_TO_EVEN: this is the default round_mode.\n* HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5\n rounds up to -7.\n Must be one of the following: `HALF_TO_EVEN`, `HALF_UP`.", + "default": "HALF_TO_EVEN" + }, + { + "name": "narrow_range", + "type": "boolean", + "description": "If True, then the absolute value of the quantized minimum value is the same as\nthe quantized maximum value, instead of 1 greater.\ni.e. for 8 bit quantization, the minimum value is -127 instead of -128.", + "default": false + }, + { + "name": "axis", + "type": "int64", + "description": "If specified, this axis is treated as a channel or slice axis, and a separate\nquantization range is used for each channel or slice along this axis.", + "default": -1 + } + ], + "inputs": [ + { + "name": "input", + "description": "Tensor to quantize and then dequantize.", + "typeAttr": "T" + }, + { + "name": "input_min", + "description": "If `range_given == True`, this specifies the minimum input value that needs to\nbe represented, otherwise it is determined from the min value of the `input`\ntensor.", + "typeAttr": "T" + }, + { + "name": "input_max", + "description": "If `range_given == True`, this specifies the maximum input value that needs to\nbe represented, otherwise it is determined from the max value of the `input`\ntensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "QuantizeAndDequantizeV4Grad", + "summary": "Returns the gradient of `QuantizeAndDequantizeV4`.", + "description": "Returns a gradient of 1 for inputs that are within the quantization range,\nor 0 otherwise.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "axis", + "type": "int64", + "default": -1 + } + ], + "inputs": [ + { + "name": "gradients", + "typeAttr": "T" + }, + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_min", + "typeAttr": "T" + }, + { + "name": "input_max", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "input_backprop", + "typeAttr": "T" + }, + { + "name": "input_min_backprop", + "typeAttr": "T" + }, + { + "name": "input_max_backprop", + "typeAttr": "T" + } + ] + }, + { + "name": "QuantizeDownAndShrinkRange", + "summary": "Convert the quantized 'input' tensor into a lower-precision 'output'.", + "description": "Uses the actual distribution of the values to maximize the usage of the lower bit depth\nand adjusts the output min and max ranges accordingly.\n\n[input_min, input_max] are scalar floats that specify the range for the float\ninterpretation of the 'input' data. For example, if input_min is -1.0f and\ninput_max is 1.0f, and we are dealing with quint16 quantized data, then a 0\nvalue in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.\n\nThis operator tries to squeeze as much precision as possible into an output with\na lower bit depth by calculating the actual min and max values found in the\ndata. For example, maybe that quint16 input has no values lower than 16,384 and\nnone higher than 49,152. 
That means only half the range is actually needed, all\nthe float interpretations are between -0.5f and 0.5f, so if we want to compress\nthe data into a quint8 output, we can use that range rather than the theoretical\n-1.0f to 1.0f that is suggested by the input min and max.\n\nIn practice, this is most useful for taking output from operations like\nQuantizedMatMul that can produce higher bit-depth outputs than their inputs and\nmay have large potential output ranges, but in practice have a distribution of\ninput values that only uses a small fraction of the possible range. By feeding\nthat output into this operator, we can reduce it from 32 bits down to 8 with\nminimal loss of accuracy.", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "The type of the output. Should be a lower bit depth than Tinput. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "input_min", + "description": "The float value that the minimum quantized input value represents.", + "type": 1 + }, + { + "name": "input_max", + "description": "The float value that the maximum quantized input value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "output_min", + "description": "The float value that the minimum quantized output value represents.", + "type": 1 + }, + { + "name": "output_max", + "description": "The float value that the maximum quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizeV2", + "summary": "Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.", + "description": "[min_range, max_range] are scalar floats that specify the range for\nthe 'input' data. The 'mode' attribute controls exactly which calculations are\nused to convert the float values to their quantized equivalents. The\n'round_mode' attribute controls which rounding tie-breaking algorithm is used\nwhen rounding float values to their quantized equivalents.\n\nIn 'MIN_COMBINED' mode, each value of the tensor will undergo the following:\n\n```\nout[i] = (in[i] - min_range) * range(T) / (max_range - min_range)\nif T == qint8: out[i] -= (range(T) + 1) / 2.0\n```\n\nhere `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`\n\n*MIN_COMBINED Mode Example*\n\nAssume the input is type float and has a possible range of [0.0, 6.0] and the\noutput type is quint8 ([0, 255]). The min_range and max_range values should be\nspecified as 0.0 and 6.0. 
Quantizing from float to quint8 will multiply each\nvalue of the input by 255/6 and cast to quint8.\n\nIf the output type was qint8 ([-128, 127]), the operation will additionally\nsubtract each value by 128 prior to casting, so that the range of values aligns\nwith the range of qint8.\n\nIf the mode is 'MIN_FIRST', then this approach is used:\n\n```\nnum_discrete_values = 1 << (# of bits in T)\nrange_adjust = num_discrete_values / (num_discrete_values - 1)\nrange = (range_max - range_min) * range_adjust\nrange_scale = num_discrete_values / range\nquantized = round(input * range_scale) - round(range_min * range_scale) +\n numeric_limits<T>::min()\nquantized = max(quantized, numeric_limits<T>::min())\nquantized = min(quantized, numeric_limits<T>::max())\n```\n\nThe biggest difference between this and MIN_COMBINED is that the minimum range\nis rounded first, before it's subtracted from the rounded value. With\nMIN_COMBINED, a small bias is introduced where repeated iterations of quantizing\nand dequantizing will introduce a larger and larger error.\n\n*SCALED mode Example*\n\n`SCALED` mode matches the quantization approach used in\n`QuantizeAndDequantize{V2|V3}`.\n\nIf the mode is `SCALED`, the quantization is performed by multiplying each\ninput value by a scaling_factor.\nThe scaling_factor is determined from `min_range` and `max_range` to be as large\nas possible such that the range from `min_range` to `max_range` is representable\nwithin values of type T.\n\n```c++\n\n const int min_T = std::numeric_limits<T>::min();\n const int max_T = std::numeric_limits<T>::max();\n const float max_float = std::numeric_limits<float>::max();\n\n const float scale_factor_from_min_side =\n (min_T * min_range > 0) ? min_T / min_range : max_float;\n const float scale_factor_from_max_side =\n (max_T * max_range > 0) ? max_T / max_range : max_float;\n\n const float scale_factor = std::min(scale_factor_from_min_side,\n scale_factor_from_max_side);\n```\n\nWe next use the scale_factor to adjust min_range and max_range as follows:\n\n```c++\n min_range = min_T / scale_factor;\n max_range = max_T / scale_factor;\n```\n\n\ne.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would\ncompare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8.\nIn this case, min_range would remain -10, but max_range would be adjusted to\n127 / 12.8 = 9.921875.\n\nSo we will quantize input values in the range (-10, 9.921875) to (-128, 127).\n\nThe input tensor can now be quantized by clipping values to the range\n`min_range` to `max_range`, then multiplying by scale_factor as follows:\n\n```c++\nresult = round(min(max_range, max(min_range, input)) * scale_factor)\n```\n\nThe adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of\nthis operation. These outputs should be used as the range for any further\ncalculations.\n\n\n*narrow_range (bool) attribute*\n\nIf true, we do not use the minimum quantized value.\ni.e. for an int8 quantized output, it would be restricted to the range\n-127..127 instead of the full -128..127 range.\nThis is provided for compatibility with certain inference backends.\n(Only applies to SCALED mode)\n\n\n*axis (int) attribute*\n\nAn optional `axis` attribute can specify a dimension index of the input tensor,\nsuch that quantization ranges will be calculated and applied separately for each\nslice of the tensor along that dimension. 
This is useful for per-channel\nquantization.\n\nIf `axis` is specified, min_range and max_range must each be a 1-D tensor whose\nsize matches the `axis` dimension of the input and output tensors;\nif `axis`=None, per-tensor quantization is performed as normal.\n\n\n*ensure_minimum_range (float) attribute*\n\nEnsures the minimum quantization range is at least this value.\nThe legacy default value for this is 0.01, but it is strongly suggested to\nset it to 0 for new uses.\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "mode", + "type": "string", + "description": "Must be one of the following: `MIN_COMBINED`, `MIN_FIRST`, `SCALED`.", + "default": "MIN_COMBINED" + }, + { + "name": "round_mode", + "type": "string", + "description": "Must be one of the following: `HALF_AWAY_FROM_ZERO`, `HALF_TO_EVEN`.", + "default": "HALF_AWAY_FROM_ZERO" + }, + { + "name": "narrow_range", + "type": "boolean", + "default": false + }, + { + "name": "axis", + "type": "int64", + "default": -1 + }, + { + "name": "ensure_minimum_range", + "type": "float32", + "default": 0.009999999776482582 + } + ], + "inputs": [ + { + "name": "input", + "type": 1 + }, + { + "name": "min_range", + "description": "The minimum value of the quantization range. This value may be adjusted by the\nop depending on other parameters. The adjusted value is written to `output_min`.\nIf the `axis` attribute is specified, this must be a 1-D tensor whose size\nmatches the `axis` dimension of the input and output tensors.", + "type": 1 + }, + { + "name": "max_range", + "description": "The maximum value of the quantization range. This value may be adjusted by the\nop depending on other parameters. The adjusted value is written to `output_max`.\nIf the `axis` attribute is specified, this must be a 1-D tensor whose size\nmatches the `axis` dimension of the input and output tensors.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "description": "The quantized data produced from the float input.", + "typeAttr": "T" + }, + { + "name": "output_min", + "description": "The final quantization range minimum, used to clip input values before scaling\nand rounding them to quantized values.\nIf the `axis` attribute is specified, this will be a 1-D tensor whose size\nmatches the `axis` dimension of the input and output tensors.", + "type": 1 + }, + { + "name": "output_max", + "description": "The final quantization range maximum, used to clip input values before scaling\nand rounding them to quantized values.\nIf the `axis` attribute is specified, this will be a 1-D tensor whose size\nmatches the `axis` dimension of the input and output tensors.", + "type": 1 + } + ] + }, + { + "name": "QuantizedAdd", + "summary": "Returns x + y element-wise, working on quantized buffers.", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`."
+ }, + { + "name": "Toutput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T1" + }, + { + "name": "y", + "typeAttr": "T2" + }, + { + "name": "min_x", + "description": "The float value that the lowest quantized `x` value represents.", + "type": 1 + }, + { + "name": "max_x", + "description": "The float value that the highest quantized `x` value represents.", + "type": 1 + }, + { + "name": "min_y", + "description": "The float value that the lowest quantized `y` value represents.", + "type": 1 + }, + { + "name": "max_y", + "description": "The float value that the highest quantized `y` value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "Toutput" + }, + { + "name": "min_z", + "description": "The float value that the lowest quantized output value represents.", + "type": 1 + }, + { + "name": "max_z", + "description": "The float value that the highest quantized output value represents.\n\n*NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about\nbroadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "type": 1 + } + ] + }, + { + "name": "QuantizedAvgPool", + "summary": "Produces the average pool of the input tensor for quantized types.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "ksize", + "type": "int64[]", + "description": "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input." + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the input\ntensor. The length must be 4 to match the number of dimensions of the input." + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "min_input", + "description": "The float value that the lowest quantized input value represents.", + "type": 1 + }, + { + "name": "max_input", + "description": "The float value that the highest quantized input value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "min_output", + "description": "The float value that the lowest quantized output value represents.", + "type": 1 + }, + { + "name": "max_output", + "description": "The float value that the highest quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedBatchNormWithGlobalNormalization", + "summary": "Quantized Batch normalization.", + "description": "This op is deprecated and will be removed in the future. Prefer\n`tf.nn.batch_normalization`.", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "variance_epsilon", + "type": "float32", + "description": "A small float number to avoid dividing by 0." 
+ }, + { + "name": "scale_after_normalization", + "type": "boolean", + "description": "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma." + } + ], + "inputs": [ + { + "name": "t", + "description": "A 4D input Tensor.", + "typeAttr": "Tinput" + }, + { + "name": "t_min", + "description": "The value represented by the lowest quantized input.", + "type": 1 + }, + { + "name": "t_max", + "description": "The value represented by the highest quantized input.", + "type": 1 + }, + { + "name": "m", + "description": "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof.", + "typeAttr": "Tinput" + }, + { + "name": "m_min", + "description": "The value represented by the lowest quantized mean.", + "type": 1 + }, + { + "name": "m_max", + "description": "The value represented by the highest quantized mean.", + "type": 1 + }, + { + "name": "v", + "description": "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof.", + "typeAttr": "Tinput" + }, + { + "name": "v_min", + "description": "The value represented by the lowest quantized variance.", + "type": 1 + }, + { + "name": "v_max", + "description": "The value represented by the highest quantized variance.", + "type": 1 + }, + { + "name": "beta", + "description": "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor.", + "typeAttr": "Tinput" + }, + { + "name": "beta_min", + "description": "The value represented by the lowest quantized offset.", + "type": 1 + }, + { + "name": "beta_max", + "description": "The value represented by the highest quantized offset.", + "type": 1 + }, + { + "name": "gamma", + "description": "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor.", + "typeAttr": "Tinput" + }, + { + "name": "gamma_min", + "description": "The value represented by the lowest quantized gamma.", + "type": 1 + }, + { + "name": "gamma_max", + "description": "The value represented by the highest quantized gamma.", + "type": 1 + } + ], + "outputs": [ + { + "name": "result", + "typeAttr": "out_type" + }, + { + "name": "result_min", + "type": 1 + }, + { + "name": "result_max", + "type": 1 + } + ] + }, + { + "name": "QuantizedBiasAdd", + "summary": "Adds Tensor 'bias' to Tensor 'input' for Quantized types.", + "description": "Broadcasts the values of bias on dimensions 0..N-2 of 'input'.", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." 
+ } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T1" + }, + { + "name": "bias", + "description": "A 1D bias Tensor with size matching the last dimension of 'input'.", + "typeAttr": "T2" + }, + { + "name": "min_input", + "description": "The float value that the lowest quantized input value represents.", + "type": 1 + }, + { + "name": "max_input", + "description": "The float value that the highest quantized input value represents.", + "type": 1 + }, + { + "name": "min_bias", + "description": "The float value that the lowest quantized bias value represents.", + "type": 1 + }, + { + "name": "max_bias", + "description": "The float value that the highest quantized bias value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_out", + "description": "The float value that the lowest quantized output value represents.", + "type": 1 + }, + { + "name": "max_out", + "description": "The float value that the highest quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedConcat", + "summary": "Concatenates quantized tensors along one dimension.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 2 + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "concat_dim", + "description": "0-D. The dimension along which to concatenate. Must be in the\nrange [0, rank(values)).", + "type": 3 + }, + { + "name": "values", + "description": "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`.", + "numberAttr": "N", + "typeAttr": "T" + }, + { + "name": "input_mins", + "description": "The minimum scalar values for each of the input tensors.", + "numberAttr": "N", + "type": 1 + }, + { + "name": "input_maxes", + "description": "The maximum scalar values for each of the input tensors.", + "numberAttr": "N", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "description": "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension. This tensor's shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes.", + "typeAttr": "T" + }, + { + "name": "output_min", + "description": "The float value that the minimum quantized output value represents.", + "type": 1 + }, + { + "name": "output_max", + "description": "The float value that the maximum quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2D", + "summary": "Computes a 2D convolution given quantized 4D input and filter tensors.", + "description": "The inputs are quantized tensors where the lowest value represents the real\nnumber of the associated minimum, and the highest represents the maximum.\nThis means that you can only interpret the quantized output in the same way, by\ntaking the returned minimum and maximum values into account.", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." 
+ }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the input\ntensor." + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "dilations", + "type": "int64[]", + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each\nfilter element on that dimension. The dimension order is determined by the\nvalue of `data_format`, see above for details. Dilations in the batch and\ndepth dimensions must be 1.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "description": "filter's input_depth dimension must match input's depth dimensions.", + "typeAttr": "Tfilter" + }, + { + "name": "min_input", + "description": "The float value that the lowest quantized input value represents.", + "type": 1 + }, + { + "name": "max_input", + "description": "The float value that the highest quantized input value represents.", + "type": 1 + }, + { + "name": "min_filter", + "description": "The float value that the lowest quantized filter value represents.", + "type": 1 + }, + { + "name": "max_filter", + "description": "The float value that the highest quantized filter value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "description": "The float value that the lowest quantized output value represents.", + "type": 1 + }, + { + "name": "max_output", + "description": "The float value that the highest quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2DAndRelu", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." 
+ }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2DAndReluAndRequantize", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2DAndRequantize", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 11 + } + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." 
+ }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2DPerChannel", + "summary": "Computes QuantizedConv2D per channel.", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "The quantized type of input tensor that needs to be converted. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "The quantized type of filter tensor that needs to be converted. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "The quantized type of output tensor that needs to be converted. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "strides", + "type": "int64[]", + "description": "list of stride values." + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "dilations", + "type": "int64[]", + "description": "list of dilation values.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "The original input tensor.", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "description": "The original filter tensor.", + "typeAttr": "Tfilter" + }, + { + "name": "min_input", + "description": "The minimum value of the input tensor", + "type": 1 + }, + { + "name": "max_input", + "description": "The maximum value of the input tensor.", + "type": 1 + }, + { + "name": "min_filter", + "description": "The minimum value of the filter tensor.", + "type": 1 + }, + { + "name": "max_filter", + "description": "The maximum value of the filter tensor.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output tensor.", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "description": "The minimum value of the final output tensor.", + "type": 1 + }, + { + "name": "max_output", + "description": "The maximum value of the final output tensor.", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2DWithBias", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." 
+ }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "type": 1 + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2DWithBiasAndRelu", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "type": 1 + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2DWithBiasAndReluAndRequantize", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tbias", + "type": "type", + "description": "Must be one of the following: `float32`, `qint32`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." 
+ }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "typeAttr": "Tbias" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2DWithBiasAndRequantize", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tbias", + "type": "type", + "description": "Must be one of the following: `float32`, `qint32`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 11 + } + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "typeAttr": "Tbias" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tbias", + "type": "type", + "description": "Must be one of the following: `float32`, `qint32`." + }, + { + "name": "Tsummand", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." 
+ }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "typeAttr": "Tbias" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + }, + { + "name": "summand", + "typeAttr": "Tsummand" + }, + { + "name": "min_summand", + "type": 1 + }, + { + "name": "max_summand", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2DWithBiasSumAndRelu", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "type": 1 + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "summand", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + }, + { + "name": "QuantizedConv2DWithBiasSumAndReluAndRequantize", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tbias", + "type": "type", + "description": "Must be one of the following: `float32`, `qint32`." + }, + { + "name": "Tsummand", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." 
+ }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "typeAttr": "Tbias" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + }, + { + "name": "summand", + "typeAttr": "Tsummand" + }, + { + "name": "min_summand", + "type": 1 + }, + { + "name": "max_summand", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + }, + { + "name": "QuantizedDepthwiseConv2D", + "summary": "Computes quantized depthwise Conv2D.", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "The type of the filter. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "The type of the output. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "strides", + "type": "int64[]", + "description": "List of stride values." + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "dilations", + "type": "int64[]", + "description": "List of dilation values.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "The original input tensor.", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "description": "The original filter tensor.", + "typeAttr": "Tfilter" + }, + { + "name": "min_input", + "description": "The float value that the minimum quantized input value represents.", + "type": 1 + }, + { + "name": "max_input", + "description": "The float value that the maximum quantized input value represents.", + "type": 1 + }, + { + "name": "min_filter", + "description": "The float value that the minimum quantized filter value represents.", + "type": 1 + }, + { + "name": "max_filter", + "description": "The float value that the maximum quantized filter value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output tensor.", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "description": "The float value that the minimum quantized output value represents.", + "type": 1 + }, + { + "name": "max_output", + "description": "The float value that the maximum quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedDepthwiseConv2DWithBias", + "summary": "Computes quantized depthwise Conv2D with Bias.", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "The type of the filter. 
Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "The type of the output. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "strides", + "type": "int64[]", + "description": "List of stride values." + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." + }, + { + "name": "dilations", + "type": "int64[]", + "description": "List of dilation values.", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "description": "The original input tensor.", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "description": "The original filter tensor.", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "description": "The original bias tensor.", + "type": 1 + }, + { + "name": "min_input", + "description": "The float value that the minimum quantized input value represents.", + "type": 1 + }, + { + "name": "max_input", + "description": "The float value that the maximum quantized input value represents.", + "type": 1 + }, + { + "name": "min_filter", + "description": "The float value that the minimum quantized filter value represents.", + "type": 1 + }, + { + "name": "max_filter", + "description": "The float value that the maximum quantized filter value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output tensor.", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "description": "The float value that the minimum quantized output value represents.", + "type": 1 + }, + { + "name": "max_output", + "description": "The float value that the maximum quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedDepthwiseConv2DWithBiasAndRelu", + "summary": "Computes quantized depthwise Conv2D with Bias and Relu.", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "The type of the filter. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "The type of the output. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "strides", + "type": "int64[]", + "description": "List of stride values." + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." 
+ }, + { + "name": "dilations", + "type": "int64[]", + "description": "List of dilation values.", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "description": "The original input tensor.", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "description": "The original filter tensor.", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "description": "The original bias tensor.", + "type": 1 + }, + { + "name": "min_input", + "description": "The float value that the minimum quantized input value represents.", + "type": 1 + }, + { + "name": "max_input", + "description": "The float value that the maximum quantized input value represents.", + "type": 1 + }, + { + "name": "min_filter", + "description": "The float value that the minimum quantized filter value represents.", + "type": 1 + }, + { + "name": "max_filter", + "description": "The float value that the maximum quantized filter value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output tensor.", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "description": "The float value that the minimum quantized output value represents.", + "type": 1 + }, + { + "name": "max_output", + "description": "The float value that the maximum quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", + "summary": "Computes quantized depthwise Conv2D with Bias, Relu and Requantize.", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tfilter", + "type": "type", + "description": "The type of the filter. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tbias", + "type": "type", + "description": "The type of the bias. Must be one of the following: `float32`, `qint32`." + }, + { + "name": "out_type", + "type": "type", + "description": "The type of the output. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + }, + { + "name": "strides", + "type": "int64[]", + "description": "List of stride values." + }, + { + "name": "padding", + "type": "string", + "description": "Must be one of the following: `SAME`, `VALID`." 
+ }, + { + "name": "dilations", + "type": "int64[]", + "description": "List of dilation values.", + "default": [ + 1, + 1, + 1, + 1 + ] + }, + { + "name": "padding_list", + "type": "int64[]", + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "description": "The original input tensor.", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "description": "The original filter tensor.", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "description": "The original bias tensor.", + "typeAttr": "Tbias" + }, + { + "name": "min_input", + "description": "The float value that the minimum quantized input value represents.", + "type": 1 + }, + { + "name": "max_input", + "description": "The float value that the maximum quantized input value represents.", + "type": 1 + }, + { + "name": "min_filter", + "description": "The float value that the minimum quantized filter value represents.", + "type": 1 + }, + { + "name": "max_filter", + "description": "The float value that the maximum quantized filter value represents.", + "type": 1 + }, + { + "name": "min_freezed_output", + "description": "The minimum float value of the output tensor.", + "type": 1 + }, + { + "name": "max_freezed_output", + "description": "The maximum float value of the output tensor.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output tensor.", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "description": "The float value that the minimum quantized output value represents.", + "type": 1 + }, + { + "name": "max_output", + "description": "The float value that the maximum quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedInstanceNorm", + "summary": "Quantized Instance normalization.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "output_range_given", + "type": "boolean", + "description": "If True, `given_y_min` and `given_y_min`\nand `given_y_max` are used as the output range. 
Otherwise,\nthe implementation computes the output range.", + "default": false + }, + { + "name": "given_y_min", + "type": "float32", + "description": "Used as the output `y_min` if `output_range_given` is True.", + "default": 0.0 + }, + { + "name": "given_y_max", + "type": "float32", + "description": "Used as the output `y_max` if `output_range_given` is True.", + "default": 0.0 + }, + { + "name": "variance_epsilon", + "type": "float32", + "description": "A small float number to avoid dividing by 0.", + "default": 9.999999747378752e-06 + }, + { + "name": "min_separation", + "type": "float32", + "description": "Minimum value of `y_max - y_min`.", + "default": 0.0010000000474974513 + } + ], + "inputs": [ + { + "name": "x", + "description": "A 4D input Tensor.", + "typeAttr": "T" + }, + { + "name": "x_min", + "description": "The value represented by the lowest quantized input.", + "type": 1 + }, + { + "name": "x_max", + "description": "The value represented by the highest quantized input.", + "type": 1 + } + ], + "outputs": [ + { + "name": "y", + "description": "A 4D Tensor.", + "typeAttr": "T" + }, + { + "name": "y_min", + "description": "The value represented by the lowest quantized output.", + "type": 1 + }, + { + "name": "y_max", + "description": "The value represented by the highest quantized output.", + "type": 1 + } + ] + }, + { + "name": "QuantizedMatMul", + "summary": "Perform a quantized matrix multiplication of `a` by the matrix `b`.", + "description": "The inputs must be two-dimensional matrices and the inner dimension of\n`a` (after being transposed if `transpose_a` is non-zero) must match the\nouter dimension of `b` (after being transposed if `transpose_b` is\nnon-zero).", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Toutput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "transpose_a", + "type": "boolean", + "description": "If true, `a` is transposed before multiplication.", + "default": false + }, + { + "name": "transpose_b", + "type": "boolean", + "description": "If true, `b` is transposed before multiplication.", + "default": false + }, + { + "name": "Tactivation", + "type": "type", + "description": "The type of output produced by the activation function\nfollowing this operation. 
Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + } + ], + "inputs": [ + { + "name": "a", + "description": "Must be a two-dimensional tensor.", + "typeAttr": "T1" + }, + { + "name": "b", + "description": "Must be a two-dimensional tensor.", + "typeAttr": "T2" + }, + { + "name": "min_a", + "description": "The float value that the lowest quantized `a` value represents.", + "type": 1 + }, + { + "name": "max_a", + "description": "The float value that the highest quantized `a` value represents.", + "type": 1 + }, + { + "name": "min_b", + "description": "The float value that the lowest quantized `b` value represents.", + "type": 1 + }, + { + "name": "max_b", + "description": "The float value that the highest quantized `b` value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + }, + { + "name": "min_out", + "description": "The float value that the lowest quantized output value represents.", + "type": 1 + }, + { + "name": "max_out", + "description": "The float value that the highest quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedMatMulWithBias", + "summary": "Performs a quantized matrix multiplication of `a` by the matrix `b` with bias\nadd.", + "description": "The inputs must be two-dimensional matrices and a 1D bias vector. The inner\ndimension of `a` (after being transposed if `transpose_a` is non-zero) must\nmatch the outer dimension of `b` (after being transposed if `transpose_b` is\nnon-zero). A broadcast add with the bias values is then applied to the matrix\nmultiplication result. The bias size must match the inner dimension of `b`.", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tbias", + "type": "type", + "description": "Must be one of the following: `float32`, `qint32`." + }, + { + "name": "Toutput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "transpose_a", + "type": "boolean", + "description": "If true, `a` is transposed before multiplication.", + "default": false + }, + { + "name": "transpose_b", + "type": "boolean", + "description": "If true, `b` is transposed before multiplication.", + "default": false + }, + { + "name": "input_quant_mode", + "type": "string", + "description": "Input data quantization mode. Either MIN_FIRST (default) or SCALED. Must be one of the following: `MIN_FIRST`, `SCALED`.", + "default": "MIN_FIRST" + } + ], + "inputs": [ + { + "name": "a", + "description": "A matrix to be multiplied. 
Must be a two-dimensional tensor of type `quint8`.", + "typeAttr": "T1" + }, + { + "name": "b", + "description": "A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.", + "typeAttr": "T2" + }, + { + "name": "bias", + "description": "A 1D bias tensor with size matching the inner dimension of `b` (after being\ntransposed if `transpose_b` is non-zero).", + "typeAttr": "Tbias" + }, + { + "name": "min_a", + "description": "The float value that the lowest quantized `a` value represents.", + "type": 1 + }, + { + "name": "max_a", + "description": "The float value that the highest quantized `a` value represents.", + "type": 1 + }, + { + "name": "min_b", + "description": "The float value that the lowest quantized `b` value represents.", + "type": 1 + }, + { + "name": "max_b", + "description": "The float value that the highest quantized `b` value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + }, + { + "name": "min_out", + "description": "The float value that the lowest quantized output value represents.", + "type": 1 + }, + { + "name": "max_out", + "description": "The float value that the highest quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedMatMulWithBiasAndDequantize", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tbias", + "type": "type", + "description": "Must be one of the following: `float32`, `qint32`." + }, + { + "name": "Toutput", + "type": "type", + "description": "Must be one of the following: `float32`." + }, + { + "name": "transpose_a", + "type": "boolean", + "default": false + }, + { + "name": "transpose_b", + "type": "boolean", + "default": false + }, + { + "name": "input_quant_mode", + "type": "string", + "description": "Must be one of the following: `MIN_FIRST`, `SCALED`.", + "default": "MIN_FIRST" + } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T1" + }, + { + "name": "b", + "typeAttr": "T2" + }, + { + "name": "bias", + "typeAttr": "Tbias" + }, + { + "name": "min_a", + "type": 1 + }, + { + "name": "max_a", + "type": 1 + }, + { + "name": "min_b", + "type": 1 + }, + { + "name": "max_b", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + } + ] + }, + { + "name": "QuantizedMatMulWithBiasAndRelu", + "summary": "Perform a quantized matrix multiplication of `a` by the matrix `b` with bias\nadd and relu fusion.", + "description": "The inputs must be two-dimensional matrices and a 1D bias vector. The inner\ndimension of `a` (after being transposed if `transpose_a` is non-zero) must\nmatch the outer dimension of `b` (after being transposed if `transpose_b` is\nnon-zero). A broadcast add with the bias values is then applied to the matrix\nmultiplication result. The bias size must match the inner dimension of `b`. A relu\nactivation is then applied to produce a non-negative result.", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." 
+ }, + { + "name": "Toutput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "transpose_a", + "type": "boolean", + "description": "If true, `a` is transposed before multiplication.", + "default": false + }, + { + "name": "transpose_b", + "type": "boolean", + "description": "If true, `b` is transposed before multiplication.", + "default": false + }, + { + "name": "input_quant_mode", + "type": "string", + "description": "Input data quantization mode. Either MIN_FIRST(default) or SCALED. Must be one of the following: `MIN_FIRST`, `SCALED`.", + "default": "MIN_FIRST" + } + ], + "inputs": [ + { + "name": "a", + "description": "A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.", + "typeAttr": "T1" + }, + { + "name": "b", + "description": "A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.", + "typeAttr": "T2" + }, + { + "name": "bias", + "description": "A 1D bias tensor with size matching with inner dimension of `b` (after being\ntransposed if `transposed_b` is non-zero).", + "type": 1 + }, + { + "name": "min_a", + "description": "The float value that the lowest quantized `a` value represents.", + "type": 1 + }, + { + "name": "max_a", + "description": "The float value that the highest quantized `a` value represents.", + "type": 1 + }, + { + "name": "min_b", + "description": "The float value that the lowest quantized `b` value represents.", + "type": 1 + }, + { + "name": "max_b", + "description": "The float value that the highest quantized `b` value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + }, + { + "name": "min_out", + "description": "The float value that the lowest quantized output value represents.", + "type": 1 + }, + { + "name": "max_out", + "description": "The float value that the highest quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedMatMulWithBiasAndReluAndRequantize", + "summary": "Perform a quantized matrix multiplication of `a` by the matrix `b` with bias\nadd and relu and requantize fusion.", + "description": "The inputs must be two-dimensional matrices and 1D bias vector. And the inner\ndimension of `a` (after being transposed if `transpose_a` is non-zero) must\nmatch the outer dimension of `b` (after being transposed if `transposed_b` is\nnon-zero). Then do broadcast add operation with bias values on the matrix\nmultiplication result. The bias size must match inner dimension of `b`. Then do\nrelu activation to get non-negative result. Then do requantize operation to get\nfinal uint8 result.", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tbias", + "type": "type", + "description": "Must be one of the following: `float32`, `qint32`." 
+ }, + { + "name": "Toutput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + }, + { + "name": "transpose_a", + "type": "boolean", + "description": "If true, `a` is transposed before multiplication.", + "default": false + }, + { + "name": "transpose_b", + "type": "boolean", + "description": "If true, `b` is transposed before multiplication.", + "default": false + }, + { + "name": "input_quant_mode", + "type": "string", + "description": "Input data quantization mode. Either MIN_FIRST(default) or SCALED. Must be one of the following: `MIN_FIRST`, `SCALED`.", + "default": "MIN_FIRST" + } + ], + "inputs": [ + { + "name": "a", + "description": "A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.", + "typeAttr": "T1" + }, + { + "name": "b", + "description": "A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.", + "typeAttr": "T2" + }, + { + "name": "bias", + "description": "A 1D bias tensor with size matching with inner dimension of `b` (after being\ntransposed if `transposed_b` is non-zero).", + "typeAttr": "Tbias" + }, + { + "name": "min_a", + "description": "The float value that the lowest quantized `a` value represents.", + "type": 1 + }, + { + "name": "max_a", + "description": "The float value that the highest quantized `a` value represents.", + "type": 1 + }, + { + "name": "min_b", + "description": "The float value that the lowest quantized `b` value represents.", + "type": 1 + }, + { + "name": "max_b", + "description": "The float value that the highest quantized `b` value represents.", + "type": 1 + }, + { + "name": "min_freezed_output", + "description": "The float value that the highest quantized output value after requantize.", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + }, + { + "name": "min_out", + "description": "The float value that the lowest quantized output value represents.", + "type": 1 + }, + { + "name": "max_out", + "description": "The float value that the highest quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedMatMulWithBiasAndRequantize", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "Tbias", + "type": "type", + "description": "Must be one of the following: `float32`, `qint32`." 
+ }, + { + "name": "Toutput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + }, + { + "name": "transpose_a", + "type": "boolean", + "default": false + }, + { + "name": "transpose_b", + "type": "boolean", + "default": false + }, + { + "name": "input_quant_mode", + "type": "string", + "description": "Must be one of the following: `MIN_FIRST`, `SCALED`.", + "default": "MIN_FIRST" + } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T1" + }, + { + "name": "b", + "typeAttr": "T2" + }, + { + "name": "bias", + "typeAttr": "Tbias" + }, + { + "name": "min_a", + "type": 1 + }, + { + "name": "max_a", + "type": 1 + }, + { + "name": "min_b", + "type": 1 + }, + { + "name": "max_b", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + }, + { + "name": "min_out", + "type": 1 + }, + { + "name": "max_out", + "type": 1 + } + ] + }, + { + "name": "QuantizedMaxPool", + "summary": "Produces the max pool of the input tensor for quantized types.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "ksize", + "type": "int64[]", + "description": "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input." + }, + { + "name": "strides", + "type": "int64[]", + "description": "The stride of the sliding window for each dimension of the input\ntensor. The length must be 4 to match the number of dimensions of the input." + }, + { + "name": "padding", + "type": "string", + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`." + } + ], + "inputs": [ + { + "name": "input", + "description": "The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.", + "typeAttr": "T" + }, + { + "name": "min_input", + "description": "The float value that the lowest quantized input value represents.", + "type": 1 + }, + { + "name": "max_input", + "description": "The float value that the highest quantized input value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "min_output", + "description": "The float value that the lowest quantized output value represents.", + "type": 1 + }, + { + "name": "max_output", + "description": "The float value that the highest quantized output value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedMul", + "summary": "Returns x * y element-wise, working on quantized buffers.", + "attributes": [ + { + "name": "T1", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "T2", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." 
+ }, + { + "name": "Toutput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T1" + }, + { + "name": "y", + "typeAttr": "T2" + }, + { + "name": "min_x", + "description": "The float value that the lowest quantized `x` value represents.", + "type": 1 + }, + { + "name": "max_x", + "description": "The float value that the highest quantized `x` value represents.", + "type": 1 + }, + { + "name": "min_y", + "description": "The float value that the lowest quantized `y` value represents.", + "type": 1 + }, + { + "name": "max_y", + "description": "The float value that the highest quantized `y` value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "Toutput" + }, + { + "name": "min_z", + "description": "The float value that the lowest quantized output value represents.", + "type": 1 + }, + { + "name": "max_z", + "description": "The float value that the highest quantized output value represents.\n\n*NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about\nbroadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "type": 1 + } + ] + }, + { + "name": "QuantizedRelu", + "summary": "Computes Quantized Rectified Linear: `max(features, 0)`", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "Tinput" + }, + { + "name": "min_features", + "description": "The float value that the lowest quantized value represents.", + "type": 1 + }, + { + "name": "max_features", + "description": "The float value that the highest quantized value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "activations", + "description": "Has the same output shape as \"features\".", + "typeAttr": "out_type" + }, + { + "name": "min_activations", + "description": "The float value that the lowest quantized value represents.", + "type": 1 + }, + { + "name": "max_activations", + "description": "The float value that the highest quantized value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedRelu6", + "summary": "Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." 
+ }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "Tinput" + }, + { + "name": "min_features", + "description": "The float value that the lowest quantized value represents.", + "type": 1 + }, + { + "name": "max_features", + "description": "The float value that the highest quantized value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "activations", + "description": "Has the same output shape as \"features\".", + "typeAttr": "out_type" + }, + { + "name": "min_activations", + "description": "The float value that the lowest quantized value represents.", + "type": 1 + }, + { + "name": "max_activations", + "description": "The float value that the highest quantized value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedReluX", + "summary": "Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "Tinput" + }, + { + "name": "max_value", + "type": 1 + }, + { + "name": "min_features", + "description": "The float value that the lowest quantized value represents.", + "type": 1 + }, + { + "name": "max_features", + "description": "The float value that the highest quantized value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "activations", + "description": "Has the same output shape as \"features\".", + "typeAttr": "out_type" + }, + { + "name": "min_activations", + "description": "The float value that the lowest quantized value represents.", + "type": 1 + }, + { + "name": "max_activations", + "description": "The float value that the highest quantized value represents.", + "type": 1 + } + ] + }, + { + "name": "QuantizedReshape", + "summary": "Reshapes a quantized tensor as per the Reshape op.", + "description": "```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tshape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "tensor", + "typeAttr": "T" + }, + { + "name": "shape", + "description": "Defines the shape of the output tensor.", + "typeAttr": "Tshape" + }, + { + "name": "input_min", + "description": "The minimum value of the input.", + "type": 1 + }, + { + "name": "input_max", + "description": "The maximum value of the input.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_min", + "description": "This value is copied from input_min.", + "type": 1 + }, + { + "name": "output_max", + "description": "This value is copied from input_max.", + "type": 1 + } + ] + }, + { + "name": "QuantizedResizeBilinear", + "summary": "Resize quantized `images` to `size` using quantized bilinear interpolation.", + "description": "Input images and output images must be quantized types.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `quint8`, `qint32`, `float32`." 
+ }, + { + "name": "align_corners", + "type": "boolean", + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. Defaults to false.", + "default": false + }, + { + "name": "half_pixel_centers", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "images", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "size", + "description": "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images.", + "type": 3 + }, + { + "name": "min", + "type": 1 + }, + { + "name": "max", + "type": 1 + } + ], + "outputs": [ + { + "name": "resized_images", + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "typeAttr": "T" + }, + { + "name": "out_min", + "type": 1 + }, + { + "name": "out_max", + "type": 1 + } + ] + }, + { + "name": "QueueClose", + "summary": "Closes the given queue.", + "description": "This operation signals that no more elements will be enqueued in the\ngiven queue. Subsequent Enqueue(Many) operations will fail.\nSubsequent Dequeue(Many) operations will continue to succeed if\nsufficient elements remain in the queue. Subsequent Dequeue(Many)\noperations that would block will fail immediately.", + "attributes": [ + { + "name": "cancel_pending_enqueues", + "type": "boolean", + "description": "If true, all pending enqueue requests that are\nblocked on the given queue will be canceled.", + "default": false + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "QueueCloseV2", + "summary": "Closes the given queue.", + "description": "This operation signals that no more elements will be enqueued in the\ngiven queue. Subsequent Enqueue(Many) operations will fail.\nSubsequent Dequeue(Many) operations will continue to succeed if\nsufficient elements remain in the queue. Subsequent Dequeue(Many)\noperations that would block will fail immediately.", + "attributes": [ + { + "name": "cancel_pending_enqueues", + "type": "boolean", + "description": "If true, all pending enqueue requests that are\nblocked on the given queue will be canceled.", + "default": false + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 20 + } + ] + }, + { + "name": "QueueDequeue", + "summary": "Dequeues a tuple of one or more tensors from the given queue.", + "description": "This operation has k outputs, where k is the number of components\nin the tuples stored in the given queue, and output i is the ith\ncomponent of the dequeued tuple.\n\nN.B. 
If the queue is empty, this operation will block until an element\nhas been dequeued (or 'timeout_ms' elapses, if specified).", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a tuple.", + "minimum": 1 + }, + { + "name": "timeout_ms", + "type": "int64", + "description": "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet.", + "default": -1 + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 7, + "isRef": true + } + ], + "outputs": [ + { + "name": "components", + "description": "One or more tensors that were dequeued as a tuple.", + "typeListAttr": "component_types" + } + ] + }, + { + "name": "QueueDequeueMany", + "summary": "Dequeues `n` tuples of one or more tensors from the given queue.", + "description": "If the queue is closed and there are fewer than `n` elements, then an\nOutOfRange error is returned.\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. All of the components\nin the dequeued tuple will have size `n` in the 0th dimension.\n\nThis operation has `k` outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple.\n\nN.B. If the queue is empty, this operation will block until `n` elements\nhave been dequeued (or 'timeout_ms' elapses, if specified).", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a tuple.", + "minimum": 1 + }, + { + "name": "timeout_ms", + "type": "int64", + "description": "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet.", + "default": -1 + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 7, + "isRef": true + }, + { + "name": "n", + "description": "The number of tuples to dequeue.", + "type": 3 + } + ], + "outputs": [ + { + "name": "components", + "description": "One or more tensors that were dequeued as a tuple.", + "typeListAttr": "component_types" + } + ] + }, + { + "name": "QueueDequeueManyV2", + "summary": "Dequeues `n` tuples of one or more tensors from the given queue.", + "description": "If the queue is closed and there are fewer than `n` elements, then an\nOutOfRange error is returned.\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. All of the components\nin the dequeued tuple will have size `n` in the 0th dimension.\n\nThis operation has `k` outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple.\n\nN.B. 
If the queue is empty, this operation will block until `n` elements\nhave been dequeued (or 'timeout_ms' elapses, if specified).", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a tuple.", + "minimum": 1 + }, + { + "name": "timeout_ms", + "type": "int64", + "description": "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet.", + "default": -1 + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 20 + }, + { + "name": "n", + "description": "The number of tuples to dequeue.", + "type": 3 + } + ], + "outputs": [ + { + "name": "components", + "description": "One or more tensors that were dequeued as a tuple.", + "typeListAttr": "component_types" + } + ] + }, + { + "name": "QueueDequeueUpTo", + "summary": "Dequeues `n` tuples of one or more tensors from the given queue.", + "description": "This operation is not supported by all queues. If a queue does not support\nDequeueUpTo, then an Unimplemented error is returned.\n\nIf the queue is closed and there are more than 0 but less than `n`\nelements remaining, then instead of returning an OutOfRange error like\nQueueDequeueMany, less than `n` elements are returned immediately. If\nthe queue is closed and there are 0 elements left in the queue, then\nan OutOfRange error is returned just like in QueueDequeueMany.\nOtherwise the behavior is identical to QueueDequeueMany:\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. All of the components\nin the dequeued tuple will have size `n` in the 0th dimension.\n\nThis operation has k outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a tuple.", + "minimum": 1 + }, + { + "name": "timeout_ms", + "type": "int64", + "description": "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet.", + "default": -1 + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 7, + "isRef": true + }, + { + "name": "n", + "description": "The number of tuples to dequeue.", + "type": 3 + } + ], + "outputs": [ + { + "name": "components", + "description": "One or more tensors that were dequeued as a tuple.", + "typeListAttr": "component_types" + } + ] + }, + { + "name": "QueueDequeueUpToV2", + "summary": "Dequeues `n` tuples of one or more tensors from the given queue.", + "description": "This operation is not supported by all queues. If a queue does not support\nDequeueUpTo, then an Unimplemented error is returned.\n\nIf the queue is closed and there are more than 0 but less than `n`\nelements remaining, then instead of returning an OutOfRange error like\nQueueDequeueMany, less than `n` elements are returned immediately. If\nthe queue is closed and there are 0 elements left in the queue, then\nan OutOfRange error is returned just like in QueueDequeueMany.\nOtherwise the behavior is identical to QueueDequeueMany:\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. 
All of the components\nin the dequeued tuple will have size n in the 0th dimension.\n\nThis operation has `k` outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a tuple.", + "minimum": 1 + }, + { + "name": "timeout_ms", + "type": "int64", + "description": "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet.", + "default": -1 + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 20 + }, + { + "name": "n", + "description": "The number of tuples to dequeue.", + "type": 3 + } + ], + "outputs": [ + { + "name": "components", + "description": "One or more tensors that were dequeued as a tuple.", + "typeListAttr": "component_types" + } + ] + }, + { + "name": "QueueDequeueV2", + "summary": "Dequeues a tuple of one or more tensors from the given queue.", + "description": "This operation has k outputs, where k is the number of components\nin the tuples stored in the given queue, and output i is the ith\ncomponent of the dequeued tuple.\n\nN.B. If the queue is empty, this operation will block until an element\nhas been dequeued (or 'timeout_ms' elapses, if specified).", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a tuple.", + "minimum": 1 + }, + { + "name": "timeout_ms", + "type": "int64", + "description": "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet.", + "default": -1 + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 20 + } + ], + "outputs": [ + { + "name": "components", + "description": "One or more tensors that were dequeued as a tuple.", + "typeListAttr": "component_types" + } + ] + }, + { + "name": "QueueEnqueue", + "summary": "Enqueues a tuple of one or more tensors in the given queue.", + "description": "The components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. If the queue is full, this operation will block until the given\nelement has been enqueued (or 'timeout_ms' elapses, if specified).", + "attributes": [ + { + "name": "Tcomponents", + "type": "type[]", + "minimum": 1 + }, + { + "name": "timeout_ms", + "type": "int64", + "description": "If the queue is full, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet.", + "default": -1 + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 7, + "isRef": true + }, + { + "name": "components", + "description": "One or more tensors from which the enqueued tensors should be taken.", + "typeListAttr": "Tcomponents" + } + ] + }, + { + "name": "QueueEnqueueMany", + "summary": "Enqueues zero or more tuples of one or more tensors in the given queue.", + "description": "This operation slices each component tensor along the 0th dimension to\nmake multiple queue elements. All of the tuple components must have the\nsame size in the 0th dimension.\n\nThe components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. 
If the queue is full, this operation will block until the given\nelements have been enqueued (or 'timeout_ms' elapses, if specified).", + "attributes": [ + { + "name": "Tcomponents", + "type": "type[]", + "minimum": 1 + }, + { + "name": "timeout_ms", + "type": "int64", + "description": "If the queue is too full, this operation will block for up\nto timeout_ms milliseconds.\nNote: This option is not supported yet.", + "default": -1 + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 7, + "isRef": true + }, + { + "name": "components", + "description": "One or more tensors from which the enqueued tensors should\nbe taken.", + "typeListAttr": "Tcomponents" + } + ] + }, + { + "name": "QueueEnqueueManyV2", + "summary": "Enqueues zero or more tuples of one or more tensors in the given queue.", + "description": "This operation slices each component tensor along the 0th dimension to\nmake multiple queue elements. All of the tuple components must have the\nsame size in the 0th dimension.\n\nThe components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. If the queue is full, this operation will block until the given\nelements have been enqueued (or 'timeout_ms' elapses, if specified).", + "attributes": [ + { + "name": "Tcomponents", + "type": "type[]", + "minimum": 1 + }, + { + "name": "timeout_ms", + "type": "int64", + "description": "If the queue is too full, this operation will block for up\nto timeout_ms milliseconds.\nNote: This option is not supported yet.", + "default": -1 + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 20 + }, + { + "name": "components", + "description": "One or more tensors from which the enqueued tensors should\nbe taken.", + "typeListAttr": "Tcomponents" + } + ] + }, + { + "name": "QueueEnqueueV2", + "summary": "Enqueues a tuple of one or more tensors in the given queue.", + "description": "The components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. 
If the queue is full, this operation will block until the given\nelement has been enqueued (or 'timeout_ms' elapses, if specified).", + "attributes": [ + { + "name": "Tcomponents", + "type": "type[]", + "minimum": 1 + }, + { + "name": "timeout_ms", + "type": "int64", + "description": "If the queue is full, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet.", + "default": -1 + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 20 + }, + { + "name": "components", + "description": "One or more tensors from which the enqueued tensors should be taken.", + "typeListAttr": "Tcomponents" + } + ] + }, + { + "name": "QueueIsClosed", + "summary": "Returns true if queue is closed.", + "description": "This operation returns true if the queue is closed and false if the queue\nis open.", + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 7, + "isRef": true + } + ], + "outputs": [ + { + "name": "is_closed", + "type": 10 + } + ] + }, + { + "name": "QueueIsClosedV2", + "summary": "Returns true if queue is closed.", + "description": "This operation returns true if the queue is closed and false if the queue\nis open.", + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 20 + } + ], + "outputs": [ + { + "name": "is_closed", + "type": 10 + } + ] + }, + { + "name": "QueueSize", + "summary": "Computes the number of elements in the given queue.", + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 7, + "isRef": true + } + ], + "outputs": [ + { + "name": "size", + "description": "The number of elements in the given queue.", + "type": 3 + } + ] + }, + { + "name": "QueueSizeV2", + "summary": "Computes the number of elements in the given queue.", + "inputs": [ + { + "name": "handle", + "description": "The handle to a queue.", + "type": 20 + } + ], + "outputs": [ + { + "name": "size", + "description": "The number of elements in the given queue.", + "type": 3 + } + ] + }, + { + "name": "RFFT", + "summary": "Real-valued fast Fourier transform.", + "description": "Computes the 1-dimensional discrete Fourier transform of a real-valued signal\nover the inner-most dimension of `input`.\n\nSince the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the\n`fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,\nfollowed by the `fft_length / 2` positive-frequency terms.\n\nAlong the axis `RFFT` is computed on, if `fft_length` is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros.", + "attributes": [ + { + "name": "Treal", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A float32 tensor.", + "typeAttr": "Treal" + }, + { + "name": "fft_length", + "description": "An int32 tensor of shape [1]. The FFT length.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex64 tensor of the same rank as `input`. 
The inner-most\n dimension of `input` is replaced with the `fft_length / 2 + 1` unique\n frequency components of its 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfft\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "RFFT2D", + "summary": "2D real-valued fast Fourier transform.", + "description": "Computes the 2-dimensional discrete Fourier transform of a real-valued signal\nover the inner-most 2 dimensions of `input`.\n\nSince the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the\n`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension\nof `output`: the zero-frequency term, followed by the `fft_length / 2`\npositive-frequency terms.\n\nAlong each axis `RFFT2D` is computed on, if `fft_length` is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros.", + "attributes": [ + { + "name": "Treal", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A float32 tensor.", + "typeAttr": "Treal" + }, + { + "name": "fft_length", + "description": "An int32 tensor of shape [2]. The FFT length for each dimension.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex64 tensor of the same rank as `input`. The inner-most 2\n dimensions of `input` are replaced with their 2D Fourier transform. The\n inner-most dimension contains `fft_length / 2 + 1` unique frequency\n components.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfft2\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "RFFT3D", + "summary": "3D real-valued fast Fourier transform.", + "description": "Computes the 3-dimensional discrete Fourier transform of a real-valued signal\nover the inner-most 3 dimensions of `input`.\n\nSince the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the\n`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension\nof `output`: the zero-frequency term, followed by the `fft_length / 2`\npositive-frequency terms.\n\nAlong each axis `RFFT3D` is computed on, if `fft_length` is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros.", + "attributes": [ + { + "name": "Treal", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A float32 tensor.", + "typeAttr": "Treal" + }, + { + "name": "fft_length", + "description": "An int32 tensor of shape [3]. The FFT length for each dimension.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex64 tensor of the same rank as `input`. The inner-most 3\n dimensions of `input` are replaced with their 3D Fourier transform. 
The\n inner-most dimension contains `fft_length / 2 + 1` unique frequency\n components.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfftn with 3 dimensions.\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "RFFTND", + "summary": "ND fast real Fourier transform.", + "description": "Computes the n-dimensional real discrete Fourier transform over designated\ndimensions of `input`. The designated dimensions of `input` are assumed to be\nthe result of `RFFTND`. The length of the last axis transformed will be\nfft_length[-1]//2+1.\n\nIf fft_length[i] < shape(input)[i], the input is cropped. If\nfft_length[i] > shape(input)[i], the input is padded with zeros. If fft_length\nis not given, the default shape(input) is used.\n\nAxes are the dimensions to perform the transform on. Default is to perform on\nall axes.", + "attributes": [ + { + "name": "Treal", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "A real tensor.", + "typeAttr": "Treal" + }, + { + "name": "fft_length", + "description": "An int32 tensor. The FFT length for each dimension.", + "type": 3 + }, + { + "name": "axes", + "description": "An int32 tensor with the same shape as fft_length. Axes to perform the transform.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A complex tensor of the same shape as `input`. The designated\ndimensions of `input` are replaced with their real Fourier transforms.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfftn.\n@end_compatibility", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "RGBToHSV", + "summary": "Converts one or more images from RGB to HSV.", + "description": "Outputs a tensor of the same shape as the `images` tensor, containing the HSV\nvalue of the pixels. The output is only well defined if the values in `images`\nare in `[0,1]`.\n\n`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and\n`output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0\ncorresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.\n\nUsage Example:\n\n>>> blue_image = tf.stack([\n... tf.zeros([5,5]),\n... tf.zeros([5,5]),\n... tf.ones([5,5])],\n... axis=-1)\n>>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image)\n>>> blue_hsv_image[0,0].numpy()\narray([0.6666667, 1. , 1. ], dtype=float32)\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "images", + "description": "1-D or higher rank. RGB data to convert. Last dimension must be size 3.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "`images` converted to HSV.", + "typeAttr": "T" + } + ] + }, + { + "name": "RaggedBincount", + "summary": "Counts the number of occurrences of each value in an integer array.", + "description": "Outputs a vector with length `size` and the same dtype as `weights`. If\n`weights` are empty, then index `i` stores the number of times the value `i` is\ncounted in `arr`. 
If `weights` are non-empty, then index `i` stores the sum of\nthe value in `weights` at each index where the corresponding value in `arr` is\n`i`.\n\nValues in `arr` outside of the range [0, size) are ignored.", + "attributes": [ + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`." + }, + { + "name": "binary_output", + "type": "boolean", + "description": "bool; Whether the kernel should count the appearance or number of occurrences.", + "default": false + } + ], + "inputs": [ + { + "name": "splits", + "description": "1D int64 `Tensor`.", + "type": 9 + }, + { + "name": "values", + "description": "2D int `Tensor`.", + "typeAttr": "Tidx" + }, + { + "name": "size", + "description": "non-negative int scalar `Tensor`.", + "typeAttr": "Tidx" + }, + { + "name": "weights", + "description": "An int32, int64, float32, or float64 `Tensor` with the same\nshape as `input`, or a length-0 `Tensor`, in which case it acts as all weights\nequal to 1.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].\nThe counts or summed weights for each value in the range [0, size).", + "typeAttr": "T" + } + ] + }, + { + "name": "RaggedCountSparseOutput", + "summary": "Performs sparse-output bin counting for a ragged tensor input.", + "description": "Counts the number of times each value occurs in the input.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "minlength", + "type": "int64", + "description": "Minimum value to count. Can be set to -1 for no minimum.", + "minimum": -1, + "default": -1 + }, + { + "name": "maxlength", + "type": "int64", + "description": "Maximum value to count. Can be set to -1 for no maximum.", + "minimum": -1, + "default": -1 + }, + { + "name": "binary_output", + "type": "boolean", + "description": "Whether to output the number of occurrences of each value or 1." + }, + { + "name": "output_type", + "type": "type", + "description": "Dtype of the output values tensor. Must be one of the following: `int32`, `int64`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "splits", + "description": "Tensor containing the row splits of the ragged tensor to count.", + "type": 9 + }, + { + "name": "values", + "description": "Tensor containing values of the sparse tensor to count.", + "typeAttr": "T" + }, + { + "name": "weights", + "description": "A Tensor of the same shape as indices containing per-index weight values.\nMay also be the empty tensor if no weights are used.", + "typeAttr": "output_type" + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "Indices tensor for the resulting sparse tensor object.", + "type": 9 + }, + { + "name": "output_values", + "description": "Values tensor for the resulting sparse tensor object.", + "typeAttr": "output_type" + }, + { + "name": "output_dense_shape", + "description": "Shape tensor for the resulting sparse tensor object.", + "type": 9 + } + ] + }, + { + "name": "RaggedRange", + "summary": "Returns a `RaggedTensor` containing the specified sequences of numbers.", + "description": "The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.\nThe vector inputs must all have the same size. 
Scalar inputs are broadcast\nto match the size of the vector inputs.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float32`, `float64`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsplits", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "starts", + "description": "The starts of each range.", + "typeAttr": "T" + }, + { + "name": "limits", + "description": "The limits of each range.", + "typeAttr": "T" + }, + { + "name": "deltas", + "description": "The deltas of each range.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "rt_nested_splits", + "description": "The `row_splits` for the returned `RaggedTensor`.", + "typeAttr": "Tsplits" + }, + { + "name": "rt_dense_values", + "description": "The `flat_values` for the returned `RaggedTensor`.", + "typeAttr": "T" + } + ] + }, + { + "name": "RaggedTensorFromVariant", + "summary": "Decodes a `variant` Tensor into a `RaggedTensor`.", + "description": "Decodes the given `variant` Tensor and returns a `RaggedTensor`. The input\ncould be a scalar, meaning it encodes a single `RaggedTensor` with ragged_rank\n`output_ragged_rank`. It could also have an arbitrary rank, in which case each\nelement is decoded into a `RaggedTensor` with ragged_rank `input_ragged_rank`\nand these are then stacked according to the input shape to output a single\n`RaggedTensor` with ragged_rank `output_ragged_rank`. Each `variant` element in\nthe input Tensor is decoded by retrieving from the element a 1-D `variant`\nTensor with `input_ragged_rank + 1` Tensors, corresponding to the splits and\nvalues of the decoded `RaggedTensor`. If `input_ragged_rank` is -1, then it is\ninferred as `output_ragged_rank` - `rank(encoded_ragged)`. See\n`RaggedTensorToVariant` for the corresponding encoding logic.\n", + "attributes": [ + { + "name": "input_ragged_rank", + "type": "int64", + "description": "The ragged rank of each encoded `RaggedTensor` component in the input. If set to\n-1, this is inferred as `output_ragged_rank` - `rank(encoded_ragged)`", + "minimum": -1 + }, + { + "name": "output_ragged_rank", + "type": "int64", + "description": "The expected ragged rank of the output `RaggedTensor`. 
The following must hold:\n`output_ragged_rank = rank(encoded_ragged) + input_ragged_rank`.", + "minimum": 0 + }, + { + "name": "Tvalues", + "type": "type" + }, + { + "name": "Tsplits", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "encoded_ragged", + "description": "A `variant` Tensor containing encoded `RaggedTensor`s.", + "type": 21 + } + ], + "outputs": [ + { + "name": "output_nested_splits", + "description": "A list of one or more Tensors representing the splits of the output\n`RaggedTensor`.", + "numberAttr": "output_ragged_rank", + "typeAttr": "Tsplits" + }, + { + "name": "output_dense_values", + "description": "A Tensor representing the values of the output `RaggedTensor`.", + "typeAttr": "Tvalues" + } + ] + }, + { + "name": "RaggedTensorToSparse", + "summary": "Converts a `RaggedTensor` into a `SparseTensor` with the same values.", + "description": "input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)\noutput=SparseTensor(indices=sparse_indices, values=sparse_values,\n dense_shape=sparse_dense_shape)", + "attributes": [ + { + "name": "RAGGED_RANK", + "type": "int64", + "description": "The ragged rank of the input RaggedTensor. `rt_nested_splits` should contain\nthis number of ragged-splits tensors. This value should equal\n`input.ragged_rank`.", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "Tsplits", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "rt_nested_splits", + "description": "The `row_splits` for the `RaggedTensor`.", + "numberAttr": "RAGGED_RANK", + "typeAttr": "Tsplits" + }, + { + "name": "rt_dense_values", + "description": "The `flat_values` for the `RaggedTensor`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "sparse_indices", + "description": "The indices for the `SparseTensor`.", + "type": 9 + }, + { + "name": "sparse_values", + "description": "The values of the `SparseTensor`.", + "typeAttr": "T" + }, + { + "name": "sparse_dense_shape", + "description": "`sparse_dense_shape` is a tight bounding box of the input `RaggedTensor`.", + "type": 9 + } + ] + }, + { + "name": "RaggedTensorToTensor", + "summary": "Create a dense tensor from a ragged tensor, possibly altering its shape.", + "description": "The `ragged_to_dense` op creates a dense tensor from a list of row partition\ntensors, a value vector, and default values. If the shape is unspecified, the\nminimal shape required to contain all the elements in the ragged tensor (the\nnatural shape) will be used. If some dimensions are left unspecified, then the\nsize of the natural shape is used in that dimension.\n\nThe default_value will be broadcast to the output shape. After that, the values\nfrom the ragged tensor overwrite the default values. 
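For `RaggedTensorToTensor`, a minimal sketch of the default-value broadcasting described above, assuming the public `tf.RaggedTensor.to_tensor` wrapper:

```python
import tensorflow as tf

rt = tf.ragged.constant([[9, 8, 7], [], [6, 5], [4]])
# default_value (-1, fewer dims than the values) is broadcast to the
# output shape, then overwritten by the ragged values.
dense = rt.to_tensor(default_value=-1, shape=[4, 4])
print(dense.numpy())
# [[ 9  8  7 -1]
#  [-1 -1 -1 -1]
#  [ 6  5 -1 -1]
#  [ 4 -1 -1 -1]]
```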
Note that the default_value\nmust have fewer dimensions than the value.\n\nThe row partition tensors are in the order of the dimensions.\nAt present, the types can be:\n* \"ROW_SPLITS\": the row_splits tensor from the ragged tensor.\n* \"VALUE_ROWIDS\": the value_rowids tensor from the ragged tensor.\n* \"FIRST_DIM_SIZE\": if value_rowids is used for the first dimension, then it\n is preceded by \"FIRST_DIM_SIZE\".", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindex", + "type": "type", + "description": "Must be one of the following: `int64`, `int32`." + }, + { + "name": "Tshape", + "type": "type", + "description": "Must be one of the following: `int64`, `int32`." + }, + { + "name": "num_row_partition_tensors", + "type": "int64", + "minimum": 1 + }, + { + "name": "row_partition_types", + "type": "string[]", + "description": "The types of the row partition tensors. At present, these can be:\n* \"ROW_SPLITS\": the row_splits tensor from the ragged tensor.\n* \"VALUE_ROWIDS\": the value_rowids tensor from the ragged tensor.\n* \"FIRST_DIM_SIZE\": if value_rowids is used for the first dimension, then it\n is preceded by \"FIRST_DIM_SIZE\".\nThe tensors are in the order of the dimensions." + } + ], + "inputs": [ + { + "name": "shape", + "description": "The desired shape of the output tensor. If left unspecified (empty),\nthe minimal shape required to contain all the elements in the ragged tensor\n(the natural shape) will be used. If some dimensions are left unspecified, then\nthe size of the natural shape is used in that dimension.\n\nNote that dense dimensions cannot be modified by the shape argument. Trying to\nchange the size of a dense dimension will cause the op to fail.\nExamples:\nnatural shape: [4, 5, 6]\nshape: -1\noutput shape: [4, 5, 6]\n\nnatural shape: [4, 5, 6]\nshape: [3, -1, 2]\noutput shape: [3, 5, 2]\n\nnatural shape: [4, 5, 6]\nshape: [3, 7, 2]\noutput shape: [3, 7, 2]\n", + "typeAttr": "Tshape" + }, + { + "name": "values", + "description": "A 1D tensor representing the values of the ragged tensor.", + "typeAttr": "T" + }, + { + "name": "default_value", + "description": "The default_value when the shape is larger than the ragged tensor. The\ndefault_value is broadcast until it is the shape of the output tensor, and\nthen overwritten by values in the ragged tensor. The default value must be\ncompatible with this broadcast operation, and must have fewer dimensions than\nthe value tensor.", + "typeAttr": "T" + }, + { + "name": "row_partition_tensors", + "numberAttr": "num_row_partition_tensors", + "typeAttr": "Tindex" + } + ], + "outputs": [ + { + "name": "result", + "description": "The resulting dense tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "RaggedTensorToVariant", + "summary": "Encodes a `RaggedTensor` into a `variant` Tensor.", + "description": "\nEncodes the given `RaggedTensor` and returns a `variant` Tensor. If\n`batched_input` is True, then input `RaggedTensor` is unbatched along the\nzero-th dimension, each component `RaggedTensor` is encoded into a scalar\n`variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.\nIf `batched_input` is False, then the input `RaggedTensor` is encoded as is and\na scalar `variant` Tensor is returned. A `RaggedTensor` is encoded by first\ncreating a 1-D `variant` Tensor with `ragged_rank + 1` elements, containing the\nsplits and values Tensors of the `RaggedTensor`. Then the 1-D `variant` Tensor\nis wrapped in a scalar `variant` Tensor. 
See `RaggedTensorFromVariant` for the\ncorresponding decoding logic.\n", + "attributes": [ + { + "name": "RAGGED_RANK", + "type": "int64", + "minimum": 0 + }, + { + "name": "Tvalues", + "type": "type" + }, + { + "name": "Tsplits", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + }, + { + "name": "batched_input", + "type": "boolean", + "description": "A `bool` denoting whether the input is a batched `RaggedTensor`." + } + ], + "inputs": [ + { + "name": "rt_nested_splits", + "description": "A list of one or more Tensors representing the splits of the input\n`RaggedTensor`.", + "numberAttr": "RAGGED_RANK", + "typeAttr": "Tsplits" + }, + { + "name": "rt_dense_values", + "description": "A Tensor representing the values of the input `RaggedTensor`.", + "typeAttr": "Tvalues" + } + ], + "outputs": [ + { + "name": "encoded_ragged", + "description": "A `variant` Tensor containing the encoded `RaggedTensor`.", + "type": 21 + } + ] + }, + { + "name": "RaggedTensorToVariantGradient", + "summary": "Helper used to compute the gradient for `RaggedTensorToVariant`.", + "description": "Computes the gradient for the dense_values input to the RaggedTensorToVariant\nop, given the variant-encoded ragged gradients of the outputs, along with\nthe outer row-splits and the shape of the dense-values that were provided as\ninputs to the RaggedTensorToVariant op.", + "attributes": [ + { + "name": "Tvalues", + "type": "type" + }, + { + "name": "Tsplits", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "encoded_ragged_grad", + "description": "A `variant` Tensor containing encoded `RaggedTensor` gradients.", + "type": 21 + }, + { + "name": "row_splits", + "description": "Outermost row-splits that were used as input to the RaggedTensorToVariant op.", + "typeAttr": "Tsplits" + }, + { + "name": "dense_values_shape", + "description": "Shape of the dense_values that was used as an input to the\nRaggedTensorToVariant op.", + "type": 3 + } + ], + "outputs": [ + { + "name": "dense_values_grad", + "description": "Gradient for the dense_values of the RaggedTensorToVariant op.", + "typeAttr": "Tvalues" + } + ] + }, + { + "name": "RandomCrop", + "summary": "Randomly crop `image`.", + "description": "`size` is a 1-D int64 tensor with 2 elements representing the crop height and\nwidth. The values must be non-negative.\n\nThis Op picks a random location in `image` and crops a `height` by `width`\nrectangle from that location. The random location is picked so the cropped\narea will fit inside the original image.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `int8`, `int16`, `int32`, `int64`, `float32`, `float64`." + }, + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. 
Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + } + ], + "inputs": [ + { + "name": "image", + "description": "3-D of shape `[height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "size", + "description": "1-D of length 2 containing: `crop_height`, `crop_width`.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output", + "description": "3-D of shape `[crop_height, crop_width, channels]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "RandomDataset", + "summary": "Creates a Dataset that returns pseudorandom numbers.", + "description": "Creates a Dataset that returns a stream of uniformly distributed\npseudorandom 64-bit signed integers.\n\nIn the TensorFlow Python API, you can instantiate this dataset via the\nclass `tf.data.experimental.RandomDataset`.\n\nInstances of this dataset are also created as a result of the\n`hoist_random_uniform` static optimization. Whether this optimization is\nperformed is determined by the `experimental_optimization.hoist_random_uniform`\noption of `tf.data.Options`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "seed", + "description": "A scalar seed for the random number generator. If either seed or\nseed2 is set to be non-zero, the random number generator is seeded\nby the given seed. Otherwise, a random seed is used.", + "type": 9 + }, + { + "name": "seed2", + "description": "A second scalar seed to avoid seed collision.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "RandomDatasetV2", + "summary": "Creates a Dataset that returns pseudorandom numbers.", + "description": "Creates a Dataset that returns a stream of uniformly distributed\npseudorandom 64-bit signed integers. It accepts a boolean attribute that\ndetermines if the random number generators are re-applied at each epoch. The\ndefault value is True which means that the seeds are applied and the same\nsequence of random numbers are generated at each epoch. If set to False, the\nseeds are not re-applied and a different sequence of random numbers are\ngenerated at each epoch.\n\nIn the TensorFlow Python API, you can instantiate this dataset via the\nclass `tf.data.experimental.RandomDatasetV2`.", + "attributes": [ + { + "name": "rerandomize_each_iteration", + "type": "boolean", + "description": "A boolean attribute to rerandomize the sequence of random numbers generated\nat each epoch.", + "default": false + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "seed", + "description": "A scalar seed for the random number generator. If either seed or\nseed2 is set to be non-zero, the random number generator is seeded\nby the given seed. 
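For the `RandomDataset` ops, a minimal sketch of the seeding contract, assuming the public `tf.data.Dataset.random` entry point available in recent TF releases:

```python
import tensorflow as tf

# With a fixed non-zero seed the int64 stream is reproducible;
# omitting the seed picks a random one.
ds = tf.data.Dataset.random(seed=42).take(3)
print(list(ds.as_numpy_iterator()))  # three deterministic int64 values
```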
Otherwise, a random seed is used.", + "type": 9 + }, + { + "name": "seed2", + "description": "A second scalar seed to avoid seed collision.", + "type": 9 + }, + { + "name": "seed_generator", + "description": "A resource for the random number seed generator.", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "RandomGamma", + "summary": "Outputs random values from the Gamma distribution(s) described by alpha.", + "description": "This op uses the algorithm by Marsaglia et al. to acquire samples via\ntransformation-rejection from pairs of uniform and normal random variables.\nSee http://dl.acm.org/citation.cfm?id=358414", + "attributes": [ + { + "name": "seed", + "type": "int64", + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "S", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "shape", + "description": "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in alpha.", + "typeAttr": "S" + }, + { + "name": "alpha", + "description": "A tensor in which each scalar is a \"shape\" parameter describing the\nassociated gamma distribution.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor with shape `shape + shape(alpha)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.", + "typeAttr": "T" + } + ] + }, + { + "name": "RandomGammaGrad", + "summary": "Computes the derivative of a Gamma random sample w.r.t. `alpha`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "alpha", + "typeAttr": "T" + }, + { + "name": "sample", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RandomIndexShuffle", + "summary": "Outputs the position of `value` in a permutation of [0, ..., max_index].", + "description": "Output values are a bijection of the `index` for any combination of `seed` and `max_index`.\n\nIf multiple inputs are vectors (matrix in case of seed) then the size of the\nfirst dimension must match.\n\nThe outputs are deterministic.", + "attributes": [ + { + "name": "rounds", + "type": "int64", + "description": "The number of rounds to use in the block cipher.", + "default": 4 + }, + { + "name": "dtype", + "type": "type", + "description": "The dtype of the input and output. Must be one of the following: `int32`, `uint32`, `int64`, `uint64`." + }, + { + "name": "Tseed", + "type": "type", + "description": "The type of `seed`. Must be one of the following: `int32`, `uint32`, `int64`, `uint64`." + } + ], + "inputs": [ + { + "name": "index", + "description": "A scalar tensor or a vector of dtype `dtype`. The index (or indices) to be shuffled. Must be within [0, max_index].", + "typeAttr": "dtype" + }, + { + "name": "seed", + "description": "A tensor of dtype `Tseed` and shape [3] or [n, 3]. 
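For `RandomIndexShuffle`, a quick check of the bijection property claimed above, assuming the `tf.random.experimental.index_shuffle` wrapper (seed of shape `[3]`) is available:

```python
import tensorflow as tf

seed = tf.constant([1, 2, 3], dtype=tf.int64)  # shape [3]
outs = [int(tf.random.experimental.index_shuffle(i, seed, max_index=9))
        for i in range(10)]
# Deterministic and a bijection: the outputs are a permutation of 0..9.
print(sorted(outs) == list(range(10)))  # True
```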
The random seed.", + "typeAttr": "Tseed" + }, + { + "name": "max_index", + "description": "A scalar tensor or vector of dtype `dtype`. The upper bound(s) of the interval (inclusive).", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "A scalar tensor of dtype `dtype`, within [0, max_index]. The randomly shuffled index.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "RandomPoisson", + "summary": "Use RandomPoissonV2 instead.", + "attributes": [ + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + }, + { + "name": "S", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "dtype", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "shape", + "typeAttr": "S" + }, + { + "name": "rate", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ] + }, + { + "name": "RandomPoissonV2", + "summary": "Outputs random values from the Poisson distribution(s) described by rate.", + "description": "This op uses two algorithms, depending on rate. If rate >= 10, then\nthe algorithm by Hormann is used to acquire samples via\ntransformation-rejection.\nSee http://www.sciencedirect.com/science/article/pii/0167668793909974.\n\nOtherwise, Knuth's algorithm is used to acquire samples via multiplying uniform\nrandom variables.\nSee Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer\nProgramming, Volume 2. Addison Wesley", + "attributes": [ + { + "name": "seed", + "type": "int64", + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "S", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "R", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 2 + } + }, + { + "name": "dtype", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in rate.", + "typeAttr": "S" + }, + { + "name": "rate", + "description": "A tensor in which each scalar is a \"rate\" parameter describing the\nassociated poisson distribution.", + "typeAttr": "R" + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor with shape `shape + shape(rate)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`rate[i0, i1, ...iN]`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "RandomShuffle", + "summary": "Randomly shuffles a tensor along its first dimension.", + "description": " The tensor is shuffled along dimension 0, such that each `value[j]` is mapped\n to one and only one `output[i]`. 
For example, a mapping that might occur for a\n 3x2 tensor is:\n\n```\n[[1, 2], [[5, 6],\n [3, 4], ==> [1, 2],\n [5, 6]] [3, 4]]\n```", + "attributes": [ + { + "name": "seed", + "type": "int64", + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "value", + "description": "The tensor to be shuffled.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor of same shape and type as `value`, shuffled along its first\ndimension.", + "typeAttr": "T" + } + ] + }, + { + "name": "RandomShuffleQueue", + "summary": "A queue that randomizes the order of elements.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a value.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0, + "default": [] + }, + { + "name": "capacity", + "type": "int64", + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "default": -1 + }, + { + "name": "min_after_dequeue", + "type": "int64", + "description": "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. This\nensures a minimum level of mixing of elements.", + "default": 0 + }, + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the queue.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "RandomShuffleQueueV2", + "summary": "A queue that randomizes the order of elements.", + "attributes": [ + { + "name": "component_types", + "type": "type[]", + "description": "The type of each component in a value.", + "minimum": 1 + }, + { + "name": "shapes", + "type": "shape[]", + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. 
If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0, + "default": [] + }, + { + "name": "capacity", + "type": "int64", + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "default": -1 + }, + { + "name": "min_after_dequeue", + "type": "int64", + "description": "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. This\nensures a minimum level of mixing of elements.", + "default": 0 + }, + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the queue.", + "type": 20 + } + ] + }, + { + "name": "RandomStandardNormal", + "summary": "Outputs random values from a normal distribution.", + "description": "The generated values will have mean 0 and standard deviation 1.", + "attributes": [ + { + "name": "seed", + "type": "int64", + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor of the specified shape filled with random normal values.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "RandomUniform", + "summary": "Outputs random values from a uniform distribution.", + "description": "The generated values follow a uniform distribution in the range `[0, 1)`. The\nlower bound 0 is included in the range, while the upper bound 1 is excluded.", + "attributes": [ + { + "name": "seed", + "type": "int64", + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
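For the shuffle queues, a minimal sketch of how `min_after_dequeue` keeps elements mixed, assuming the `tf.queue.RandomShuffleQueue` wrapper in eager mode:

```python
import tensorflow as tf

# Dequeue keeps at least min_after_dequeue elements buffered, so each
# dequeued element is drawn from a reasonably large random pool.
q = tf.queue.RandomShuffleQueue(
    capacity=100, min_after_dequeue=10, dtypes=[tf.int32], shapes=[[]], seed=7)
q.enqueue_many([tf.range(50)])
print(q.dequeue_many(5).numpy())  # 5 elements in randomized order
```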
+ } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor of the specified shape filled with uniform random values.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "RandomUniformInt", + "summary": "Outputs random integers from a uniform distribution.", + "description": "The generated values are uniform integers in the range `[minval, maxval)`.\nThe lower bound `minval` is included in the range, while the upper bound\n`maxval` is excluded.\n\nThe random integers are slightly biased unless `maxval - minval` is an exact\npower of two. The bias is small for values of `maxval - minval` significantly\nsmaller than the range of the output (either `2^32` or `2^64`).", + "attributes": [ + { + "name": "seed", + "type": "int64", + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "Tout", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "T" + }, + { + "name": "minval", + "description": "0-D. Inclusive lower bound on the generated integers.", + "typeAttr": "Tout" + }, + { + "name": "maxval", + "description": "0-D. Exclusive upper bound on the generated integers.", + "typeAttr": "Tout" + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor of the specified shape filled with uniform random integers.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "Range", + "summary": "Creates a sequence of numbers.", + "description": "This operation creates a sequence of numbers that begins at `start` and\nextends by increments of `delta` up to but not including `limit`.\n\nFor example:\n\n```\n# 'start' is 3\n# 'limit' is 18\n# 'delta' is 3\ntf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]\n```", + "attributes": [ + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `uint16`, `uint32`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "start", + "description": "0-D (scalar). First entry in the sequence.", + "typeAttr": "Tidx" + }, + { + "name": "limit", + "description": "0-D (scalar). Upper limit of sequence, exclusive.", + "typeAttr": "Tidx" + }, + { + "name": "delta", + "description": "0-D (scalar). Optional. Default is 1. Number that increments `start`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "1-D.", + "typeAttr": "Tidx" + } + ] + }, + { + "name": "RangeDataset", + "summary": "Creates a dataset with a range of values. 
Corresponds to python's xrange.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + }, + { + "name": "replicate_on_split", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "start", + "description": "corresponds to start in python's xrange().", + "type": 9 + }, + { + "name": "stop", + "description": "corresponds to stop in python's xrange().", + "type": 9 + }, + { + "name": "step", + "description": "corresponds to step in python's xrange().", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Rank", + "summary": "Returns the rank of a tensor.", + "description": "This operation returns an integer representing the rank of `input`.\n\nFor example:\n\n```\n# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\n# shape of tensor 't' is [2, 2, 3]\nrank(t) ==> 3\n```\n\n**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank\nof a tensor is the number of indices required to uniquely select each element\nof the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\"", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": 3 + } + ] + }, + { + "name": "ReadFile", + "summary": "Reads and outputs the entire contents of the input filename.", + "inputs": [ + { + "name": "filename", + "type": 7 + } + ], + "outputs": [ + { + "name": "contents", + "type": 7 + } + ] + }, + { + "name": "ReadVariableOp", + "summary": "Reads the value of a variable.", + "description": "The tensor returned by this operation is immutable.\n\nThe value returned by this operation is guaranteed to be influenced by all the\nwrites on which this operation depends directly or indirectly, and to not be\ninfluenced by any of the writes which depend directly or indirectly on this\noperation.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "the dtype of the value." + } + ], + "inputs": [ + { + "name": "resource", + "description": "handle to the resource in which to store the variable.", + "type": 20 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ReadVariableXlaSplitND", + "summary": "Splits resource variable input tensor across all dimensions.", + "description": "An op which splits the resource variable input tensor based on the given\nnum_splits attribute, pads slices optionally, and returned the slices. Slices\nare returned in row-major order.\n\nThis op may be generated via the TPU bridge.\n\nFor example, with `input` tensor:\n```\n[[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]]\n```\n`num_splits`:\n```\n[2, 2]\n```\nand `paddings`:\n```\n[1, 1]\n```\nthe expected `outputs` is:\n```\n[[0, 1],\n [3, 4]]\n[[2, 0],\n [5, 0]]\n[[6, 7],\n [0, 0]]\n[[8, 0],\n [0, 0]]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "num_splits", + "type": "int64[]", + "description": "Number of ways to split per dimension. Shape dimensions must be evenly\ndivisible." + }, + { + "name": "paddings", + "type": "int64[]", + "description": "Optional list of right paddings per dimension of input tensor to apply before\nsplitting. 
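For `ReadFile`, a minimal round trip, assuming the `tf.io.read_file`/`tf.io.write_file` wrappers (the file path is illustrative):

```python
import tensorflow as tf

# ReadFile returns the entire file contents as a scalar string tensor.
tf.io.write_file('/tmp/readfile_demo.txt', 'hello')
print(tf.io.read_file('/tmp/readfile_demo.txt').numpy())  # b'hello'
```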
This can be used to make a dimension evenly divisible.", + "default": [] + } + ], + "inputs": [ + { + "name": "resource", + "description": "Resource variable of input tensor to split across all dimensions.", + "type": 20 + } + ], + "outputs": [ + { + "name": "outputs", + "description": "Output slices based on input and num_splits defined, in row-major order.", + "numberAttr": "N", + "typeAttr": "T" + } + ] + }, + { + "name": "Real", + "summary": "Returns the real part of a complex number.", + "description": "Given a tensor `input` of complex numbers, this operation returns a tensor of\ntype `float` that is the real part of each element in `input`. All elements in\n`input` must be complex numbers of the form \\\\(a + bj\\\\), where *a* is the real\npart returned by this operation and *b* is the imaginary part.\n\nFor example:\n\n```\n# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.real(input) ==> [-2.25, 3.25]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + }, + { + "name": "Tout", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "Tout" + } + ] + }, + { + "name": "RealDiv", + "summary": "Returns x / y element-wise for real types.", + "description": "If `x` and `y` are reals, this will return the floating-point division.\n\n*NOTE*: `Div` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "RebatchDataset", + "summary": "Creates a dataset that changes the batch size.", + "description": "Creates a dataset that changes the batch size of the dataset to current batch\nsize // num_workers.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "use_fallback", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "num_replicas", + "description": "A scalar representing the number of replicas to distribute this batch across. As\na result of this transformation the current batch size would end up being\ndivided by this parameter.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "RebatchDatasetV2", + "summary": "Creates a dataset that changes the batch size.", + "description": "Creates a dataset that rebatches elements from `input_dataset` into new batch\nsizes.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "batch_sizes", + "description": "A vector of integers representing the size of batches to produce. These values\nare cycled through in order.", + "type": 9 + }, + { + "name": "drop_remainder", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Reciprocal", + "summary": "Computes the reciprocal of x element-wise.", + "description": "I.e., \\\\(y = 1 / x\\\\).", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`." 
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "ReciprocalGrad", + "summary": "Computes the gradient for the inverse of `x` wrt its input.", + "description": "Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`\nis the corresponding input gradient.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "RecordInput", + "summary": "Emits randomized records.", + "attributes": [ + { + "name": "file_pattern", + "type": "string", + "description": "Glob pattern for the data files." + }, + { + "name": "file_random_seed", + "type": "int64", + "description": "Random seeds used to produce randomized records.", + "default": 301 + }, + { + "name": "file_shuffle_shift_ratio", + "type": "float32", + "description": "Shifts the list of files after the list is randomly\nshuffled.", + "default": 0.0 + }, + { + "name": "file_buffer_size", + "type": "int64", + "description": "The randomization shuffling buffer.", + "default": 10000 + }, + { + "name": "file_parallelism", + "type": "int64", + "description": "How many sstables are opened and concurrently iterated over.", + "default": 16 + }, + { + "name": "batch_size", + "type": "int64", + "description": "The batch size.", + "default": 32 + }, + { + "name": "compression_type", + "type": "string", + "description": "The type of compression for the file. Currently ZLIB and\nGZIP are supported. Defaults to none.", + "default": "" + } + ], + "outputs": [ + { + "name": "records", + "description": "A tensor of shape [batch_size].", + "type": 7 + } + ] + }, + { + "name": "Recv", + "summary": "Receives the named tensor from send_device on recv_device.", + "attributes": [ + { + "name": "tensor_type", + "type": "type" + }, + { + "name": "tensor_name", + "type": "string", + "description": "The name of the tensor to receive." + }, + { + "name": "send_device", + "type": "string", + "description": "The name of the device sending the tensor." + }, + { + "name": "send_device_incarnation", + "type": "int64", + "description": "The current incarnation of send_device." + }, + { + "name": "recv_device", + "type": "string", + "description": "The name of the device receiving the tensor." + }, + { + "name": "client_terminated", + "type": "boolean", + "description": "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller.", + "default": false + } + ], + "outputs": [ + { + "name": "tensor", + "description": "The tensor to receive.", + "typeAttr": "tensor_type" + } + ] + }, + { + "name": "RecvTPUEmbeddingActivations", + "summary": "An op that receives embedding activations on the TPU.", + "description": "The TPU system performs the embedding lookups and aggregations specified by\nthe arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The\nresults of these aggregations are visible to the Tensorflow Graph as the\noutputs of a RecvTPUEmbeddingActivations op. This op returns a list containing\none Tensor of activations per table specified in the model. 
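A quick autodiff check of the `ReciprocalGrad` formula above (`grad = -dy * y*y` with `y = 1/x`), here via `tf.GradientTape` rather than the raw op:

```python
import tensorflow as tf

x = tf.constant([0.5, 2.0, 4.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.math.reciprocal(x)
# d(1/x)/dx = -1/x**2, i.e. -dy * y * y with dy = 1.
print(tape.gradient(y, x).numpy())  # [-4.0, -0.25, -0.0625]
```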
There can be at\nmost one RecvTPUEmbeddingActivations op in the TPU graph.", + "attributes": [ + { + "name": "num_outputs", + "type": "int64", + "description": "The number of output activation tensors, equal to the number of\nembedding tables in the model.", + "minimum": 1 + }, + { + "name": "config", + "type": "string", + "description": "Serialized TPUEmbeddingConfiguration proto." + } + ], + "outputs": [ + { + "name": "outputs", + "description": "A TensorList of embedding activations containing one Tensor per\nembedding table in the model.", + "numberAttr": "num_outputs", + "type": 1 + } + ] + }, + { + "name": "ReduceDataset", + "summary": "Reduces the input dataset to a singleton using a reduce function.", + "attributes": [ + { + "name": "f", + "type": "function", + "description": "A function that maps `(old_state, input_element)` to `new_state`. It must take\ntwo arguments and return a nested structures of tensors. The structure of\n`new_state` must match the structure of `initial_state`." + }, + { + "name": "Tstate", + "type": "type[]", + "minimum": 1 + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "use_inter_op_parallelism", + "type": "boolean", + "default": true + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "initial_state", + "description": "A nested structure of tensors, representing the initial state of the\ntransformation.", + "typeListAttr": "Tstate" + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "components", + "typeListAttr": "output_types" + } + ] + }, + { + "name": "ReduceJoin", + "summary": "Joins a string Tensor across the given dimensions.", + "description": "Computes the string join across dimensions in the given string Tensor of shape\n`[\\\\(d_0, d_1, ..., d_{n-1}\\\\)]`. Returns a new Tensor created by joining the input\nstrings with the given separator (default: empty string). Negative indices are\ncounted backwards from the end, with `-1` being equivalent to `n - 1`. If\nindices are not specified, joins across all dimensions beginning from `n - 1`\nthrough `0`.\n\nFor example:\n\n```python\n# tensor `a` is [[\"a\", \"b\"], [\"c\", \"d\"]]\ntf.reduce_join(a, 0) ==> [\"ac\", \"bd\"]\ntf.reduce_join(a, 1) ==> [\"ab\", \"cd\"]\ntf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> [\"ac\", \"bd\"]\ntf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> [\"ab\", \"cd\"]\ntf.reduce_join(a, 0, keep_dims=True) ==> [[\"ac\", \"bd\"]]\ntf.reduce_join(a, 1, keep_dims=True) ==> [[\"ab\"], [\"cd\"]]\ntf.reduce_join(a, 0, separator=\".\") ==> [\"a.c\", \"b.d\"]\ntf.reduce_join(a, [0, 1]) ==> \"acbd\"\ntf.reduce_join(a, [1, 0]) ==> \"abcd\"\ntf.reduce_join(a, []) ==> [[\"a\", \"b\"], [\"c\", \"d\"]]\ntf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> \"abcd\"\n```", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If `True`, retain reduced dimensions with length `1`.", + "default": false + }, + { + "name": "separator", + "type": "string", + "description": "The separator to use when joining.", + "default": "" + } + ], + "inputs": [ + { + "name": "inputs", + "description": "The input to be joined. 
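For `ReduceDataset`, a minimal sketch via the public `tf.data.Dataset.reduce` wrapper; `f` maps `(old_state, element)` to `new_state` and the state structure stays fixed:

```python
import tensorflow as tf

ds = tf.data.Dataset.range(5)  # yields int64: 0..4
# initial_state must match the state dtype/structure returned by f.
total = ds.reduce(tf.constant(0, tf.int64), lambda state, x: state + x)
print(int(total))  # 10
```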
All reduced indices must have non-zero size.", + "type": 7 + }, + { + "name": "reduction_indices", + "description": "The dimensions to reduce over. Dimensions are reduced in the\norder specified. Omitting `reduction_indices` is equivalent to passing\n`[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "Has shape equal to that of the input with reduced dimensions removed or\nset to `1` depending on `keep_dims`.", + "type": 7 + } + ] + }, + { + "name": "RefEnter", + "summary": "Creates or finds a child frame, and makes `data` available to the child frame.", + "description": "The unique `frame_name` is used by the `Executor` to identify frames. If\n`is_constant` is true, `output` is a constant in the child frame; otherwise\nit may be changed in the child frame. At most `parallel_iterations` iterations\nare run in parallel in the child frame.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "frame_name", + "type": "string", + "description": "The name of the child frame." + }, + { + "name": "is_constant", + "type": "boolean", + "description": "If true, the output is constant within the child frame.", + "default": false + }, + { + "name": "parallel_iterations", + "type": "int64", + "description": "The number of iterations allowed to run in parallel.", + "default": 10 + } + ], + "inputs": [ + { + "name": "data", + "description": "The tensor to be made available to the child frame.", + "typeAttr": "T", + "isRef": true + } + ], + "outputs": [ + { + "name": "output", + "description": "The same tensor as `data`.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "RefExit", + "summary": "Exits the current frame to its parent frame.", + "description": "Exit makes its input `data` available to the parent frame.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "data", + "description": "The tensor to be made available to the parent frame.", + "typeAttr": "T", + "isRef": true + } + ], + "outputs": [ + { + "name": "output", + "description": "The same tensor as `data`.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "RefIdentity", + "summary": "Return the same ref tensor as the input ref tensor.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T", + "isRef": true + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "RefMerge", + "summary": "Forwards the value of an available tensor from `inputs` to `output`.", + "description": "`Merge` waits for at least one of the tensors in `inputs` to become available.\nIt is usually combined with `Switch` to implement branching.\n\n`Merge` forwards the first tensor to become available to `output`, and sets\n`value_index` to its index in `inputs`.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "inputs", + "description": "The input tensors, exactly one of which will become available.", + "numberAttr": "N", + "typeAttr": "T", + "isRef": true + } + ], + "outputs": [ + { + "name": "output", + "description": "Will be set to the available input tensor.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "value_index", + "description": "The index of the chosen input tensor in `inputs`.", + "type": 3 + } + ] + }, + { + "name": "RefNextIteration", 
"summary": "Makes its input available to the next iteration.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "data", + "description": "The tensor to be made available to the next iteration.", + "typeAttr": "T", + "isRef": true + } + ], + "outputs": [ + { + "name": "output", + "description": "The same tensor as `data`.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "RefSelect", + "summary": "Forwards the `index`th element of `inputs` to `output`.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "index", + "description": "A scalar that determines the input that gets selected.", + "type": 3 + }, + { + "name": "inputs", + "description": "A list of ref tensors, one of which will be forwarded to `output`.", + "numberAttr": "N", + "typeAttr": "T", + "isRef": true + } + ], + "outputs": [ + { + "name": "output", + "description": "The forwarded tensor.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "RefSwitch", + "summary": "Forwards the ref tensor `data` to the output port determined by `pred`.", + "description": "If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,\nthe data goes to `output_false`.\n\nSee also `Switch` and `Merge`.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "data", + "description": "The ref tensor to be forwarded to the appropriate output.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "pred", + "description": "A scalar that specifies which output port will receive data.", + "type": 10 + } + ], + "outputs": [ + { + "name": "output_false", + "description": "If `pred` is false, data will be forwarded to this output.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "output_true", + "description": "If `pred` is true, data will be forwarded to this output.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "RegexFullMatch", + "summary": "Check if the input matches the regex pattern.", + "description": "The input is a string tensor of any shape. 
The pattern is a scalar\nstring tensor which is applied to every element of the input tensor.\nThe boolean values (True or False) of the output tensor indicate\nif the input matches the regex pattern provided.\n\nThe pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)\n\nExamples:\n\n>>> tf.strings.regex_full_match([\"TF lib\", \"lib TF\"], \".*lib$\")\n<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>\n>>> tf.strings.regex_full_match([\"TF lib\", \"lib TF\"], \".*TF$\")\n<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>\n", + "inputs": [ + { + "name": "input", + "description": "A string tensor of the text to be processed.", + "type": 7 + }, + { + "name": "pattern", + "description": "A scalar string tensor containing the regular expression to match the input.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "A bool tensor with the same shape as `input`.", + "type": 10 + } + ] + }, + { + "name": "RegexReplace", + "summary": "Replaces matches of the `pattern` regular expression in `input` with the\nreplacement string provided in `rewrite`.", + "description": "It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)", + "attributes": [ + { + "name": "replace_global", + "type": "boolean", + "description": "If True, the replacement is global (that is, all matches of the `pattern` regular\nexpression in each input string are rewritten), otherwise the `rewrite`\nsubstitution is only made for the first `pattern` match.", + "default": true + } + ], + "inputs": [ + { + "name": "input", + "description": "The text to be processed.", + "type": 7 + }, + { + "name": "pattern", + "description": "The regular expression to be matched in the `input` strings.", + "type": 7 + }, + { + "name": "rewrite", + "description": "The rewrite string to be substituted for the `pattern` expression where it is\nmatched in the `input` strings.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "The text after applying pattern match and rewrite substitution.", + "type": 7 + } + ] + }, + { + "name": "RegisterDataset", + "summary": "Registers a dataset with the tf.data service.", + "attributes": [ + { + "name": "external_state_policy", + "type": "int64" + }, + { + "name": "element_spec", + "type": "string", + "default": "" + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "dataset", + "type": 21 + }, + { + "name": "address", + "type": 7 + }, + { + "name": "protocol", + "type": 7 + } + ], + "outputs": [ + { + "name": "dataset_id", + "type": 9 + } + ] + }, + { + "name": "RegisterDatasetV2", + "summary": "Registers a dataset with the tf.data service.", + "attributes": [ + { + "name": "external_state_policy", + "type": "int64" + }, + { + "name": "element_spec", + "type": "string", + "default": "" + }, + { + "name": "requested_dataset_id", + "type": "string", + "default": "" + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "dataset", + "type": 21 + }, + { + "name": "address", + "type": 7 + }, + { + "name": "protocol", + "type": 7 + } + ], + "outputs": [ + { + "name": "dataset_id", + "type": 7 + } + ] + }, + { + "name": "Relayout", + "attributes": [ + { + "name": "layout", + "type": "string" + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RelayoutLike", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "U", + "type": "type" + } + ], + "inputs": [ + { + "name": 
"input", + "typeAttr": "T" + }, + { + "name": "layout_input", + "typeAttr": "U" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "Relu", + "category": "Activation", + "summary": "Computes rectified linear: `max(features, 0)`.", + "description": "See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)\nExample usage:\n>>> tf.nn.relu([-2., 0., 3.]).numpy()\narray([0., 0., 3.], dtype=float32)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`, `qint8`." + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ] + }, + { + "name": "Relu6", + "category": "Activation", + "summary": "Computes rectified linear 6: `min(max(features, 0), 6)`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ] + }, + { + "name": "Relu6Grad", + "summary": "Computes rectified linear 6 gradients for a Relu6 operation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "gradients", + "description": "The backpropagated gradients to the corresponding Relu6 operation.", + "typeAttr": "T" + }, + { + "name": "features", + "description": "The features passed as input to the corresponding Relu6 operation, or\nits output; using either one produces the same result.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "backprops", + "description": "The gradients:\n`gradients * (features > 0) * (features < 6)`.", + "typeAttr": "T" + } + ] + }, + { + "name": "ReluGrad", + "summary": "Computes rectified linear gradients for a Relu operation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "gradients", + "description": "The backpropagated gradients to the corresponding Relu operation.", + "typeAttr": "T" + }, + { + "name": "features", + "description": "The features passed as input to the corresponding Relu operation, OR\nthe outputs of that operation (both work equivalently).", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "backprops", + "description": "`gradients * (features > 0)`.", + "typeAttr": "T" + } + ] + }, + { + "name": "RemoteCall", + "summary": "Runs function `f` on a remote device indicated by `target`.", + "attributes": [ + { + "name": "Tin", + "type": "type[]", + "description": "The type list for the arguments.", + "minimum": 1 + }, + { + "name": "Tout", + "type": "type[]", + "description": "The type list for the return values.", + "minimum": 1 + }, + { + "name": "f", + "type": "function", + "description": "The function to run remotely." 
+ } + ], + "inputs": [ + { + "name": "target", + "description": "A fully specified device name where we want to run the function.", + "type": 7 + }, + { + "name": "args", + "description": "A list of arguments for the function.", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of return values.", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "RepeatDataset", + "summary": "Creates a dataset that emits the outputs of `input_dataset` `count` times.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "count", + "description": "A scalar representing the number of times that `input_dataset` should\nbe repeated. A value of `-1` indicates that it should be repeated infinitely.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "RequantizationRange", + "summary": "Computes a range that covers the actual values present in a quantized tensor.", + "description": "Given a quantized tensor described by `(input, input_min, input_max)`, outputs a\nrange that covers the actual values present in that tensor. This op is typically\nused to produce the `requested_output_min` and `requested_output_max` for\n`Requantize`.", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "input_min", + "description": "The float value that the minimum quantized input value represents.", + "type": 1 + }, + { + "name": "input_max", + "description": "The float value that the maximum quantized input value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output_min", + "description": "The computed min output.", + "type": 1 + }, + { + "name": "output_max", + "description": "the computed max output.", + "type": 1 + } + ] + }, + { + "name": "RequantizationRangePerChannel", + "summary": "Computes requantization range per channel.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The quantized type of input tensor that needs to be converted. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "clip_value_max", + "type": "float32", + "description": "The maximum value of the output that needs to be clipped.\nExample: set this to 6 for Relu6." 
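For `RepeatDataset`, a minimal sketch via `tf.data.Dataset.repeat`; a count of `-1` (or the default `None`) repeats indefinitely:

```python
import tensorflow as tf

ds = tf.data.Dataset.range(2).repeat(3)
print(list(ds.as_numpy_iterator()))  # [0, 1, 0, 1, 0, 1]
```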
+ } + ], + "inputs": [ + { + "name": "input", + "description": "The original input tensor.", + "typeAttr": "T" + }, + { + "name": "input_min", + "description": "The minimum value of the input tensor", + "type": 1 + }, + { + "name": "input_max", + "description": "The maximum value of the input tensor.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output_min", + "description": "The minimum value of the final output tensor", + "type": 1 + }, + { + "name": "output_max", + "description": "The maximum value of the final output tensor.", + "type": 1 + } + ] + }, + { + "name": "Requantize", + "summary": "Converts the quantized `input` tensor into a lower-precision `output`.", + "description": "Converts the quantized `input` tensor into a lower-precision `output`, using the\noutput range specified with `requested_output_min` and `requested_output_max`.\n\n`[input_min, input_max]` are scalar floats that specify the range for the float\ninterpretation of the `input` data. For example, if `input_min` is -1.0f and\n`input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0\nvalue in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.", + "attributes": [ + { + "name": "Tinput", + "type": "type", + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + }, + { + "name": "out_type", + "type": "type", + "description": "The type of the output. Should be a lower bit depth than Tinput. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "input_min", + "description": "The float value that the minimum quantized input value represents.", + "type": 1 + }, + { + "name": "input_max", + "description": "The float value that the maximum quantized input value represents.", + "type": 1 + }, + { + "name": "requested_output_min", + "description": "The float value that the minimum quantized output value represents.", + "type": 1 + }, + { + "name": "requested_output_max", + "description": "The float value that the maximum quantized output value represents.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "output_min", + "description": "The requested_output_min value is copied into this output.", + "type": 1 + }, + { + "name": "output_max", + "description": "The requested_output_max value is copied into this output.", + "type": 1 + } + ] + }, + { + "name": "RequantizePerChannel", + "summary": "Requantizes input with min and max values known per channel.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The quantized type of input tensor that needs to be converted. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 13 + } + }, + { + "name": "out_type", + "type": "type", + "description": "The quantized type of output tensor that needs to be converted. 
Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "default": { + "type": "type", + "value": 12 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "The original input tensor.", + "typeAttr": "T" + }, + { + "name": "input_min", + "description": "The minimum value of the input tensor", + "type": 1 + }, + { + "name": "input_max", + "description": "The maximum value of the input tensor.", + "type": 1 + }, + { + "name": "requested_output_min", + "description": "The minimum value of the output tensor requested.", + "type": 1 + }, + { + "name": "requested_output_max", + "description": "The maximum value of the output tensor requested.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "description": "Output tensor.", + "typeAttr": "out_type" + }, + { + "name": "output_min", + "description": "The minimum value of the final output tensor", + "type": 1 + }, + { + "name": "output_max", + "description": "The maximum value of the final output tensor.", + "type": 1 + } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "summary": "Reshapes a tensor.", + "description": "Given `tensor`, this operation returns a tensor that has the same values\nas `tensor` with shape `shape`.\n\nIf one component of 1-D tensor `shape` is the special value -1, the size of that\ndimension is computed so that the total size remains constant. In particular, a\n`shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be\nunknown.\n\nThe `shape` must be 1-D and the operation returns a tensor with shape\n`shape` filled with the values of `tensor`. In this case, the number of elements\nimplied by `shape` must be the same as the number of elements in `tensor`.\n\nIt is an error if `shape` is not 1-D.\n\nFor example:\n\n```\n# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]\n# tensor 't' has shape [9]\nreshape(t, [3, 3]) ==> [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n\n# tensor 't' is [[[1, 1], [2, 2]],\n# [[3, 3], [4, 4]]]\n# tensor 't' has shape [2, 2, 2]\nreshape(t, [2, 4]) ==> [[1, 1, 2, 2],\n [3, 3, 4, 4]]\n\n# tensor 't' is [[[1, 1, 1],\n# [2, 2, 2]],\n# [[3, 3, 3],\n# [4, 4, 4]],\n# [[5, 5, 5],\n# [6, 6, 6]]]\n# tensor 't' has shape [3, 2, 3]\n# pass '[-1]' to flatten 't'\nreshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]\n\n# -1 can also be used to infer the shape\n\n# -1 is inferred to be 9:\nreshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]]\n# -1 is inferred to be 2:\nreshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]]\n# -1 is inferred to be 3:\nreshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]],\n [[4, 4, 4],\n [5, 5, 5],\n [6, 6, 6]]]\n\n# tensor 't' is [7]\n# shape `[]` reshapes to a scalar\nreshape(t, []) ==> 7\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tshape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "tensor", + "typeAttr": "T" + }, + { + "name": "shape", + "description": "Defines the shape of the output tensor.", + "typeAttr": "Tshape" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "ResizeArea", + "summary": "Resize `images` to `size` using area interpolation.", + "description": "Input images can be of different types but output images are always float.\n\nThe range of pixel values for the output image might be slightly 
different\nfrom the range for the input image because of limited numerical precision.\nTo guarantee an output range, for example `[0.0, 1.0]`, apply\n`tf.clip_by_value` to the output.\n\nEach output pixel is computed by first transforming the pixel's footprint into\nthe input tensor and then averaging the pixels that intersect the footprint. An\ninput pixel's contribution to the average is weighted by the fraction of its\narea that intersects the footprint. This is the same as OpenCV's INTER_AREA.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `float16`, `float32`, `float64`, `bfloat16`." + }, + { + "name": "align_corners", + "type": "boolean", + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. Defaults to false.", + "default": false + } + ], + "inputs": [ + { + "name": "images", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "size", + "description": "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images.", + "type": 3 + } + ], + "outputs": [ + { + "name": "resized_images", + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "type": 1 + } + ] + }, + { + "name": "ResizeBicubic", + "summary": "Resize `images` to `size` using bicubic interpolation.", + "description": "Input images can be of different types but output images are always float.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `float16`, `float32`, `float64`, `bfloat16`." + }, + { + "name": "align_corners", + "type": "boolean", + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. Defaults to false.", + "default": false + }, + { + "name": "half_pixel_centers", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "images", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "size", + "description": "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images.", + "type": 3 + } + ], + "outputs": [ + { + "name": "resized_images", + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "type": 1 + } + ] + }, + { + "name": "ResizeBicubicGrad", + "summary": "Computes the gradient of bicubic interpolation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`." + }, + { + "name": "align_corners", + "type": "boolean", + "description": "If true, the centers of the 4 corner pixels of the input and grad tensors are\naligned. 
Defaults to false.", + "default": false + }, + { + "name": "half_pixel_centers", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "grads", + "description": "4-D with shape `[batch, height, width, channels]`.", + "type": 1 + }, + { + "name": "original_image", + "description": "4-D with shape `[batch, orig_height, orig_width, channels]`,\nThe image tensor that was resized.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with shape `[batch, orig_height, orig_width, channels]`.\nGradients with respect to the input image. Input image must have been\nfloat or double.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResizeBilinear", + "summary": "Resize `images` to `size` using bilinear interpolation.", + "description": "Input images can be of different types but output images are always float.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `bfloat16`, `float16`, `float32`, `float64`, `bfloat16`." + }, + { + "name": "align_corners", + "type": "boolean", + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. Defaults to false.", + "default": false + }, + { + "name": "half_pixel_centers", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "images", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "size", + "description": "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images.", + "type": 3 + } + ], + "outputs": [ + { + "name": "resized_images", + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "type": 1 + } + ] + }, + { + "name": "ResizeBilinearGrad", + "summary": "Computes the gradient of bilinear interpolation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `bfloat16`, `float16`, `float64`." + }, + { + "name": "align_corners", + "type": "boolean", + "description": "If true, the centers of the 4 corner pixels of the input and grad tensors are\naligned. Defaults to false.", + "default": false + }, + { + "name": "half_pixel_centers", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "grads", + "description": "4-D with shape `[batch, height, width, channels]`.", + "type": 1 + }, + { + "name": "original_image", + "description": "4-D with shape `[batch, orig_height, orig_width, channels]`,\nThe image tensor that was resized.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with shape `[batch, orig_height, orig_width, channels]`.\nGradients with respect to the input image. Input image must have been\nfloat or double.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResizeNearestNeighbor", + "summary": "Resize `images` to `size` using nearest neighbor interpolation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `float16`, `float32`, `float64`, `bfloat16`." + }, + { + "name": "align_corners", + "type": "boolean", + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. 
Defaults to false.", + "default": false + }, + { + "name": "half_pixel_centers", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "images", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "size", + "description": "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images.", + "type": 3 + } + ], + "outputs": [ + { + "name": "resized_images", + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResizeNearestNeighborGrad", + "summary": "Computes the gradient of nearest neighbor interpolation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `int8`, `int32`, `float16`, `float32`, `float64`, `bfloat16`." + }, + { + "name": "align_corners", + "type": "boolean", + "description": "If true, the centers of the 4 corner pixels of the input and grad tensors are\naligned. Defaults to false.", + "default": false + }, + { + "name": "half_pixel_centers", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "grads", + "description": "4-D with shape `[batch, height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "size", + "description": "= A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The\noriginal input size.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients\nwith respect to the input image.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceAccumulatorApplyGradient", + "summary": "Applies a gradient to a given accumulator.", + "description": "Does not add if local_step is lesser than the accumulator's global_step.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." 
+ } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a accumulator.", + "type": 20 + }, + { + "name": "local_step", + "description": "The local_step value at which the gradient was computed.", + "type": 9 + }, + { + "name": "gradient", + "description": "A tensor of the gradient to be accumulated.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ResourceAccumulatorNumAccumulated", + "summary": "Returns the number of gradients aggregated in the given accumulators.", + "inputs": [ + { + "name": "handle", + "description": "The handle to an accumulator.", + "type": 20 + } + ], + "outputs": [ + { + "name": "num_accumulated", + "description": "The number of gradients aggregated in the given accumulator.", + "type": 3 + } + ] + }, + { + "name": "ResourceAccumulatorSetGlobalStep", + "summary": "Updates the accumulator with a new value for global_step.", + "description": "Logs warning if the accumulator's value is already higher than\nnew_global_step.", + "inputs": [ + { + "name": "handle", + "description": "The handle to an accumulator.", + "type": 20 + }, + { + "name": "new_global_step", + "description": "The new global_step value to set.", + "type": 9 + } + ] + }, + { + "name": "ResourceAccumulatorTakeGradient", + "summary": "Extracts the average gradient in the given ConditionalAccumulator.", + "description": "The op blocks until sufficient (i.e., more than num_required)\ngradients have been accumulated. If the accumulator has already\naggregated more than num_required gradients, it returns the average of\nthe accumulated gradients. Also automatically increments the recorded\nglobal_step in the accumulator by 1, and resets the aggregate to 0.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to an accumulator.", + "type": 20 + }, + { + "name": "num_required", + "description": "Number of gradients required before we return an aggregate.", + "type": 3 + } + ], + "outputs": [ + { + "name": "average", + "description": "The average of the accumulated gradients.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ResourceApplyAdaMax", + "summary": "Update '*var' according to the AdaMax algorithm.", + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nv_t <- max(beta2 * v_{t-1}, abs(g))\nvariable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." 
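
A plain-NumPy transliteration of the AdaMax rule quoted in the description above; the raw op applies the same arithmetic in place to resource variables and takes `beta1_power = beta1**t` as an input rather than tracking `t` itself:

```python
import numpy as np

# One AdaMax step, following the quoted update rule.
def adamax_step(var, m, v, grad, lr, beta1, beta2, eps, t):
    m = beta1 * m + (1.0 - beta1) * grad       # m_t
    v = np.maximum(beta2 * v, np.abs(grad))    # v_t
    var = var - lr / (1.0 - beta1 ** t) * m / (v + eps)
    return var, m, v
```
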
+ }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "m", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "v", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "beta1_power", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta1", + "description": "Momentum factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta2", + "description": "Momentum factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyAdadelta", + "summary": "Update '*var' according to the adadelta scheme.", + "description": "accum = rho() * accum + (1 - rho()) * grad.square();\nupdate = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;\nupdate_accum = rho() * update_accum + (1 - rho()) * update.square();\nvar -= update;", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum_update", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Constant factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyAdagrad", + "summary": "Update '*var' according to the adagrad scheme.", + "description": "accum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." 
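
The adadelta recurrence above, sketched in NumPy. The quoted pseudocode leaves `lr` implicit; this sketch assumes the `lr` input ("Scaling factor") multiplies the final step:

```python
import numpy as np

# One adadelta step, following the quoted scheme.
def adadelta_step(var, accum, accum_update, grad, lr, rho, eps):
    accum = rho * accum + (1.0 - rho) * grad ** 2
    update = np.sqrt(accum_update + eps) / np.sqrt(accum + eps) * grad
    accum_update = rho * accum_update + (1.0 - rho) * update ** 2
    var = var - lr * update   # assumption: lr scales the step
    return var, accum, accum_update
```
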
+ }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "update_slots", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyAdagradDA", + "summary": "Update '*var' according to the proximal adagrad scheme.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "gradient_accumulator", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "gradient_squared_accumulator", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "global_step", + "description": "Training step number. Must be a scalar.", + "type": 9 + } + ] + }, + { + "name": "ResourceApplyAdagradV2", + "summary": "Update '*var' according to the adagrad scheme.", + "description": "accum += grad * grad\nvar -= lr * grad * (1 / (sqrt(accum) + epsilon))", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "update_slots", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Constant factor. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyAdam", + "summary": "Update '*var' according to the Adam algorithm.", + "description": "$$\\text{lr}_t := \\mathrm{lr} \\cdot \\frac{\\sqrt{1 - \\beta_2^t}}{1 - \\beta_1^t}$$\n$$m_t := \\beta_1 \\cdot m_{t-1} + (1 - \\beta_1) \\cdot g$$\n$$v_t := \\beta_2 \\cdot v_{t-1} + (1 - \\beta_2) \\cdot g^2$$\n$$\\text{var} := \\begin{cases} \\text{var} - (m_t \\beta_1 + g \\cdot (1 - \\beta_1))\\cdot\\text{lr}_t/(\\sqrt{v_t} + \\epsilon), &\\text{if use_nesterov}\\\\\\\\ \\text{var} - m_t \\cdot \\text{lr}_t /(\\sqrt{v_t} + \\epsilon), &\\text{otherwise} \\end{cases}$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "use_nesterov", + "type": "boolean", + "description": "If `True`, uses the nesterov update.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "m", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "v", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "beta1_power", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta2_power", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta1", + "description": "Momentum factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta2", + "description": "Momentum factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyAdamWithAmsgrad", + "summary": "Update '*var' according to the Adam algorithm.", + "description": "$$\\text{lr}_t := \\mathrm{learning_rate} * \\sqrt{1 - \\beta_2^t} / (1 - \\beta_1^t)$$\n$$m_t := \\beta_1 * m_{t-1} + (1 - \\beta_1) * g$$\n$$v_t := \\beta_2 * v_{t-1} + (1 - \\beta_2) * g * g$$\n$$\\hat{v}_t := max{\\hat{v}_{t-1}, v_t}$$\n$$\\text{variable} := \\text{variable} - \\text{lr}_t * m_t / (\\sqrt{\\hat{v}_t} + \\epsilon)$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." 
+ }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "m", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "v", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "vhat", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "beta1_power", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta2_power", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta1", + "description": "Momentum factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta2", + "description": "Momentum factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyAddSign", + "summary": "Update '*var' according to the AddSign update.", + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nupdate <- (alpha + sign_decay * sign(g) *sign(m)) * g\nvariable <- variable - lr_t * update", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and m tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "m", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "alpha", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "sign_decay", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyCenteredRMSProp", + "summary": "Update '*var' according to the centered RMSProp algorithm.", + "description": "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. 
This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\n\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nmg <- rho * mg_{t-1} + (1-rho) * grad\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)\nvar <- var - mom", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "mg", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "ms", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "mom", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "momentum", + "description": "Momentum Scale. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyFtrl", + "summary": "Update '*var' according to the Ftrl-proximal scheme.", + "description": "accum_new = accum + grad * grad\nlinear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "multiply_linear_by_lr", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "linear", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "lr", + "description": "Scaling factor. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "lr_power", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyFtrlV2", + "summary": "Update '*var' according to the Ftrl-proximal scheme.", + "description": "accum_new = accum + grad * grad\ngrad_with_shrinkage = grad + 2 * l2_shrinkage * var\nlinear += grad_with_shrinkage +\n (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "multiply_linear_by_lr", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "linear", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 shrinkage regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2_shrinkage", + "typeAttr": "T" + }, + { + "name": "lr_power", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyGradientDescent", + "summary": "Update '*var' by subtracting 'alpha' * 'delta' from it.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "alpha", + "description": "Scaling factor. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "delta", + "description": "The change.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyKerasMomentum", + "summary": "Update '*var' according to the momentum scheme.", + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\naccum = accum * momentum - lr * grad\nvar += accum", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "use_nesterov", + "type": "boolean", + "description": "If `True`, the tensor passed to compute grad will be\nvar + momentum * accum, so in the end, the var you get is actually\nvar + momentum * accum.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "momentum", + "description": "Momentum. Must be a scalar.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyMomentum", + "summary": "Update '*var' according to the momentum scheme.", + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\naccum = accum * momentum + grad\nvar -= lr * accum", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "use_nesterov", + "type": "boolean", + "description": "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "momentum", + "description": "Momentum. 
Must be a scalar.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyPowerSign", + "summary": "Update '*var' according to the AddSign update.", + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nupdate <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g\nvariable <- variable - lr_t * update", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and m tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "m", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "logbase", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "sign_decay", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "beta", + "description": "Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyProximalAdagrad", + "summary": "Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.", + "description": "accum += grad * grad\nprox_v = var - lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyProximalGradientDescent", + "summary": "Update '*var' as FOBOS algorithm with fixed learning rate.", + "description": "prox_v = var - alpha * delta\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." 
+ }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "alpha", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "delta", + "description": "The change.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceApplyRMSProp", + "summary": "Update '*var' according to the RMSProp algorithm.", + "description": "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "ms", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "mom", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceConditionalAccumulator", + "summary": "A conditional accumulator for aggregating gradients.", + "description": "The accumulator accepts gradients marked with local_step greater or\nequal to the most recent global_step known to the accumulator. The\naverage can be extracted from the accumulator, provided sufficient\ngradients have been accumulated. Extracting the average automatically\nresets the aggregate to 0, and increments the global_step recorded by\nthe accumulator.\nThis is a resource version of ConditionalAccumulator that will work in TF2.0\nwith tf.cond version 2.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the value being accumulated. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." 
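
The (uncentered) RMSProp recurrence above as a NumPy sketch; compare `ResourceApplyCenteredRMSProp` earlier, which additionally tracks the first moment `mg` and normalizes by the variance estimate `ms - mg*mg`:

```python
import numpy as np

# One RMSProp step, per the quoted recurrence.
def rmsprop_step(var, ms, mom, grad, lr, rho, momentum, eps):
    ms = rho * ms + (1.0 - rho) * grad ** 2
    mom = momentum * mom + lr * grad / np.sqrt(ms + eps)
    var = var - mom
    return var, ms, mom
```
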
+ }, + { + "name": "shape", + "type": "shape", + "description": "The shape of the values, can be [], in which case shape is unknown." + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this accumulator will be shared under the\ngiven name across multiple sessions.", + "default": "" + }, + { + "name": "reduction_type", + "type": "string", + "description": "Must be one of the following: `MEAN`, `SUM`.", + "default": "MEAN" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the accumulator.", + "type": 20 + } + ] + }, + { + "name": "ResourceCountUpTo", + "summary": "Increments variable pointed to by 'resource' until it reaches 'limit'.", + "attributes": [ + { + "name": "limit", + "type": "int64", + "description": "If incrementing ref would bring it above limit, instead generates an\n'OutOfRange' error." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "resource", + "description": "Should be from a scalar `Variable` node.", + "type": 20 + } + ], + "outputs": [ + { + "name": "output", + "description": "A copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceGather", + "summary": "Gather slices from the variable pointed to by `resource` according to `indices`.", + "description": "`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).\nProduces an output tensor with shape `indices.shape + params.shape[1:]` where:\n\n```python\n # Scalar indices\n output[:, ..., :] = params[indices, :, ... :]\n\n # Vector indices\n output[i, :, ..., :] = params[indices[i], :, ... :]\n\n # Higher rank indices\n output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]\n```", + "attributes": [ + { + "name": "batch_dims", + "type": "int64", + "default": 0 + }, + { + "name": "validate_indices", + "type": "boolean", + "default": true + }, + { + "name": "dtype", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "resource", + "type": 20 + }, + { + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ResourceGatherNd", + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "resource", + "type": 20 + }, + { + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ResourceScatterAdd", + "summary": "Adds sparse updates to the variable referenced by `resource`.", + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] += updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] += updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] 
+= updates[i, ..., j, ...]\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions add.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
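
The duplicate-index behavior called out above ("their contributions add") matches NumPy's unbuffered `np.add.at`; a quick check of `ResourceScatterAdd`'s vector-indices case on a plain array:

```python
import numpy as np

ref = np.zeros(4)
indices = np.array([1, 1, 3])
updates = np.array([10.0, 5.0, 2.0])

# Both updates targeting index 1 accumulate, as the description requires.
np.add.at(ref, indices, updates)
print(ref)  # [ 0. 15.  0.  2.]
```
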
", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "resource", + "description": "Should be from a `Variable` node.", + "type": 20 + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to add to `ref`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ResourceScatterDiv", + "summary": "Divides sparse updates into the variable referenced by `resource`.", + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] /= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] /= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions multiply.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "resource", + "description": "Should be from a `Variable` node.", + "type": 20 + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to add to `ref`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ResourceScatterMax", + "summary": "Reduces sparse updates into the variable referenced by `resource` using the `max` operation.", + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] = max(ref[indices, ...], updates[...])\n\n # Vector indices (for each i)\n ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions are combined.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
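
For `ResourceScatterMax` the combining rule for duplicate indices is element-wise `max`; `np.maximum.at` models the same semantics on a plain array:

```python
import numpy as np

ref = np.array([1.0, 5.0, 3.0])
# Index 0 appears twice: ref[0] becomes max(max(1, 4), 2) = 4.
np.maximum.at(ref, [0, 0, 2], np.array([4.0, 2.0, 1.0]))
print(ref)  # [4. 5. 3.]
```
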
", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "resource", + "description": "Should be from a `Variable` node.", + "type": 20 + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to add to `ref`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ResourceScatterMin", + "summary": "Reduces sparse updates into the variable referenced by `resource` using the `min` operation.", + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] = min(ref[indices, ...], updates[...])\n\n # Vector indices (for each i)\n ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions are combined.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "resource", + "description": "Should be from a `Variable` node.", + "type": 20 + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to add to `ref`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ResourceScatterMul", + "summary": "Multiplies sparse updates into the variable referenced by `resource`.", + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] *= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] *= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions multiply.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "resource", + "description": "Should be from a `Variable` node.", + "type": 20 + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to add to `ref`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ResourceScatterNdAdd", + "summary": "Applies sparse addition to individual values or slices in a Variable.", + "description": "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]\n```\n\nFor example, say we want to add 4 scattered elements to a rank-1 tensor to\n8 elements. In Python, that addition would look like this:\n\n```python\nref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)\nindices = tf.constant([[4], [3], [1], [7]])\nupdates = tf.constant([9, 10, 11, 12])\nadd = tf.scatter_nd_add(ref, indices, updates)\nwith tf.Session() as sess:\n print sess.run(add)\n```\n\nThe resulting update to ref would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "default": true + } + ], + "inputs": [ + { + "name": "ref", + "description": "A resource handle. Must be from a VarHandleOp.", + "type": 20 + }, + { + "name": "indices", + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A Tensor. Must have the same type as ref. A tensor of\nvalues to add to ref.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceScatterNdMax", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "default": true + } + ], + "inputs": [ + { + "name": "ref", + "description": "A resource handle. Must be from a VarHandleOp.", + "type": 20 + }, + { + "name": "indices", + "description": "A Tensor. 
Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A Tensor. Must have the same type as ref. A tensor of\nvalues whose element-wise max is taken with ref.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceScatterNdMin", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "default": true + } + ], + "inputs": [ + { + "name": "ref", + "description": "A resource handle. Must be from a VarHandleOp.", + "type": 20 + }, + { + "name": "indices", + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A Tensor. Must have the same type as ref. A tensor of\nvalues whose element-wise min is taken with ref.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceScatterNdSub", + "summary": "Applies sparse subtraction to individual values or slices in a Variable.", + "description": "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be an integer tensor, containing indices into `ref`.\nIt must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is a `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]\n```\n\nFor example, say we want to subtract 4 scattered elements from a rank-1 tensor\nwith 8 elements. In Python, that subtraction would look like this:\n\n```python\nref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)\nindices = tf.constant([[4], [3], [1], [7]])\nupdates = tf.constant([9, 10, 11, 12])\nsub = tf.scatter_nd_sub(ref, indices, updates)\nwith tf.Session() as sess:\n print(sess.run(sub))\n```\n\nThe resulting update to ref would look like this:\n\n [1, -9, 3, -6, -4, 6, 7, -4]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "default": true + } + ], + "inputs": [ + { + "name": "ref", + "description": "A resource handle. Must be from a VarHandleOp.", + "type": 20 + }, + { + "name": "indices", + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A Tensor. Must have the same type as ref. 
A tensor of\nvalues to subtract from ref.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceScatterNdUpdate", + "summary": "Applies sparse `updates` to individual values or slices within a given variable according to `indices`.", + "description": "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be an integer tensor, containing indices into `ref`.\nIt must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is a `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nFor example, say we want to update 4 scattered elements in a rank-1 tensor with\n8 elements. In Python, that update would look like this:\n\n```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n update = tf.scatter_nd_update(ref, indices, updates)\n with tf.Session() as sess:\n print(sess.run(update))\n```\n\nThe resulting update to ref would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "default": true + } + ], + "inputs": [ + { + "name": "ref", + "description": "A resource handle. Must be from a VarHandleOp.", + "type": 20 + }, + { + "name": "indices", + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A Tensor. Must have the same type as ref. A tensor of updated\nvalues to assign to ref.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceScatterSub", + "summary": "Subtracts sparse updates from the variable referenced by `resource`.", + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] -= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] -= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions add.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
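The Session-based snippets in the descriptions above are TF1-style; in TF 2.x the same scatter-nd update can be written eagerly through the `tf.Variable.scatter_nd_*` wrappers. A sketch for the subtraction case, reusing the example values:

```python
import tensorflow as tf

ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
# Eager, in-place row/element update; no Session needed.
ref.scatter_nd_sub(indices, updates)
print(ref.numpy())  # [ 1 -9  3 -6 -4  6  7 -4]
```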
", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "resource", + "description": "Should be from a `Variable` node.", + "type": 20 + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to add to `ref`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ResourceScatterUpdate", + "summary": "Assigns sparse updates to the variable referenced by `resource`.", + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] = updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] = updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]", + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "resource", + "description": "Should be from a `Variable` node.", + "type": 20 + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to add to `ref`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "ResourceSparseApplyAdadelta", + "summary": "var: Should be from a Variable().", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum_update", + "description": ": Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Constant factor. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + } + ] + }, + { + "name": "ResourceSparseApplyAdagrad", + "summary": "Update relevant entries in '*var' and '*accum' according to the adagrad scheme.", + "description": "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "update_slots", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + } + ] + }, + { + "name": "ResourceSparseApplyAdagradDA", + "summary": "Update entries in '*var' and '*accum' according to the proximal adagrad scheme.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "gradient_accumulator", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "gradient_squared_accumulator", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "global_step", + "description": "Training step number. 
Must be a scalar.", + "type": 9 + } + ] + }, + { + "name": "ResourceSparseApplyAdagradV2", + "summary": "Update relevant entries in '*var' and '*accum' according to the adagrad scheme.", + "description": "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "update_slots", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Constant factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + } + ] + }, + { + "name": "ResourceSparseApplyCenteredRMSProp", + "summary": "Update '*var' according to the centered RMSProp algorithm.", + "description": "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
+ }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "mg", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "ms", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "mom", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var, ms and mom.", + "typeAttr": "Tindices" + } + ] + }, + { + "name": "ResourceSparseApplyFtrl", + "summary": "Update relevant entries in '*var' according to the Ftrl-proximal scheme.", + "description": "That is for rows we have grad for, we update var, accum and linear as follows:\naccum_new = accum + grad * grad\nlinear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "multiply_linear_by_lr", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "linear", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "lr_power", + "description": "Scaling factor. 
Must be a scalar.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceSparseApplyFtrlV2", + "summary": "Update relevant entries in '*var' according to the Ftrl-proximal scheme.", + "description": "That is for rows we have grad for, we update var, accum and linear as follows:\ngrad_with_shrinkage = grad + 2 * l2_shrinkage * var\naccum_new = accum + grad_with_shrinkage * grad_with_shrinkage\nlinear += grad_with_shrinkage -\n (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "multiply_linear_by_lr", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "linear", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2_shrinkage", + "description": "L2 shrinkage regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "lr_power", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceSparseApplyKerasMomentum", + "summary": "Update relevant entries in '*var' and '*accum' according to the momentum scheme.", + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\nThat is for rows we have grad for, we update var and accum as follows:\n\naccum = accum * momentum - lr * grad\nvar += accum", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
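The Ftrl-proximal row update quoted above, transcribed as a NumPy sketch for a single row; the `ftrl_row` helper and the hyperparameter values are illustrative:

```python
import numpy as np

def ftrl_row(var, accum, linear, grad, lr=0.1, l1=0.01, l2=0.01,
             lr_power=-0.5):
    # One row of the Ftrl-proximal scheme from the op description.
    accum_new = accum + grad * grad
    linear += grad - (accum_new ** -lr_power - accum ** -lr_power) / lr * var
    quadratic = 1.0 / (accum_new ** lr_power * lr) + 2.0 * l2
    var[:] = np.where(np.abs(linear) > l1,
                      (np.sign(linear) * l1 - linear) / quadratic, 0.0)
    accum[:] = accum_new

var = np.zeros(3)
accum = np.full(3, 0.1)
linear = np.zeros(3)
ftrl_row(var, accum, linear, grad=np.array([0.2, -0.4, 0.0]))
```

With the default `lr_power = -0.5`, `accum_new ** -lr_power` is `sqrt(accum_new)`, recovering the familiar FTRL sigma term.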
+ }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "use_nesterov", + "type": "boolean", + "description": "If `True`, the tensor passed to compute grad will be\nvar + momentum * accum, so in the end, the var you get is actually\nvar + momentum * accum.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + }, + { + "name": "momentum", + "description": "Momentum. Must be a scalar.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceSparseApplyMomentum", + "summary": "Update relevant entries in '*var' and '*accum' according to the momentum scheme.", + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\nThat is for rows we have grad for, we update var and accum as follows:\n\naccum = accum * momentum + grad\nvar -= lr * accum", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "use_nesterov", + "type": "boolean", + "description": "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + }, + { + "name": "momentum", + "description": "Momentum. 
Must be a scalar.", + "typeAttr": "T" + } + ] + }, + { + "name": "ResourceSparseApplyProximalAdagrad", + "summary": "Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.", + "description": "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nprox_v = var\nprox_v -= lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + } + ] + }, + { + "name": "ResourceSparseApplyProximalGradientDescent", + "summary": "Sparse update '*var' as FOBOS algorithm with fixed learning rate.", + "description": "That is for rows we have grad for, we update var as follows:\nprox_v = var - alpha * grad\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "alpha", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var.", + "typeAttr": "Tindices" + } + ] + }, + { + "name": "ResourceSparseApplyRMSProp", + "summary": "Update '*var' according to the RMSProp algorithm.", + "description": "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "ms", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "mom", + "description": "Should be from a Variable().", + "type": 20 + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var, ms and mom.", + "typeAttr": "Tindices" + } + ] + }, + { + "name": "ResourceStridedSliceAssign", + "summary": "Assign `value` to the sliced l-value reference of `ref`.", + "description": "The values of `value` are assigned to the positions in the variable\n`ref` that are selected by the slice parameters. The slice parameters\n`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.\n\nNOTE this op currently does not support broadcasting and so `value`'s\nshape must be exactly the shape produced by the slice of `ref`.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Index", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
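In TF 2.x, sliced assignment on a `tf.Variable` lowers to this op; a minimal sketch (the slice `1:5:2` and values are illustrative):

```python
import tensorflow as tf

v = tf.Variable([0, 1, 2, 3, 4, 5])
# No broadcasting: the assigned value must match the slice's shape exactly.
v[1:5:2].assign(tf.constant([10, 30]))
print(v.numpy())  # [ 0 10  2 30  4  5]
```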
+ }, + { + "name": "begin_mask", + "type": "int64", + "default": 0 + }, + { + "name": "end_mask", + "type": "int64", + "default": 0 + }, + { + "name": "ellipsis_mask", + "type": "int64", + "default": 0 + }, + { + "name": "new_axis_mask", + "type": "int64", + "default": 0 + }, + { + "name": "shrink_axis_mask", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "ref", + "type": 20 + }, + { + "name": "begin", + "typeAttr": "Index" + }, + { + "name": "end", + "typeAttr": "Index" + }, + { + "name": "strides", + "typeAttr": "Index" + }, + { + "name": "value", + "typeAttr": "T" + } + ] + }, + { + "name": "Restore", + "summary": "Restores a tensor from checkpoint files.", + "description": "Reads a tensor stored in one or several files. If there are several files (for\ninstance because a tensor was saved as slices), `file_pattern` may contain\nwildcard symbols (`*` and `?`) in the filename portion only, not in the\ndirectory portion.\n\nIf a `file_pattern` matches several files, `preferred_shard` can be used to hint\nin which file the requested tensor is likely to be found. This op will first\nopen the file at index `preferred_shard` in the list of matching files and try\nto restore tensors from that file. Only if some tensors or tensor slices are\nnot found in that first file, then the Op opens all the files. Setting\n`preferred_shard` to match the value passed as the `shard` input\nof a matching `Save` Op may speed up Restore. This attribute only affects\nperformance, not correctness. The default value -1 means files are processed in\norder.\n\nSee also `RestoreSlice`.", + "attributes": [ + { + "name": "dt", + "type": "type", + "description": "The type of the tensor to be restored." + }, + { + "name": "preferred_shard", + "type": "int64", + "description": "Index of file to open first if multiple files match\n`file_pattern`.", + "default": -1 + } + ], + "inputs": [ + { + "name": "file_pattern", + "description": "Must have a single element. The pattern of the files from\nwhich we read the tensor.", + "type": 7 + }, + { + "name": "tensor_name", + "description": "Must have a single element. The name of the tensor to be\nrestored.", + "type": 7 + } + ], + "outputs": [ + { + "name": "tensor", + "description": "The restored tensor.", + "typeAttr": "dt" + } + ] + }, + { + "name": "RestoreSlice", + "summary": "Restores a tensor from checkpoint files.", + "description": "This is like `Restore` except that restored tensor can be listed as filling\nonly a slice of a larger tensor. `shape_and_slice` specifies the shape of the\nlarger tensor and the slice that the restored tensor covers.\n\nThe `shape_and_slice` input has the same format as the\nelements of the `shapes_and_slices` input of the `SaveSlices` op.", + "attributes": [ + { + "name": "dt", + "type": "type", + "description": "The type of the tensor to be restored." + }, + { + "name": "preferred_shard", + "type": "int64", + "description": "Index of file to open first if multiple files match\n`file_pattern`. See the documentation for `Restore`.", + "default": -1 + } + ], + "inputs": [ + { + "name": "file_pattern", + "description": "Must have a single element. The pattern of the files from\nwhich we read the tensor.", + "type": 7 + }, + { + "name": "tensor_name", + "description": "Must have a single element. The name of the tensor to be\nrestored.", + "type": 7 + }, + { + "name": "shape_and_slice", + "description": "Scalar. 
The shapes and slice specifications to use when\nrestoring a tensor.", + "type": 7 + } + ], + "outputs": [ + { + "name": "tensor", + "description": "The restored tensor.", + "typeAttr": "dt" + } + ] + }, + { + "name": "RestoreV2", + "summary": "Restores tensors from a V2 checkpoint.", + "description": "For backward compatibility with the V1 format, this Op currently allows\nrestoring from a V1 checkpoint as well:\n - This Op first attempts to find the V2 index file pointed to by \"prefix\", and\n if found proceed to read it as a V2 checkpoint;\n - Otherwise the V1 read path is invoked.\nRelying on this behavior is not recommended, as the ability to fall back to read\nV1 might be deprecated and eventually removed.\n\nBy default, restores the named tensors in full. If the caller wishes to restore\nspecific slices of stored tensors, \"shape_and_slices\" should be non-empty\nstrings and correspondingly well-formed.\n\nCallers must ensure all the named tensors are indeed stored in the checkpoint.", + "attributes": [ + { + "name": "dtypes", + "type": "type[]", + "description": "shape {N}. The list of expected dtype for the tensors. Must match\nthose stored in the checkpoint.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "prefix", + "description": "Must have a single element. The prefix of a V2 checkpoint.", + "type": 7 + }, + { + "name": "tensor_names", + "description": "shape {N}. The names of the tensors to be restored.", + "type": 7 + }, + { + "name": "shape_and_slices", + "description": "shape {N}. The slice specs of the tensors to be restored.\nEmpty strings indicate that they are non-partitioned tensors.", + "type": 7 + } + ], + "outputs": [ + { + "name": "tensors", + "description": "shape {N}. The restored tensors, whose shapes are read from the\ncheckpoint directly.", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "RetrieveAllTPUEmbeddingParameters", + "summary": "An op that retrieves optimization parameters from embedding to host memory.", + "description": "An op that retrieves optimization parameters from embedding to host memory.\nMust be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to retrieve updated\nparameters before saving a checkpoint. For Adagrad, auxiliary1 will contain the\naccumulators after running this op. For SGD, all of the auxiliary* values will\nbe empty (0x0 tensors for that table). For FTRL, auxiliary1 will contain the\naccumulators and auxiliary2 will contain the linear terms. For ADAM, auxiliary1\nwill contain the momenta and auxiliary2 will contain the velocities.", + "attributes": [ + { + "name": "NumTables", + "type": "int64", + "description": "The number of embedding tables.", + "minimum": 1 + }, + { + "name": "config", + "type": "string", + "description": "A TPUEmbeddingConfiguration proto describing the\ntable parameters being loaded, serialized to a string." + }, + { + "name": "num_shards", + "type": "int64", + "description": "Number of shards into which the embedding tables are divided." + }, + { + "name": "shard_id", + "type": "int64", + "description": "Identifier of shard for this operation." 
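A minimal sketch of invoking RestoreV2 directly via `tf.raw_ops`; the checkpoint prefix and tensor name here are hypothetical placeholders:

```python
import tensorflow as tf

# Hypothetical prefix/tensor name; an empty shape-and-slice string
# requests the full (non-partitioned) tensor.
kernel, = tf.raw_ops.RestoreV2(
    prefix="/tmp/model.ckpt",
    tensor_names=["dense/kernel"],
    shape_and_slices=[""],
    dtypes=[tf.float32],
)
```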
+ } + ], + "outputs": [ + { + "name": "parameters", + "description": "A list of tensors, one for each embedding table, containing the\nstored embedding table parameters.", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary1", + "description": "A list of tensors, one for each embedding table, containing the\nfirst auxiliary optimization parameter stored. Elements are\npresent in the list, but have zero size, for unused optimization parameters\n(based on the algorithm in use for each table).", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary2", + "description": "A list of tensors, one for each embedding table, containing the\nsecond auxiliary optimization parameter stored. Elements are\npresent in the list, but have zero size, for unused optimization parameters\n(based on the algorithm in use for each table).", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary3", + "description": "A list of tensors, one for each embedding table, containing the\nthird auxiliary optimization parameter stored. Elements are\npresent in the list, but have zero size, for unused optimization parameters\n(based on the algorithm in use for each table).", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary4", + "description": "A list of tensors, one for each embedding table, containing the\nfourth auxiliary optimization parameter stored. Elements are\npresent in the list, but have zero size, for unused optimization parameters\n(based on the algorithm in use for each table).", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary5", + "description": "A list of tensors, one for each embedding table, containing the\nfifth auxiliary optimization parameter stored. Elements are\npresent in the list, but have zero size, for unused optimization parameters\n(based on the algorithm in use for each table).", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary6", + "description": "A list of tensors, one for each embedding table, containing the\nsixth auxiliary optimization parameter stored. Elements are\npresent in the list, but have zero size, for unused optimization parameters\n(based on the algorithm in use for each table).", + "numberAttr": "NumTables", + "type": 1 + }, + { + "name": "auxiliary7", + "description": "A list of tensors, one for each embedding table, containing the\nseventh auxiliary optimization parameter stored. Elements are\npresent in the list, but have zero size, for unused optimization parameters\n(based on the algorithm in use for each table).", + "numberAttr": "NumTables", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingADAMParameters", + "summary": "Retrieve ADAM embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the ADAM optimization algorithm.", + "type": 1 + }, + { + "name": "momenta", + "description": "Parameter momenta updated by the ADAM optimization algorithm.", + "type": 1 + }, + { + "name": "velocities", + "description": "Parameter velocities updated by the ADAM optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingAdadeltaParameters", + "summary": "Retrieve Adadelta embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the Adadelta optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Parameter accumulators updated by the Adadelta optimization algorithm.", + "type": 1 + }, + { + "name": "updates", + "description": "Parameter updates updated by the Adadelta optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingAdagradMomentumParameters", + "summary": "Retrieve Adagrad Momentum embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the Adagrad Momentum optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Parameter accumulators updated by the Adagrad Momentum optimization algorithm.", + "type": 1 + }, + { + "name": "momenta", + "description": "Parameter momenta updated by the Adagrad Momentum optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingAdagradParameters", + "summary": "Retrieve Adagrad embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the Adagrad optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Parameter accumulators updated by the Adagrad optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingCenteredRMSPropParameters", + "summary": "Retrieve centered RMSProp embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the centered RMSProp optimization algorithm.", + "type": 1 + }, + { + "name": "ms", + "description": "Parameter ms updated by the centered RMSProp optimization algorithm.", + "type": 1 + }, + { + "name": "mom", + "description": "Parameter mom updated by the centered RMSProp optimization algorithm.", + "type": 1 + }, + { + "name": "mg", + "description": "Parameter mg updated by the centered RMSProp optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingFTRLParameters", + "summary": "Retrieve FTRL embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the FTRL optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Parameter accumulators updated by the FTRL optimization algorithm.", + "type": 1 + }, + { + "name": "linears", + "description": "Parameter linears updated by the FTRL optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingFrequencyEstimatorParameters", + "summary": "Retrieve frequency estimator embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the frequency estimator optimization algorithm.", + "type": 1 + }, + { + "name": "last_hit_step", + "description": "Parameter last_hit_step updated by the frequency estimator optimization\nalgorithm.", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingMDLAdagradLightParameters", + "summary": "Retrieve MDL Adagrad Light embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the MDL Adagrad Light optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Parameter accumulators updated by the MDL Adagrad Light optimization algorithm.", + "type": 1 + }, + { + "name": "weights", + "description": "Parameter weights updated by the MDL Adagrad Light optimization algorithm.", + "type": 1 + }, + { + "name": "benefits", + "description": "Parameter benefits updated by the MDL Adagrad Light optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingMomentumParameters", + "summary": "Retrieve Momentum embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the Momentum optimization algorithm.", + "type": 1 + }, + { + "name": "momenta", + "description": "Parameter momenta updated by the Momentum optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingProximalAdagradParameters", + "summary": "Retrieve proximal Adagrad embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the proximal Adagrad optimization algorithm.", + "type": 1 + }, + { + "name": "accumulators", + "description": "Parameter accumulators updated by the proximal Adagrad optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingProximalYogiParameters", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "type": 1 + }, + { + "name": "v", + "type": 1 + }, + { + "name": "m", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingRMSPropParameters", + "summary": "Retrieve RMSProp embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the RMSProp optimization algorithm.", + "type": 1 + }, + { + "name": "ms", + "description": "Parameter ms updated by the RMSProp optimization algorithm.", + "type": 1 + }, + { + "name": "mom", + "description": "Parameter mom updated by the RMSProp optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "RetrieveTPUEmbeddingStochasticGradientDescentParameters", + "summary": "Retrieve SGD embedding parameters.", + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "default": -1 + }, + { + "name": "table_name", + "type": "string", + "default": "" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "parameters", + "description": "Parameter parameters updated by the stochastic gradient descent optimization algorithm.", + "type": 1 + } + ] + }, + { + "name": "Reverse", + "summary": "Reverses specific dimensions of a tensor.", + "description": "Given a `tensor`, and a `bool` tensor `dims` representing the dimensions\nof `tensor`, this operation reverses each dimension i of `tensor` where\n`dims[i]` is `True`.\n\n`tensor` can have up to 8 dimensions. The number of dimensions\nof `tensor` must equal the number of elements in `dims`. In other words:\n\n`rank(tensor) = size(dims)`\n\nFor example:\n\n```\n# tensor 't' is [[[[ 0, 1, 2, 3],\n# [ 4, 5, 6, 7],\n# [ 8, 9, 10, 11]],\n# [[12, 13, 14, 15],\n# [16, 17, 18, 19],\n# [20, 21, 22, 23]]]]\n# tensor 't' shape is [1, 2, 3, 4]\n\n# 'dims' is [False, False, False, True]\nreverse(t, dims) ==> [[[[ 3, 2, 1, 0],\n [ 7, 6, 5, 4],\n [ 11, 10, 9, 8]],\n [[15, 14, 13, 12],\n [19, 18, 17, 16],\n [23, 22, 21, 20]]]]\n\n# 'dims' is [False, True, False, False]\nreverse(t, dims) ==> [[[[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]\n [[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]]]]\n\n# 'dims' is [False, False, True, False]\nreverse(t, dims) ==> [[[[8, 9, 10, 11],\n [4, 5, 6, 7],\n [0, 1, 2, 3]]\n [[20, 21, 22, 23],\n [16, 17, 18, 19],\n [12, 13, 14, 15]]]]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `int8`, `uint16`, `int16`, `uint32`, `int32`, `uint64`, `int64`, `bool`, `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`, `string`." + } + ], + "inputs": [ + { + "name": "tensor", + "description": "Up to 8-D.", + "typeAttr": "T" + }, + { + "name": "dims", + "description": "1-D. The dimensions to reverse.", + "type": 10 + } + ], + "outputs": [ + { + "name": "output", + "description": "The same shape as `tensor`.", + "typeAttr": "T" + } + ] + }, + { + "name": "ReverseSequence", + "summary": "Reverses variable length slices.", + "description": "This op first slices `input` along the dimension `batch_dim`, and for each\nslice `i`, reverses the first `seq_lengths[i]` elements along\nthe dimension `seq_dim`.\n\nThe elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,\nand `seq_lengths` must be a vector of length `input.dims[batch_dim]`.\n\nThe output slice `i` along dimension `batch_dim` is then given by input\nslice `i`, with the first `seq_lengths[i]` slices along dimension\n`seq_dim` reversed.\n\nFor example:\n\n```\n# Given this:\nbatch_dim = 0\nseq_dim = 1\ninput.dims = (4, 8, ...)\nseq_lengths = [7, 2, 3, 5]\n\n# then slices of input are reversed on seq_dim, but only up to seq_lengths:\noutput[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]\noutput[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]\noutput[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]\noutput[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]\n\n# while entries past seq_lens are copied through:\noutput[0, 7:, :, ...] = input[0, 7:, :, ...]\noutput[1, 2:, :, ...] = input[1, 2:, :, ...]\noutput[2, 3:, :, ...] 
= input[2, 3:, :, ...]\noutput[3, 5:, :, ...] = input[3, 5:, :, ...]\n```\n\nIn contrast, if:\n\n```\n# Given this:\nbatch_dim = 2\nseq_dim = 0\ninput.dims = (8, ?, 4, ...)\nseq_lengths = [7, 2, 3, 5]\n\n# then slices of input are reversed on seq_dim, but only up to seq_lengths:\noutput[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]\noutput[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]\noutput[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]\noutput[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]\n\n# while entries past seq_lens are copied through:\noutput[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]\noutput[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]\noutput[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]\noutput[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]\n```", + "attributes": [ + { + "name": "seq_dim", + "type": "int64", + "description": "The dimension which is partially reversed." + }, + { + "name": "batch_dim", + "type": "int64", + "description": "The dimension along which reversal is performed.", + "default": 0 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "Tlen", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "The input to reverse.", + "typeAttr": "T" + }, + { + "name": "seq_lengths", + "description": "1-D with length `input.dims(batch_dim)` and\n`max(seq_lengths) <= input.dims(seq_dim)`", + "typeAttr": "Tlen" + } + ], + "outputs": [ + { + "name": "output", + "description": "The partially reversed input. It has the same shape as `input`.", + "typeAttr": "T" + } + ] + }, + { + "name": "ReverseV2", + "summary": "Reverses specific dimensions of a tensor.", + "description": "Given a `tensor` and an `int32` tensor `axis` representing the set of\ndimensions of `tensor` to reverse, this operation reverses each dimension\n`i` for which there exists `j` s.t. `axis[j] == i`.\n\n`tensor` can have up to 8 dimensions. `axis` may contain 0 or more entries. If an index is specified more than\nonce, an `InvalidArgument` error is raised.\n\nFor example:\n\n```\n# tensor 't' is [[[[ 0, 1, 2, 3],\n# [ 4, 5, 6, 7],\n# [ 8, 9, 10, 11]],\n# [[12, 13, 14, 15],\n# [16, 17, 18, 19],\n# [20, 21, 22, 23]]]]\n# tensor 't' shape is [1, 2, 3, 4]\n\n# 'dims' is [3] or 'dims' is [-1]\nreverse(t, dims) ==> [[[[ 3, 2, 1, 0],\n [ 7, 6, 5, 4],\n [ 11, 10, 9, 8]],\n [[15, 14, 13, 12],\n [19, 18, 17, 16],\n [23, 22, 21, 20]]]]\n\n# 'dims' is '[1]' (or 'dims' is '[-3]')\nreverse(t, dims) ==> [[[[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]\n [[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]]]]\n\n# 'dims' is '[2]' (or 'dims' is '[-2]')\nreverse(t, dims) ==> [[[[8, 9, 10, 11],\n [4, 5, 6, 7],\n [0, 1, 2, 3]]\n [[20, 21, 22, 23],\n [16, 17, 18, 19],\n [12, 13, 14, 15]]]]\n```", + "attributes": [ + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `int64`, `uint64`, `bool`, `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`, `string`." + } + ], + "inputs": [ + { + "name": "tensor", + "description": "Up to 8-D.", + "typeAttr": "T" + }, + { + "name": "axis", + "description": "1-D. The indices of the dimensions to reverse. 
Must be in the range\n`[-rank(tensor), rank(tensor))`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "The same shape as `tensor`.", + "typeAttr": "T" + } + ] + }, + { + "name": "RewriteDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "rewrite_name", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "RightShift", + "summary": "Elementwise computes the bitwise right-shift of `x` and `y`.", + "description": "Performs a logical shift for unsigned integer types, and an arithmetic shift\nfor signed integer types.\n\nIf `y` is negative, or greater than or equal to the width of `x` in bits,\nthe result is implementation defined.\n\nExample:\n\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\nimport numpy as np\ndtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]\n\nfor dtype in dtype_list:\n lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)\n rhs = tf.constant([5, 0, 7, 11], dtype=dtype)\n\n right_shift_result = bitwise_ops.right_shift(lhs, rhs)\n\n print(right_shift_result)\n\n# This will print:\n# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)\n# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)\n# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)\n# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)\n\nlhs = np.array([-2, 64, 101, 32], dtype=np.int8)\nrhs = np.array([-1, -5, -3, -14], dtype=np.int8)\nbitwise_ops.right_shift(lhs, rhs)\n# <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>\n```\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "Rint", + "summary": "Returns element-wise integer closest to x.", + "description": "If the result is midway between two representable values,\nthe even representable value is chosen.\nFor example:\n\n```\nrint(-1.5) ==> -2.0\nrint(0.5000001) ==> 1.0\nrint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscAbs", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscAdd", + "summary": "Returns x + y element-wise.", + "description": "*NOTE*: `RiscAdd` does not support broadcasting.\n\nGiven two input tensors, the `tf.risc_add` operation computes the sum for every element in the tensor.\n\nBoth input and output have a range `(-inf, inf)`.\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscBinaryArithmetic", + "attributes": [ + { + "name": "op_type", + "type": "string", + "description": "Must be one of the following: `ADD`, `SUB`, `MUL`, `DIV`, `REM`, `MIN`, `POW`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscBinaryComparison", + "attributes": [ + { + "name": "op_type", + "type": "string", + "description": "Must be one of the following: `EQ`, `NE`, `GE`, `GT`, `LE`, `LT`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "RiscBitcast", + "attributes": [ + { + "name": "SrcT", + "type": "type" + }, + { + "name": "DstT", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "SrcT" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "DstT" + } + ] + }, + { + "name": "RiscBroadcast", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "shape", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscCast", + "attributes": [ + { + "name": "SrcT", + "type": "type" + }, + { + "name": "DstT", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "SrcT" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "DstT" + } + ] + }, + { + "name": "RiscCeil", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscCholesky", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
+ } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscConcat", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 2 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "values", + "numberAttr": "N", + "typeAttr": "T" + }, + { + "name": "axis", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscCondition", + "attributes": [ + { + "name": "func_true", + "type": "function" + }, + { + "name": "func_false", + "type": "function" + }, + { + "name": "SrcT", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "DstT", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "pred", + "type": 10 + }, + { + "name": "input_true", + "typeAttr": "SrcT" + }, + { + "name": "input_false", + "typeAttr": "SrcT" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "DstT" + } + ] + }, + { + "name": "RiscConv", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "name": "data_format", + "type": "string", + "description": "Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "dilations", + "type": "int64[]", + "default": [ + 1, + 1, + 1, + 1 + ] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "filter", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscCos", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscDiv", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscDot", + "attributes": [ + { + "name": "transpose_a", + "type": "boolean", + "default": false + }, + { + "name": "transpose_b", + "type": "boolean", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "b", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "product", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscExp", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscFft", + "attributes": [ + { + "name": "Tcomplex", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "Tcomplex" + } + ] + }, + { + "name": "RiscFloor", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscGather", + "attributes": [ + { + "name": "batch_dims", + "type": "int64", + "default": 0 + }, + { + "name": "Tparams", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "Taxis", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "params", + "typeAttr": "Tparams" + }, + { + "name": "indices", + "typeAttr": "Tindices" + }, + { + "name": "axis", + "typeAttr": "Taxis" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "Tparams" + } + ] + }, + { + "name": "RiscImag", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + }, + { + "name": "Tout", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "Tout" + } + ] + }, + { + "name": "RiscIsFinite", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "type": 10 + } + ] + }, + { + "name": "RiscLog", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscLogicalAnd", + "inputs": [ + { + "name": "x", + "type": 10 + }, + { + "name": "y", + "type": 10 + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "RiscLogicalNot", + "inputs": [ + { + "name": "x", + "type": 10 + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "RiscLogicalOr", + "inputs": [ + { + "name": "x", + "type": 10 + }, + { + "name": "y", + "type": 10 + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ] + }, + { + "name": "RiscMax", + "summary": "Returns max(x, y) element-wise.", + "description": "*NOTE*: `RiscMax` does not supports broadcasting.\n\nGiven two input tensors, the `tf.risc_max` operation computes the maximum for every element in the tensor.\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "max", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscMin", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscMul", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscNeg", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscPad", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tpaddings", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "paddings", + "typeAttr": "Tpaddings" + }, + { + "name": "constant_values", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscPool", + "attributes": [ + { + "name": "ksize", + "type": "int64[]", + "minimum": 4 + }, + { + "name": "strides", + "type": "int64[]", + "minimum": 4 + }, + { + "name": "pooling_type", + "type": "string", + "description": "Must be one of the following: `AVG`, `MAX`." + }, + { + "name": "data_format", + "type": "string", + "description": "Must be one of the following: `NHWC`, `NCHW`.", + "default": "NHWC" + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscPow", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscRandomUniform", + "attributes": [ + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
+ } + ], + "inputs": [ + { + "name": "shape", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": 1 + } + ] + }, + { + "name": "RiscReal", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `complex64`, `complex128`.", + "default": { + "type": "type", + "value": 8 + } + }, + { + "name": "Tout", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "Tout" + } + ] + }, + { + "name": "RiscReduce", + "attributes": [ + { + "name": "reduce_type", + "type": "string", + "description": "Must be one of the following: `MEAN`, `SUM`." + }, + { + "name": "Index", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "tensor", + "typeAttr": "T" + }, + { + "name": "axis", + "typeAttr": "Index" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscRem", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscReshape", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tshape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "tensor", + "typeAttr": "T" + }, + { + "name": "shape", + "typeAttr": "Tshape" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscReverse", + "attributes": [ + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "tensor", + "typeAttr": "T" + }, + { + "name": "axis", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscScatter", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "indices", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "typeAttr": "T" + }, + { + "name": "shape", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscShape", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
+ }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + } + ] + }, + { + "name": "RiscSign", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscSlice", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Index", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "begin", + "typeAttr": "Index" + }, + { + "name": "size", + "typeAttr": "Index" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscSort", + "attributes": [ + { + "name": "Index", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "direction", + "type": "string", + "description": "Must be one of the following: `ASCENDING`, `DESCENDING`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "axis", + "typeAttr": "Index" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscSqueeze", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "squeeze_dims", + "type": "int64[]", + "minimum": 0, + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscSub", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscTranspose", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tperm", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "perm", + "typeAttr": "Tperm" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscTriangularSolve", + "attributes": [ + { + "name": "lower", + "type": "boolean", + "default": true + }, + { + "name": "adjoint", + "type": "boolean", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
+ } + ], + "inputs": [ + { + "name": "matrix", + "typeAttr": "T" + }, + { + "name": "rhs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscUnary", + "attributes": [ + { + "name": "op_type", + "type": "string", + "description": "Must be one of the following: `ABL`, `CEIL`, `COS`, `EXP`, `FLOOR`, `IMAG`, `LOG`, `NEG`, `REAL`, `SIGN`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RiscWhile", + "attributes": [ + { + "name": "T", + "type": "type[]", + "minimum": 0 + }, + { + "name": "cond", + "type": "function" + }, + { + "name": "body", + "type": "function" + }, + { + "name": "output_shapes", + "type": "shape[]", + "default": [] + }, + { + "name": "parallel_iterations", + "type": "int64", + "default": 10 + } + ], + "inputs": [ + { + "name": "input", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeListAttr": "T" + } + ] + }, + { + "name": "RngReadAndSkip", + "summary": "Advance the counter of a counter-based RNG.", + "description": "The state of the RNG after\n`rng_read_and_skip(n)` will be the same as that after `uniform([n])`\n(or any other distribution). The actual increment added to the\ncounter is an unspecified implementation choice.\n\nIn the case that the input algorithm is RNG_ALG_AUTO_SELECT, the counter in the state needs to be of size int64[2], the current maximal counter size among algorithms. In this case, this op will manage the counter as if it is an 128-bit integer with layout [lower_64bits, higher_64bits]. If an algorithm needs less than 128 bits for the counter, it should use the left portion of the int64[2]. In this way, the int64[2] is compatible with all current RNG algorithms (Philox, ThreeFry and xla::RandomAlgorithm::RNG_DEFAULT). Downstream RNG ops can thus use this counter with any RNG algorithm.", + "inputs": [ + { + "name": "resource", + "description": "The handle of the resource variable that stores the state of the RNG. The state consists of the counter followed by the key.", + "type": 20 + }, + { + "name": "alg", + "description": "The RNG algorithm.", + "type": 3 + }, + { + "name": "delta", + "description": "The amount of advancement.", + "type": 23 + } + ], + "outputs": [ + { + "name": "value", + "description": "The old value of the resource variable, before incrementing. Since state size is algorithm-dependent, this output will be right-padded with zeros to reach shape int64[3] (the current maximal state size among algorithms).", + "type": 9 + } + ] + }, + { + "name": "RngSkip", + "summary": "Advance the counter of a counter-based RNG.", + "description": "The state of the RNG after\n`rng_skip(n)` will be the same as that after `stateful_uniform([n])`\n(or any other distribution). 
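For example, a hedged sketch via the public `tf.random.Generator` API, which is assumed here to be backed by this op family (`RngSkip`/`RngReadAndSkip`):\n\n```python\nimport tensorflow as tf\n\ng = tf.random.Generator.from_seed(1)\ng.skip(10)  # advances the counter as if `uniform([10])` had been drawn\n```\n\n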
The actual increment added to the\ncounter is an unspecified implementation detail.", + "inputs": [ + { + "name": "resource", + "description": "The handle of the resource variable that stores the state of the RNG.", + "type": 20 + }, + { + "name": "algorithm", + "description": "The RNG algorithm.", + "type": 9 + }, + { + "name": "delta", + "description": "The amount of advancement.", + "type": 9 + } + ] + }, + { + "name": "Roll", + "summary": "Rolls the elements of a tensor along an axis.", + "description": "The elements are shifted positively (towards larger indices) by the offset of\n`shift` along the dimension of `axis`. Negative `shift` values will shift\nelements in the opposite direction. Elements that roll past the last position\nwill wrap around to the first and vice versa. Multiple shifts along multiple\naxes may be specified.\n\nFor example:\n\n```\n# 't' is [0, 1, 2, 3, 4]\nroll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]\n\n# shifting along multiple dimensions\n# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\nroll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]\n\n# shifting along the same axis multiple times\n# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\nroll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tshift", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "Taxis", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "shift", + "description": "Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which\nelements are shifted positively (towards larger indices) along the dimension\nspecified by `axis[i]`. Negative shifts will roll the elements in the opposite\ndirection.", + "typeAttr": "Tshift" + }, + { + "name": "axis", + "description": "Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension in which the shift\n`shift[i]` should occur. If the same axis is referenced more than once, the\ntotal shift for that axis will be the sum of all the shifts that belong to that\naxis.", + "typeAttr": "Taxis" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has the same shape and size as the input. The elements are shifted\npositively (towards larger indices) by the offsets of `shift` along the\ndimensions of `axis`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Round", + "summary": "Rounds the values of a tensor to the nearest integer, element-wise.", + "description": "Rounds half to even. Also known as banker's rounding. If you want to round\naccording to the current system rounding mode use std::rint.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Rsqrt", + "summary": "Computes reciprocal of square root of x element-wise.", + "description": "I.e., \\\\(y = 1 / \\sqrt{x}\\\\).", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." 
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "RsqrtGrad", + "summary": "Computes the gradient for the rsqrt of `x` wrt its input.", + "description": "Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`\nis the corresponding input gradient.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "SampleDistortedBoundingBox", + "summary": "Generate a single randomly distorted bounding box for an image.", + "description": "Bounding box annotations are often supplied in addition to ground-truth labels\nin image recognition or object localization tasks. A common technique for\ntraining such a system is to randomly distort an image while preserving\nits content, i.e. *data augmentation*. This Op outputs a randomly distorted\nlocalization of an object, i.e. bounding box, given an `image_size`,\n`bounding_boxes` and a series of constraints.\n\nThe output of this Op is a single bounding box that may be used to crop the\noriginal image. The output is returned as 3 tensors: `begin`, `size` and\n`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the\nimage. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize\nwhat the bounding box looks like.\n\nBounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nheight of the underlying image.\n\nFor example,\n\n```python\n # Generate a single distorted bounding box.\n begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bounding_boxes)\n\n # Draw the bounding box in an image summary.\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n bbox_for_draw)\n tf.summary.image('images_with_box', image_with_box)\n\n # Employ the bounding box to distort the image.\n distorted_image = tf.slice(image, begin, size)\n```\n\nNote that if no bounding box information is available, setting\n`use_image_if_no_bounding_boxes = true` will assume there is a single implicit\nbounding box covering the whole image. If `use_image_if_no_bounding_boxes` is\nfalse and no bounding boxes are supplied, an error is raised.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `int8`, `int16`, `int32`, `int64`." + }, + { + "name": "seed", + "type": "int64", + "description": "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`. Otherwise, it is seeded by a random\nseed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "min_object_covered", + "type": "float32", + "description": "The cropped area of the image must contain at least this\nfraction of any bounding box supplied. The value of this parameter should be\nnon-negative. 
In the case of 0, the cropped area does not need to overlap\nany of the bounding boxes supplied.", + "default": 0.10000000149011612 + }, + { + "name": "aspect_ratio_range", + "type": "float32[]", + "description": "The cropped area of the image must have an aspect ratio =\nwidth / height within this range.", + "default": [ + 0.75, + 1.3300000429153442 + ] + }, + { + "name": "area_range", + "type": "float32[]", + "description": "The cropped area of the image must contain a fraction of the\nsupplied image within this range.", + "default": [ + 0.05000000074505806, + 1.0 + ] + }, + { + "name": "max_attempts", + "type": "int64", + "description": "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage.", + "default": 100 + }, + { + "name": "use_image_if_no_bounding_boxes", + "type": "boolean", + "description": "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error.", + "default": false + } + ], + "inputs": [ + { + "name": "image_size", + "description": "1-D, containing `[height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "bounding_boxes", + "description": "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image.", + "type": 1 + } + ], + "outputs": [ + { + "name": "begin", + "description": "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`.", + "typeAttr": "T" + }, + { + "name": "size", + "description": "1-D, containing `[target_height, target_width, -1]`. Provide as input to\n`tf.slice`.", + "typeAttr": "T" + }, + { + "name": "bboxes", + "description": "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`.", + "type": 1 + } + ] + }, + { + "name": "SampleDistortedBoundingBoxV2", + "summary": "Generate a single randomly distorted bounding box for an image.", + "description": "Bounding box annotations are often supplied in addition to ground-truth labels\nin image recognition or object localization tasks. A common technique for\ntraining such a system is to randomly distort an image while preserving\nits content, i.e. *data augmentation*. This Op outputs a randomly distorted\nlocalization of an object, i.e. bounding box, given an `image_size`,\n`bounding_boxes` and a series of constraints.\n\nThe output of this Op is a single bounding box that may be used to crop the\noriginal image. The output is returned as 3 tensors: `begin`, `size` and\n`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the\nimage. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize\nwhat the bounding box looks like.\n\nBounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. 
The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nheight of the underlying image.\n\nFor example,\n\n```python\n # Generate a single distorted bounding box.\n begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bounding_boxes)\n\n # Draw the bounding box in an image summary.\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n bbox_for_draw)\n tf.summary.image('images_with_box', image_with_box)\n\n # Employ the bounding box to distort the image.\n distorted_image = tf.slice(image, begin, size)\n```\n\nNote that if no bounding box information is available, setting\n`use_image_if_no_bounding_boxes = true` will assume there is a single implicit\nbounding box covering the whole image. If `use_image_if_no_bounding_boxes` is\nfalse and no bounding boxes are supplied, an error is raised.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `int8`, `int16`, `int32`, `int64`." + }, + { + "name": "seed", + "type": "int64", + "description": "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`. Otherwise, it is seeded by a random\nseed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "aspect_ratio_range", + "type": "float32[]", + "description": "The cropped area of the image must have an aspect ratio =\nwidth / height within this range.", + "default": [ + 0.75, + 1.3300000429153442 + ] + }, + { + "name": "area_range", + "type": "float32[]", + "description": "The cropped area of the image must contain a fraction of the\nsupplied image within this range.", + "default": [ + 0.05000000074505806, + 1.0 + ] + }, + { + "name": "max_attempts", + "type": "int64", + "description": "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage.", + "default": 100 + }, + { + "name": "use_image_if_no_bounding_boxes", + "type": "boolean", + "description": "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error.", + "default": false + } + ], + "inputs": [ + { + "name": "image_size", + "description": "1-D, containing `[height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "bounding_boxes", + "description": "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image.", + "type": 1 + }, + { + "name": "min_object_covered", + "description": "The cropped area of the image must contain at least this\nfraction of any bounding box supplied. The value of this parameter should be\nnon-negative. In the case of 0, the cropped area does not need to overlap\nany of the bounding boxes supplied.", + "type": 1 + } + ], + "outputs": [ + { + "name": "begin", + "description": "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`.", + "typeAttr": "T" + }, + { + "name": "size", + "description": "1-D, containing `[target_height, target_width, -1]`. 
Provide as input to\n`tf.slice`.", + "typeAttr": "T" + }, + { + "name": "bboxes", + "description": "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`.", + "type": 1 + } + ] + }, + { + "name": "SamplingDataset", + "summary": "Creates a dataset that takes a Bernoulli sample of the contents of another dataset.", + "description": "There is no transformation in the `tf.data` Python API for creating this dataset.\nInstead, it is created as a result of the `filter_with_random_uniform_fusion`\nstatic optimization. Whether this optimization is performed is determined by the\n`experimental_optimization.filter_with_random_uniform_fusion` option of\n`tf.data.Options`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "rate", + "description": "A scalar representing the sample rate. Each element of `input_dataset` is\nretained with this probability, independent of all other elements.", + "type": 1 + }, + { + "name": "seed", + "description": "A scalar representing seed of random number generator.", + "type": 9 + }, + { + "name": "seed2", + "description": "A scalar representing seed2 of random number generator.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Save", + "summary": "Saves the input tensors to disk.", + "description": "The size of `tensor_names` must match the number of tensors in `data`. `data[i]`\nis written to `filename` with name `tensor_names[i]`.\n\nSee also `SaveSlices`.", + "attributes": [ + { + "name": "T", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "filename", + "description": "Must have a single element. The name of the file to which we write\nthe tensor.", + "type": 7 + }, + { + "name": "tensor_names", + "description": "Shape `[N]`. 
The names of the tensors to be saved.", + "type": 7 + }, + { + "name": "data", + "description": "`N` tensors to save.", + "typeListAttr": "T" + } + ] + }, + { + "name": "SaveDataset", + "attributes": [ + { + "name": "compression", + "type": "string", + "default": "" + }, + { + "name": "shard_func", + "type": "function" + }, + { + "name": "use_shard_func", + "type": "boolean", + "default": true + }, + { + "name": "Tshard_func_args", + "type": "type[]", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "path", + "type": 7 + }, + { + "name": "shard_func_other_args", + "typeListAttr": "Tshard_func_args" + } + ] + }, + { + "name": "SaveDatasetV2", + "attributes": [ + { + "name": "compression", + "type": "string", + "default": "" + }, + { + "name": "shard_func", + "type": "function" + }, + { + "name": "use_shard_func", + "type": "boolean", + "default": true + }, + { + "name": "Tshard_func_args", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "path", + "type": 7 + }, + { + "name": "shard_func_other_args", + "typeListAttr": "Tshard_func_args" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "SaveSlices", + "summary": "Saves input tensors slices to disk.", + "description": "This is like `Save` except that tensors can be listed in the saved file as being\na slice of a larger tensor. `shapes_and_slices` specifies the shape of the\nlarger tensor and the slice that this tensor covers. `shapes_and_slices` must\nhave as many elements as `tensor_names`.\n\nElements of the `shapes_and_slices` input must either be:\n\n* The empty string, in which case the corresponding tensor is\n saved normally.\n* A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the\n `dimI` are the dimensions of the larger tensor and `slice-spec`\n specifies what part is covered by the tensor to save.\n\n`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`\nwhere each `sliceI` is either:\n\n* The string `-` meaning that the slice covers all indices of this dimension\n* `start,length` where `start` and `length` are integers. In that\n case the slice covers `length` indices starting at `start`.\n\nSee also `Save`.", + "attributes": [ + { + "name": "T", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "filename", + "description": "Must have a single element. The name of the file to which we write the\ntensor.", + "type": 7 + }, + { + "name": "tensor_names", + "description": "Shape `[N]`. The names of the tensors to be saved.", + "type": 7 + }, + { + "name": "shapes_and_slices", + "description": "Shape `[N]`. The shapes and slice specifications to use when\nsaving the tensors.", + "type": 7 + }, + { + "name": "data", + "description": "`N` tensors to save.", + "typeListAttr": "T" + } + ] + }, + { + "name": "SaveV2", + "summary": "Saves tensors in V2 checkpoint format.", + "description": "By default, saves the named tensors in full. If the caller wishes to save\nspecific slices of full tensors, \"shape_and_slices\" should be non-empty strings\nand correspondingly well-formed.", + "attributes": [ + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "prefix", + "description": "Must have a single element. 
The prefix of the V2 checkpoint to which we\nwrite the tensors.", + "type": 7 + }, + { + "name": "tensor_names", + "description": "shape {N}. The names of the tensors to be saved.", + "type": 7 + }, + { + "name": "shape_and_slices", + "description": "shape {N}. The slice specs of the tensors to be saved.\nEmpty strings indicate that they are non-partitioned tensors.", + "type": 7 + }, + { + "name": "tensors", + "description": "`N` tensors to save.", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "ScalarSummary", + "summary": "Outputs a `Summary` protocol buffer with scalar values.", + "description": "The input `tags` and `values` must have the same shape. The generated summary\nhas a summary value for each tag-value pair in `tags` and `values`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "tags", + "description": "Tags for the summary.", + "type": 7 + }, + { + "name": "values", + "description": "Same shape as `tags`. Values for the summary.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "summary", + "description": "Scalar. Serialized `Summary` protocol buffer.", + "type": 7 + } + ] + }, + { + "name": "ScaleAndTranslate", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "kernel_type", + "type": "string", + "default": "lanczos3" + }, + { + "name": "antialias", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "images", + "typeAttr": "T" + }, + { + "name": "size", + "type": 3 + }, + { + "name": "scale", + "type": 1 + }, + { + "name": "translation", + "type": 1 + } + ], + "outputs": [ + { + "name": "resized_images", + "type": 1 + } + ] + }, + { + "name": "ScaleAndTranslateGrad", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`." 
+ }, + { + "name": "kernel_type", + "type": "string", + "default": "lanczos3" + }, + { + "name": "antialias", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "grads", + "typeAttr": "T" + }, + { + "name": "original_image", + "typeAttr": "T" + }, + { + "name": "scale", + "type": 1 + }, + { + "name": "translation", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "ScanDataset", + "summary": "Creates a dataset that successively reduces `f` over the elements of `input_dataset`.", + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "name": "Tstate", + "type": "type[]", + "minimum": 1 + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "preserve_cardinality", + "type": "boolean", + "default": false + }, + { + "name": "use_default_device", + "type": "boolean", + "default": true + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "initial_state", + "typeListAttr": "Tstate" + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ScatterAdd", + "summary": "Adds sparse updates to a variable reference.", + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] += updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] += updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions add.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
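For example, a minimal sketch using the TF1-style wrapper `tf.compat.v1.scatter_add` (an illustration added here, not part of the op definition; duplicate indices accumulate):\n\n```python\nimport tensorflow.compat.v1 as tf\ntf.disable_eager_execution()\n\nref = tf.Variable([1., 2., 3., 4.])   # variable updated in place\nindices = tf.constant([0, 0, 3])      # index 0 appears twice\nupdates = tf.constant([10., 20., 30.])\nadd = tf.scatter_add(ref, indices, updates)\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    print(sess.run(add))  # [31. 2. 3. 34.]\n```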
", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a `Variable` node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to add to `ref`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ScatterDiv", + "summary": "Divides a variable reference by sparse updates.", + "description": "This operation computes\n\n```python\n # Scalar indices\n ref[indices, ...] /= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] /= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions divide.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a `Variable` node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of values that `ref` is divided by.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ScatterMax", + "summary": "Reduces sparse updates into a variable reference using the `max` operation.", + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] = max(ref[indices, ...], updates[...])\n\n # Vector indices (for each i)\n ref[indices[i], ...] 
= max(ref[indices[i], ...], updates[i, ...])\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions combine.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
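As a rough NumPy rendering of the `max`-combine rule above (hypothetical values; `np.minimum.at` gives the analogous sketch for the ScatterMin entry that follows):

```python
import numpy as np

ref = np.array([1, 2, 3, 4, 5, 6, 7, 8])
indices = np.array([4, 3, 1, 7])
updates = np.array([9, 10, 11, 12])

# ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]), unbuffered
np.maximum.at(ref, indices, updates)
print(ref)                               # [ 1 11  3 10  9  6  7 12]
```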
", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`, `int64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the update will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a `Variable` node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to reduce into `ref`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ScatterMin", + "summary": "Reduces sparse updates into a variable reference using the `min` operation.", + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] = min(ref[indices, ...], updates[...])\n\n # Vector indices (for each i)\n ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions combine.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`, `int64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the update will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a `Variable` node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to reduce into `ref`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ScatterMul", + "summary": "Multiplies sparse updates into a variable reference.", + "description": "This operation computes\n\n```python\n # Scalar indices\n ref[indices, ...] *= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] *= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions multiply.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a `Variable` node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to multiply to `ref`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ScatterNd", + "summary": "Scatters `updates` into a tensor of shape `shape` according to `indices`.", + "description": "Scatter sparse `updates` according to individual values at the specified\n`indices`. This op returns an output tensor with the `shape` you specify. 
This\nop is the inverse of the `tf.gather_nd` operator which extracts values or slices\nfrom a given tensor.\n\nThis operation is similar to `tf.tensor_scatter_nd_add`, except that the tensor\nis zero-initialized. Calling `tf.scatter_nd(indices, updates, shape)`\nis identical to calling\n`tf.tensor_scatter_nd_add(tf.zeros(shape, updates.dtype), indices, updates)`\n\nIf `indices` contains duplicates, the associated `updates` are accumulated\n(summed) into the output tensor.\n\n**WARNING**: For floating-point data types, the output may be nondeterministic.\nThis is because the order in which the updates are applied is nondeterministic\nand when floating-point numbers are added in different orders the resulting\nnumerical approximation error can be slightly different. However, the output\nwill be deterministic if op determinism is enabled via\n`tf.config.experimental.enable_op_determinism`.\n\n`indices` is an integer tensor containing indices into the output tensor. The\nlast dimension of `indices` can be at most the rank of `shape`:\n\n indices.shape[-1] <= shape.rank\n\nThe last dimension of `indices` corresponds to indices of elements\n(if `indices.shape[-1] = shape.rank`) or slices\n(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of\n`shape`.\n\n`updates` is a tensor with shape:\n\n indices.shape[:-1] + shape[indices.shape[-1]:]\n\nThe simplest form of the scatter op is to insert individual elements in\na tensor by index. Consider an example where you want to insert 4 scattered\nelements in a rank-1 tensor with 8 elements.\n\n
\n\nIn Python, this scatter operation would look like this:\n\n```python\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n shape = tf.constant([8])\n scatter = tf.scatter_nd(indices, updates, shape)\n print(scatter)\n```\n\nThe resulting tensor would look like this:\n\n [0, 11, 0, 10, 9, 0, 0, 12]\n\nYou can also insert entire slices of a higher rank tensor all at once. For\nexample, you can insert two slices in the first dimension of a rank-3 tensor\nwith two matrices of new values.\n\n
\n\nIn Python, this scatter operation would look like this:\n\n```python\n indices = tf.constant([[1], [3]])\n updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]],\n [[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]]])\n shape = tf.constant([4, 4, 4])\n scatter = tf.scatter_nd(indices, updates, shape)\n print(scatter)\n```\n\nThe resulting tensor would look like this:\n\n [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]\n\nNote that on CPU, if an out of bound index is found, an error is returned.\nOn GPU, if an out of bound index is found, the index is ignored.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int16`, `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "indices", + "description": "Tensor of indices.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "Values to scatter into the output tensor.", + "typeAttr": "T" + }, + { + "name": "shape", + "description": "1-D. The shape of the output tensor.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "description": "A new tensor with the given shape and updates applied according\nto the indices.", + "typeAttr": "T" + } + ] + }, + { + "name": "ScatterNdAdd", + "summary": "Applies sparse addition to individual values or slices in a Variable.", + "description": "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]\n```\n\nFor example, say we want to add 4 scattered elements to a rank-1 tensor to\n8 elements. In Python, that addition would look like this:\n\n```python\nref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\nindices = tf.constant([[4], [3], [1], [7]])\nupdates = tf.constant([9, 10, 11, 12])\nadd = tf.scatter_nd_add(ref, indices, updates)\nwith tf.Session() as sess:\n print sess.run(add)\n```\n\nThe resulting update to ref would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "A mutable Tensor. Should be from a Variable node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A Tensor. 
Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to ref.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ScatterNdMax", + "summary": "Computes element-wise maximum.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "A mutable Tensor. Should be from a Variable node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to ref.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ScatterNdMin", + "summary": "Computes element-wise minimum.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "A mutable Tensor. Should be from a Variable node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to ref.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "Same as ref. 
Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ScatterNdNonAliasingAdd", + "summary": "Applies sparse addition to `input` using individual values or slices", + "description": "from `updates` according to indices `indices`. The updates are non-aliasing:\n`input` is only modified in-place if no other operations will use it.\nOtherwise, a copy of `input` is made. This operation has a gradient with\nrespect to both `input` and `updates`.\n\n`input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `input`.\nIt must be shape \\\\([d_0, ..., d_{Q-2}, K]\\\\) where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or `(P-K)`-dimensional slices\n(if `K < P`) along the `K`th dimension of `input`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n$$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$\n\nFor example, say we want to add 4 scattered elements to a rank-1 tensor to 8\nelements. In Python, that addition would look like this:\n\n input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n output = tf.scatter_nd_non_aliasing_add(input, indices, updates)\n with tf.Session() as sess:\n print(sess.run(output))\n\nThe resulting value `output` would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\nSee `tf.scatter_nd` for more details about how to make updates to slices.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`, `bool`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "input", + "description": "A Tensor.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A Tensor. Must be one of the following types: `int32`, `int64`.\nA tensor of indices into `input`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to `input`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A `Tensor` with the same shape as `input`, containing values of `input`\nupdated with `updates`.", + "typeAttr": "T" + } + ] + }, + { + "name": "ScatterNdSub", + "summary": "Applies sparse subtraction to individual values or slices in a Variable.", + "description": "within a given variable according to `indices`.\n\n`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]\n```\n\nFor example, say we want to subtract 4 scattered elements from a rank-1 tensor\nwith 8 elements. 
In Python, that subtraction would look like this:\n\n```python\nref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\nindices = tf.constant([[4], [3], [1], [7]])\nupdates = tf.constant([9, 10, 11, 12])\nsub = tf.scatter_nd_sub(ref, indices, updates)\nwith tf.Session() as sess:\n print sess.run(sub)\n```\n\nThe resulting update to ref would look like this:\n\n [1, -9, 3, -6, -4, 6, 7, -4]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "A mutable Tensor. Should be from a Variable node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A Tensor. Must have the same type as ref. A tensor of updated values\nto subtract from ref.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ScatterNdUpdate", + "summary": "Applies sparse `updates` to individual values or slices within a given", + "description": "variable according to `indices`.\n\n`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape \\\\([d_0, ..., d_{Q-2}, K]\\\\) where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n$$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$\n\nFor example, say we want to update 4 scattered elements to a rank-1 tensor to\n8 elements. In Python, that update would look like this:\n\n```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n update = tf.scatter_nd_update(ref, indices, updates)\n with tf.Session() as sess:\n print sess.run(update)\n```\n\nThe resulting update to ref would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.\n\nSee also `tf.scatter_update` and `tf.batch_scatter_update`.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "An optional bool. Defaults to True. 
If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "default": true + } + ], + "inputs": [ + { + "name": "ref", + "description": "A mutable Tensor. Should be from a Variable node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A Tensor. Must have the same type as ref. A tensor of updated\nvalues to add to ref.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "Same as ref. Returned as a convenience for operations that want to\nuse the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ScatterSub", + "summary": "Subtracts sparse updates to a variable reference.", + "description": "```python\n # Scalar indices\n ref[indices, ...] -= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] -= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their (negated) contributions add.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
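A minimal NumPy sketch of the subtraction rule above, assuming `np.subtract.at` as the unbuffered stand-in (values are hypothetical, chosen to match the ScatterNdSub example elsewhere in this file):

```python
import numpy as np

ref = np.array([1, 2, 3, 4, 5, 6, 7, 8])
indices = np.array([4, 3, 1, 7])
updates = np.array([9, 10, 11, 12])

# ref[indices[i], ...] -= updates[i, ...]; duplicate indices would accumulate
np.subtract.at(ref, indices, updates)
print(ref)                               # [ 1 -9  3 -6 -4  6  7 -4]
```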
", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a `Variable` node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to subtract from `ref`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "ScatterUpdate", + "summary": "Applies sparse updates to a variable reference.", + "description": "This operation computes\n\n```python\n # Scalar indices\n ref[indices, ...] = updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] = updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nIf values in `ref` is to be updated more than once, because there are\nduplicate entries in `indices`, the order at which the updates happen\nfor each value is undefined.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
\n\nSee also `tf.batch_scatter_update` and `tf.scatter_nd_update`.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": true + } + ], + "inputs": [ + { + "name": "ref", + "description": "Should be from a `Variable` node.", + "typeAttr": "T", + "isRef": true + }, + { + "name": "indices", + "description": "A tensor of indices into the first dimension of `ref`.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "A tensor of updated values to store in `ref`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SdcaFprint", + "summary": "Computes fingerprints of the input strings.", + "inputs": [ + { + "name": "input", + "description": "vector of strings to compute fingerprints on.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "a (N,2) shaped matrix where N is the number of elements in the input\nvector. Each row contains the low and high parts of the fingerprint.", + "type": 9 + } + ] + }, + { + "name": "SdcaOptimizer", + "summary": "Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for", + "description": "linear models with L1 + L2 regularization. As global optimization objective is\nstrongly-convex, the optimizer optimizes the dual objective at each step. The\noptimizer applies each update one example at a time. Examples are sampled\nuniformly, and the optimizer is learning rate free and enjoys linear convergence\nrate.\n\n[Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).
\nShai Shalev-Shwartz, Tong Zhang. 2012\n\n$$\\text{Loss Objective} = \\sum_{i} f_{i}(w x_{i}) + \\frac{l2}{2} \\|w\\|_2^2 + l1 \\|w\\|_1$$\n\n[Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).
\nChenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,\nPeter Richtarik, Martin Takac. 2015\n\n[Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).
\nDominik Csiba, Zheng Qu, Peter Richtarik. 2015", + "attributes": [ + { + "name": "loss_type", + "type": "string", + "description": "Type of the primal loss. Currently SdcaSolver supports logistic,\nsquared and hinge losses. Must be one of the following: `logistic_loss`, `squared_loss`, `hinge_loss`, `smooth_hinge_loss`, `poisson_loss`." + }, + { + "name": "adaptative", + "type": "boolean", + "description": "Whether to use Adaptive SDCA for the inner loop.", + "default": false + }, + { + "name": "num_sparse_features", + "type": "int64", + "description": "Number of sparse feature groups to train on.", + "minimum": 0 + }, + { + "name": "num_sparse_features_with_values", + "type": "int64", + "description": "Number of sparse feature groups with values\nassociated with it, otherwise implicitly treats values as 1.0.", + "minimum": 0 + }, + { + "name": "num_dense_features", + "type": "int64", + "description": "Number of dense feature groups to train on.", + "minimum": 0 + }, + { + "name": "l1", + "type": "float32", + "description": "Symmetric l1 regularization strength." + }, + { + "name": "l2", + "type": "float32", + "description": "Symmetric l2 regularization strength." + }, + { + "name": "num_loss_partitions", + "type": "int64", + "description": "Number of partitions of the global loss function.", + "minimum": 1 + }, + { + "name": "num_inner_iterations", + "type": "int64", + "description": "Number of iterations per mini-batch.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "sparse_example_indices", + "description": "a list of vectors which contain example indices.", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "name": "sparse_feature_indices", + "description": "a list of vectors which contain feature indices.", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "name": "sparse_feature_values", + "description": "a list of vectors which contains feature value\nassociated with each feature group.", + "numberAttr": "num_sparse_features_with_values", + "type": 1 + }, + { + "name": "dense_features", + "description": "a list of matrices which contains the dense feature values.", + "numberAttr": "num_dense_features", + "type": 1 + }, + { + "name": "example_weights", + "description": "a vector which contains the weight associated with each\nexample.", + "type": 1 + }, + { + "name": "example_labels", + "description": "a vector which contains the label/target associated with each\nexample.", + "type": 1 + }, + { + "name": "sparse_indices", + "description": "a list of vectors where each value is the indices which has\ncorresponding weights in sparse_weights. 
This field maybe omitted for the\ndense approach.", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "name": "sparse_weights", + "description": "a list of vectors where each value is the weight associated with\na sparse feature group.", + "numberAttr": "num_sparse_features", + "type": 1 + }, + { + "name": "dense_weights", + "description": "a list of vectors where the values are the weights associated\nwith a dense feature group.", + "numberAttr": "num_dense_features", + "type": 1 + }, + { + "name": "example_state_data", + "description": "a list of vectors containing the example state data.", + "type": 1 + } + ], + "outputs": [ + { + "name": "out_example_state_data", + "description": "a list of vectors containing the updated example state\ndata.", + "type": 1 + }, + { + "name": "out_delta_sparse_weights", + "description": "a list of vectors where each value is the delta\nweights associated with a sparse feature group.", + "numberAttr": "num_sparse_features", + "type": 1 + }, + { + "name": "out_delta_dense_weights", + "description": "a list of vectors where the values are the delta\nweights associated with a dense feature group.", + "numberAttr": "num_dense_features", + "type": 1 + } + ] + }, + { + "name": "SdcaOptimizerV2", + "summary": "Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for", + "description": "linear models with L1 + L2 regularization. As global optimization objective is\nstrongly-convex, the optimizer optimizes the dual objective at each step. The\noptimizer applies each update one example at a time. Examples are sampled\nuniformly, and the optimizer is learning rate free and enjoys linear convergence\nrate.\n\n[Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).
\nShai Shalev-Shwartz, Tong Zhang. 2012\n\n$$\\text{Loss Objective} = \\sum_{i} f_{i}(w x_{i}) + \\frac{l2}{2} \\|w\\|_2^2 + l1 \\|w\\|_1$$\n\n[Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).
\nChenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,\nPeter Richtarik, Martin Takac. 2015\n\n[Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).
\nDominik Csiba, Zheng Qu, Peter Richtarik. 2015", + "attributes": [ + { + "name": "loss_type", + "type": "string", + "description": "Type of the primal loss. Currently SdcaSolver supports logistic,\nsquared and hinge losses. Must be one of the following: `logistic_loss`, `squared_loss`, `hinge_loss`, `smooth_hinge_loss`, `poisson_loss`." + }, + { + "name": "adaptive", + "type": "boolean", + "description": "Whether to use Adaptive SDCA for the inner loop.", + "default": false + }, + { + "name": "num_sparse_features", + "type": "int64", + "description": "Number of sparse feature groups to train on.", + "minimum": 0 + }, + { + "name": "num_sparse_features_with_values", + "type": "int64", + "description": "Number of sparse feature groups with values\nassociated with it, otherwise implicitly treats values as 1.0.", + "minimum": 0 + }, + { + "name": "num_dense_features", + "type": "int64", + "description": "Number of dense feature groups to train on.", + "minimum": 0 + }, + { + "name": "l1", + "type": "float32", + "description": "Symmetric l1 regularization strength." + }, + { + "name": "l2", + "type": "float32", + "description": "Symmetric l2 regularization strength." + }, + { + "name": "num_loss_partitions", + "type": "int64", + "description": "Number of partitions of the global loss function.", + "minimum": 1 + }, + { + "name": "num_inner_iterations", + "type": "int64", + "description": "Number of iterations per mini-batch.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "sparse_example_indices", + "description": "a list of vectors which contain example indices.", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "name": "sparse_feature_indices", + "description": "a list of vectors which contain feature indices.", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "name": "sparse_feature_values", + "description": "a list of vectors which contains feature value\nassociated with each feature group.", + "numberAttr": "num_sparse_features_with_values", + "type": 1 + }, + { + "name": "dense_features", + "description": "a list of matrices which contains the dense feature values.", + "numberAttr": "num_dense_features", + "type": 1 + }, + { + "name": "example_weights", + "description": "a vector which contains the weight associated with each\nexample.", + "type": 1 + }, + { + "name": "example_labels", + "description": "a vector which contains the label/target associated with each\nexample.", + "type": 1 + }, + { + "name": "sparse_indices", + "description": "a list of vectors where each value is the indices which has\ncorresponding weights in sparse_weights. 
This field maybe omitted for the\ndense approach.", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "name": "sparse_weights", + "description": "a list of vectors where each value is the weight associated with\na sparse feature group.", + "numberAttr": "num_sparse_features", + "type": 1 + }, + { + "name": "dense_weights", + "description": "a list of vectors where the values are the weights associated\nwith a dense feature group.", + "numberAttr": "num_dense_features", + "type": 1 + }, + { + "name": "example_state_data", + "description": "a list of vectors containing the example state data.", + "type": 1 + } + ], + "outputs": [ + { + "name": "out_example_state_data", + "description": "a list of vectors containing the updated example state\ndata.", + "type": 1 + }, + { + "name": "out_delta_sparse_weights", + "description": "a list of vectors where each value is the delta\nweights associated with a sparse feature group.", + "numberAttr": "num_sparse_features", + "type": 1 + }, + { + "name": "out_delta_dense_weights", + "description": "a list of vectors where the values are the delta\nweights associated with a dense feature group.", + "numberAttr": "num_dense_features", + "type": 1 + } + ] + }, + { + "name": "SdcaShrinkL1", + "summary": "Applies L1 regularization shrink step on the parameters.", + "attributes": [ + { + "name": "num_features", + "type": "int64", + "description": "Number of feature groups to apply shrinking step.", + "minimum": 0 + }, + { + "name": "l1", + "type": "float32", + "description": "Symmetric l1 regularization strength." + }, + { + "name": "l2", + "type": "float32", + "description": "Symmetric l2 regularization strength. Should be a positive float." + } + ], + "inputs": [ + { + "name": "weights", + "description": "a list of vectors where each value is the weight associated with a\nfeature group.", + "numberAttr": "num_features", + "type": 1, + "isRef": true + } + ] + }, + { + "name": "SegmentMax", + "summary": "Computes the maximum along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\max_j(data_j)\\\\) where `max` is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the max is empty for a given segment ID `i`, `output[i] = 0`.\n\nCaution: On CPU, values in `segment_ids` are always validated to be sorted,\nand an error is thrown for indices that are not increasing. On GPU, this\ndoes not throw an error for unsorted indices. On GPU, out-of-order indices\nresult in safe but unspecified behavior, which may include treating\nout-of-order indices as the same as a smaller following index.\n\n
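Before the op's own example below, here is an illustrative NumPy reconstruction of sorted-segment max (not the implementation; the zero fill for empty segments follows the `output[i] = 0` rule stated above and assumes integer data):

```python
import numpy as np

data = np.array([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
segment_ids = np.array([0, 0, 1])        # sorted, repeats allowed
num_segments = 2

out = np.full((num_segments,) + data.shape[1:], np.iinfo(data.dtype).min)
np.maximum.at(out, segment_ids, data)    # max-combine rows sharing a segment id
out[np.bincount(segment_ids, minlength=num_segments) == 0] = 0  # empty -> 0
print(out)                               # [[4 3 3 4]
                                         #  [5 6 7 8]]
```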
\n\nFor example:\n\n>>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\n>>> tf.math.segment_max(c, tf.constant([0, 0, 1])).numpy()\narray([[4, 3, 3, 4],\n [5, 6, 7, 8]], dtype=int32)\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.\n\nCaution: The values are always validated to be sorted on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "typeAttr": "T" + } + ] + }, + { + "name": "SegmentMaxV2", + "summary": "Computes the maximum along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\max_j(data_j)\\\\) where `max` is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the maximum is empty for a given segment ID `i`, it outputs the smallest\npossible value for the specific numeric type,\n`output[i] = numeric_limits::lowest()`.\n\nNote: That this op is currently only supported with jit_compile=True.\n\nCaution: On CPU, values in `segment_ids` are always validated to be sorted,\nand an error is thrown for indices that are not increasing. On GPU, this\ndoes not throw an error for unsorted indices. On GPU, out-of-order indices\nresult in safe but unspecified behavior, which may include treating\nout-of-order indices as the same as a smaller following index.\n\nThe only difference with SegmentMax is the additional input `num_segments`.\nThis helps in evaluating the output shape in compile time.\n`num_segments` should be consistent with segment_ids.\ne.g. Max(segment_ids) should be equal to `num_segments` - 1 for a 1-d segment_ids\nWith inconsistent num_segments, the op still runs. only difference is,\nthe output takes the size of num_segments irrespective of size of segment_ids and data.\nfor num_segments less than expected output size, the last elements are ignored\nfor num_segments more than the expected output size, last elements are assigned \nsmallest possible value for the specific numeric type.\n\nFor example:\n\n>>> @tf.function(jit_compile=True)\n... def test(c):\n... return tf.raw_ops.SegmentMaxV2(data=c, segment_ids=tf.constant([0, 0, 1]), num_segments=2)\n>>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\n>>> test(c).numpy()\narray([[4, 3, 3, 4],\n [5, 6, 7, 8]], dtype=int32)\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
+ }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.\nThe values must be less than `num_segments`.\n\nCaution: The values are always validated to be sorted on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimensionw which has size\n`num_segments`.\n", + "typeAttr": "T" + } + ] + }, + { + "name": "SegmentMean", + "summary": "Computes the mean along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\frac{\\sum_j data_j}{N}\\\\) where `mean` is\nover `j` such that `segment_ids[j] == i` and `N` is the total number of\nvalues summed.\n\nIf the mean is empty for a given segment ID `i`, `output[i] = 0`.\n\nCaution: On CPU, values in `segment_ids` are always validated to be sorted,\nand an error is thrown for indices that are not increasing. On GPU, this\ndoes not throw an error for unsorted indices. On GPU, out-of-order indices\nresult in safe but unspecified behavior, which may include treating\nout-of-order indices as a smaller following index when computing the numerator\nof the mean.\n\n
\n\nFor example:\n\n>>> c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\n>>> tf.math.segment_mean(c, tf.constant([0, 0, 1])).numpy()\narray([[2.5, 2.5, 2.5, 2.5],\n [5., 6., 7., 8.]], dtype=float32)\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.\n\nCaution: The values are always validated to be sorted on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "typeAttr": "T" + } + ] + }, + { + "name": "SegmentMin", + "summary": "Computes the minimum along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\min_j(data_j)\\\\) where `min` is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the min is empty for a given segment ID `i`, `output[i] = 0`.\n\nCaution: On CPU, values in `segment_ids` are always validated to be sorted,\nand an error is thrown for indices that are not increasing. On GPU, this\ndoes not throw an error for unsorted indices. On GPU, out-of-order indices\nresult in safe but unspecified behavior, which may include treating\nout-of-order indices as the same as a smaller following index.\n\n
\n\nFor example:\n\n>>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\n>>> tf.math.segment_min(c, tf.constant([0, 0, 1])).numpy()\narray([[1, 2, 2, 1],\n [5, 6, 7, 8]], dtype=int32)\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.\n\nCaution: The values are always validated to be sorted on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "typeAttr": "T" + } + ] + }, + { + "name": "SegmentMinV2", + "summary": "Computes the minimum along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\min_j(data_j)\\\\) where `min` is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the minimum is empty for a given segment ID `i`, it outputs the largest\npossible value for the specific numeric type,\n`output[i] = numeric_limits::max()`.\n\nNote: That this op is currently only supported with jit_compile=True.\n\nCaution: On CPU, values in `segment_ids` are always validated to be sorted,\nand an error is thrown for indices that are not increasing. On GPU, this\ndoes not throw an error for unsorted indices. On GPU, out-of-order indices\nresult in safe but unspecified behavior, which may include treating\nout-of-order indices as the same as a smaller following index.\n\nThe only difference with SegmentMin is the additional input `num_segments`.\nThis helps in evaluating the output shape in compile time.\n`num_segments` should be consistent with segment_ids.\ne.g. Max(segment_ids) should be equal to `num_segments` - 1 for a 1-d segment_ids\nWith inconsistent num_segments, the op still runs. only difference is,\nthe output takes the size of num_segments irrespective of size of segment_ids and data.\nfor num_segments less than expected output size, the last elements are ignored\nfor num_segments more than the expected output size, last elements are assigned \nthe largest possible value for the specific numeric type.\n\nFor example:\n\n>>> @tf.function(jit_compile=True)\n... def test(c):\n... return tf.raw_ops.SegmentMinV2(data=c, segment_ids=tf.constant([0, 0, 1]), num_segments=2)\n>>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\n>>> test(c).numpy()\narray([[1, 2, 2, 1],\n [5, 6, 7, 8]], dtype=int32)\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
+ }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.\nThe values must be less than `num_segments`.\n\nCaution: The values are always validated to be sorted on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimensionw which has size\n`num_segments`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SegmentProd", + "summary": "Computes the product along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\prod_j data_j\\\\) where the product is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the product is empty for a given segment ID `i`, `output[i] = 1`.\n\nCaution: On CPU, values in `segment_ids` are always validated to be sorted,\nand an error is thrown for indices that are not increasing. On GPU, this\ndoes not throw an error for unsorted indices. On GPU, out-of-order indices\nresult in safe but unspecified behavior, which may include treating\nout-of-order indices as the same as a smaller following index.\n\n
\n\nFor example:\n\n>>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\n>>> tf.math.segment_prod(c, tf.constant([0, 0, 1])).numpy()\narray([[4, 6, 6, 4],\n [5, 6, 7, 8]], dtype=int32)\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.\n\nCaution: The values are always validated to be sorted on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "typeAttr": "T" + } + ] + }, + { + "name": "SegmentProdV2", + "summary": "Computes the product along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\prod_j data_j\\\\) where the product is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the product is empty for a given segment ID `i`, `output[i] = 1`.\n\nNote: That this op is currently only supported with jit_compile=True.\n\nThe only difference with SegmentProd is the additional input `num_segments`.\nThis helps in evaluating the output shape in compile time.\n`num_segments` should be consistent with segment_ids.\ne.g. Max(segment_ids) - 1 should be equal to `num_segments` for a 1-d segment_ids\nWith inconsistent num_segments, the op still runs. only difference is, \nthe output takes the size of num_segments irrespective of size of segment_ids and data.\nfor num_segments less than expected output size, the last elements are ignored\nfor num_segments more than the expected output size, last elements are assigned 1.\n\nFor example:\n\n>>> @tf.function(jit_compile=True)\n... def test(c):\n... return tf.raw_ops.SegmentProdV2(data=c, segment_ids=tf.constant([0, 0, 1]), num_segments=2)\n>>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\n>>> test(c).numpy()\narray([[4, 6, 6, 4],\n [5, 6, 7, 8]], dtype=int32)\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. 
Values should be sorted and can be repeated.\nThe values must be less than `num_segments`.\n\nCaution: The values are always validated to be sorted on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SegmentSum", + "summary": "Computes the sum along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\sum_j data_j\\\\) where sum is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the sum is empty for a given segment ID `i`, `output[i] = 0`.\n\nCaution: On CPU, values in `segment_ids` are always validated to be sorted,\nand an error is thrown for indices that are not increasing. On GPU, this\ndoes not throw an error for unsorted indices. On GPU, out-of-order indices\nresult in safe but unspecified behavior, which may include treating\nout-of-order indices as the same as a smaller following index.\n\n
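For comparison with the doctest that follows, the sum-per-segment rule reduces to one unbuffered `np.add.at` call in this illustrative NumPy sketch (inputs are hypothetical):

```python
import numpy as np

data = np.array([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
segment_ids = np.array([0, 0, 1])

out = np.zeros((segment_ids.max() + 1,) + data.shape[1:], dtype=data.dtype)
np.add.at(out, segment_ids, data)        # rows with equal segment ids are summed
print(out)                               # [[5 5 5 5]
                                         #  [5 6 7 8]]
```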
\n\nFor example:\n\n>>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\n>>> tf.math.segment_sum(c, tf.constant([0, 0, 1])).numpy()\narray([[5, 5, 5, 5],\n [5, 6, 7, 8]], dtype=int32)\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.\n\nCaution: The values are always validated to be sorted on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "typeAttr": "T" + } + ] + }, + { + "name": "SegmentSumV2", + "summary": "Computes the sum along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\sum_j data_j\\\\) where sum is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the sum is empty for a given segment ID `i`, `output[i] = 0`.\n\nNote that this op is currently only supported with jit_compile=True.\n\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. 
Values should be sorted and can be repeated.\nThe values must be less than `num_segments`.\n\nCaution: The values are always validated to be sorted on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Select", + "summary": "Selects elements from `t` or `e`, depending on `condition`.", + "description": "The `t`, and `e` tensors must all have the same shape, and the\noutput will also have that shape.\n\nThe `condition` tensor must be a scalar if `t` and `e` are scalars.\nIf `t` and `e` are vectors or higher rank, then `condition` must be either a\nscalar, a vector with size matching the first dimension of `t`, or must have\nthe same shape as `t`.\n\nThe `condition` tensor acts as a mask that chooses, based on the value at each\nelement, whether the corresponding element / row in the output should be\ntaken from `t` (if true) or `e` (if false).\n\nIf `condition` is a vector and `t` and `e` are higher rank matrices, then\nit chooses which row (outer dimension) to copy from `t` and `e`.\nIf `condition` has the same shape as `t` and `e`, then it chooses which\nelement to copy from `t` and `e`.\n\nFor example:\n\n```python\n# 'condition' tensor is [[True, False]\n# [False, True]]\n# 't' is [[1, 2],\n# [3, 4]]\n# 'e' is [[5, 6],\n# [7, 8]]\nselect(condition, t, e) # => [[1, 6], [7, 4]]\n\n\n# 'condition' tensor is [True, False]\n# 't' is [[1, 2],\n# [3, 4]]\n# 'e' is [[5, 6],\n# [7, 8]]\nselect(condition, t, e) ==> [[1, 2],\n [7, 8]]\n\n```", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "condition", + "type": 10 + }, + { + "name": "t", + "description": "= A `Tensor` which may have the same shape as `condition`.\nIf `condition` is rank 1, `t` may have higher rank,\nbut its first dimension must match the size of `condition`.", + "typeAttr": "T" + }, + { + "name": "e", + "description": "= A `Tensor` with the same type and shape as `t`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "= A `Tensor` with the same type and shape as `t` and `e`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SelectV2", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "condition", + "type": 10 + }, + { + "name": "t", + "typeAttr": "T" + }, + { + "name": "e", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "SelfAdjointEig", + "summary": "Computes the Eigen Decomposition of a batch of square self-adjoint matrices.", + "description": "The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices, with the same constraints as the single matrix\nSelfAdjointEig.\n\nThe result is a [..., M+1, M] matrix with [..., 0,:] containing the\neigenvalues, and subsequent [...,1:, :] containing the eigenvectors. The eigenvalues\nare sorted in non-decreasing order.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`." 
+ } + ], + "inputs": [ + { + "name": "input", + "description": "Shape is `[..., M, M]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Shape is `[..., M+1, M]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SelfAdjointEigV2", + "summary": "Computes the eigen decomposition of one or more square self-adjoint matrices.", + "description": "Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in\n`input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues\nare sorted in non-decreasing order.\n\n```python\n# a is a tensor.\n# e is a tensor of eigenvalues.\n# v is a tensor of eigenvectors.\ne, v = self_adjoint_eig(a)\ne = self_adjoint_eig(a, compute_v=False)\n```", + "attributes": [ + { + "name": "compute_v", + "type": "boolean", + "description": "If `True` then eigenvectors will be computed and returned in `v`.\nOtherwise, only the eigenvalues will be computed.", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "`Tensor` input of shape `[N, N]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "e", + "description": "Eigenvalues. Shape is `[N]`.", + "typeAttr": "T" + }, + { + "name": "v", + "description": "Eigenvectors. Shape is `[N, N]`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Selu", + "summary": "Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`", + "description": "if < 0, `scale * features` otherwise.\n\nTo be used together with\n`initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.\nFor correct dropout, use `tf.contrib.nn.alpha_dropout`.\n\nSee [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ] + }, + { + "name": "SeluGrad", + "summary": "Computes gradients for the scaled exponential linear (Selu) operation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "gradients", + "description": "The backpropagated gradients to the corresponding Selu operation.", + "typeAttr": "T" + }, + { + "name": "outputs", + "description": "The outputs of the corresponding Selu operation.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "backprops", + "description": "The gradients: `gradients * (outputs + scale * alpha)`\nif outputs < 0, `scale * gradients` otherwise.", + "typeAttr": "T" + } + ] + }, + { + "name": "Send", + "summary": "Sends the named tensor from send_device to recv_device.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "tensor_name", + "type": "string", + "description": "The name of the tensor to send." + }, + { + "name": "send_device", + "type": "string", + "description": "The name of the device sending the tensor." + }, + { + "name": "send_device_incarnation", + "type": "int64", + "description": "The current incarnation of send_device." + }, + { + "name": "recv_device", + "type": "string", + "description": "The name of the device receiving the tensor." 
+ }, + { + "name": "client_terminated", + "type": "boolean", + "description": "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller.", + "default": false + } + ], + "inputs": [ + { + "name": "tensor", + "description": "The tensor to send.", + "typeAttr": "T" + } + ] + }, + { + "name": "SendTPUEmbeddingGradients", + "summary": "Performs gradient updates of embedding tables.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "NN", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "config", + "type": "string", + "description": "Serialized TPUEmbeddingConfiguration proto." + } + ], + "inputs": [ + { + "name": "inputs", + "description": "A TensorList of gradients with which to update embedding tables.\nThis argument has the same length and shapes as the return value of\nRecvTPUEmbeddingActivations, but contains gradients of the model's loss\nwith respect to the embedding activations. The embedding tables are updated\nfrom these gradients via the optimizer specified in the TPU embedding\nconfiguration given to tpu.initialize_system.", + "numberAttr": "N", + "type": 1 + }, + { + "name": "learning_rates", + "description": "A TensorList of float32 scalars, one for each dynamic learning\nrate tag: see the comments in\n//third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto.\nMultiple tables can share the same dynamic learning rate tag as specified\nin the configuration. If the learning rates for all tables are constant,\nthis list should be empty.", + "numberAttr": "NN", + "type": 1 + } + ] + }, + { + "name": "SerializeIterator", + "summary": "Converts the given `resource_handle` representing an iterator to a variant tensor.", + "attributes": [ + { + "name": "external_state_policy", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "resource_handle", + "description": "A handle to an iterator resource.", + "type": 20 + } + ], + "outputs": [ + { + "name": "serialized", + "description": "A variant tensor storing the state of the iterator contained in the\nresource.", + "type": 21 + } + ] + }, + { + "name": "SerializeManySparse", + "summary": "Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object.", + "description": "The `SparseTensor` must have rank `R` greater than 1, and the first dimension\nis treated as the minibatch dimension. Elements of the `SparseTensor`\nmust be sorted in increasing order of this first dimension. The serialized\n`SparseTensor` objects going into each row of `serialized_sparse` will have\nrank `R-1`.\n\nThe minibatch size `N` is extracted from `sparse_shape[0]`.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "out_type", + "type": "type", + "description": "The `dtype` to use for serialization; the supported types are `string`\n(default) and `variant`. Must be one of the following: `string`, `variant`.", + "default": { + "type": "type", + "value": 7 + } + } + ], + "inputs": [ + { + "name": "sparse_indices", + "description": "2-D. The `indices` of the minibatch `SparseTensor`.", + "type": 9 + }, + { + "name": "sparse_values", + "description": "1-D. The `values` of the minibatch `SparseTensor`.", + "typeAttr": "T" + }, + { + "name": "sparse_shape", + "description": "1-D. 
The `shape` of the minibatch `SparseTensor`.", + "type": 9 + } + ], + "outputs": [ + { + "name": "serialized_sparse", + "typeAttr": "out_type" + } + ] + }, + { + "name": "SerializeSparse", + "summary": "Serialize a `SparseTensor` into a `[3]` `Tensor` object.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "out_type", + "type": "type", + "description": "The `dtype` to use for serialization; the supported types are `string`\n(default) and `variant`. Must be one of the following: `string`, `variant`.", + "default": { + "type": "type", + "value": 7 + } + } + ], + "inputs": [ + { + "name": "sparse_indices", + "description": "2-D. The `indices` of the `SparseTensor`.", + "type": 9 + }, + { + "name": "sparse_values", + "description": "1-D. The `values` of the `SparseTensor`.", + "typeAttr": "T" + }, + { + "name": "sparse_shape", + "description": "1-D. The `shape` of the `SparseTensor`.", + "type": 9 + } + ], + "outputs": [ + { + "name": "serialized_sparse", + "typeAttr": "out_type" + } + ] + }, + { + "name": "SerializeTensor", + "summary": "Transforms a Tensor into a serialized TensorProto proto.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The type of the input tensor." + } + ], + "inputs": [ + { + "name": "tensor", + "description": "A Tensor of type `T`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "serialized", + "description": "A serialized TensorProto proto of the input tensor.", + "type": 7 + } + ] + }, + { + "name": "SetSize", + "summary": "Number of unique elements along last dimension of input `set`.", + "description": "Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,\nand `set_shape`. The last dimension contains values in a set; duplicates are\nallowed but ignored.\n\nIf `validate_indices` is `True`, this op validates the order and range of `set`\nindices. Setting it to `False` while passing invalid arguments results in\nundefined behavior.", + "attributes": [ + { + "name": "validate_indices", + "type": "boolean", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`." + } + ], + "inputs": [ + { + "name": "set_indices", + "description": "2D `Tensor`, indices of a `SparseTensor`.", + "type": 9 + }, + { + "name": "set_values", + "description": "1D `Tensor`, values of a `SparseTensor`.", + "typeAttr": "T" + }, + { + "name": "set_shape", + "description": "1D `Tensor`, shape of a `SparseTensor`.", + "type": 9 + } + ], + "outputs": [ + { + "name": "size", + "description": "For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st\n`n-1` dimensions as `set`. 
Each value is the number of unique elements in\nthe corresponding `[0...n-1]` dimension of `set`.", + "type": 3 + } + ] + }, + { + "name": "SetStatsAggregatorDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "stats_aggregator", + "type": 20 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "counter_prefix", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Shape", + "summary": "Returns the shape of a tensor.", + "description": "This operation returns a 1-D integer tensor representing the shape of `input`.\n\nFor example:\n\n```\n# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\nshape(t) ==> [2, 2, 3]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + } + ] + }, + { + "name": "ShapeN", + "summary": "Returns the shape of tensors.", + "description": "This operation returns N 1-D integer tensors representing the shape of each `input[i]`.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "numberAttr": "N", + "typeAttr": "out_type" + } + ] + }, + { + "name": "ShardDataset", + "summary": "Creates a `Dataset` that includes only 1/`num_shards` of this dataset.", + "attributes": [ + { + "name": "require_non_empty", + "type": "boolean", + "default": false + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "num_shards", + "description": "An integer representing the number of shards operating in parallel.", + "type": 9 + }, + { + "name": "index", + "description": "An integer representing the current worker index.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ShardedFilename", + "summary": "Generate a sharded filename. 
The filename is printf formatted as", + "description": " %s-%05d-of-%05d, basename, shard, num_shards.", + "inputs": [ + { + "name": "basename", + "type": 7 + }, + { + "name": "shard", + "type": 3 + }, + { + "name": "num_shards", + "type": 3 + } + ], + "outputs": [ + { + "name": "filename", + "type": 7 + } + ] + }, + { + "name": "ShardedFilespec", + "summary": "Generate a glob pattern matching all sharded file names.", + "inputs": [ + { + "name": "basename", + "type": 7 + }, + { + "name": "num_shards", + "type": 3 + } + ], + "outputs": [ + { + "name": "filename", + "type": 7 + } + ] + }, + { + "name": "ShuffleAndRepeatDataset", + "summary": "Creates a dataset that shuffles and repeats elements from `input_dataset`", + "description": "pseudorandomly.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "reshuffle_each_iteration", + "type": "boolean", + "default": true + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "buffer_size", + "description": "The number of output elements to buffer in an iterator over\nthis dataset. Compare with the `min_after_dequeue` attr when creating a\n`RandomShuffleQueue`.", + "type": 9 + }, + { + "name": "seed", + "description": "A scalar seed for the random number generator. If either `seed` or\n`seed2` is set to be non-zero, the random number generator is seeded\nby the given seed. Otherwise, a random seed is used.", + "type": 9 + }, + { + "name": "seed2", + "description": "A second scalar seed to avoid seed collision.", + "type": 9 + }, + { + "name": "count", + "description": "A scalar representing the number of times the underlying dataset\nshould be repeated. The default is `-1`, which results in infinite repetition.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ShuffleAndRepeatDatasetV2", + "attributes": [ + { + "name": "reshuffle_each_iteration", + "type": "boolean", + "default": true + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "seed", + "type": 9 + }, + { + "name": "seed2", + "type": 9 + }, + { + "name": "count", + "type": 9 + }, + { + "name": "seed_generator", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ShuffleDataset", + "summary": "Creates a dataset that shuffles elements from `input_dataset` pseudorandomly.", + "attributes": [ + { + "name": "reshuffle_each_iteration", + "type": "boolean", + "description": "If true, each iterator over this dataset will be given\na different pseudorandomly generated seed, based on a sequence seeded by the\n`seed` and `seed2` inputs. 
If false, each iterator will be given the same\nseed, and repeated iteration over this dataset will yield the exact same\nsequence of results.", + "default": true + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "buffer_size", + "description": "The number of output elements to buffer in an iterator over\nthis dataset. Compare with the `min_after_dequeue` attr when creating a\n`RandomShuffleQueue`.", + "type": 9 + }, + { + "name": "seed", + "description": "A scalar seed for the random number generator. If either `seed` or\n`seed2` is set to be non-zero, the random number generator is seeded\nby the given seed. Otherwise, a random seed is used.", + "type": 9 + }, + { + "name": "seed2", + "description": "A second scalar seed to avoid seed collision.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ShuffleDatasetV2", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "seed_generator", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ShuffleDatasetV3", + "attributes": [ + { + "name": "reshuffle_each_iteration", + "type": "boolean", + "default": true + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "seed", + "type": 9 + }, + { + "name": "seed2", + "type": 9 + }, + { + "name": "seed_generator", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ShutdownDistributedTPU", + "summary": "Shuts down a running distributed TPU system.", + "description": "The op returns an error if no system is running." + }, + { + "name": "ShutdownTPUSystem", + "summary": "An op that shuts down the TPU system.", + "outputs": [ + { + "name": "success", + "description": "A boolean that indicates if the shut down process succeeds.", + "type": 10 + } + ] + }, + { + "name": "Sigmoid", + "category": "Activation", + "summary": "Computes sigmoid of `x` element-wise.", + "description": "Specifically, `y = 1 / (1 + exp(-x))`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "SigmoidGrad", + "summary": "Computes the gradient of the sigmoid of `x` wrt its input.", + "description": "Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and\n`dy` is the corresponding input gradient.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." 
+ } + ], + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "Sign", + "summary": "Returns an element-wise indication of the sign of a number.", + "description": "`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.\n\nFor complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.\n\nExample usage:\n>>> tf.math.sign([0., 2., -3.])\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Sin", + "summary": "Computes sine of x element-wise.", + "description": " Given an input tensor, this function computes sine of every\n element in the tensor. Input range is `(-inf, inf)` and\n output range is `[-1,1]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10, float(\"inf\")])\n tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan]\n ```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Sinh", + "summary": "Computes hyperbolic sine of x element-wise.", + "description": " Given an input tensor, this function computes hyperbolic sine of every\n element in the tensor. Input range is `[-inf,inf]` and output range\n is `[-inf,inf]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 2, 10, float(\"inf\")])\n tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf]\n ```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." 
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Size", + "summary": "Returns the size of a tensor.", + "description": "This operation returns an integer representing the number of elements in\n`input`.\n\nFor example:\n\n```\n# 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]\nsize(t) ==> 12\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + } + ] + }, + { + "name": "SkipDataset", + "summary": "Creates a dataset that skips `count` elements from the `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "count", + "description": "A scalar representing the number of elements from the `input_dataset`\nthat should be skipped. If count is -1, skips everything.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Skipgram", + "summary": "Parses a text file and creates a batch of examples.", + "attributes": [ + { + "name": "filename", + "type": "string", + "description": "The corpus's text file name." + }, + { + "name": "batch_size", + "type": "int64", + "description": "The size of produced batch." + }, + { + "name": "window_size", + "type": "int64", + "description": "The number of words to predict to the left and right of the target.", + "default": 5 + }, + { + "name": "min_count", + "type": "int64", + "description": "The minimum number of word occurrences for it to be included in the\nvocabulary.", + "default": 5 + }, + { + "name": "subsample", + "type": "float32", + "description": "Threshold for word occurrence. Words that appear with higher\nfrequency will be randomly down-sampled. Set to 0 to disable.", + "default": 0.0010000000474974513 + } + ], + "outputs": [ + { + "name": "vocab_word", + "description": "A vector of words in the corpus.", + "type": 7 + }, + { + "name": "vocab_freq", + "description": "Frequencies of words. 
Sorted in non-ascending order.", + "type": 3 + }, + { + "name": "words_per_epoch", + "description": "Number of words per epoch in the data file.", + "type": 9 + }, + { + "name": "current_epoch", + "description": "The current epoch number.", + "type": 3 + }, + { + "name": "total_words_processed", + "description": "The total number of words processed so far.", + "type": 9 + }, + { + "name": "examples", + "description": "A vector of word ids.", + "type": 3 + }, + { + "name": "labels", + "description": "A vector of word ids.", + "type": 3 + } + ] + }, + { + "name": "SleepDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "sleep_microseconds", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Slice", + "category": "Tensor", + "summary": "Return a slice from 'input'.", + "description": "The output tensor is a tensor with dimensions described by 'size'\nwhose values are extracted from 'input' starting at the offsets in\n'begin'.\n\n*Requirements*:\n 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Index", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "begin", + "description": "begin[i] specifies the offset into the 'i'th dimension of\n'input' to slice from.", + "typeAttr": "Index" + }, + { + "name": "size", + "description": "size[i] specifies the number of elements of the 'i'th dimension\nof 'input' to slice. If size[i] is -1, all remaining elements in dimension\ni are included in the slice (i.e. this is equivalent to setting\nsize[i] = input.dim_size(i) - begin[i]).", + "typeAttr": "Index" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "SlidingWindowDataset", + "summary": "Creates a dataset that passes a sliding window over `input_dataset`.", + "attributes": [ + { + "name": "drop_remainder", + "type": "boolean", + "default": true + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "window_size", + "description": "A scalar representing the number of elements in the\nsliding window.", + "type": 9 + }, + { + "name": "window_shift", + "description": "A scalar representing the steps moving the sliding window\nforward in one iteration. 
It must be positive.", + "type": 9 + }, + { + "name": "window_stride", + "description": "A scalar representing the stride of the input elements of the sliding window.\nIt must be positive.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Snapshot", + "summary": "Returns a copy of the input tensor.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "SnapshotChunkDataset", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "compression", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "chunk_file", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "SnapshotDataset", + "summary": "Creates a dataset that will write to / read from a snapshot.", + "description": "This dataset attempts to determine whether a valid snapshot exists at the\n`snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`.\nIf not, it will run the preprocessing pipeline as usual, and write out a\nsnapshot of the data processed for future use.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "compression", + "type": "string", + "default": "" + }, + { + "name": "reader_path_prefix", + "type": "string", + "default": "" + }, + { + "name": "writer_path_prefix", + "type": "string", + "default": "" + }, + { + "name": "shard_size_bytes", + "type": "int64", + "default": 10737418240 + }, + { + "name": "pending_snapshot_expiry_seconds", + "type": "int64", + "default": 86400 + }, + { + "name": "num_reader_threads", + "type": "int64", + "default": 1 + }, + { + "name": "reader_buffer_size", + "type": "int64", + "default": 1 + }, + { + "name": "num_writer_threads", + "type": "int64", + "default": 1 + }, + { + "name": "writer_buffer_size", + "type": "int64", + "default": 1 + }, + { + "name": "shuffle_on_read", + "type": "boolean", + "default": false + }, + { + "name": "seed", + "type": "int64", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "default": 0 + }, + { + "name": "mode", + "type": "string", + "default": "auto" + }, + { + "name": "snapshot_name", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "path", + "description": "The path we should write snapshots to / read snapshots from.", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "SnapshotDatasetReader", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "compression", + "type": "string", + "default": "" + }, + { + "name": "version", + "type": "int64" + } + ], + "inputs": [ + { + "name": "shard_dir", + "type": 7 + }, + { + "name": "start_index", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "SnapshotDatasetV2", + "summary": "Creates a dataset that will write to / read from a snapshot.", + "description": "This dataset attempts to determine whether a 
valid snapshot exists at the\n`snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`.\nIf not, it will run the preprocessing pipeline as usual, and write out a\nsnapshot of the data processed for future use.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "compression", + "type": "string", + "description": "The type of compression to be applied to the saved snapshot files.", + "default": "" + }, + { + "name": "reader_prefix", + "type": "string", + "default": "" + }, + { + "name": "writer_prefix", + "type": "string", + "default": "" + }, + { + "name": "hash_valid", + "type": "boolean", + "default": false + }, + { + "name": "hash", + "type": "int64", + "default": 0 + }, + { + "name": "reader_func", + "type": "function", + "description": "Optional. A function to control how to read data from snapshot shards." + }, + { + "name": "shard_func", + "type": "function", + "description": "Optional. A function to control how to shard data when writing a snapshot." + }, + { + "name": "Treader_func_args", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Tshard_func_args", + "type": "type[]", + "minimum": 0 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "description": "A variant tensor representing the input dataset.", + "type": 21 + }, + { + "name": "path", + "description": "The path we should write snapshots to / read snapshots from.", + "type": 7 + }, + { + "name": "reader_func_other_args", + "typeListAttr": "Treader_func_args" + }, + { + "name": "shard_func_other_args", + "typeListAttr": "Tshard_func_args" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "SnapshotNestedDatasetReader", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "inputs", + "numberAttr": "N", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "SobolSample", + "summary": "Generates points from the Sobol sequence.", + "description": "Creates a Sobol sequence with `num_results` samples. Each sample has dimension\n`dim`. Skips the first `skip` samples.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the sample. One of: `float32` or `float64`. Must be one of the following: `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "dim", + "description": "Positive scalar `Tensor` representing each sample's dimension.", + "type": 3 + }, + { + "name": "num_results", + "description": "Positive scalar `Tensor` of dtype int32. The number of Sobol points to return\nin the output.", + "type": 3 + }, + { + "name": "skip", + "description": "Positive scalar `Tensor` of dtype int32. 
The number of initial points of the\nSobol sequence to skip.", + "type": 3 + } + ], + "outputs": [ + { + "name": "samples", + "description": "`Tensor` of samples from Sobol sequence with `shape` [num_results, dim].", + "typeAttr": "dtype" + } + ] + }, + { + "name": "Softmax", + "category": "Activation", + "summary": "Computes softmax activations.", + "description": "For each batch `i` and class `j` we have\n\n $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "logits", + "description": "2-D with shape `[batch_size, num_classes]`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "softmax", + "description": "Same shape as `logits`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SoftmaxCrossEntropyWithLogits", + "summary": "Computes softmax cross entropy cost and gradients to backpropagate.", + "description": "Inputs are the logits, not probabilities.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "features", + "description": "batch_size x num_classes matrix", + "typeAttr": "T" + }, + { + "name": "labels", + "description": "batch_size x num_classes matrix\nThe caller must ensure that each batch of labels represents a valid\nprobability distribution.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "loss", + "description": "Per example loss (batch_size vector).", + "typeAttr": "T" + }, + { + "name": "backprop", + "description": "backpropagated gradients (batch_size x num_classes matrix).", + "typeAttr": "T" + } + ] + }, + { + "name": "Softplus", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ] + }, + { + "name": "SoftplusGrad", + "summary": "Computes softplus gradients for a softplus operation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "gradients", + "description": "The backpropagated gradients to the corresponding softplus operation.", + "typeAttr": "T" + }, + { + "name": "features", + "description": "The features passed as input to the corresponding softplus operation.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "backprops", + "description": "The gradients: `gradients / (1 + exp(-features))`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Softsign", + "summary": "Computes softsign: `features / (abs(features) + 1)`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ] + }, + { + "name": "SoftsignGrad", + "summary": "Computes softsign gradients for a softsign operation.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." 
+ } + ], + "inputs": [ + { + "name": "gradients", + "description": "The backpropagated gradients to the corresponding softsign operation.", + "typeAttr": "T" + }, + { + "name": "features", + "description": "The features passed as input to the corresponding softsign operation.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "backprops", + "description": "The gradients: `gradients / (1 + abs(features)) ** 2`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SpaceToBatch", + "summary": "SpaceToBatch for 4-D tensors of type T.", + "description": "This is a legacy version of the more general SpaceToBatchND.\n\nZero-pads and then rearranges (permutes) blocks of spatial data into batch.\nMore specifically, this op outputs a copy of the input tensor where values from\nthe `height` and `width` dimensions are moved to the `batch` dimension. After\nthe zero-padding, both `height` and `width` of the input must be divisible by the\nblock size.\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\n * Non-overlapping blocks of size `block_size x block size` in the height and\n width dimensions are rearranged into the batch dimension at each location.\n * The batch of the output tensor is `batch * block_size * block_size`.\n * Both height_pad and width_pad must be divisible by block_size.\n\nThe shape of the output will be:\n\n [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n depth]\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```\n[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 2, 1]` and value:\n\n```\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tpaddings", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "block_size", + "type": "int64", + "minimum": 2 + } + ], + "inputs": [ + { + "name": "input", + "description": "4-D with shape `[batch, height, width, depth]`.", + "typeAttr": "T" + }, + { + "name": "paddings", + "description": "2-D tensor of non-negative integers with shape `[2, 2]`. 
It specifies\n the padding of the input with zeros across the spatial dimensions as follows:\n\n paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]\n\n The effective spatial dimensions of the zero-padded input tensor will be:\n\n height_pad = pad_top + height + pad_bottom\n width_pad = pad_left + width + pad_right", + "typeAttr": "Tpaddings" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "SpaceToBatchND", + "summary": "SpaceToBatch for N-D tensors of type T.", + "description": "This operation divides \"spatial\" dimensions `[1, ..., M]` of the input into a\ngrid of blocks of shape `block_shape`, and interleaves these blocks with the\n\"batch\" dimension (0) such that in the output, the spatial dimensions\n`[1, ..., M]` correspond to the position within the grid, and the batch\ndimension combines both the position within a spatial block and the original\nbatch position. Prior to division into blocks, the spatial dimensions of the\ninput are optionally zero padded according to `paddings`. See below for a\nprecise description.\n\nThis operation is equivalent to the following steps:\n\n1. Zero-pad the start and end of dimensions `[1, ..., M]` of the\n input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n2. Reshape `padded` to `reshaped_padded` of shape:\n\n [batch] +\n [padded_shape[1] / block_shape[0],\n block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1],\n block_shape[M-1]] +\n remaining_shape\n\n3. Permute dimensions of `reshaped_padded` to produce\n `permuted_reshaped_padded` of shape:\n\n block_shape +\n [batch] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n dimension, producing an output tensor of shape:\n\n [batch * prod(block_shape)] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```\n[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n paddings = `[[0, 0], [2, 0]]`:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 3, 1]` and value:\n\n```\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n [[[0], [2], [4]]], [[[0], [10], [12]]],\n [[[0], [5], [7]]], [[[0], [13], [15]]],\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nAmong others, this operation is 
useful for reducing atrous convolution into\nregular convolution.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tblock_shape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tpaddings", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has `M` dimensions.", + "typeAttr": "T" + }, + { + "name": "block_shape", + "description": "1-D with shape `[M]`, all values must be >= 1.", + "typeAttr": "Tblock_shape" + }, + { + "name": "paddings", + "description": "2-D with shape `[M, 2]`, all values must be >= 0.\n `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n `i + 1`, which corresponds to spatial dimension `i`. It is required that\n `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.", + "typeAttr": "Tpaddings" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "SpaceToDepth", + "summary": "SpaceToDepth for tensors of type T.", + "description": "Rearranges blocks of spatial data into depth. More specifically,\nthis op outputs a copy of the input tensor where values from the `height`\nand `width` dimensions are moved to the `depth` dimension.\nThe attr `block_size` indicates the input block size.\n\n * Non-overlapping blocks of size `block_size x block_size` are rearranged\n into depth at each location.\n * The depth of the output tensor is `block_size * block_size * input_depth`.\n * The Y, X coordinates within each block of the input become the high order\n component of the output channel index.\n * The input tensor's height and width must be divisible by block_size.\n\nThe `data_format` attr specifies the layout of the input and output tensors\nwith the following options:\n \"NHWC\": `[ batch, height, width, channels ]`\n \"NCHW\": `[ batch, channels, height, width ]`\n \"NCHW_VECT_C\":\n `qint8 [ batch, channels / 4, height, width, 4 ]`\n\nIt is useful to consider the operation as transforming a 6-D Tensor.\ne.g. for data_format = NHWC,\n Each element in the input tensor can be specified via 6 coordinates,\n ordered by decreasing memory layout significance as:\n n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates\n within the output image, bX, bY means coordinates\n within the input block, iC means input channels).\n The output would be a transpose to the following layout:\n n,oY,oX,bY,bX,iC\n\nThis operation is useful for resizing the activations between convolutions\n(but keeping all data), e.g. instead of pooling. It is also useful for training\npurely convolutional models.\n\nFor example, given an input of shape `[1, 2, 2, 1]`, data_format = \"NHWC\" and\nblock_size = 2:\n\n```\nx = [[[[1], [2]],\n [[3], [4]]]]\n```\n\nThis operation will output a tensor of shape `[1, 1, 1, 4]`:\n\n```\n[[[[1, 2, 3, 4]]]]\n```\n\nHere, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,\nthe corresponding output will have a single element (i.e. 
width and height are\nboth 1) and will have a depth of 4 channels (1 * block_size * block_size).\nThe output element shape is `[1, 1, 4]`.\n\nFor an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThis operation, for block_size of 2, will return the following tensor of shape\n`[1, 1, 1, 12]`\n\n```\n[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]\n```\n\nSimilarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:\n\n```\nx = [[[[1], [2], [5], [6]],\n [[3], [4], [7], [8]],\n [[9], [10], [13], [14]],\n [[11], [12], [15], [16]]]]\n```\n\nthe operator will return the following tensor of shape `[1 2 2 4]`:\n\n```\nx = [[[[1, 2, 3, 4],\n [5, 6, 7, 8]],\n [[9, 10, 11, 12],\n [13, 14, 15, 16]]]]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "block_size", + "type": "int64", + "description": "The size of the spatial block.", + "minimum": 2 + }, + { + "name": "data_format", + "type": "string", + "description": "Must be one of the following: `NHWC`, `NCHW`, `NCHW_VECT_C`.", + "default": "NHWC" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseAccumulatorApplyGradient", + "summary": "Applies a sparse gradient to a given accumulator.", + "description": "Does not add if local_step is smaller than the accumulator's\nglobal_step.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "has_known_shape", + "type": "boolean", + "description": "Boolean indicating whether gradient_shape is unknown, in which\ncase the input is ignored during validation." + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to an accumulator.", + "type": 7, + "isRef": true + }, + { + "name": "local_step", + "description": "The local_step value at which the sparse gradient was computed.", + "type": 9 + }, + { + "name": "gradient_indices", + "description": "Indices of the sparse gradient to be accumulated. Must be a\nvector.", + "type": 9 + }, + { + "name": "gradient_values", + "description": "Values are the non-zero slices of the gradient, and must have\nthe same first dimension as indices, i.e., the nnz represented by indices and\nvalues must be consistent.", + "typeAttr": "dtype" + }, + { + "name": "gradient_shape", + "description": "Shape of the sparse gradient to be accumulated.", + "type": 9 + } + ] + }, + { + "name": "SparseAccumulatorTakeGradient", + "summary": "Extracts the average sparse gradient in a SparseConditionalAccumulator.", + "description": "The op blocks until sufficient (i.e., more than num_required)\ngradients have been accumulated. If the accumulator has already\naggregated more than num_required gradients, it will return its\naverage of the accumulated gradients. Also automatically increments\nthe recorded global_step in the accumulator by 1, and resets the\naggregate to 0.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. 
Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a SparseConditionalAccumulator.", + "type": 7, + "isRef": true + }, + { + "name": "num_required", + "description": "Number of gradients required before we return an aggregate.", + "type": 3 + } + ], + "outputs": [ + { + "name": "indices", + "description": "Indices of the average of the accumulated sparse gradients.", + "type": 9 + }, + { + "name": "values", + "description": "Values of the average of the accumulated sparse gradients.", + "typeAttr": "dtype" + }, + { + "name": "shape", + "description": "Shape of the average of the accumulated sparse gradients.", + "type": 9 + } + ] + }, + { + "name": "SparseAdd", + "summary": "Adds two `SparseTensor` objects to produce another `SparseTensor`.", + "description": "The input `SparseTensor` objects' indices are assumed ordered in standard\nlexicographic order. If this is not the case, before this step run\n`SparseReorder` to restore index ordering.\n\nBy default, if two values sum to zero at some index, the output `SparseTensor`\nwould still include that particular location in its index, storing a zero in the\ncorresponding value slot. To override this, callers can specify `thresh`,\nindicating that if the sum has a magnitude strictly smaller than `thresh`, its\ncorresponding value and index would then not be included. In particular,\n`thresh == 0` (default) means everything is kept and actual thresholding happens\nonly for a positive value.\n\nIn the following shapes, `nnz` is the count after taking `thresh` into account.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Treal", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "a_indices", + "description": "2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.", + "type": 9 + }, + { + "name": "a_values", + "description": "1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector.", + "typeAttr": "T" + }, + { + "name": "a_shape", + "description": "1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector.", + "type": 9 + }, + { + "name": "b_indices", + "description": "2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.", + "type": 9 + }, + { + "name": "b_values", + "description": "1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector.", + "typeAttr": "T" + }, + { + "name": "b_shape", + "description": "1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector.", + "type": 9 + }, + { + "name": "thresh", + "description": "0-D. 
The magnitude threshold that determines if an output value/index\npair takes space.", + "typeAttr": "Treal" + } + ], + "outputs": [ + { + "name": "sum_indices", + "type": 9 + }, + { + "name": "sum_values", + "typeAttr": "T" + }, + { + "name": "sum_shape", + "type": 9 + } + ] + }, + { + "name": "SparseAddGrad", + "summary": "The gradient operator for the SparseAdd op.", + "description": "The SparseAdd op calculates A + B, where A, B, and the sum are all represented\nas `SparseTensor` objects. This op takes in the upstream gradient w.r.t.\nnon-empty values of the sum, and outputs the gradients w.r.t. the non-empty\nvalues of A and B.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "backprop_val_grad", + "description": "1-D with shape `[nnz(sum)]`. The gradient with respect to\nthe non-empty values of the sum.", + "typeAttr": "T" + }, + { + "name": "a_indices", + "description": "2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.", + "type": 9 + }, + { + "name": "b_indices", + "description": "2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.", + "type": 9 + }, + { + "name": "sum_indices", + "description": "2-D. The `indices` of the sum `SparseTensor`, size\n`[nnz(sum), ndims]`.", + "type": 9 + } + ], + "outputs": [ + { + "name": "a_val_grad", + "description": "1-D with shape `[nnz(A)]`. The gradient with respect to the\nnon-empty values of A.", + "typeAttr": "T" + }, + { + "name": "b_val_grad", + "description": "1-D with shape `[nnz(B)]`. The gradient with respect to the\nnon-empty values of B.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseApplyAdadelta", + "summary": "Update relevant entries in '*var' and '*accum' according to the adadelta scheme.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum_update", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Constant factor. 
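The `thresh` input of SparseAdd above surfaces in the public API as the `threshold` argument of `tf.sparse.add`; a minimal sketch, assuming TensorFlow 2.x eager execution:

```python
import tensorflow as tf

a = tf.sparse.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
b = tf.sparse.SparseTensor(indices=[[0, 0], [1, 1]], values=[-1.0, 0.1],
                           dense_shape=[2, 2])

# threshold == 0 keeps the explicit zero produced at [0, 0].
print(tf.sparse.add(a, b, threshold=0.0).values.numpy())  # [0.  0.1]
# A positive threshold drops sums with magnitude strictly below it.
print(tf.sparse.add(a, b, threshold=0.5).values.numpy())  # []
```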
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SparseApplyAdagrad", + "summary": "Update relevant entries in '*var' and '*accum' according to the adagrad scheme.", + "description": "That is for rows we have grad for, we update var and accum as follows:\n$$accum += grad * grad$$\n$$var -= lr * grad * (1 / sqrt(accum))$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "update_slots", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SparseApplyAdagradDA", + "summary": "Update entries in '*var' and '*accum' according to the proximal adagrad scheme.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "gradient_accumulator", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "gradient_squared_accumulator", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + }, + { + "name": "lr", + "description": "Learning rate. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "global_step", + "description": "Training step number. Must be a scalar.", + "type": 9 + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SparseApplyAdagradV2", + "summary": "Update relevant entries in '*var' and '*accum' according to the adagrad scheme.", + "description": "That is for rows we have grad for, we update var and accum as follows:\n$$accum += grad * grad$$\n$$var -= lr * grad * (1 / sqrt(accum))$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "update_slots", + "type": "boolean", + "default": true + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Constant factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SparseApplyCenteredRMSProp", + "summary": "Update '*var' according to the centered RMSProp algorithm.", + "description": "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. 
This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\n$$mg <- rho * mg_{t-1} + (1-rho) * grad$$\n$$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$\n$$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)$$\n$$var <- var - mom$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "mg", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "ms", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "mom", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var, ms and mom.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SparseApplyFtrl", + "summary": "Update relevant entries in '*var' according to the Ftrl-proximal scheme.", + "description": "That is for rows we have grad for, we update var, accum and linear as follows:\n$$accum_{new} = accum + grad * grad$$\n$$linear += grad - (accum_{new}^{-lr_{power}} - accum^{-lr_{power}}) / lr * var$$\n$$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$\n$$var = (sign(linear) * l1 - linear) / quadratic\\ if\\ |linear| > l1\\ else\\ 0.0$$\n$$accum = accum_{new}$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
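A NumPy sketch of one row of the Ftrl-proximal update described above (with the sign and parenthesization matching the SparseApplyFtrlV2 entry below); illustrative only:

```python
import numpy as np

def ftrl_row_update(var, accum, linear, g, lr, l1, l2, lr_power):
    """One row of the SparseApplyFtrl update, following the formulas above."""
    accum_new = accum + g * g
    linear = linear + g - (accum_new ** -lr_power - accum ** -lr_power) / lr * var
    quadratic = 1.0 / (accum_new ** lr_power * lr) + 2.0 * l2
    var = np.where(np.abs(linear) > l1,
                   (np.sign(linear) * l1 - linear) / quadratic, 0.0)
    return var, accum_new, linear

var, accum, linear = np.array([1.0, -1.0]), np.array([0.1, 0.1]), np.zeros(2)
var, accum, linear = ftrl_row_update(var, accum, linear,
                                     g=np.array([0.5, -0.5]),
                                     lr=0.1, l1=0.01, l2=0.01, lr_power=-0.5)
```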
+ }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "multiply_linear_by_lr", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "linear", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "lr_power", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SparseApplyFtrlV2", + "summary": "Update relevant entries in '*var' according to the Ftrl-proximal scheme.", + "description": "That is for rows we have grad for, we update var, accum and linear as follows:\ngrad_with_shrinkage = grad + 2 * l2_shrinkage * var\naccum_new = accum + grad * grad\nlinear += grad_with_shrinkage -\n (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "multiply_linear_by_lr", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "linear", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. 
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 shrinkage regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2_shrinkage", + "typeAttr": "T" + }, + { + "name": "lr_power", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SparseApplyMomentum", + "summary": "Update relevant entries in '*var' and '*accum' according to the momentum scheme.", + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\nThat is for rows we have grad for, we update var and accum as follows:\n\n$$accum = accum * momentum + grad$$\n$$var -= lr * accum$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + }, + { + "name": "use_nesterov", + "type": "boolean", + "description": "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + }, + { + "name": "momentum", + "description": "Momentum. Must be a scalar.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SparseApplyProximalAdagrad", + "summary": "Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.", + "description": "That is for rows we have grad for, we update var and accum as follows:\n$$accum += grad * grad$$\n$$prox_v = var$$\n$$prox_v -= lr * grad * (1 / sqrt(accum))$$\n$$var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
+ }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "accum", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Learning rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SparseApplyProximalGradientDescent", + "summary": "Sparse update '*var' as FOBOS algorithm with fixed learning rate.", + "description": "That is for rows we have grad for, we update var as follows:\n$$prox_v = var - alpha * grad$$\n$$var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "alpha", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l1", + "description": "L1 regularization. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "l2", + "description": "L2 regularization. 
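The fixed-rate FOBOS update above reduces to a soft-thresholding step applied per selected row; a NumPy sketch:

```python
import numpy as np

def fobos_row_update(var_row, alpha, l1, l2, g):
    """prox_v = var - alpha*g; then shrink toward zero by alpha*l1."""
    prox_v = var_row - alpha * g
    return (np.sign(prox_v) / (1.0 + alpha * l2)
            * np.maximum(np.abs(prox_v) - alpha * l1, 0.0))

row = fobos_row_update(np.array([0.5, -0.02]), alpha=0.1, l1=0.05, l2=0.01,
                       g=np.array([1.0, 0.0]))
# Entries smaller than alpha*l1 are clipped to exactly zero by the max{...} term.
```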
Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var and accum.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SparseApplyRMSProp", + "summary": "Update '*var' according to the RMSProp algorithm.", + "description": "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\n$$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$\n$$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$\n$$var <- var - mom$$", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "use_locking", + "type": "boolean", + "description": "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "default": false + } + ], + "inputs": [ + { + "name": "var", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "ms", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "mom", + "description": "Should be from a Variable().", + "typeAttr": "T", + "isRef": true + }, + { + "name": "lr", + "description": "Scaling factor. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "rho", + "description": "Decay rate. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "name": "epsilon", + "description": "Ridge term. Must be a scalar.", + "typeAttr": "T" + }, + { + "name": "grad", + "description": "The gradient.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A vector of indices into the first dimension of var, ms and mom.", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "out", + "description": "Same as \"var\".", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "SparseBincount", + "summary": "Counts the number of occurrences of each value in an integer array.", + "description": "Outputs a vector with length `size` and the same dtype as `weights`. If\n`weights` are empty, then index `i` stores the number of times the value `i` is\ncounted in `arr`. If `weights` are non-empty, then index `i` stores the sum of\nthe value in `weights` at each index where the corresponding value in `arr` is\n`i`.\n\nValues in `arr` outside of the range [0, size) are ignored.", + "attributes": [ + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`." 
+ }, + { + "name": "binary_output", + "type": "boolean", + "description": "bool; Whether the kernel should count the appearance or number of occurrences.", + "default": false + } + ], + "inputs": [ + { + "name": "indices", + "description": "2D int64 `Tensor`.", + "type": 9 + }, + { + "name": "values", + "description": "1D int `Tensor`.", + "typeAttr": "Tidx" + }, + { + "name": "dense_shape", + "description": "1D int64 `Tensor`.", + "type": 9 + }, + { + "name": "size", + "description": "non-negative int scalar `Tensor`.", + "typeAttr": "Tidx" + }, + { + "name": "weights", + "description": "is an int32, int64, float32, or float64 `Tensor` with the same\nshape as `input`, or a length-0 `Tensor`, in which case it acts as all weights\nequal to 1.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].\nThe counts or summed weights for each value in the range [0, size).", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseConcat", + "summary": "Concatenates a list of `SparseTensor` along the specified dimension.", + "description": "Concatenation is with respect to the dense versions of these sparse tensors.\nIt is assumed that each input is a `SparseTensor` whose elements are ordered\nalong increasing dimension number.\n\nAll inputs' shapes must match, except for the concat dimension. The\n`indices`, `values`, and `shapes` lists must have the same length.\n\nThe output shape is identical to the inputs', except along the concat\ndimension, where it is the sum of the inputs' sizes along that dimension.\n\nThe output elements will be resorted to preserve the sort order along\nincreasing dimension number.\n\nThis op runs in `O(M log M)` time, where `M` is the total number of non-empty\nvalues across all inputs. This is due to the need for an internal sort in\norder to concatenate efficiently across an arbitrary dimension.\n\nFor example, if `concat_dim = 1` and the inputs are\n\n sp_inputs[0]: shape = [2, 3]\n [0, 2]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n sp_inputs[1]: shape = [2, 4]\n [0, 1]: \"d\"\n [0, 2]: \"e\"\n\nthen the output will be\n\n shape = [2, 7]\n [0, 2]: \"a\"\n [0, 4]: \"d\"\n [0, 5]: \"e\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\nGraphically this is equivalent to doing\n\n [ a] concat [ d e ] = [ a d e ]\n [b c ] [ ] [b c ]", + "attributes": [ + { + "name": "concat_dim", + "type": "int64", + "description": "Dimension to concatenate along. Must be in range [-rank, rank),\nwhere rank is the number of dimensions in each input `SparseTensor`." + }, + { + "name": "N", + "type": "int64", + "minimum": 2 + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "indices", + "description": "2-D. Indices of each input `SparseTensor`.", + "numberAttr": "N", + "type": 9 + }, + { + "name": "values", + "description": "1-D. Non-empty values of each `SparseTensor`.", + "numberAttr": "N", + "typeAttr": "T" + }, + { + "name": "shapes", + "description": "1-D. Shapes of each `SparseTensor`.", + "numberAttr": "N", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "2-D. Indices of the concatenated `SparseTensor`.", + "type": 9 + }, + { + "name": "output_values", + "description": "1-D. Non-empty values of the concatenated `SparseTensor`.", + "typeAttr": "T" + }, + { + "name": "output_shape", + "description": "1-D. 
Shape of the concatenated `SparseTensor`.", + "type": 9 + } + ] + }, + { + "name": "SparseConditionalAccumulator", + "summary": "A conditional accumulator for aggregating sparse gradients.", + "description": "The accumulator accepts gradients marked with local_step greater or\nequal to the most recent global_step known to the accumulator. The\naverage can be extracted from the accumulator, provided sufficient\ngradients have been accumulated. Extracting the average automatically\nresets the aggregate to 0, and increments the global_step recorded by\nthe accumulator.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the value being accumulated. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "shape", + "type": "shape", + "description": "The shape of the values." + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this accumulator will be shared under the given name\nacross multiple sessions.", + "default": "" + }, + { + "name": "reduction_type", + "type": "string", + "description": "Must be one of the following: `MEAN`, `SUM`.", + "default": "MEAN" + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the accumulator.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "SparseCountSparseOutput", + "summary": "Performs sparse-output bin counting for a sparse tensor input.", + "description": " Counts the number of times each value occurs in the input.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Dtype of the input values tensor. Must be one of the following: `int32`, `int64`." + }, + { + "name": "minlength", + "type": "int64", + "description": "Minimum value to count. Can be set to -1 for no minimum.", + "minimum": -1, + "default": -1 + }, + { + "name": "maxlength", + "type": "int64", + "description": "Maximum value to count. Can be set to -1 for no maximum.", + "minimum": -1, + "default": -1 + }, + { + "name": "binary_output", + "type": "boolean", + "description": "Whether to output the number of occurrences of each value or 1." + }, + { + "name": "output_type", + "type": "type", + "description": "Dtype of the output values tensor. Must be one of the following: `int32`, `int64`, `float32`, `float64`." 
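The SparseConcat worked example above (concat_dim = 1, shapes [2, 3] and [2, 4]) can be reproduced with `tf.sparse.concat`; a minimal sketch, assuming TensorFlow 2.x eager execution:

```python
import tensorflow as tf

a = tf.sparse.SparseTensor([[0, 2], [1, 0], [1, 1]], ["a", "b", "c"], [2, 3])
b = tf.sparse.SparseTensor([[0, 1], [0, 2]], ["d", "e"], [2, 4])

out = tf.sparse.concat(axis=1, sp_inputs=[a, b])
print(out.dense_shape.numpy())  # [2 7]
print(out.values.numpy())       # [b'a' b'd' b'e' b'b' b'c'], resorted row-major
```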
+ } + ], + "inputs": [ + { + "name": "indices", + "description": "Tensor containing the indices of the sparse tensor to count.", + "type": 9 + }, + { + "name": "values", + "description": "Tensor containing values of the sparse tensor to count.", + "typeAttr": "T" + }, + { + "name": "dense_shape", + "description": "Tensor containing the dense shape of the sparse tensor to count.", + "type": 9 + }, + { + "name": "weights", + "description": "A Tensor of the same shape as indices containing per-index weight values.\nMay also be the empty tensor if no weights are used.", + "typeAttr": "output_type" + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "Indices tensor for the resulting sparse tensor object.", + "type": 9 + }, + { + "name": "output_values", + "description": "Values tensor for the resulting sparse tensor object.", + "typeAttr": "output_type" + }, + { + "name": "output_dense_shape", + "description": "Shape tensor for the resulting sparse tensor object.", + "type": 9 + } + ] + }, + { + "name": "SparseCross", + "summary": "Generates sparse cross from a list of sparse and dense tensors.", + "description": "The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each\nrepresenting features of one feature column. It outputs a 2D `SparseTensor` with\nthe batchwise crosses of these features.\n\nFor example, if the inputs are\n\n inputs[0]: SparseTensor with shape = [2, 2]\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n inputs[1]: SparseTensor with shape = [2, 1]\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n\n inputs[2]: Tensor [[\"f\"], [\"g\"]]\n\nthen the output will be\n\n shape = [2, 2]\n [0, 0]: \"a_X_d_X_f\"\n [1, 0]: \"b_X_e_X_g\"\n [1, 1]: \"c_X_e_X_g\"\n\nif hashed_output=true then the output will be\n\n shape = [2, 2]\n [0, 0]: FingerprintCat64(\n Fingerprint64(\"f\"), FingerprintCat64(\n Fingerprint64(\"d\"), Fingerprint64(\"a\")))\n [1, 0]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"b\")))\n [1, 1]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"c\")))", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 0 + }, + { + "name": "hashed_output", + "type": "boolean", + "description": "If true, returns the hash of the cross instead of the string.\nThis will allow us avoiding string manipulations." + }, + { + "name": "num_buckets", + "type": "int64", + "description": "It is used if hashed_output is true.\noutput = hashed_value%num_buckets if num_buckets > 0 else hashed_value.", + "minimum": 0 + }, + { + "name": "hash_key", + "type": "int64", + "description": "Specify the hash_key that will be used by the `FingerprintCat64`\nfunction to combine the crosses fingerprints." + }, + { + "name": "sparse_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `int64`, `string`." + }, + { + "name": "dense_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `int64`, `string`." + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `int64`, `string`." + }, + { + "name": "internal_type", + "type": "type", + "description": "Must be one of the following: `int64`, `string`." + } + ], + "inputs": [ + { + "name": "indices", + "description": "2-D. Indices of each input `SparseTensor`.", + "numberAttr": "N", + "type": 9 + }, + { + "name": "values", + "description": "1-D. 
values of each `SparseTensor`.", + "typeListAttr": "sparse_types" + }, + { + "name": "shapes", + "description": "1-D. Shapes of each `SparseTensor`.", + "numberAttr": "N", + "type": 9 + }, + { + "name": "dense_inputs", + "description": "2-D. Columns represented by dense `Tensor`.", + "typeListAttr": "dense_types" + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "2-D. Indices of the concatenated `SparseTensor`.", + "type": 9 + }, + { + "name": "output_values", + "description": "1-D. Non-empty values of the concatenated or hashed\n`SparseTensor`.", + "typeAttr": "out_type" + }, + { + "name": "output_shape", + "description": "1-D. Shape of the concatenated `SparseTensor`.", + "type": 9 + } + ] + }, + { + "name": "SparseCrossHashed", + "summary": "Generates sparse cross from a list of sparse and dense tensors.", + "description": "The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each\nrepresenting features of one feature column. It outputs a 2D `SparseTensor` with\nthe batchwise crosses of these features.\n\nFor example, if the inputs are\n\n inputs[0]: SparseTensor with shape = [2, 2]\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n inputs[1]: SparseTensor with shape = [2, 1]\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n\n inputs[2]: Tensor [[\"f\"], [\"g\"]]\n\nthen the output will be\n\n shape = [2, 2]\n [0, 0]: \"a_X_d_X_f\"\n [1, 0]: \"b_X_e_X_g\"\n [1, 1]: \"c_X_e_X_g\"\n\nif hashed_output=true then the output will be\n\n shape = [2, 2]\n [0, 0]: FingerprintCat64(\n Fingerprint64(\"f\"), FingerprintCat64(\n Fingerprint64(\"d\"), Fingerprint64(\"a\")))\n [1, 0]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"b\")))\n [1, 1]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"c\")))", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 0 + }, + { + "name": "sparse_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `int64`, `string`." + }, + { + "name": "dense_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `int64`, `string`." + } + ], + "inputs": [ + { + "name": "indices", + "description": "2-D. Indices of each input `SparseTensor`.", + "numberAttr": "N", + "type": 9 + }, + { + "name": "values", + "description": "1-D. values of each `SparseTensor`.", + "typeListAttr": "sparse_types" + }, + { + "name": "shapes", + "description": "1-D. Shapes of each `SparseTensor`.", + "numberAttr": "N", + "type": 9 + }, + { + "name": "dense_inputs", + "description": "2-D. Columns represented by dense `Tensor`.", + "typeListAttr": "dense_types" + }, + { + "name": "num_buckets", + "description": "It is used if hashed_output is true.\noutput = hashed_value%num_buckets if num_buckets > 0 else hashed_value.", + "type": 9 + }, + { + "name": "strong_hash", + "description": "boolean, if true, siphash with salt will be used instead of farmhash.", + "type": 10 + }, + { + "name": "salt", + "description": "Specify the salt that will be used by the siphash function.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "2-D. Indices of the concatenated `SparseTensor`.", + "type": 9 + }, + { + "name": "output_values", + "description": "1-D. Non-empty values of the concatenated or hashed\n`SparseTensor`.", + "type": 9 + }, + { + "name": "output_shape", + "description": "1-D. 
Shape of the concatenated `SparseTensor`.", + "type": 9 + } + ] + }, + { + "name": "SparseCrossV2", + "summary": "Generates sparse cross from a list of sparse and dense tensors.", + "description": "The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each\nrepresenting features of one feature column. It outputs a 2D `SparseTensor` with\nthe batchwise crosses of these features.\n\nFor example, if the inputs are\n\n inputs[0]: SparseTensor with shape = [2, 2]\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n inputs[1]: SparseTensor with shape = [2, 1]\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n\n inputs[2]: Tensor [[\"f\"], [\"g\"]]\n\nthen the output will be\n\n shape = [2, 2]\n [0, 0]: \"a_X_d_X_f\"\n [1, 0]: \"b_X_e_X_g\"\n [1, 1]: \"c_X_e_X_g\"\n\nif hashed_output=true then the output will be\n\n shape = [2, 2]\n [0, 0]: FingerprintCat64(\n Fingerprint64(\"f\"), FingerprintCat64(\n Fingerprint64(\"d\"), Fingerprint64(\"a\")))\n [1, 0]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"b\")))\n [1, 1]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"c\")))", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 0 + }, + { + "name": "sparse_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `int64`, `string`." + }, + { + "name": "dense_types", + "type": "type[]", + "minimum": 0, + "description": "Must be one of the following: `int64`, `string`." + } + ], + "inputs": [ + { + "name": "indices", + "description": "2-D. Indices of each input `SparseTensor`.", + "numberAttr": "N", + "type": 9 + }, + { + "name": "values", + "description": "1-D. values of each `SparseTensor`.", + "typeListAttr": "sparse_types" + }, + { + "name": "shapes", + "description": "1-D. Shapes of each `SparseTensor`.", + "numberAttr": "N", + "type": 9 + }, + { + "name": "dense_inputs", + "description": "2-D. Columns represented by dense `Tensor`.", + "typeListAttr": "dense_types" + }, + { + "name": "sep", + "description": "string used when joining a list of string inputs, can be used as separator later.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "2-D. Indices of the concatenated `SparseTensor`.", + "type": 9 + }, + { + "name": "output_values", + "description": "1-D. Non-empty values of the concatenated or hashed\n`SparseTensor`.", + "type": 7 + }, + { + "name": "output_shape", + "description": "1-D. Shape of the concatenated `SparseTensor`.", + "type": 9 + } + ] + }, + { + "name": "SparseDenseCwiseAdd", + "summary": "Adds up a SparseTensor and a dense Tensor, using these special rules:", + "description": "(1) Broadcasts the dense side to have the same shape as the sparse side, if\n eligible;\n(2) Then, only the dense values pointed to by the indices of the SparseTensor\n participate in the cwise addition.\n\nBy these rules, the result is a logical SparseTensor with exactly the same\nindices and shape, but possibly with different non-zero values. The output of\nthis Op is the resultant non-zero values.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "sp_indices", + "description": "2-D. 
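The string-cross example shared by the three SparseCross entries above corresponds to `tf.sparse.cross` (and `tf.sparse.cross_hashed` for the fingerprinted form); a minimal sketch, assuming TF >= 2.3 for the `separator` argument:

```python
import tensorflow as tf

a = tf.sparse.SparseTensor([[0, 0], [1, 0], [1, 1]], ["a", "b", "c"], [2, 2])
d = tf.constant([["d"], ["e"]])  # a dense string column

out = tf.sparse.cross([a, d], separator="_X_")
print(out.values.numpy())  # [b'a_X_d' b'b_X_e' b'c_X_e']
```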
`N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "type": 9 + }, + { + "name": "sp_values", + "description": "1-D. `N` non-empty values corresponding to `sp_indices`.", + "typeAttr": "T" + }, + { + "name": "sp_shape", + "description": "1-D. Shape of the input SparseTensor.", + "type": 9 + }, + { + "name": "dense", + "description": "`R`-D. The dense Tensor operand.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "1-D. The `N` values that are operated on.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseDenseCwiseDiv", + "summary": "Component-wise divides a SparseTensor by a dense Tensor.", + "description": "*Limitation*: this Op only broadcasts the dense side to the sparse side, but not\nthe other direction.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "sp_indices", + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "type": 9 + }, + { + "name": "sp_values", + "description": "1-D. `N` non-empty values corresponding to `sp_indices`.", + "typeAttr": "T" + }, + { + "name": "sp_shape", + "description": "1-D. Shape of the input SparseTensor.", + "type": 9 + }, + { + "name": "dense", + "description": "`R`-D. The dense Tensor operand.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "1-D. The `N` values that are operated on.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseDenseCwiseMul", + "summary": "Component-wise multiplies a SparseTensor by a dense Tensor.", + "description": "The output locations corresponding to the implicitly zero elements in the sparse\ntensor will be zero (i.e., will not take up storage space), regardless of the\ncontents of the dense tensor (even if it's +/-INF and that INF*0 == NaN).\n\n*Limitation*: this Op only broadcasts the dense side to the sparse side, but not\nthe other direction.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "sp_indices", + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "type": 9 + }, + { + "name": "sp_values", + "description": "1-D. `N` non-empty values corresponding to `sp_indices`.", + "typeAttr": "T" + }, + { + "name": "sp_shape", + "description": "1-D. Shape of the input SparseTensor.", + "type": 9 + }, + { + "name": "dense", + "description": "`R`-D. The dense Tensor operand.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "1-D. The `N` values that are operated on.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseFillEmptyRows", + "summary": "Fills empty rows in the input 2-D `SparseTensor` with a default value.", + "description": "The input `SparseTensor` is represented via the tuple of inputs\n(`indices`, `values`, `dense_shape`). 
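The cwise family above is reachable through `tf.raw_ops`, taking the SparseTensor components separately and returning only the new values; a minimal sketch for SparseDenseCwiseMul, assuming TensorFlow 2.x eager execution:

```python
import tensorflow as tf

st = tf.sparse.SparseTensor([[0, 0], [1, 1]], [2.0, 3.0], [2, 2])
dense = tf.constant([[10.0, 0.0], [0.0, 4.0]])

out_vals = tf.raw_ops.SparseDenseCwiseMul(sp_indices=st.indices,
                                          sp_values=st.values,
                                          sp_shape=st.dense_shape,
                                          dense=dense)
print(out_vals.numpy())  # [20. 12.] -- only stored positions participate
```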
The output `SparseTensor` has the\nsame `dense_shape` but with indices `output_indices` and values\n`output_values`.\n\nThis op inserts a single entry for every row that doesn't have any values.\nThe index is created as `[row, 0, ..., 0]` and the inserted value\nis `default_value`.\n\nFor example, suppose `sp_input` has shape `[5, 6]` and non-empty values:\n\n [0, 1]: a\n [0, 3]: b\n [2, 0]: c\n [3, 1]: d\n\nRows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:\n\n [0, 1]: a\n [0, 3]: b\n [1, 0]: default_value\n [2, 0]: c\n [3, 1]: d\n [4, 0]: default_value\n\nThe output `SparseTensor` will be in row-major order and will have the\nsame shape as the input.\n\nThis op also returns an indicator vector shaped `[dense_shape[0]]` such that\n\n empty_row_indicator[i] = True iff row i was an empty row.\n\nAnd a reverse index map vector shaped `[indices.shape[0]]` that is used during\nbackpropagation,\n\n reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "indices", + "description": "2-D. the indices of the sparse tensor.", + "type": 9 + }, + { + "name": "values", + "description": "1-D. the values of the sparse tensor.", + "typeAttr": "T" + }, + { + "name": "dense_shape", + "description": "1-D. the shape of the sparse tensor.", + "type": 9 + }, + { + "name": "default_value", + "description": "0-D. default value to insert into location `[row, 0, ..., 0]`\n for rows missing from the input sparse tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "2-D. the indices of the filled sparse tensor.", + "type": 9 + }, + { + "name": "output_values", + "description": "1-D. the values of the filled sparse tensor.", + "typeAttr": "T" + }, + { + "name": "empty_row_indicator", + "description": "1-D. whether the dense row was missing in the\ninput sparse tensor.", + "type": 10 + }, + { + "name": "reverse_index_map", + "description": "1-D. a map from the input indices to the output indices.", + "type": 9 + } + ] + }, + { + "name": "SparseFillEmptyRowsGrad", + "summary": "The gradient of SparseFillEmptyRows.", + "description": "Takes vectors reverse_index_map, shaped `[N]`, and grad_values,\nshaped `[N_full]`, where `N_full >= N` and copies data into either\n`d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and\n`d_default_value` is a scalar.\n\n d_values[j] = grad_values[reverse_index_map[j]]\n d_default_value = sum_{k : 0 .. N_full - 1} (\n grad_values[k] * 1{k not in reverse_index_map})", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "reverse_index_map", + "description": "1-D. The reverse index map from SparseFillEmptyRows.", + "type": 9 + }, + { + "name": "grad_values", + "description": "1-D. The gradients from backprop.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "d_values", + "description": "1-D. The backprop into values.", + "typeAttr": "T" + }, + { + "name": "d_default_value", + "description": "0-D. The backprop into default_value.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseMatMul", + "summary": "Multiply matrix \"a\" by matrix \"b\".", + "description": "The inputs must be two-dimensional matrices and the inner dimension of \"a\" must\nmatch the outer dimension of \"b\". Both \"a\" and \"b\" must be `Tensor`s not\n`SparseTensor`s. 
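The SparseFillEmptyRows example above maps onto `tf.sparse.fill_empty_rows`, which returns the filled tensor plus the empty-row indicator; a minimal sketch, assuming TensorFlow 2.x eager execution:

```python
import tensorflow as tf

st = tf.sparse.SparseTensor([[0, 1], [0, 3], [2, 0], [3, 1]],
                            ["a", "b", "c", "d"], [5, 6])

filled, empty_row_indicator = tf.sparse.fill_empty_rows(st, "default_value")
print(empty_row_indicator.numpy())  # [False  True False False  True]
```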
This op is optimized for the case where at least one of \"a\" or\n\"b\" is sparse, in the sense that they have a large proportion of zero values.\nThe breakeven for using this versus a dense matrix multiply on one platform was\n30% zero values in the sparse matrix.\n\nThe gradient computation of this operation will only take advantage of sparsity\nin the input gradient when that gradient comes from a Relu.", + "attributes": [ + { + "name": "transpose_a", + "type": "boolean", + "default": false + }, + { + "name": "transpose_b", + "type": "boolean", + "default": false + }, + { + "name": "a_is_sparse", + "type": "boolean", + "default": false + }, + { + "name": "b_is_sparse", + "type": "boolean", + "default": false + }, + { + "name": "Ta", + "type": "type", + "description": "Must be one of the following: `float32`, `bfloat16`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tb", + "type": "type", + "description": "Must be one of the following: `float32`, `bfloat16`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "Ta" + }, + { + "name": "b", + "typeAttr": "Tb" + } + ], + "outputs": [ + { + "name": "product", + "type": 1 + } + ] + }, + { + "name": "SparseMatrixAdd", + "summary": "Sparse addition of two CSR matrices, C = alpha * A + beta * B.", + "description": "The gradients of SparseMatrixAdd outputs with respect to alpha and beta are not\ncurrently defined (TensorFlow will return zeros for these entries).", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "a", + "description": "A CSRSparseMatrix.", + "type": 21 + }, + { + "name": "b", + "description": "A CSRSparseMatrix.", + "type": 21 + }, + { + "name": "alpha", + "description": "A constant scalar.", + "typeAttr": "T" + }, + { + "name": "beta", + "description": "A constant scalar.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "c", + "description": "A CSRSparseMatrix.", + "type": 21 + } + ] + }, + { + "name": "SparseMatrixMatMul", + "summary": "Matrix-multiplies a sparse matrix with a dense matrix.", + "description": "Returns a dense matrix.\nFor inputs A and B, where A is CSR and B is dense, this op returns a dense C.\n\nIf transpose_output is `false`, returns:\n```\n C = A . B\n```\n\nIf transpose_output is `true`, returns:\n```\n C = transpose(A . B) = transpose(B) . transpose(A)\n```\nwhere the transposition is performed along the two innermost (matrix)\ndimensions.\n\nIf conjugate_output is `true`, returns:\n```\n C = conjugate(A . B) = conjugate(A) . conjugate(B)\n```\n\nIf both conjugate_output and transpose_output are `true`, returns:\n```\n C = conjugate(transpose(A . 
B)) = conjugate(transpose(B)) .\n conjugate(transpose(A))\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "transpose_a", + "type": "boolean", + "description": "Indicates whether `a` should be transposed.", + "default": false + }, + { + "name": "transpose_b", + "type": "boolean", + "description": "Indicates whether `b` should be transposed.", + "default": false + }, + { + "name": "adjoint_a", + "type": "boolean", + "description": "Indicates whether `a` should be conjugate-transposed.", + "default": false + }, + { + "name": "adjoint_b", + "type": "boolean", + "description": "Indicates whether `b` should be conjugate-transposed.", + "default": false + }, + { + "name": "transpose_output", + "type": "boolean", + "description": "Transposes the product of `a` and `b`.", + "default": false + }, + { + "name": "conjugate_output", + "type": "boolean", + "description": "Conjugates the product of `a` and `b`.", + "default": false + } + ], + "inputs": [ + { + "name": "a", + "description": "A CSRSparseMatrix.", + "type": 21 + }, + { + "name": "b", + "description": "A dense tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A dense output tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseMatrixMul", + "summary": "Element-wise multiplication of a sparse matrix with a dense tensor.", + "description": "Returns a sparse matrix.\n\nThe dense tensor `b` may be a scalar; otherwise `a` must be a rank-3\n`SparseMatrix`, `b` must be shaped `[batch_size, 1, 1]`, and the\nmultiply operation broadcasts.\n\n**NOTE** even if `b` is zero, the sparsity structure of the output does not\nchange.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "a", + "description": "A CSRSparseMatrix.", + "type": 21 + }, + { + "name": "b", + "description": "A dense tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A CSRSparseMatrix.", + "type": 21 + } + ] + }, + { + "name": "SparseMatrixNNZ", + "summary": "Returns the number of nonzeroes of `sparse_matrix`.", + "inputs": [ + { + "name": "sparse_matrix", + "description": "A CSRSparseMatrix.", + "type": 21 + } + ], + "outputs": [ + { + "name": "nnz", + "description": "The number of nonzeroes of `sparse_matrix`.", + "type": 3 + } + ] + }, + { + "name": "SparseMatrixOrderingAMD", + "summary": "Computes the Approximate Minimum Degree (AMD) ordering of `input`.", + "description": "Computes the Approximate Minimum Degree (AMD) ordering for a sparse matrix.\n\nThe returned permutation may be used to permute the rows and columns of the\ngiven sparse matrix. This typically results in the permuted sparse matrix's sparse\nCholesky (or other) decomposition having less zero fill-in than the\ndecomposition of the original matrix.\n\nThe input sparse matrix may have rank 2 or rank 3. The output Tensor,\nrepresenting the permutation, would then have rank 1 or 2 respectively, with the same batch\nshape as the input.\n\nEach component of the input sparse matrix must represent a square symmetric\nmatrix; only the lower triangular part of the matrix is read. The values of the\nsparse matrix do not affect the returned permutation; only the sparsity\npattern of the sparse matrix is used. 
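A CSRSparseMatrix is a variant tensor built from COO components; SparseMatrixMatMul then multiplies it against a dense matrix. A minimal sketch via `tf.raw_ops`, assuming a TensorFlow build that ships the CSR sparse-matrix kernels:

```python
import tensorflow as tf

st = tf.sparse.SparseTensor([[0, 0], [1, 1]], [2.0, 3.0], [2, 2])
a_sm = tf.raw_ops.SparseTensorToCSRSparseMatrix(indices=st.indices,
                                                values=st.values,
                                                dense_shape=st.dense_shape)

b = tf.constant([[1.0, 2.0], [3.0, 4.0]])
c = tf.raw_ops.SparseMatrixMatMul(a=a_sm, b=b)  # dense result
print(c.numpy())  # [[2. 4.] [9. 12.]]
```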
Hence, a single AMD ordering may be\nreused for the Cholesky decompositions of sparse matrices with the same sparsity\npattern but with possibly different values.\n\nEach batch component of the output permutation represents a permutation of `N`\nelements, where the input sparse matrix components each have `N` rows. That is,\nthe component contains each of the integers `{0, .. N-1}` exactly once. The\n`i`th element represents the row index that the `i`th row maps to.\n\nUsage example:\n\n```python\n from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops\n\n a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])\n a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)\n a_dense_shape = [4, 4]\n\n with tf.Session() as sess:\n # Define (COO format) SparseTensor over Numpy array.\n a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)\n\n # Convert SparseTensors to CSR SparseMatrix.\n a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(\n a_st.indices, a_st.values, a_st.dense_shape)\n\n # Obtain the AMD Ordering for the CSR SparseMatrix.\n ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)\n\n ordering_amd_value = sess.run(ordering_amd)\n```\n\n`ordering_amd_value` stores the AMD ordering: `[1 2 3 0]`.\n\ninput: A `CSRSparseMatrix`.", + "inputs": [ + { + "name": "input", + "description": "A `CSRSparseMatrix`.", + "type": 21 + } + ], + "outputs": [ + { + "name": "output", + "description": "The Approximate Minimum Degree (AMD) ordering of `input`.", + "type": 3 + } + ] + }, + { + "name": "SparseMatrixSoftmax", + "summary": "Calculates the softmax of a CSRSparseMatrix.", + "description": "Calculate the softmax of the innermost dimensions of a SparseMatrix.\n\nMissing values are treated as `-inf` (i.e., logits of zero probability); and\nthe output has the same sparsity structure as the input (though missing values\nin the output may now be treated as having probability zero).", + "attributes": [ + { + "name": "type", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "logits", + "description": "A CSRSparseMatrix.", + "type": 21 + } + ], + "outputs": [ + { + "name": "softmax", + "description": "A CSRSparseMatrix.", + "type": 21 + } + ] + }, + { + "name": "SparseMatrixSoftmaxGrad", + "summary": "Calculates the gradient of the SparseMatrixSoftmax op.", + "attributes": [ + { + "name": "type", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "softmax", + "description": "A CSRSparseMatrix.", + "type": 21 + }, + { + "name": "grad_softmax", + "description": "The gradient of `softmax`.", + "type": 21 + } + ], + "outputs": [ + { + "name": "gradient", + "description": "The output gradient.", + "type": 21 + } + ] + }, + { + "name": "SparseMatrixSparseCholesky", + "summary": "Computes the sparse Cholesky decomposition of `input`.", + "description": "Computes the Sparse Cholesky decomposition of a sparse matrix, with the given\nfill-in reducing permutation.\n\nThe input sparse matrix and the fill-in reducing permutation `permutation` must\nhave compatible shapes. If the sparse matrix has rank 3, with the batch\ndimension `B`, then the `permutation` must be of rank 2, with the same batch\ndimension `B`. 
There is no support for broadcasting.\n\nFurthermore, each component vector of `permutation` must be of length `N`,\ncontaining each of the integers {0, 1, ..., N - 1} exactly once, where `N` is\nthe number of rows of each component of the sparse matrix.\n\nEach component of the input sparse matrix must represent a symmetric positive\ndefinite (SPD) matrix, although only the lower triangular part of the matrix is\nread. If any individual component is not SPD, then an InvalidArgument error is\nthrown.\n\nThe returned sparse matrix has the same dense shape as the input sparse matrix.\nFor each component `A` of the input sparse matrix, the corresponding output\nsparse matrix represents `L`, the lower triangular Cholesky factor satisfying\nthe following identity:\n\n```\n A = L * Lt\n```\n\nwhere Lt denotes the transpose of L (or its conjugate transpose, if `type` is\n`complex64` or `complex128`).\n\nThe `type` parameter denotes the type of the matrix elements. The supported\ntypes are: `float32`, `float64`, `complex64` and `complex128`.\n\nUsage example:\n\n```python\n import numpy as np\n import tensorflow as tf\n\n from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops\n\n a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])\n a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)\n a_dense_shape = [4, 4]\n\n with tf.Session() as sess:\n # Define (COO format) SparseTensor over Numpy array.\n a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)\n\n # Convert SparseTensors to CSR SparseMatrix.\n a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(\n a_st.indices, a_st.values, a_st.dense_shape)\n\n # Obtain the Sparse Cholesky factor using AMD Ordering for reducing zero\n # fill-in (number of structural non-zeros in the sparse Cholesky factor).\n ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)\n cholesky_sparse_matrices = (\n sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(\n a_sm, ordering_amd, type=tf.float32))\n\n # Convert the CSRSparseMatrix Cholesky factor to a dense Tensor\n dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(\n cholesky_sparse_matrices, tf.float32)\n\n # Evaluate the dense Tensor value.\n dense_cholesky_value = sess.run(dense_cholesky)\n```\n\n`dense_cholesky_value` stores the dense Cholesky factor:\n\n```\n [[ 1. 0. 0. 0.]\n [ 0. 1.41 0. 0.]\n [ 0. 0.70 1.58 0.]\n [ 0. 0. 0. 2.]]\n```\n\n\ninput: A `CSRSparseMatrix`.\npermutation: A `Tensor`.\ntype: The type of `input`.", + "attributes": [ + { + "name": "type", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "A `CSRSparseMatrix`.", + "type": 21 + }, + { + "name": "permutation", + "description": "A fill-in reducing permutation matrix.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The sparse Cholesky decomposition of `input`.", + "type": 21 + } + ] + }, + { + "name": "SparseMatrixSparseMatMul", + "summary": "Sparse-matrix-multiplies two CSR matrices `a` and `b`.", + "description": "Performs a matrix multiplication of a sparse matrix `a` with a sparse matrix\n`b`; returns a sparse matrix `a * b`, unless either `a` or `b` is transposed or\nadjointed.\n\nEach matrix may be transposed or adjointed (conjugated and transposed)\naccording to the Boolean parameters `transpose_a`, `adjoint_a`, `transpose_b`\nand `adjoint_b`. 
At most one of `transpose_a` or `adjoint_a` may be True.\nSimilarly, at most one of `transpose_b` or `adjoint_b` may be True.\n\nThe inputs must have compatible shapes. That is, the inner dimension of `a`\nmust be equal to the outer dimension of `b`. This requirement is adjusted\naccording to whether either `a` or `b` is transposed or adjointed.\n\nThe `type` parameter denotes the type of the matrix elements. Both `a` and `b`\nmust have the same type. The supported types are: `float32`, `float64`,\n`complex64` and `complex128`.\n\nBoth `a` and `b` must have the same rank. Broadcasting is not supported. If they\nhave rank 3, each batch of 2D CSRSparseMatrices within `a` and `b` must have the\nsame dense shape.\n\nThe sparse matrix product may have numeric (non-structural) zeros.\nTODO(anudhyan): Consider adding a boolean attribute to control whether to prune\nzeros.\n\nUsage example:\n\n```python\n import numpy as np\n import tensorflow as tf\n\n from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops\n\n a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])\n a_values = np.array([1.0, 5.0, -1.0, -2.0], np.float32)\n a_dense_shape = [4, 5]\n\n b_indices = np.array([[0, 0], [3, 0], [3, 1]])\n b_values = np.array([2.0, 7.0, 8.0], np.float32)\n b_dense_shape = [5, 3]\n\n with tf.Session() as sess:\n # Define (COO format) Sparse Tensors over Numpy arrays\n a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)\n b_st = tf.sparse.SparseTensor(b_indices, b_values, b_dense_shape)\n\n # Convert SparseTensors to CSR SparseMatrix\n a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(\n a_st.indices, a_st.values, a_st.dense_shape)\n b_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(\n b_st.indices, b_st.values, b_st.dense_shape)\n\n # Compute the CSR SparseMatrix matrix multiplication\n c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(\n a=a_sm, b=b_sm, type=tf.float32)\n\n # Convert the CSR SparseMatrix product to a dense Tensor\n c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(\n c_sm, tf.float32)\n # Evaluate the dense Tensor value\n c_sm_dense_value = sess.run(c_sm_dense)\n```\n\n`c_sm_dense_value` stores the dense matrix product:\n\n```\n [[ 2. 0. 0.]\n [ 0. 0. 0.]\n [ 35. 40. 0.]\n [ -4. 0. 0.]]\n```\n\na: A `CSRSparseMatrix`.\nb: A `CSRSparseMatrix` with the same type and rank as `a`.\ntype: The type of both `a` and `b`.\ntranspose_a: If True, `a` is transposed before multiplication.\ntranspose_b: If True, `b` is transposed before multiplication.\nadjoint_a: If True, `a` is adjointed before multiplication.\nadjoint_b: If True, `b` is adjointed before multiplication.", + "attributes": [ + { + "name": "type", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." 
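The adjoint identity quoted at the start of this section, `adjoint(matmul(A, B)) = conjugate(transpose(B)) . conjugate(transpose(A))`, holds for any conforming matrices and can be checked numerically; a minimal NumPy sketch (independent of the CSR kernels):

```python
import numpy as np

# Check adjoint(A @ B) == adjoint(B) @ adjoint(A) for complex matrices,
# where adjoint means conjugate transpose.
rng = np.random.default_rng(0)
A = rng.normal(size=(3, 4)) + 1j * rng.normal(size=(3, 4))
B = rng.normal(size=(4, 5)) + 1j * rng.normal(size=(4, 5))

adjoint = lambda M: np.conjugate(M).T
assert np.allclose(adjoint(A @ B), adjoint(B) @ adjoint(A))
```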
+ }, + { + "name": "transpose_a", + "type": "boolean", + "description": "Indicates whether `a` should be transposed.", + "default": false + }, + { + "name": "transpose_b", + "type": "boolean", + "description": "Indicates whether `b` should be transposed.", + "default": false + }, + { + "name": "adjoint_a", + "type": "boolean", + "description": "Indicates whether `a` should be conjugate-transposed.", + "default": false + }, + { + "name": "adjoint_b", + "type": "boolean", + "description": "Indicates whether `b` should be conjugate-transposed.", + "default": false + } + ], + "inputs": [ + { + "name": "a", + "description": "A CSRSparseMatrix.", + "type": 21 + }, + { + "name": "b", + "description": "A CSRSparseMatrix.", + "type": 21 + } + ], + "outputs": [ + { + "name": "c", + "description": "A CSRSparseMatrix.", + "type": 21 + } + ] + }, + { + "name": "SparseMatrixTranspose", + "summary": "Transposes the inner (matrix) dimensions of a CSRSparseMatrix.", + "description": "Transposes the inner (matrix) dimensions of a SparseMatrix and optionally\nconjugates its values.", + "attributes": [ + { + "name": "conjugate", + "type": "boolean", + "description": "Indicates whether `input` should be conjugated.", + "default": false + }, + { + "name": "type", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "input", + "description": "A CSRSparseMatrix.", + "type": 21 + } + ], + "outputs": [ + { + "name": "output", + "description": "A CSRSparseMatrix.", + "type": 21 + } + ] + }, + { + "name": "SparseMatrixZeros", + "summary": "Creates an all-zeros CSRSparseMatrix with shape `dense_shape`.", + "attributes": [ + { + "name": "type", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "dense_shape", + "description": "The desired matrix shape.", + "type": 9 + } + ], + "outputs": [ + { + "name": "sparse_matrix", + "description": "An empty CSR matrix with shape `dense_shape`.", + "type": 21 + } + ] + }, + { + "name": "SparseReduceMax", + "summary": "Computes the max of elements across dimensions of a SparseTensor.", + "description": "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`\ninstead of a sparse one.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python.", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If true, retain reduced dimensions with length 1.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "input_indices", + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "type": 9 + }, + { + "name": "input_values", + "description": "1-D. 
`N` non-empty values corresponding to `input_indices`.", + "typeAttr": "T" + }, + { + "name": "input_shape", + "description": "1-D. Shape of the input SparseTensor.", + "type": 9 + }, + { + "name": "reduction_axes", + "description": "1-D. Length-`K` vector containing the reduction axes.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "`R-K`-D. The reduced Tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseReduceMaxSparse", + "summary": "Computes the max of elements across dimensions of a SparseTensor.", + "description": "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a\nSparseTensor.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python.", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If true, retain reduced dimensions with length 1.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "input_indices", + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "type": 9 + }, + { + "name": "input_values", + "description": "1-D. `N` non-empty values corresponding to `input_indices`.", + "typeAttr": "T" + }, + { + "name": "input_shape", + "description": "1-D. Shape of the input SparseTensor.", + "type": 9 + }, + { + "name": "reduction_axes", + "description": "1-D. Length-`K` vector containing the reduction axes.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_indices", + "type": 9 + }, + { + "name": "output_values", + "typeAttr": "T" + }, + { + "name": "output_shape", + "type": 9 + } + ] + }, + { + "name": "SparseReduceSum", + "summary": "Computes the sum of elements across dimensions of a SparseTensor.", + "description": "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`\ninstead of a sparse one.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python.", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If true, retain reduced dimensions with length 1.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." 
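The reduction semantics spelled out for `SparseReduceMax`/`SparseReduceSum` above are exposed in Python as `tf.sparse.reduce_max` and `tf.sparse.reduce_sum`; a minimal sketch of the axis and keep-dims behaviour:

```python
import tensorflow as tf

# Dense equivalent of x:
# [[1, 0, 2],
#  [0, 3, 0]]
x = tf.sparse.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]],
                           values=[1, 2, 3], dense_shape=[2, 3])

tf.sparse.reduce_sum(x)                         # 6: all axes reduced
tf.sparse.reduce_sum(x, axis=1)                 # [3, 3]
tf.sparse.reduce_sum(x, axis=1, keepdims=True)  # [[3], [3]]
tf.sparse.reduce_max(x, axis=0)                 # [1, 3, 2]
```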
+ } + ], + "inputs": [ + { + "name": "input_indices", + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "type": 9 + }, + { + "name": "input_values", + "description": "1-D. `N` non-empty values corresponding to `input_indices`.", + "typeAttr": "T" + }, + { + "name": "input_shape", + "description": "1-D. Shape of the input SparseTensor.", + "type": 9 + }, + { + "name": "reduction_axes", + "description": "1-D. Length-`K` vector containing the reduction axes.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "`R-K`-D. The reduced Tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseReduceSumSparse", + "summary": "Computes the sum of elements across dimensions of a SparseTensor.", + "description": "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a\nSparseTensor.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python.", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If true, retain reduced dimensions with length 1.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "input_indices", + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "type": 9 + }, + { + "name": "input_values", + "description": "1-D. `N` non-empty values corresponding to `input_indices`.", + "typeAttr": "T" + }, + { + "name": "input_shape", + "description": "1-D. Shape of the input SparseTensor.", + "type": 9 + }, + { + "name": "reduction_axes", + "description": "1-D. Length-`K` vector containing the reduction axes.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_indices", + "type": 9 + }, + { + "name": "output_values", + "typeAttr": "T" + }, + { + "name": "output_shape", + "type": 9 + } + ] + }, + { + "name": "SparseReorder", + "summary": "Reorders a SparseTensor into the canonical, row-major ordering.", + "description": "Note that by convention, all sparse ops preserve the canonical ordering along\nincreasing dimension number. The only time ordering can be violated is during\nmanual manipulation of the indices and values vectors to add entries.\n\nReordering does not affect the shape of the SparseTensor.\n\nIf the tensor has rank `R` and `N` non-empty values, `input_indices` has\nshape `[N, R]`, input_values has length `N`, and input_shape has length `R`.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_indices", + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "type": 9 + }, + { + "name": "input_values", + "description": "1-D. 
`N` non-empty values corresponding to `input_indices`.", + "typeAttr": "T" + }, + { + "name": "input_shape", + "description": "1-D. Shape of the input SparseTensor.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "2-D. `N x R` matrix with the same indices as input_indices, but\nin canonical row-major ordering.", + "type": 9 + }, + { + "name": "output_values", + "description": "1-D. `N` non-empty values corresponding to `output_indices`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseReshape", + "summary": "Reshapes a SparseTensor to represent values in a new dense shape.", + "description": "This operation has the same semantics as reshape on the represented dense\ntensor. The `input_indices` are recomputed based on the requested `new_shape`.\n\nIf one component of `new_shape` is the special value -1, the size of that\ndimension is computed so that the total dense size remains constant. At\nmost one component of `new_shape` can be -1. The number of dense elements\nimplied by `new_shape` must be the same as the number of dense elements\noriginally implied by `input_shape`.\n\nReshaping does not affect the order of values in the SparseTensor.\n\nIf the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`\nhas length `R_out`, then `input_indices` has shape `[N, R_in]`,\n`input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and\n`output_shape` has length `R_out`.", + "inputs": [ + { + "name": "input_indices", + "description": "2-D. `N x R_in` matrix with the indices of non-empty values in a\nSparseTensor.", + "type": 9 + }, + { + "name": "input_shape", + "description": "1-D. `R_in` vector with the input SparseTensor's dense shape.", + "type": 9 + }, + { + "name": "new_shape", + "description": "1-D. `R_out` vector with the requested new dense shape.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "2-D. `N x R_out` matrix with the updated indices of non-empty\nvalues in the output SparseTensor.", + "type": 9 + }, + { + "name": "output_shape", + "description": "1-D. `R_out` vector with the full dense shape of the output\nSparseTensor. This is the same as `new_shape` but with any -1 dimensions\nfilled in.", + "type": 9 + } + ] + }, + { + "name": "SparseSegmentMean", + "summary": "Computes the mean along sparse segments of a tensor.", + "description": "See `tf.sparse.segment_sum` for usage examples.\n\nLike `SegmentMean`, but `segment_ids` can have rank less than `data`'s first\ndimension, selecting a subset of dimension 0, specified by `indices`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "sparse_gradient", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A 1-D tensor. Has same rank as `segment_ids`.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor. 
Values should be sorted and can be repeated.", + "typeAttr": "Tsegmentids" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSegmentMeanGrad", + "summary": "Computes gradients for SparseSegmentMean.", + "description": "Returns tensor \"output\" with same shape as grad, except for dimension 0 whose\nvalue is output_dim0.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "grad", + "description": "gradient propagated to the SparseSegmentMean op.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "indices passed to the corresponding SparseSegmentMean op.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "segment_ids passed to the corresponding SparseSegmentMean op.", + "typeAttr": "Tsegmentids" + }, + { + "name": "output_dim0", + "description": "dimension 0 of \"data\" passed to SparseSegmentMean op.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSegmentMeanGradV2", + "summary": "Computes gradients for SparseSegmentMean.", + "description": "Returns tensor \"output\" with same shape as grad, except for dimension 0 whose\nvalue is the number of unique indexes in \"indices\". Also returns vector\n\"sorted_unique_indices\" containing the corresponding indexes from \"indices\".", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "grad", + "description": "gradient propagated to the SparseSegmentMean op.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "indices passed to the corresponding SparseSegmentMean op.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "segment_ids passed to the corresponding SparseSegmentMean op.", + "typeAttr": "Tsegmentids" + }, + { + "name": "dense_output_dim0", + "description": "dimension 0 of \"data\" passed to SparseSegmentMean op.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "sorted_unique_indices", + "typeAttr": "Tidx" + } + ] + }, + { + "name": "SparseSegmentMeanWithNumSegments", + "summary": "Computes the mean along sparse segments of a tensor.", + "description": "Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. 
If an id is\nmissing, the `output` tensor at that position will be zeroed.\n\nRead\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "sparse_gradient", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A 1-D tensor. Has same rank as `segment_ids`.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor. Values should be sorted and can be repeated.", + "typeAttr": "Tsegmentids" + }, + { + "name": "num_segments", + "description": "Should equal the number of distinct segment IDs.", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for dimension 0 which has size\n`num_segments`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSegmentSqrtN", + "summary": "Computes the sum along sparse segments of a tensor divided by the sqrt of N.", + "description": "N is the size of the segment being reduced.\n\nSee `tf.sparse.segment_sum` for usage examples.\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "sparse_gradient", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A 1-D tensor. Has same rank as `segment_ids`.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor. Values should be sorted and can be repeated.", + "typeAttr": "Tsegmentids" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSegmentSqrtNGrad", + "summary": "Computes gradients for SparseSegmentSqrtN.", + "description": "Returns tensor \"output\" with same shape as grad, except for dimension 0 whose\nvalue is output_dim0.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
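`SparseSegmentSqrtN` above is reachable in Python as `tf.sparse.segment_sqrt_n` (its siblings map to `tf.sparse.segment_sum` and `tf.sparse.segment_mean`); a minimal sketch of the sqrt(N) scaling:

```python
import tensorflow as tf

c = tf.constant([[1.0, 2.0, 3.0, 4.0],
                 [-1.0, -2.0, -3.0, -4.0],
                 [5.0, 6.0, 7.0, 8.0]])

# Rows 0 and 2 are gathered into segment 0 (N = 2), so the summed row
# [6, 8, 10, 12] is divided by sqrt(2).
tf.sparse.segment_sqrt_n(c, tf.constant([0, 2]), tf.constant([0, 0]))
# => [[4.24.. 5.65.. 7.07.. 8.48..]]
```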
+ }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "grad", + "description": "gradient propagated to the SparseSegmentSqrtN op.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "indices passed to the corresponding SparseSegmentSqrtN op.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "segment_ids passed to the corresponding SparseSegmentSqrtN op.", + "typeAttr": "Tsegmentids" + }, + { + "name": "output_dim0", + "description": "dimension 0 of \"data\" passed to SparseSegmentSqrtN op.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSegmentSqrtNGradV2", + "summary": "Computes gradients for SparseSegmentSqrtN.", + "description": "Returns tensor \"output\" with same shape as grad, except for dimension 0 whose\nvalue is the number of unique indexes in \"indices\". Also returns vector\n\"sorted_unique_indices\" containing the corresponding indexes from \"indices\".", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "grad", + "description": "gradient propagated to the SparseSegmentSqrtN op.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "indices passed to the corresponding SparseSegmentSqrtN op.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "segment_ids passed to the corresponding SparseSegmentSqrtN op.", + "typeAttr": "Tsegmentids" + }, + { + "name": "dense_output_dim0", + "description": "dimension 0 of \"data\" passed to SparseSegmentSqrtN op.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "sorted_unique_indices", + "typeAttr": "Tidx" + } + ] + }, + { + "name": "SparseSegmentSqrtNWithNumSegments", + "summary": "Computes the sum along sparse segments of a tensor divided by the sqrt of N.", + "description": "N is the size of the segment being reduced.\n\nLike `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is\nmissing, the `output` tensor at that position will be zeroed.\n\nRead\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." 
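The `*GradV2` ops above differ from their V1 counterparts only in the output layout: dimension 0 of the gradient is the number of unique indices, paired with a `sorted_unique_indices` vector. A hypothetical NumPy model of `SparseSegmentSqrtNGradV2` (an illustrative sketch of the semantics, not the actual kernel):

```python
import numpy as np

def sparse_segment_sqrt_n_grad_v2(grad, indices, segment_ids):
    indices = np.asarray(indices)
    segment_ids = np.asarray(segment_ids)
    n = np.bincount(segment_ids)  # size N of each segment
    sorted_unique, inverse = np.unique(indices, return_inverse=True)
    out = np.zeros((sorted_unique.size,) + grad.shape[1:], grad.dtype)
    for i, seg in enumerate(segment_ids):
        # Row `seg` of the upstream gradient flows back to the row of `data`
        # selected by indices[i], scaled by 1/sqrt(N) for that segment.
        out[inverse[i]] += grad[seg] / np.sqrt(n[seg])
    return out, sorted_unique
```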
+ }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "sparse_gradient", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A 1-D tensor. Has same rank as `segment_ids`.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor. Values should be sorted and can be repeated.", + "typeAttr": "Tsegmentids" + }, + { + "name": "num_segments", + "description": "Should equal the number of distinct segment IDs.", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSegmentSum", + "summary": "Computes the sum along sparse segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nLike `SegmentSum`, but `segment_ids` can have rank less than `data`'s first\ndimension, selecting a subset of dimension 0, specified by `indices`.\n\nFor example:\n\n```python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\n\n# Select two rows, one segment.\ntf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))\n# => [[0 0 0 0]]\n\n# Select two rows, two segments.\ntf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))\n# => [[ 1 2 3 4]\n# [-1 -2 -3 -4]]\n\n# Select all rows, two segments.\ntf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))\n# => [[0 0 0 0]\n# [5 6 7 8]]\n\n# Which is equivalent to:\ntf.segment_sum(c, tf.constant([0, 0, 1]))\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "sparse_gradient", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A 1-D tensor. Has same rank as `segment_ids`.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor. 
Values should be sorted and can be repeated.", + "typeAttr": "Tsegmentids" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSegmentSumGrad", + "summary": "Computes gradients for SparseSegmentSum.", + "description": "Returns tensor \"output\" with same shape as grad, except for dimension 0 whose\nvalue is output_dim0.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "grad", + "description": "gradient propagated to the SparseSegmentSum op.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "indices passed to the corresponding SparseSegmentSum op.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "segment_ids passed to the corresponding SparseSegmentSum op.", + "typeAttr": "Tsegmentids" + }, + { + "name": "output_dim0", + "description": "dimension 0 of \"data\" passed to SparseSegmentSum op.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSegmentSumGradV2", + "summary": "Computes gradients for SparseSegmentSum.", + "description": "Returns tensor \"output\" with same shape as grad, except for dimension 0 whose\nvalue is the number of unique indexes in \"indices\". Also returns vector\n\"sorted_unique_indices\" containing the corresponding indexes from \"indices\".", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "grad", + "description": "gradient propagated to the SparseSegmentSum op.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "indices passed to the corresponding SparseSegmentSum op.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "segment_ids passed to the corresponding SparseSegmentSum op.", + "typeAttr": "Tsegmentids" + }, + { + "name": "dense_output_dim0", + "description": "dimension 0 of \"data\" passed to SparseSegmentSum op.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "sorted_unique_indices", + "typeAttr": "Tidx" + } + ] + }, + { + "name": "SparseSegmentSumWithNumSegments", + "summary": "Computes the sum along sparse segments of a tensor.", + "description": "Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. 
If an id is\nmissing, the `output` tensor at that position will be zeroed.\n\nRead\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation)\nfor an explanation of segments.\n\nFor example:\n\n```python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\n\ntf.sparse_segment_sum_with_num_segments(\n c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)\n# => [[0 0 0 0]\n# [0 0 0 0]\n# [0 0 0 0]]\n\ntf.sparse_segment_sum_with_num_segments(c,\n tf.constant([0, 1]),\n tf.constant([0, 2]),\n num_segments=4)\n# => [[ 1 2 3 4]\n# [ 0 0 0 0]\n# [-1 -2 -3 -4]\n# [ 0 0 0 0]]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tsegmentids", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "sparse_gradient", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "A 1-D tensor. Has same rank as `segment_ids`.", + "typeAttr": "Tidx" + }, + { + "name": "segment_ids", + "description": "A 1-D tensor. Values should be sorted and can be repeated.", + "typeAttr": "Tsegmentids" + }, + { + "name": "num_segments", + "description": "Should equal the number of distinct segment IDs.", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for dimension 0 which\nhas size `num_segments`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSlice", + "summary": "Slice a `SparseTensor` based on the `start` and `size`.", + "description": "For example, if the input is\n\n input_tensor = shape = [2, 7]\n [ a d e ]\n [b c ]\n\nGraphically the output tensors are:\n\n sparse_slice([0, 0], [2, 4]) = shape = [2, 4]\n [ a ]\n [b c ]\n\n sparse_slice([0, 4], [2, 3]) = shape = [2, 3]\n [ d e ]\n [ ]", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "indices", + "description": "2-D tensor representing the indices of the sparse tensor.", + "type": 9 + }, + { + "name": "values", + "description": "1-D tensor representing the values of the sparse tensor.", + "typeAttr": "T" + }, + { + "name": "shape", + "description": "1-D tensor representing the shape of the sparse tensor.", + "type": 9 + }, + { + "name": "start", + "description": "1-D tensor representing the start of the slice.", + "type": 9 + }, + { + "name": "size", + "description": "1-D 
tensor representing the size of the slice.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "A list of 1-D tensors representing the indices of the output\nsparse tensors.", + "type": 9 + }, + { + "name": "output_values", + "description": "A list of 1-D tensors representing the values of the output sparse\ntensors.", + "typeAttr": "T" + }, + { + "name": "output_shape", + "description": "A list of 1-D tensors representing the shape of the output sparse\ntensors.", + "type": 9 + } + ] + }, + { + "name": "SparseSliceGrad", + "summary": "The gradient operator for the SparseSlice op.", + "description": "This op takes in the upstream gradient w.r.t. non-empty values of\nthe sliced `SparseTensor`, and outputs the gradients w.r.t.\nthe non-empty values of input `SparseTensor`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "backprop_val_grad", + "description": "1-D. The gradient with respect to\nthe non-empty values of the sliced `SparseTensor`.", + "typeAttr": "T" + }, + { + "name": "input_indices", + "description": "2-D. The `indices` of the input `SparseTensor`.", + "type": 9 + }, + { + "name": "input_start", + "description": "1-D tensor representing the start of the slice.", + "type": 9 + }, + { + "name": "output_indices", + "description": "2-D. The `indices` of the sliced `SparseTensor`.", + "type": 9 + } + ], + "outputs": [ + { + "name": "val_grad", + "description": "1-D. The gradient with respect to the non-empty values of input `SparseTensor`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSoftmax", + "summary": "Applies softmax to a batched N-D `SparseTensor`.", + "description": "The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`\n(where `N >= 2`), and with indices sorted in the canonical lexicographic order.\n\nThis op is equivalent to applying the normal `tf.nn.softmax()` to each innermost\nlogical submatrix with shape `[B, C]`, but with the catch that *the implicitly\nzero elements do not participate*. Specifically, the algorithm is equivalent\nto the following:\n\n (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix\n with shape `[B, C]`, along the size-C dimension;\n (2) Masks out the original implicitly-zero locations;\n (3) Renormalizes the remaining elements.\n\nHence, the `SparseTensor` result has exactly the same non-zero indices and\nshape.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "sp_indices", + "description": "2-D. `NNZ x R` matrix with the indices of non-empty values in a\nSparseTensor, in canonical ordering.", + "type": 9 + }, + { + "name": "sp_values", + "description": "1-D. `NNZ` non-empty values corresponding to `sp_indices`.", + "typeAttr": "T" + }, + { + "name": "sp_shape", + "description": "1-D. Shape of the input SparseTensor.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output", + "description": "1-D. 
The `NNZ` values for the result `SparseTensor`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSoftmaxCrossEntropyWithLogits", + "summary": "Computes softmax cross entropy cost and gradients to backpropagate.", + "description": "Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept\na matrix of label probabilities, but rather a single label per row\nof features. This label is considered to have probability 1.0 for the\ngiven row.\n\nInputs are the logits, not probabilities.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "Tlabels", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "features", + "description": "batch_size x num_classes matrix", + "typeAttr": "T" + }, + { + "name": "labels", + "description": "batch_size vector with values in [0, num_classes).\nThis is the label for the given minibatch entry.", + "typeAttr": "Tlabels" + } + ], + "outputs": [ + { + "name": "loss", + "description": "Per example loss (batch_size vector).", + "typeAttr": "T" + }, + { + "name": "backprop", + "description": "backpropagated gradients (batch_size x num_classes matrix).", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSparseMaximum", + "summary": "Returns the element-wise max of two SparseTensors.", + "description": "Assumes the two SparseTensors have the same shape, i.e., no broadcasting.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "a_indices", + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering.", + "type": 9 + }, + { + "name": "a_values", + "description": "1-D. `N` non-empty values corresponding to `a_indices`.", + "typeAttr": "T" + }, + { + "name": "a_shape", + "description": "1-D. Shape of the input SparseTensor.", + "type": 9 + }, + { + "name": "b_indices", + "description": "counterpart to `a_indices` for the other operand.", + "type": 9 + }, + { + "name": "b_values", + "description": "counterpart to `a_values` for the other operand; must be of the same dtype.", + "typeAttr": "T" + }, + { + "name": "b_shape", + "description": "counterpart to `a_shape` for the other operand; the two shapes must be equal.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "2-D. The indices of the output SparseTensor.", + "type": 9 + }, + { + "name": "output_values", + "description": "1-D. The values of the output SparseTensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSparseMinimum", + "summary": "Returns the element-wise min of two SparseTensors.", + "description": "Assumes the two SparseTensors have the same shape, i.e., no broadcasting.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "a_indices", + "description": "2-D. 
`N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering.", + "type": 9 + }, + { + "name": "a_values", + "description": "1-D. `N` non-empty values corresponding to `a_indices`.", + "typeAttr": "T" + }, + { + "name": "a_shape", + "description": "1-D. Shape of the input SparseTensor.", + "type": 9 + }, + { + "name": "b_indices", + "description": "counterpart to `a_indices` for the other operand.", + "type": 9 + }, + { + "name": "b_values", + "description": "counterpart to `a_values` for the other operand; must be of the same dtype.", + "typeAttr": "T" + }, + { + "name": "b_shape", + "description": "counterpart to `a_shape` for the other operand; the two shapes must be equal.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "2-D. The indices of the output SparseTensor.", + "type": 9 + }, + { + "name": "output_values", + "description": "1-D. The values of the output SparseTensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseSplit", + "summary": "Split a `SparseTensor` into `num_split` tensors along one dimension.", + "description": "If `shape[split_dim]` is not an integer multiple of `num_split`, slices\n`[0 : shape[split_dim] % num_split]` get one extra dimension.\nFor example, if `split_dim = 1` and `num_split = 2` and the input is\n\n input_tensor = shape = [2, 7]\n [ a d e ]\n [b c ]\n\nGraphically the output tensors are:\n\n output_tensor[0] = shape = [2, 4]\n [ a ]\n [b c ]\n\n output_tensor[1] = shape = [2, 3]\n [ d e ]\n [ ]", + "attributes": [ + { + "name": "num_split", + "type": "int64", + "description": "The number of ways to split.", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "split_dim", + "description": "0-D. The dimension along which to split. Must be in the range\n`[0, rank(shape))`.", + "type": 9 + }, + { + "name": "indices", + "description": "2-D tensor representing the indices of the sparse tensor.", + "type": 9 + }, + { + "name": "values", + "description": "1-D tensor representing the values of the sparse tensor.", + "typeAttr": "T" + }, + { + "name": "shape", + "description": "1-D tensor representing the shape of the sparse tensor.", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_indices", + "description": "A list of 1-D tensors representing the indices of the output\nsparse tensors.", + "numberAttr": "num_split", + "type": 9 + }, + { + "name": "output_values", + "description": "A list of 1-D tensors representing the values of the output sparse\ntensors.", + "numberAttr": "num_split", + "typeAttr": "T" + }, + { + "name": "output_shape", + "description": "A list of 1-D tensors representing the shape of the output sparse\ntensors.", + "numberAttr": "num_split", + "type": 9 + } + ] + }, + { + "name": "SparseTensorDenseAdd", + "summary": "Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.", + "description": "This Op does not require `a_indices` be sorted in standard lexicographic order.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "a_indices", + "description": "2-D. 
The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.", + "typeAttr": "Tindices" + }, + { + "name": "a_values", + "description": "1-D. The `values` of the `SparseTensor`, with shape `[nnz]`.", + "typeAttr": "T" + }, + { + "name": "a_shape", + "description": "1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`.", + "typeAttr": "Tindices" + }, + { + "name": "b", + "description": "`ndims`-D Tensor. With shape `a_shape`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseTensorDenseMatMul", + "summary": "Multiply SparseTensor (of rank 2) \"A\" by dense matrix \"B\".", + "description": "No validity checking is performed on the indices of A. However, the following\ninput format is recommended for optimal behavior:\n\nif adjoint_a == false:\n A should be sorted in lexicographically increasing order. Use SparseReorder\n if you're not sure.\nif adjoint_a == true:\n A should be sorted in order of increasing dimension 1 (i.e., \"column major\"\n order instead of \"row major\" order).", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + }, + { + "name": "adjoint_a", + "type": "boolean", + "description": "Use the adjoint of A in the matrix multiply. If A is complex, this\nis transpose(conj(A)). Otherwise it's transpose(A).", + "default": false + }, + { + "name": "adjoint_b", + "type": "boolean", + "description": "Use the adjoint of B in the matrix multiply. If B is complex, this\nis transpose(conj(B)). Otherwise it's transpose(B).", + "default": false + } + ], + "inputs": [ + { + "name": "a_indices", + "description": "2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.", + "typeAttr": "Tindices" + }, + { + "name": "a_values", + "description": "1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector.", + "typeAttr": "T" + }, + { + "name": "a_shape", + "description": "1-D. The `shape` of the `SparseTensor`, size `[2]` Vector.", + "type": 9 + }, + { + "name": "b", + "description": "2-D. A dense Matrix.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "product", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseTensorSliceDataset", + "summary": "Creates a dataset that splits a SparseTensor into elements row-wise.", + "attributes": [ + { + "name": "Tvalues", + "type": "type" + } + ], + "inputs": [ + { + "name": "indices", + "type": 9 + }, + { + "name": "values", + "typeAttr": "Tvalues" + }, + { + "name": "dense_shape", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "SparseTensorToCSRSparseMatrix", + "summary": "Converts a SparseTensor to a (possibly batched) CSRSparseMatrix.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`." 
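The ordering recommendation in `SparseTensorDenseMatMul` above corresponds to `tf.sparse.reorder` at the Python level, and the op backs `tf.sparse.sparse_dense_matmul`; a minimal sketch:

```python
import tensorflow as tf

# Indices deliberately not in canonical row-major order.
a = tf.sparse.SparseTensor(indices=[[1, 2], [0, 0]],
                           values=[2.0, 1.0], dense_shape=[2, 3])
a = tf.sparse.reorder(a)  # sort indices lexicographically (SparseReorder)
b = tf.ones([3, 4])

# Dense [2, 4] result: row 0 = 1.0 * b[0, :], row 1 = 2.0 * b[2, :].
product = tf.sparse.sparse_dense_matmul(a, b)
```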
+ } + ], + "inputs": [ + { + "name": "indices", + "description": "SparseTensor indices.", + "type": 9 + }, + { + "name": "values", + "description": "SparseTensor values.", + "typeAttr": "T" + }, + { + "name": "dense_shape", + "description": "SparseTensor dense shape.", + "type": 9 + } + ], + "outputs": [ + { + "name": "sparse_matrix", + "description": "A (possibly batched) CSRSparseMatrix.", + "type": 21 + } + ] + }, + { + "name": "SparseToDense", + "summary": "Converts a sparse representation into a dense tensor.", + "description": "Builds an array `dense` with shape `output_shape` such that\n\n```\n# If sparse_indices is scalar\ndense[i] = (i == sparse_indices ? sparse_values : default_value)\n\n# If sparse_indices is a vector, then for each i\ndense[sparse_indices[i]] = sparse_values[i]\n\n# If sparse_indices is an n by d matrix, then for each i in [0, n)\ndense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]\n```\n\nAll other values in `dense` are set to `default_value`. If `sparse_values` is a\nscalar, all sparse indices are set to this single value.\n\nIndices should be sorted in lexicographic order, and indices must not\ncontain any repeats. If `validate_indices` is true, these properties\nare checked during execution.", + "attributes": [ + { + "name": "validate_indices", + "type": "boolean", + "description": "If true, indices are checked to make sure they are sorted in\nlexicographic order and that there are no repeats.", + "default": true + }, + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "sparse_indices", + "description": "0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete\nindex where `sparse_values[i]` will be placed.", + "typeAttr": "Tindices" + }, + { + "name": "output_shape", + "description": "1-D. Shape of the dense output tensor.", + "typeAttr": "Tindices" + }, + { + "name": "sparse_values", + "description": "1-D. Values corresponding to each row of `sparse_indices`,\nor a scalar value to be used for all sparse indices.", + "typeAttr": "T" + }, + { + "name": "default_value", + "description": "Scalar value to set for indices not specified in\n`sparse_indices`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "dense", + "description": "Dense output tensor of shape `output_shape`.", + "typeAttr": "T" + } + ] + }, + { + "name": "SparseToSparseSetOperation", + "summary": "Applies set operation along last dimension of 2 `SparseTensor` inputs.", + "description": "See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\nIf `validate_indices` is `True`, `SparseToSparseSetOperation` validates the\norder and range of `set1` and `set2` indices.\n\nInput `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,\nand `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same\nas `set2`. Dimension `n` contains values in a set, duplicates are allowed but\nignored.\n\nInput `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,\nand `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same\nas `set1`. Dimension `n` contains values in a set, duplicates are allowed but\nignored.\n\nOutput `result` is a `SparseTensor` represented by `result_indices`,\n`result_values`, and `result_shape`. 
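The set operations described here (together with their dense counterparts) back the `tf.sets` module; a minimal sketch of a last-dimension intersection:

```python
import tensorflow as tf

a = tf.constant([[1, 2, 3], [4, 5, 6]])
b = tf.constant([[1, 3, 3], [7, 8, 9]])

# Sets live in the last dimension; the result is a SparseTensor.
inter = tf.sets.intersection(a, b)
tf.sparse.to_dense(inter)  # [[1, 3], [0, 0]] -- row 1 is empty
```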
For `set1` and `set2` ranked `n`, this\nhas rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`\ndimension contains the result of `set_operation` applied to the corresponding\n`[0...n-1]` dimension of `set`.", + "attributes": [ + { + "name": "set_operation", + "type": "string" + }, + { + "name": "validate_indices", + "type": "boolean", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`." + } + ], + "inputs": [ + { + "name": "set1_indices", + "description": "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder.", + "type": 9 + }, + { + "name": "set1_values", + "description": "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder.", + "typeAttr": "T" + }, + { + "name": "set1_shape", + "description": "1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must\nbe the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the\nmax set size across `0...n-1` dimensions.", + "type": 9 + }, + { + "name": "set2_indices", + "description": "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder.", + "type": 9 + }, + { + "name": "set2_values", + "description": "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder.", + "typeAttr": "T" + }, + { + "name": "set2_shape", + "description": "1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\nbe the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the\nmax set size across `0...n-1` dimensions.", + "type": 9 + } + ], + "outputs": [ + { + "name": "result_indices", + "description": "2D indices of a `SparseTensor`.", + "type": 9 + }, + { + "name": "result_values", + "description": "1D values of a `SparseTensor`.", + "typeAttr": "T" + }, + { + "name": "result_shape", + "description": "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions.", + "type": 9 + } + ] + }, + { + "name": "Spence", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Split", + "category": "Tensor", + "summary": "Splits a tensor into `num_split` tensors along one dimension.", + "attributes": [ + { + "name": "num_split", + "type": "int64", + "description": "The number of ways to split. Must evenly divide\n`value.shape[split_dim]`.", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "split_dim", + "description": "0-D. The dimension along which to split. Must be in the range\n`[-rank(value), rank(value))`.", + "type": 3 + }, + { + "name": "value", + "description": "The tensor to split.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Identically shaped tensors, whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`value.shape[split_dim] / num_split`.", + "numberAttr": "num_split", + "typeAttr": "T" + } + ] + }, + { + "name": "SplitDedupData", + "summary": "Splits an input deduplication data XLA tuple into integer and floating point\ntensors.", + "description": "Deduplication data is an XLA tuple, which consists of integer and floating point\nvalues. 
This op splits these values into two groups by type, and\nconstructs each group as one tensor to return.", + "attributes": [ + { + "name": "integer_type", + "type": "type", + "description": "integer_tensor type. Allowed types: int32, int64, uint32, uint64. Must be one of the following: `int32`, `int64`, `uint32`, `uint64`." + }, + { + "name": "float_type", + "type": "type", + "description": "float_tensor type. Allowed types: half, bfloat16, float. Must be one of the following: `float16`, `bfloat16`, `float32`." + }, + { + "name": "tuple_mask", + "type": "string", + "description": "A serialized TensorProto string of output tuple mask. This mask is a 2-D tensor,\nwith first column as tuple element type, and second column as span of this type.\nFor example, for an output tuple of (1, 2, 0.1, 3), its mask is [[0, 2], [1, 1], [0,\n1]]. We expect only two types of elements: integer(0) and float(1)." + }, + { + "name": "config", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input", + "description": "An XLA tuple including integer and float elements as the deduplication data tuple.", + "type": 21 + } + ], + "outputs": [ + { + "name": "integer_tensor", + "description": "A 1-D integer tensor that includes the integer elements of the deduplication data tuple.", + "typeAttr": "integer_type" + }, + { + "name": "float_tensor", + "description": "A 1-D float tensor that includes the float elements of the deduplication data tuple.", + "typeAttr": "float_type" + } + ] + }, + { + "name": "SplitV", + "summary": "Splits a tensor into `num_split` tensors along one dimension.", + "attributes": [ + { + "name": "num_split", + "type": "int64", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "Tlen", + "type": "type", + "description": "Must be one of the following: `int8`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "value", + "description": "The tensor to split.", + "typeAttr": "T" + }, + { + "name": "size_splits", + "description": "List containing the sizes of each output tensor along the split\ndimension. Must sum to the dimension of value along split_dim.\nCan contain one -1 indicating that dimension is to be inferred.", + "typeAttr": "Tlen" + }, + { + "name": "split_dim", + "description": "0-D. The dimension along which to split. Must be in the range\n`[-rank(value), rank(value))`.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "Tensors whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`size_splits[i]`.", + "numberAttr": "num_split", + "typeAttr": "T" + } + ] + }, + { + "name": "SqlDataset", + "summary": "Creates a dataset that executes a SQL query and emits rows of the result set.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "driver_name", + "description": "The database type. 
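Editorial aside: `SplitV` is the explicit-sizes variant; `tf.split` dispatches to it when given a size list, honoring the single `-1` inference described above. A sketch, assuming TensorFlow 2.x:

```python
import tensorflow as tf

value = tf.reshape(tf.range(12), [3, 4])
# size_splits must sum to value.shape[1] == 4; the one -1 entry is inferred as 1.
x, y, z = tf.split(value, num_or_size_splits=[2, 1, -1], axis=1)
# x: [3, 2], y: [3, 1], z: [3, 1]
```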
Currently, the only supported type is 'sqlite'.", + "type": 7 + }, + { + "name": "data_source_name", + "description": "A connection string to connect to the database.", + "type": 7 + }, + { + "name": "query", + "description": "A SQL query to execute.", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Sqrt", + "summary": "Computes square root of x element-wise.", + "description": "I.e., \\\\(y = \\sqrt{x} = x^{1/2}\\\\).", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "SqrtGrad", + "summary": "Computes the gradient for the sqrt of `x` wrt its input.", + "description": "Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`\nis the corresponding input gradient.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "Square", + "summary": "Computes square of x element-wise.", + "description": "I.e., \\\\(y = x * x = x^2\\\\).", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "SquaredDifference", + "summary": "Returns conj(x - y)(x - y) element-wise.", + "description": "*NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "Squeeze", + "category": "Transform", + "summary": "Removes dimensions of size 1 from the shape of a tensor.", + "description": "Given a tensor `input`, this operation returns a tensor of the same type with\nall dimensions of size 1 removed. If you don't want to remove all size 1\ndimensions, you can remove specific size 1 dimensions by specifying\n`squeeze_dims`.\n\nFor example:\n\n```\n# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\nshape(squeeze(t)) ==> [2, 3]\n```\n\nOr, to remove specific size 1 dimensions:\n\n```\n# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\nshape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "squeeze_dims", + "type": "int64[]", + "description": "If specified, only squeezes the dimensions listed. The dimension\nindex starts at 0. It is an error to squeeze a dimension that is not 1. 
Must\nbe in the range `[-rank(input), rank(input))`.", + "minimum": 0, + "default": [] + } + ], + "inputs": [ + { + "name": "input", + "description": "The `input` to squeeze.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Contains the same data as `input`, but has one or more dimensions of\nsize 1 removed.", + "typeAttr": "T" + } + ] + }, + { + "name": "Stack", + "summary": "Deprecated, use StackV2.", + "attributes": [ + { + "name": "elem_type", + "type": "type" + }, + { + "name": "stack_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "StackClose", + "summary": "Deprecated, use StackCloseV2.", + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "StackCloseV2", + "summary": "Delete the stack from its resource container.", + "inputs": [ + { + "name": "handle", + "description": "The handle to a stack.", + "type": 20 + } + ] + }, + { + "name": "StackPop", + "summary": "Deprecated, use StackPopV2.", + "attributes": [ + { + "name": "elem_type", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + } + ], + "outputs": [ + { + "name": "elem", + "typeAttr": "elem_type" + } + ] + }, + { + "name": "StackPopV2", + "summary": "Pop the element at the top of the stack.", + "attributes": [ + { + "name": "elem_type", + "type": "type", + "description": "The type of the elem that is popped." + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a stack.", + "type": 20 + } + ], + "outputs": [ + { + "name": "elem", + "description": "The tensor that is popped from the top of the stack.", + "typeAttr": "elem_type" + } + ] + }, + { + "name": "StackPush", + "summary": "Deprecated, use StackPushV2.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "swap_memory", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + }, + { + "name": "elem", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "StackPushV2", + "summary": "Push an element onto the stack.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "swap_memory", + "type": "boolean", + "description": "Swap `elem` to CPU. Default to false.", + "default": false + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a stack.", + "type": 20 + }, + { + "name": "elem", + "description": "The tensor to be pushed onto the stack.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The same tensor as the input 'elem'.", + "typeAttr": "T" + } + ] + }, + { + "name": "StackV2", + "summary": "A stack that produces elements in first-in last-out order.", + "attributes": [ + { + "name": "elem_type", + "type": "type", + "description": "The type of the elements on the stack." + }, + { + "name": "stack_name", + "type": "string", + "description": "Overrides the name used for the temporary stack resource. Default\nvalue is the name of the 'Stack' op (which is guaranteed unique).", + "default": "" + } + ], + "inputs": [ + { + "name": "max_size", + "description": "The maximum size of the stack if non-negative. 
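Editorial aside: the `Stack*V2` resource ops above compose into the first-in last-out behavior named in the `StackV2` summary. A hedged sketch via the raw-op bindings (assuming TensorFlow 2.x eager execution, where these resource ops can be invoked directly):

```python
import tensorflow as tf

# First-in last-out: push 1.0 then 2.0; the first pop returns 2.0.
handle = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)
tf.raw_ops.StackPushV2(handle=handle, elem=tf.constant(1.0))
tf.raw_ops.StackPushV2(handle=handle, elem=tf.constant(2.0))
top = tf.raw_ops.StackPopV2(handle=handle, elem_type=tf.float32)  # 2.0
tf.raw_ops.StackCloseV2(handle=handle)  # delete the stack resource
```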
If negative, the stack\nsize is unlimited.", + "type": 3 + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the stack.", + "type": 20 + } + ] + }, + { + "name": "Stage", + "summary": "Stage values similar to a lightweight Enqueue.", + "description": "The basic functionality of this Op is similar to a queue with many\nfewer capabilities and options. This Op is optimized for performance.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "description": "Maximum number of elements in the Staging Area. If > 0, inserts\non the container will block when the capacity is reached.", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "description": "The maximum number of bytes allowed for Tensors in the Staging Area.\nIf > 0, inserts will block until sufficient space is available.", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "It is necessary to match this name to the matching Unstage Op.", + "default": "" + } + ], + "inputs": [ + { + "name": "values", + "description": "A list of tensors. `dtypes` is the list of data types that inserted values should adhere to.", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "StageClear", + "summary": "Op removes all elements in the underlying container.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ] + }, + { + "name": "StagePeek", + "summary": "Op peeks at the values at the specified index.", + "description": "If the underlying container does not contain sufficient elements\nthis op will block until it does. 
This Op is optimized for\nperformance.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "index", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "StageSize", + "summary": "Op returns the number of elements in the underlying container.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ] + }, + { + "name": "StatefulPartitionedCall", + "summary": "returns `f(inputs)`, where `f`'s body is placed and partitioned.", + "attributes": [ + { + "name": "Tin", + "type": "type[]", + "description": "A list of input types.", + "minimum": 0 + }, + { + "name": "Tout", + "type": "type[]", + "description": "A list of output types.", + "minimum": 0 + }, + { + "name": "f", + "type": "function", + "description": " A function that takes 'args', a list of tensors, and returns 'output',\n another list of tensors. Input and output types are specified by 'Tin'\n and 'Tout'. The function body of f will be placed and partitioned across\n devices, setting this op apart from the regular Call op. This op is\n stateful." + }, + { + "name": "config", + "type": "string", + "default": "" + }, + { + "name": "config_proto", + "type": "string", + "default": "" + }, + { + "name": "executor_type", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "args", + "description": "A list of input tensors.", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of return values.", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "StatefulRandomBinomial", + "attributes": [ + { + "name": "S", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 2 + } + }, + { + "name": "dtype", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "resource", + "type": 20 + }, + { + "name": "algorithm", + "type": 9 + }, + { + "name": "shape", + "typeAttr": "S" + }, + { + "name": "counts", + "typeAttr": "T" + }, + { + "name": "probs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatefulStandardNormal", + "summary": "Outputs random values from a normal distribution. 
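Editorial aside: `StatefulPartitionedCall` is the node that `tf.function` typically emits when the traced body contains stateful ops. A hedged sketch of that correspondence, not a normative mapping (TensorFlow 2.x assumed):

```python
import tensorflow as tf

v = tf.Variable(1.0)

@tf.function
def f(x):
    v.assign_add(x)  # a stateful op in the body keeps the call stateful
    return v * x

y = f(tf.constant(2.0))  # typically invoked via StatefulPartitionedCall; y == 6.0
```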
This op is deprecated in favor of op 'StatefulStandardNormalV2'", + "description": "The generated values will have mean 0 and standard deviation 1.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "shape_dtype", + "type": "type", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "resource", + "description": "The handle of the resource variable that stores the state of the RNG.", + "type": 20 + }, + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor of the specified shape filled with random normal values.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatefulStandardNormalV2", + "summary": "Outputs random values from a normal distribution.", + "description": "The generated values will have mean 0 and standard deviation 1.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "shape_dtype", + "type": "type", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "resource", + "description": "The handle of the resource variable that stores the state of the RNG.", + "type": 20 + }, + { + "name": "algorithm", + "description": "The RNG algorithm.", + "type": 9 + }, + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor of the specified shape filled with random normal values.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatefulTruncatedNormal", + "summary": "Outputs random values from a truncated normal distribution.", + "description": "The generated values follow a normal distribution with mean 0 and standard\ndeviation 1, except that values whose magnitude is more than 2 standard\ndeviations from the mean are dropped and re-picked.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "shape_dtype", + "type": "type", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "resource", + "description": "The handle of the resource variable that stores the state of the RNG.", + "type": 20 + }, + { + "name": "algorithm", + "description": "The RNG algorithm.", + "type": 9 + }, + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatefulUniform", + "summary": "Outputs random values from a uniform distribution.", + "description": "The generated values follow a uniform distribution in the range `[0, 1)`. 
The\nlower bound 0 is included in the range, while the upper bound 1 is excluded.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "shape_dtype", + "type": "type", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "resource", + "description": "The handle of the resource variable that stores the state of the RNG.", + "type": 20 + }, + { + "name": "algorithm", + "description": "The RNG algorithm.", + "type": 9 + }, + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatefulUniformFullInt", + "summary": "Outputs random integers from a uniform distribution.", + "description": "The generated values are uniform integers covering the whole range of `dtype`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output.", + "default": { + "type": "type", + "value": 23 + } + }, + { + "name": "shape_dtype", + "type": "type", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "resource", + "description": "The handle of the resource variable that stores the state of the RNG.", + "type": 20 + }, + { + "name": "algorithm", + "description": "The RNG algorithm.", + "type": 9 + }, + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatefulUniformInt", + "summary": "Outputs random integers from a uniform distribution.", + "description": "The generated values are uniform integers in the range `[minval, maxval)`.\nThe lower bound `minval` is included in the range, while the upper bound\n`maxval` is excluded.\n\nThe random integers are slightly biased unless `maxval - minval` is an exact\npower of two. 
The bias is small for values of `maxval - minval` significantly\nsmaller than the range of the output (either `2^32` or `2^64`).", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output.", + "default": { + "type": "type", + "value": 9 + } + }, + { + "name": "shape_dtype", + "type": "type", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "resource", + "description": "The handle of the resource variable that stores the state of the RNG.", + "type": 20 + }, + { + "name": "algorithm", + "description": "The RNG algorithm.", + "type": 9 + }, + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "shape_dtype" + }, + { + "name": "minval", + "description": "Minimum value (inclusive, scalar).", + "typeAttr": "dtype" + }, + { + "name": "maxval", + "description": "Maximum value (exclusive, scalar).", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessCase", + "summary": "An n-way switch statement which calls a single branch function.", + "description": " An n-way switch statement, implementing the following:\n ```\n switch (branch_index) {\n case 0:\n output = branches[0](input);\n break;\n case 1:\n output = branches[1](input);\n break;\n ...\n case [[nbranches-1]]:\n default:\n output = branches[nbranches-1](input);\n break;\n }\n ```\n\n This should only be used when none of the branches has stateful ops.", + "attributes": [ + { + "name": "Tin", + "type": "type[]", + "description": "A list of input types.", + "minimum": 0 + }, + { + "name": "Tout", + "type": "type[]", + "description": "A list of output types.", + "minimum": 0 + }, + { + "name": "branches", + "type": "function[]", + "description": " A list of functions each of which takes 'inputs' and returns a list of\n tensors, whose types are the same as what every other branch returns.", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "default": [] + } + ], + "inputs": [ + { + "name": "branch_index", + "description": "The branch selector, an int32 Tensor.", + "type": 3 + }, + { + "name": "input", + "description": "A list of input tensors passed to the branch function.", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of return values.", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "StatelessIf", + "summary": "output = cond ? then_branch(input) : else_branch(input)", + "attributes": [ + { + "name": "Tcond", + "type": "type" + }, + { + "name": "Tin", + "type": "type[]", + "description": "A list of input types.", + "minimum": 0 + }, + { + "name": "Tout", + "type": "type[]", + "description": "A list of output types.", + "minimum": 0 + }, + { + "name": "then_branch", + "type": "function", + "description": " A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what else_branch returns." + }, + { + "name": "else_branch", + "type": "function", + "description": " A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what then_branch returns." + }, + { + "name": "output_shapes", + "type": "shape[]", + "default": [] + } + ], + "inputs": [ + { + "name": "cond", + "description": " A Tensor. 
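Editorial aside: the switch pseudocode above is surfaced at the Python level by `tf.switch_case`, which lowers to `Case`/`StatelessCase` depending on branch statefulness. An illustrative sketch (TensorFlow 2.x assumed):

```python
import tensorflow as tf

def b0(): return tf.constant(10)
def b1(): return tf.constant(20)

out = tf.switch_case(tf.constant(1), branch_fns=[b0, b1],
                     default=lambda: tf.constant(-1))
# out == 20; an out-of-range branch_index falls through to the default branch.
```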
If the tensor is a scalar of non-boolean type, the\n scalar is converted to a boolean according to the\n following rule: if the scalar is a numerical value, non-zero means\n `True` and zero means False; if the scalar is a string, non-empty\n means `True` and empty means `False`. If the tensor is not a scalar,\n being empty means False and being non-empty means True.\n\n This should only be used when the if then/else body functions do not\n have stateful ops.", + "typeAttr": "Tcond" + }, + { + "name": "input", + "description": "A list of input tensors.", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of return values.", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "StatelessMultinomial", + "summary": "Draws samples from a multinomial distribution.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + }, + { + "name": "output_dtype", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "logits", + "description": "2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]`\nrepresents the unnormalized log probabilities for all classes.", + "typeAttr": "T" + }, + { + "name": "num_samples", + "description": "0-D. Number of independent samples to draw for each row slice.", + "type": 3 + }, + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "name": "output", + "description": "2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]`\ncontains the drawn class labels with range `[0, num_classes)`.", + "typeAttr": "output_dtype" + } + ] + }, + { + "name": "StatelessParameterizedTruncatedNormal", + "attributes": [ + { + "name": "S", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + }, + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "S" + }, + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + }, + { + "name": "means", + "description": "The mean parameter of each batch.", + "typeAttr": "dtype" + }, + { + "name": "stddevs", + "description": "The standard deviation parameter of each batch. Must be greater than 0.", + "typeAttr": "dtype" + }, + { + "name": "minvals", + "description": "The minimum cutoff. May be -infinity.", + "typeAttr": "dtype" + }, + { + "name": "maxvals", + "description": "The maximum cutoff. 
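Editorial aside: `StatelessMultinomial` underlies the public `tf.random.stateless_categorical`; the same seed and logits always reproduce the same draws. A sketch (TensorFlow 2.x assumed):

```python
import tensorflow as tf

logits = tf.math.log([[0.1, 0.9]])          # [batch_size=1, num_classes=2]
samples = tf.random.stateless_categorical(
    logits, num_samples=5, seed=[7, 42])    # shape [1, 5], values in [0, 2)
again = tf.random.stateless_categorical(
    logits, num_samples=5, seed=[7, 42])    # identical to `samples`
```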
May be +infinity, and must be more than the minval\nfor each batch.", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "The outputs are truncated normal samples and are a deterministic function of\n`shape`, `seed`, `minvals`, `maxvals`, `means` and `stddevs`.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomBinomial", + "summary": "Outputs deterministic pseudorandom random numbers from a binomial distribution.", + "description": "Outputs random values from a binomial distribution.\n\nThe outputs are a deterministic function of `shape`, `seed`, `counts`, and `probs`.", + "attributes": [ + { + "name": "S", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 2 + } + }, + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "S" + }, + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + }, + { + "name": "counts", + "description": "The counts of the binomial distribution. Must be broadcastable with `probs`,\nand broadcastable with the rightmost dimensions of `shape`.", + "typeAttr": "T" + }, + { + "name": "probs", + "description": "The probability of success for the binomial distribution. Must be broadcastable\nwith `counts` and broadcastable with the rightmost dimensions of `shape`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomGammaV2", + "summary": "Outputs deterministic pseudorandom random numbers from a gamma distribution.", + "description": "Outputs random values from a gamma distribution.\n\nThe outputs are a deterministic function of `shape`, `seed`, and `alpha`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `float32`, `float64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "T" + }, + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + }, + { + "name": "alpha", + "description": "The concentration of the gamma distribution. 
Shape must match the rightmost\ndimensions of `shape`.", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomGammaV3", + "summary": "Outputs deterministic pseudorandom random numbers from a gamma distribution.", + "description": "Outputs random values from a gamma distribution.\n\nThe outputs are a deterministic function of the inputs.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `float32`, `float64`." + }, + { + "name": "shape_dtype", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "shape_dtype" + }, + { + "name": "key", + "description": "Key for the counter-based RNG algorithm (shape uint64[1]).", + "type": 23 + }, + { + "name": "counter", + "description": "Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.", + "type": 23 + }, + { + "name": "alg", + "description": "The RNG algorithm (shape int32[]).", + "type": 3 + }, + { + "name": "alpha", + "description": "The concentration of the gamma distribution. Shape must match the rightmost\ndimensions of `shape`.", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomGetAlg", + "summary": "Picks the best counter-based RNG algorithm based on device.", + "description": "This op picks the best counter-based RNG algorithm based on device.", + "outputs": [ + { + "name": "alg", + "description": "The RNG algorithm (shape int32[]).", + "type": 3 + } + ] + }, + { + "name": "StatelessRandomGetKeyCounter", + "summary": "Scrambles seed into key and counter, using the best algorithm based on device.", + "description": "This op scrambles a shape-[2] seed into a key and a counter, both needed by counter-based RNG algorithms. The scrambling uses the best algorithm based on device. The scrambling is opaque but approximately satisfies the property that a different seed results in a different key/counter pair (which will in turn result in different random numbers).", + "attributes": [ + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "name": "key", + "description": "Key for the counter-based RNG algorithm (shape uint64[1]).", + "type": 23 + }, + { + "name": "counter", + "description": "Counter for the counter-based RNG algorithm. 
Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).", + "type": 23 + } + ] + }, + { + "name": "StatelessRandomGetKeyCounterAlg", + "summary": "Picks the best algorithm based on device, and scrambles seed into key and counter.", + "description": "This op picks the best counter-based RNG algorithm based on device, and scrambles a shape-[2] seed into a key and a counter, both needed by the counter-based algorithm. The scrambling is opaque but approximately satisfies the property that a different seed results in a different key/counter pair (which will in turn result in different random numbers).", + "attributes": [ + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "name": "key", + "description": "Key for the counter-based RNG algorithm (shape uint64[1]).", + "type": 23 + }, + { + "name": "counter", + "description": "Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).", + "type": 23 + }, + { + "name": "alg", + "description": "The RNG algorithm (shape int32[]).", + "type": 3 + } + ] + }, + { + "name": "StatelessRandomNormal", + "summary": "Outputs deterministic pseudorandom values from a normal distribution.", + "description": "The generated values will have mean 0 and standard deviation 1.\n\nThe outputs are a deterministic function of `shape` and `seed`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "T" + }, + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomNormalV2", + "summary": "Outputs deterministic pseudorandom values from a normal distribution.", + "description": "The generated values will have mean 0 and standard deviation 1.\n\nThe outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. 
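Editorial aside: `StatelessRandomGetKeyCounter`, `StatelessRandomGetAlg`, and the `*V2` samplers form a pipeline: scramble a seed, pick the device's algorithm, then draw. A hedged sketch via the raw-op bindings (TensorFlow 2.x assumed):

```python
import tensorflow as tf

key, counter = tf.raw_ops.StatelessRandomGetKeyCounter(seed=[1, 2])
alg = tf.raw_ops.StatelessRandomGetAlg()  # best algorithm for this device
out = tf.raw_ops.StatelessRandomNormalV2(
    shape=[3], key=key, counter=counter, alg=alg)
# `out` is a deterministic function of shape, key, counter, and alg.
```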
Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tshape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "Tshape" + }, + { + "name": "key", + "description": "Key for the counter-based RNG algorithm (shape uint64[1]).", + "type": 23 + }, + { + "name": "counter", + "description": "Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.", + "type": 23 + }, + { + "name": "alg", + "description": "The RNG algorithm (shape int32[]).", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomPoisson", + "summary": "Outputs deterministic pseudorandom random numbers from a Poisson distribution.", + "description": "Outputs random values from a Poisson distribution.\n\nThe outputs are a deterministic function of `shape`, `seed`, and `lam`.", + "attributes": [ + { + "name": "Rtype", + "type": "type", + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`." + }, + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "T" + }, + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + }, + { + "name": "lam", + "description": "The rate of the Poisson distribution. Shape must match the rightmost dimensions\nof `shape`.", + "typeAttr": "Rtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomUniform", + "summary": "Outputs deterministic pseudorandom random values from a uniform distribution.", + "description": "The generated values follow a uniform distribution in the range `[0, 1)`. The\nlower bound 0 is included in the range, while the upper bound 1 is excluded.\n\nThe outputs are a deterministic function of `shape` and `seed`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. 
Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "T" + }, + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomUniformFullInt", + "summary": "Outputs deterministic pseudorandom random integers from a uniform distribution.", + "description": "The generated values are uniform integers covering the whole range of `dtype`.\n\nThe outputs are a deterministic function of `shape` and `seed`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `int32`, `int64`, `uint32`, `uint64`.", + "default": { + "type": "type", + "value": 23 + } + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`, `uint32`, `uint64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "T" + }, + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomUniformFullIntV2", + "summary": "Outputs deterministic pseudorandom random integers from a uniform distribution.", + "description": "The generated values are uniform integers covering the whole range of `dtype`.\n\nThe outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `int32`, `int64`, `uint32`, `uint64`.", + "default": { + "type": "type", + "value": 23 + } + }, + { + "name": "Tshape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "Tshape" + }, + { + "name": "key", + "description": "Key for the counter-based RNG algorithm (shape uint64[1]).", + "type": 23 + }, + { + "name": "counter", + "description": "Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. 
[:N]) will be used.", + "type": 23 + }, + { + "name": "alg", + "description": "The RNG algorithm (shape int32[]).", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomUniformInt", + "summary": "Outputs deterministic pseudorandom random integers from a uniform distribution.", + "description": "The generated values follow a uniform distribution in the range `[minval, maxval)`.\n\nThe outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `int32`, `int64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "T" + }, + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + }, + { + "name": "minval", + "description": "Minimum value (inclusive, scalar).", + "typeAttr": "dtype" + }, + { + "name": "maxval", + "description": "Maximum value (exclusive, scalar).", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomUniformIntV2", + "summary": "Outputs deterministic pseudorandom random integers from a uniform distribution.", + "description": "The generated values follow a uniform distribution in the range `[minval, maxval)`.\n\nThe outputs are a deterministic function of `shape`, `key`, `counter`, `alg`, `minval` and `maxval`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `int32`, `int64`, `uint32`, `uint64`." + }, + { + "name": "Tshape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "Tshape" + }, + { + "name": "key", + "description": "Key for the counter-based RNG algorithm (shape uint64[1]).", + "type": 23 + }, + { + "name": "counter", + "description": "Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.", + "type": 23 + }, + { + "name": "alg", + "description": "The RNG algorithm (shape int32[]).", + "type": 3 + }, + { + "name": "minval", + "description": "Minimum value (inclusive, scalar).", + "typeAttr": "dtype" + }, + { + "name": "maxval", + "description": "Maximum value (exclusive, scalar).", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessRandomUniformV2", + "summary": "Outputs deterministic pseudorandom random values from a uniform distribution.", + "description": "The generated values follow a uniform distribution in the range `[0, 1)`. 
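Editorial aside: `StatelessRandomUniformInt` is reachable through `tf.random.stateless_uniform` with an integer dtype, where both bounds must be given; determinism means equal arguments produce equal draws. A sketch (TensorFlow 2.x assumed):

```python
import tensorflow as tf

x = tf.random.stateless_uniform(
    shape=[4], seed=[1, 2], minval=0, maxval=10, dtype=tf.int32)
y = tf.random.stateless_uniform(
    shape=[4], seed=[1, 2], minval=0, maxval=10, dtype=tf.int32)
# tf.reduce_all(x == y) is True: same shape/seed/bounds give identical values.
```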
The\nlower bound 0 is included in the range, while the upper bound 1 is excluded.\n\nThe outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tshape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "Tshape" + }, + { + "name": "key", + "description": "Key for the counter-based RNG algorithm (shape uint64[1]).", + "type": 23 + }, + { + "name": "counter", + "description": "Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.", + "type": 23 + }, + { + "name": "alg", + "description": "The RNG algorithm (shape int32[]).", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessSampleDistortedBoundingBox", + "summary": "Generate a randomly distorted bounding box for an image deterministically.", + "description": "Bounding box annotations are often supplied in addition to ground-truth labels\nin image recognition or object localization tasks. A common technique for\ntraining such a system is to randomly distort an image while preserving its\ncontent, i.e. *data augmentation*. This Op, given the same `seed`,\ndeterministically outputs a randomly distorted localization of an object, i.e.\nbounding box, given an `image_size`, `bounding_boxes` and a series of\nconstraints.\n\nThe output of this Op is a single bounding box that may be used to crop the\noriginal image. The output is returned as 3 tensors: `begin`, `size` and\n`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the\nimage. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize\nwhat the bounding box looks like.\n\nBounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nthe height of the underlying image.\n\nThe output of this Op is guaranteed to be the same given the same `seed` and is\nindependent of how many times the function is called, and independent of global\nseed settings (e.g. `tf.random.set_seed`).\n\nExample usage:\n\n>>> image = np.array([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]])\n>>> bbox = tf.constant(\n... [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])\n>>> seed = (1, 2)\n>>> # Generate a single distorted bounding box.\n>>> bbox_begin, bbox_size, bbox_draw = (\n... tf.image.stateless_sample_distorted_bounding_box(\n... tf.shape(image), bounding_boxes=bbox, seed=seed))\n>>> # Employ the bounding box to distort the image.\n>>> tf.slice(image, bbox_begin, bbox_size)\n\n>>> # Draw the bounding box in an image summary.\n>>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n>>> tf.image.draw_bounding_boxes(\n... 
tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors)\n\n\nNote that if no bounding box information is available, setting\n`use_image_if_no_bounding_boxes = true` will assume there is a single implicit\nbounding box covering the whole image. If `use_image_if_no_bounding_boxes` is\nfalse and no bounding boxes are supplied, an error is raised.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `int8`, `int16`, `int32`, `int64`." + }, + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "aspect_ratio_range", + "type": "float32[]", + "description": "The cropped area of the image must have an aspect ratio =\nwidth / height within this range.", + "default": [ + 0.75, + 1.3300000429153442 + ] + }, + { + "name": "area_range", + "type": "float32[]", + "description": "The cropped area of the image must contain a fraction of the\nsupplied image within this range.", + "default": [ + 0.05000000074505806, + 1.0 + ] + }, + { + "name": "max_attempts", + "type": "int64", + "description": "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage.", + "default": 100 + }, + { + "name": "use_image_if_no_bounding_boxes", + "type": "boolean", + "description": "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error.", + "default": false + } + ], + "inputs": [ + { + "name": "image_size", + "description": "1-D, containing `[height, width, channels]`.", + "typeAttr": "T" + }, + { + "name": "bounding_boxes", + "description": "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image.", + "type": 1 + }, + { + "name": "min_object_covered", + "description": "The cropped area of the image must contain at least this\nfraction of any bounding box supplied. The value of this parameter should be\nnon-negative. In the case of 0, the cropped area does not need to overlap\nany of the bounding boxes supplied.", + "type": 1 + }, + { + "name": "seed", + "description": "1-D with shape `[2]`. The seed to the random number generator. Must have dtype\n`int32` or `int64`. (When using XLA, only `int32` is allowed.)", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "name": "begin", + "description": "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`.", + "typeAttr": "T" + }, + { + "name": "size", + "description": "1-D, containing `[target_height, target_width, -1]`. Provide as input to\n`tf.slice`.", + "typeAttr": "T" + }, + { + "name": "bboxes", + "description": "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`.", + "type": 1 + } + ] + }, + { + "name": "StatelessShuffle", + "summary": "Randomly and deterministically shuffles a tensor along its first dimension.", + "description": "The tensor is shuffled along dimension 0, such that each `value[j]` is mapped\nto one and only one `output[i]`. 
For example, a mapping that might occur for a\n3x2 tensor is:\n\n```\n[[1, 2], [[5, 6],\n [3, 4], ==> [1, 2],\n [5, 6]] [3, 4]]\n```\n\nThe outputs are a deterministic function of `value`, `key`, `counter` and `alg`.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "value", + "description": "The tensor to be shuffled.", + "typeAttr": "T" + }, + { + "name": "key", + "description": "Key for the counter-based RNG algorithm (shape uint64[1]).", + "type": 23 + }, + { + "name": "counter", + "description": "Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.", + "type": 23 + }, + { + "name": "alg", + "description": "The RNG algorithm (shape int32[]).", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor of same shape and type as `value`, shuffled along its first\ndimension.", + "typeAttr": "T" + } + ] + }, + { + "name": "StatelessTruncatedNormal", + "summary": "Outputs deterministic pseudorandom values from a truncated normal distribution.", + "description": "The generated values follow a normal distribution with mean 0 and standard\ndeviation 1, except that values whose magnitude is more than 2 standard\ndeviations from the mean are dropped and re-picked.\n\nThe outputs are a deterministic function of `shape` and `seed`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "Tseed", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "T" + }, + { + "name": "seed", + "description": "2 seeds (shape [2]).", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessTruncatedNormalV2", + "summary": "Outputs deterministic pseudorandom values from a truncated normal distribution.", + "description": "The generated values follow a normal distribution with mean 0 and standard\ndeviation 1, except that values whose magnitude is more than 2 standard\ndeviations from the mean are dropped and re-picked.\n\nThe outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the output. 
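Editorial aside: `StatelessTruncatedNormal` backs `tf.random.stateless_truncated_normal`; because out-of-range values are dropped and re-picked, every sample magnitude stays within two standard deviations. A sketch (TensorFlow 2.x assumed):

```python
import tensorflow as tf

z = tf.random.stateless_truncated_normal(shape=[1000], seed=[3, 4])
# Values more than 2 standard deviations from the mean are re-drawn, so:
assert float(tf.reduce_max(tf.abs(z))) <= 2.0
```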
Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "default": { + "type": "type", + "value": 1 + } + }, + { + "name": "Tshape", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "Tshape" + }, + { + "name": "key", + "description": "Key for the counter-based RNG algorithm (shape uint64[1]).", + "type": 23 + }, + { + "name": "counter", + "description": "Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.", + "type": 23 + }, + { + "name": "alg", + "description": "The RNG algorithm (shape int32[]).", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "Random values with specified shape.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "StatelessWhile", + "summary": "output = input; While (Cond(output)) { output = Body(output) }", + "attributes": [ + { + "name": "T", + "type": "type[]", + "description": "dtype in use.", + "minimum": 0 + }, + { + "name": "cond", + "type": "function", + "description": " A function that takes 'input' and returns a tensor. If the tensor is\n a scalar of non-boolean type, the scalar is converted to a boolean\n according to the following rule: if the scalar is a numerical\n value, non-zero means True and zero means False; if the scalar is\n a string, non-empty means True and empty means False. If the\n tensor is not a scalar, non-emptiness means True and emptiness\n means False.\n\n This should only be used when the while condition and body functions\n do not have stateful ops." + }, + { + "name": "body", + "type": "function", + "description": " A function that takes a list of tensors and returns another\n list of tensors. Both lists have the same types as specified\n by T." + }, + { + "name": "output_shapes", + "type": "shape[]", + "default": [] + }, + { + "name": "parallel_iterations", + "type": "int64", + "default": 10 + } + ], + "inputs": [ + { + "name": "input", + "description": "A list of input tensors whose types are T.", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of output tensors whose types are T.", + "typeListAttr": "T" + } + ] + }, + { + "name": "StaticRegexFullMatch", + "summary": "Check if the input matches the regex pattern.", + "description": "The input is a string tensor of any shape. The pattern is the\nregular expression to be matched with every element of the input tensor.\nThe boolean values (True or False) of the output tensor indicate\nif the input matches the regex pattern provided.\n\nThe pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)", + "attributes": [ + { + "name": "pattern", + "type": "string", + "description": "The regular expression to match the input." 
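Editorial aside: `StaticRegexFullMatch` (the constant-pattern kernel) is what `tf.strings.regex_full_match` typically compiles to when `pattern` is a Python constant; note it is a full match, not a substring search. A sketch (TensorFlow 2.x assumed):

```python
import tensorflow as tf

m = tf.strings.regex_full_match(["abc123", "abc"], pattern=r"[a-z]+[0-9]+")
# m == [True, False]: the entire string must match the RE2 pattern.
```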
+ } + ], + "inputs": [ + { + "name": "input", + "description": "A string tensor of the text to be processed.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "A bool tensor with the same shape as `input`.", + "type": 10 + } + ] + }, + { + "name": "StaticRegexReplace", + "summary": "Replaces the match of pattern in input with rewrite.", + "description": "It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)", + "attributes": [ + { + "name": "pattern", + "type": "string", + "description": "The regular expression to match the input." + }, + { + "name": "rewrite", + "type": "string", + "description": "The rewrite to be applied to the matched expression." + }, + { + "name": "replace_global", + "type": "boolean", + "description": "If True, the replacement is global, otherwise the replacement\nis done only on the first match.", + "default": true + } + ], + "inputs": [ + { + "name": "input", + "description": "The text to be processed.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "The text after applying pattern and rewrite.", + "type": 7 + } + ] + }, + { + "name": "StatsAggregatorHandle", + "summary": "Creates a statistics manager resource.", + "attributes": [ + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + }, + { + "name": "StatsAggregatorHandleV2", + "attributes": [ + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + }, + { + "name": "StatsAggregatorSetSummaryWriter", + "summary": "Set a summary_writer_interface to record statistics using given stats_aggregator.", + "inputs": [ + { + "name": "stats_aggregator", + "type": 20 + }, + { + "name": "summary", + "type": 20 + } + ] + }, + { + "name": "StatsAggregatorSummary", + "summary": "Produces a summary of any statistics recorded by the given statistics manager.", + "inputs": [ + { + "name": "iterator", + "type": 20 + } + ], + "outputs": [ + { + "name": "summary", + "type": 7 + } + ] + }, + { + "name": "StochasticCastToInt", + "summary": "Stochastically cast a given tensor from floats to ints.", + "description": "The values are cast with a deterministic pseudo-random tensor from a uniform distribution generated from user given key, counter, algorithm. Values will saturate if out of the specified integer type range, and will become zero if inputs are NaN. \n\nThe outputs are a deterministic function of `input`, `key`, `counter`, `alg`.\n", + "attributes": [ + { + "name": "Tin", + "type": "type", + "description": "The type of the input. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "Tout", + "type": "type", + "description": "The type of the output. Must be one of the following: `int8`, `int16`, `int32`." + } + ], + "inputs": [ + { + "name": "input", + "description": "The operand to stochastically cast to int.", + "typeAttr": "Tin" + }, + { + "name": "key", + "description": "Key for the counter-based RNG algorithm (shape uint64[1]).", + "type": 23 + }, + { + "name": "counter", + "description": "Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. 
[:N]) will be used.", + "type": 23 + }, + { + "name": "alg", + "description": "The RNG algorithm (shape int32[]).", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The cast result with the same shape as the input.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "StopGradient", + "summary": "Stops gradient computation.", + "description": "When executed in a graph, this op outputs its input tensor as-is.\n\nWhen building ops to compute gradients, this op prevents the contribution of\nits inputs from being taken into account. Normally, the gradient generator adds ops\nto a graph to compute the derivatives of a specified 'loss' by recursively\nfinding out inputs that contributed to its computation. If you insert this op\nin the graph, its inputs are masked from the gradient generator. They are not\ntaken into account for computing gradients.\n\nThis is useful any time you want to compute a value with TensorFlow but need\nto pretend that the value was a constant. For example, the softmax function\nfor a vector x can be written as\n\n```python\n\n def softmax(x):\n numerator = tf.exp(x)\n denominator = tf.reduce_sum(numerator)\n return numerator / denominator\n```\n\nThis however is susceptible to overflow if the values in x are large. An\nalternative more stable way is to subtract the maximum of x from each of the\nvalues.\n\n```python\n\n def stable_softmax(x):\n z = x - tf.reduce_max(x)\n numerator = tf.exp(z)\n denominator = tf.reduce_sum(numerator)\n return numerator / denominator\n```\n\nHowever, when we backprop through the softmax to x, we don't want to backprop\nthrough the `tf.reduce_max(x)` calculation (if the max values are not unique then the\ngradient could flow to the wrong input) and instead want to treat it as a\nconstant. Therefore, we should write this out as\n\n```python\n\n def stable_softmax(x):\n z = x - tf.stop_gradient(tf.reduce_max(x))\n numerator = tf.exp(z)\n denominator = tf.reduce_sum(numerator)\n return numerator / denominator\n```\n\nSome other examples include:\n\n* The *EM* algorithm where the *M-step* should not involve backpropagation\n through the output of the *E-step*.\n* Contrastive divergence training of Boltzmann machines where, when\n differentiating the energy function, the training must not backpropagate\n through the graph that generated the samples from the model.\n* Adversarial training, where no backprop should happen through the adversarial\n example generation process.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "StoreMinibatchStatisticsInFdo", + "attributes": [ + { + "name": "sample_count", + "type": "int64", + "minimum": 1 + }, + { + "name": "num_replica", + "type": "int64", + "minimum": 1 + }, + { + "name": "feature_width", + "type": "int64", + "minimum": 1 + }, + { + "name": "num_sc_per_chip", + "type": "int64", + "minimum": 1 + }, + { + "name": "table_name", + "type": "string" + }, + { + "name": "mini_batch_splits", + "type": "string" + } + ], + "inputs": [ + { + "name": "program_key", + "type": 7 + }, + { + "name": "max_ids", + "type": 3 + }, + { + "name": "max_uniques", + "type": 3 + } + ] + }, + { + "name": "StridedSlice", + "category": "Tensor", + "summary": "Return a strided slice from `input`.", + "description": "Note, most python users will want to use the Python `Tensor.__getitem__`\nor `Variable.__getitem__` rather than this op directly.\n\nThe goal of 
this op is to produce a new tensor with a subset of\nthe elements from the `n` dimensional `input` tensor. The subset is chosen using\na sequence of `m` sparse range specifications encoded into the arguments\nof this function. Note, in some cases\n`m` could be equal to `n`, but this need not be the case. Each\nrange specification entry can be one of the following:\n\n- An ellipsis (...). Ellipses are used to imply zero or more\n dimensions of full-dimension selection and are produced using\n `ellipsis_mask`. For example, `foo[...]` is the identity slice.\n\n- A new axis. This is used to insert a new shape=1 dimension and is\n produced using `new_axis_mask`. For example, `foo[tf.newaxis, ...]` where\n `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.\n\n\n- A range `begin:end:stride`. This is used to specify how much to choose from\n a given dimension. `stride` can be any integer but 0. `begin` is an integer\n which represents the index of the first value to select while `end` represents\n the index of the last value to select. The number of values selected in each\n dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.\n `begin` and `end` can be negative where `-1` is the last element, `-2` is\n the second to last. `begin_mask` controls whether to replace the explicitly\n given `begin` with an implicit effective value of `0` if `stride > 0` and\n `-1` if `stride < 0`. `end_mask` is analogous but produces the number\n required to create the largest open interval. For example, given a shape\n `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do\n not assume this is equivalent to `foo[0:-1]` which has an effective `begin`\n and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the\n first dimension of a tensor while dropping the last element (in the original\n order). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[3,2,1]`.\n\n- A single index. This is used to keep only elements that have a given\n index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a\n shape `(6,)` tensor. This is encoded in `begin` and `end` and\n `shrink_axis_mask`.\n\nEach conceptual range specification is encoded in the op's argument. This\nencoding is best understood by considering a non-trivial example. In\nparticular,\n`foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as\n\n```\nbegin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)\nend = [2, 4, x, x, -3, x]\nstrides = [1, 1, x, x, -1, 1]\nbegin_mask = 1<<4 | 1<<5 = 48\nend_mask = 1<<5 = 32\nellipsis_mask = 1<<3 = 8\nnew_axis_mask = 1<<2 = 4\nshrink_axis_mask = 1<<0 = 1\n```\n\nIn this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of\nthe slice becomes (2, 1, 5, 5, 2, 5).\nLet us walk step by step through each argument specification.\n\n1. The first argument in the example slice is turned into `begin = 1` and\n`end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we\nalso set the appropriate bit in `shrink_axis_mask`.\n\n2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have\nzero bits contributed.\n\n3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1\nin the final shape. Dummy values are contributed to begin,\nend and stride, while the new_axis_mask bit is set.\n\n4. `...` grabs the full ranges from as many dimensions as needed to\nfully specify a slice for every dimension of the input shape.\n\n5. `:-3:-1` shows the use of negative indices. 
A negative index `i` associated\nwith a dimension that has shape `s` is converted to a positive index\n`s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion\nis done internally so begin, end and strides receive x, -3, and -1.\nThe appropriate begin_mask bit is set to indicate the start range is the\nfull range (ignoring the x).\n\n6. `:` indicates that the entire contents of the corresponding dimension\nis selected. This is equivalent to `::` or `0::1`. begin, end, and strides\nreceive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and\n`end_mask` are also set.\n\n*Requirements*:\n `0 != strides[i] for i in [0, m)`\n `ellipsis_mask must be a power of two (only one ellipsis)`", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Index", + "type": "type", + "description": "Must be one of the following: `int16`, `int32`, `int64`." + }, + { + "name": "begin_mask", + "type": "int64", + "description": "a bitmask where a bit i being 1 means to ignore the begin\nvalue and instead use the largest interval possible. At runtime\nbegin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or\n`[-1, n-1]` if `stride[i] < 0`", + "default": 0 + }, + { + "name": "end_mask", + "type": "int64", + "description": "analogous to `begin_mask`", + "default": 0 + }, + { + "name": "ellipsis_mask", + "type": "int64", + "description": "a bitmask where bit `i` being 1 means the `i`th\nposition is actually an ellipsis. One bit at most can be 1.\nIf `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`\nis provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis\nimplicitly creates as many range specifications as necessary to fully\nspecify the sliced range for every dimension. For example for a 4-dimensional\ntensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.", + "default": 0 + }, + { + "name": "new_axis_mask", + "type": "int64", + "description": "a bitmask where bit `i` being 1 means the `i`th\nspecification creates a new shape 1 dimension. For example\n`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.", + "default": 0 + }, + { + "name": "shrink_axis_mask", + "type": "int64", + "description": "a bitmask where bit `i` implies that the `i`th\nspecification should shrink the dimensionality. begin and end\nmust imply a slice of size 1 in the dimension. For example in\npython one might do `foo[:, 3, :]` which would result in\n`shrink_axis_mask` being 2.", + "default": 0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "begin", + "description": "`begin[k]` specifies the offset into the `k`th range specification.\nThe exact dimension this corresponds to will be determined by context.\nOut-of-bounds values will be silently clamped. If the `k`th bit of\n`begin_mask` is set then `begin[k]` is ignored and the full range of the\nappropriate dimension is used instead. Negative values cause indexing\nto start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.", + "typeAttr": "Index" + }, + { + "name": "end", + "description": "`end[i]` is like `begin` with the exception that `end_mask` is\nused to determine full ranges.", + "typeAttr": "Index" + }, + { + "name": "strides", + "description": "`strides[i]` specifies the increment in the `i`th specification\nafter extracting a given element. Negative indices will reverse\nthe original order. 
Out-of-range values are\nclamped to `[0,dim[i]) if strides[i]>0` or `[-1,dim[i]-1] if strides[i] < 0`", + "typeAttr": "Index" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "StridedSliceAssign", + "summary": "Assign `value` to the sliced l-value reference of `ref`.", + "description": "The values of `value` are assigned to the positions in the variable\n`ref` that are selected by the slice parameters. The slice parameters\n`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.\n\nNOTE this op currently does not support broadcasting and so `value`'s\nshape must be exactly the shape produced by the slice of `ref`.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Index", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "begin_mask", + "type": "int64", + "default": 0 + }, + { + "name": "end_mask", + "type": "int64", + "default": 0 + }, + { + "name": "ellipsis_mask", + "type": "int64", + "default": 0 + }, + { + "name": "new_axis_mask", + "type": "int64", + "default": 0 + }, + { + "name": "shrink_axis_mask", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "ref", + "typeAttr": "T", + "isRef": true + }, + { + "name": "begin", + "typeAttr": "Index" + }, + { + "name": "end", + "typeAttr": "Index" + }, + { + "name": "strides", + "typeAttr": "Index" + }, + { + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_ref", + "typeAttr": "T", + "isRef": true + } + ] + }, + { + "name": "StridedSliceGrad", + "summary": "Returns the gradient of `StridedSlice`.", + "description": "Since `StridedSlice` cuts out pieces of its `input` which is of size\n`shape`, its gradient will have the same shape (which is passed here\nas `shape`). The gradient will be zero in any element that the slice\ndoes not select.\n\nArguments are the same as `StridedSlice` with the exception that\n`dy` is the input gradient to be propagated and `shape` is the\nshape of `StridedSlice`'s `input`.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Index", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
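A minimal sketch of the mask encoding walked through above, via the public `tf.strided_slice` wrapper that drives this op: slicing `foo[1, 2:4]` becomes `begin=[1, 2]`, `end=[2, 4]`, `strides=[1, 1]` with bit 0 of `shrink_axis_mask` set so the first specification collapses to a single index.

```python
import tensorflow as tf

foo = tf.reshape(tf.range(25), [5, 5])   # row 1 is [5, 6, 7, 8, 9]

# Explicit encoding of foo[1, 2:4]: shrink_axis_mask bit 0 turns the
# begin=1/end=2 range into a single-index selection that drops the axis.
sliced = tf.strided_slice(foo, [1, 2], [2, 4], [1, 1], shrink_axis_mask=1)
# sliced == [7, 8], shape (2,) -- identical to foo[1, 2:4]
```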
+ }, + { + "name": "begin_mask", + "type": "int64", + "default": 0 + }, + { + "name": "end_mask", + "type": "int64", + "default": 0 + }, + { + "name": "ellipsis_mask", + "type": "int64", + "default": 0 + }, + { + "name": "new_axis_mask", + "type": "int64", + "default": 0 + }, + { + "name": "shrink_axis_mask", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "shape", + "typeAttr": "Index" + }, + { + "name": "begin", + "typeAttr": "Index" + }, + { + "name": "end", + "typeAttr": "Index" + }, + { + "name": "strides", + "typeAttr": "Index" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "StringFormat", + "summary": "Formats a string template using a list of tensors.", + "description": "Formats a string template using a list of tensors, pretty-printing tensor summaries.", + "attributes": [ + { + "name": "T", + "type": "type[]", + "minimum": 0 + }, + { + "name": "template", + "type": "string", + "description": "A string, the template to format tensor summaries into.", + "default": "%s" + }, + { + "name": "placeholder", + "type": "string", + "description": "A string, at each placeholder in the template a subsequent tensor summary will be inserted.", + "default": "%s" + }, + { + "name": "summarize", + "type": "int64", + "description": "When formatting the tensor summaries print the first and last summarize entries of each tensor dimension.", + "default": 3 + } + ], + "inputs": [ + { + "name": "inputs", + "description": "The list of tensors to format into the placeholder string.", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "= The resulting string scalar.", + "type": 7 + } + ] + }, + { + "name": "StringJoin", + "summary": "Joins the strings in the given list of string tensors into one tensor;", + "description": "with the given separator (default is an empty separator).\n\nExamples:\n\n>>> s = [\"hello\", \"world\", \"tensorflow\"]\n>>> tf.strings.join(s, \" \")\n", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 0 + }, + { + "name": "separator", + "type": "string", + "description": "string, an optional join separator.", + "default": "" + } + ], + "inputs": [ + { + "name": "inputs", + "description": "A list of string tensors. The tensors must all have the same shape,\nor be scalars. Scalars may be mixed in; these will be broadcast to the shape\nof non-scalar inputs.", + "numberAttr": "N", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "type": 7 + } + ] + }, + { + "name": "StringLength", + "summary": "String lengths of `input`.", + "description": "Computes the length of each string given in the input tensor.\n\n>>> strings = tf.constant(['Hello','TensorFlow', '\\U0001F642'])\n>>> tf.strings.length(strings).numpy() # default counts bytes\narray([ 5, 10, 4], dtype=int32)\n>>> tf.strings.length(strings, unit=\"UTF8_CHAR\").numpy()\narray([ 5, 10, 1], dtype=int32)\n", + "attributes": [ + { + "name": "unit", + "type": "string", + "description": "The unit that is counted to compute string length. One of: `\"BYTE\"` (for\nthe number of bytes in each string) or `\"UTF8_CHAR\"` (for the number of UTF-8\nencoded Unicode code points in each string). Results are undefined\nif `unit=UTF8_CHAR` and the `input` strings do not contain structurally\nvalid UTF-8. 
Must be one of the following: `BYTE`, `UTF8_CHAR`.", + "default": "BYTE" + } + ], + "inputs": [ + { + "name": "input", + "description": "The strings for which to compute the length of each element.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "Integer tensor that has the same shape as `input`. The output contains the\nelement-wise string lengths of `input`.", + "type": 3 + } + ] + }, + { + "name": "StringLower", + "summary": "Converts all uppercase characters into their respective lowercase replacements.", + "description": "Example:\n\n>>> tf.strings.lower(\"CamelCase string and ALL CAPS\")\n\n", + "attributes": [ + { + "name": "encoding", + "type": "string", + "description": "Character encoding of `input`. Allowed values are '' and 'utf-8'.\nValue '' is interpreted as ASCII.", + "default": "" + } + ], + "inputs": [ + { + "name": "input", + "description": "The input to be lower-cased.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "type": 7 + } + ] + }, + { + "name": "StringNGrams", + "summary": "Creates ngrams from ragged string data.", + "description": "This op accepts a ragged tensor with 1 ragged dimension containing only\nstrings and outputs a ragged tensor with 1 ragged dimension containing ngrams\nof that string, joined along the innermost axis.", + "attributes": [ + { + "name": "separator", + "type": "string", + "description": "The string to append between elements of the token. Use \"\" for no separator." + }, + { + "name": "ngram_widths", + "type": "int64[]", + "description": "The sizes of the ngrams to create.", + "minimum": 0 + }, + { + "name": "left_pad", + "type": "string", + "description": "The string to use to pad the left side of the ngram sequence. Only used if\npad_width != 0." + }, + { + "name": "right_pad", + "type": "string", + "description": "The string to use to pad the right side of the ngram sequence. Only used if\npad_width != 0." + }, + { + "name": "pad_width", + "type": "int64", + "description": "The number of padding elements to add to each side of each\nsequence. Note that padding will never be greater than 'ngram_widths'-1\nregardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1`\nelements." + }, + { + "name": "preserve_short_sequences", + "type": "boolean" + }, + { + "name": "Tsplits", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "data", + "description": "The values tensor of the ragged string tensor to make ngrams out of. Must be a\n1D string tensor.", + "type": 7 + }, + { + "name": "data_splits", + "description": "The splits tensor of the ragged string tensor to make ngrams out of.", + "typeAttr": "Tsplits" + } + ], + "outputs": [ + { + "name": "ngrams", + "description": "The values tensor of the output ngrams ragged tensor.", + "type": 7 + }, + { + "name": "ngrams_splits", + "description": "The splits tensor of the output ngrams ragged tensor.", + "typeAttr": "Tsplits" + } + ] + }, + { + "name": "StringSplit", + "summary": "Split elements of `input` based on `delimiter` into a `SparseTensor`.", + "description": "Let N be the size of source (typically N will be the batch size). Split each\nelement of `input` based on `delimiter` and return a `SparseTensor`\ncontaining the split tokens. Empty tokens are ignored.\n\n`delimiter` can be empty, or a string of split characters. 
If `delimiter` is an\n empty string, each element of `input` is split into individual single-byte\n character strings, including splitting of UTF-8 multibyte sequences. Otherwise\n every character of `delimiter` is a potential split point.\n\nFor example:\n N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output\n will be\n\n indices = [0, 0;\n 0, 1;\n 1, 0;\n 1, 1;\n 1, 2]\n shape = [2, 3]\n values = ['hello', 'world', 'a', 'b', 'c']", + "attributes": [ + { + "name": "skip_empty", + "type": "boolean", + "description": "A `bool`. If `True`, skip the empty strings from the result.", + "default": true + } + ], + "inputs": [ + { + "name": "input", + "description": "1-D. Strings to split.", + "type": 7 + }, + { + "name": "delimiter", + "description": "0-D. Delimiter characters (bytes), or empty string.", + "type": 7 + } + ], + "outputs": [ + { + "name": "indices", + "description": "A dense matrix of int64 representing the indices of the sparse tensor.", + "type": 9 + }, + { + "name": "values", + "description": "A vector of strings corresponding to the split values.", + "type": 7 + }, + { + "name": "shape", + "description": "a length-2 vector of int64 representing the shape of the sparse\ntensor, where the first value is N and the second value is the maximum number\nof tokens in a single input entry.", + "type": 9 + } + ] + }, + { + "name": "StringSplitV2", + "summary": "Split elements of `source` based on `sep` into a `SparseTensor`.", + "description": "Let N be the size of source (typically N will be the batch size). Split each\nelement of `source` based on `sep` and return a `SparseTensor`\ncontaining the split tokens. Empty tokens are ignored.\n\nFor example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',\nthen the output will be\n```\nst.indices = [0, 0;\n 0, 1;\n 1, 0;\n 1, 1;\n 1, 2]\nst.shape = [2, 3]\nst.values = ['hello', 'world', 'a', 'b', 'c']\n```\n\nIf `sep` is given, consecutive delimiters are not grouped together and are\ndeemed to delimit empty strings. For example, source of `\"1<>2<><>3\"` and\nsep of `\"<>\"` returns `[\"1\", \"2\", \"\", \"3\"]`. If `sep` is None or an empty\nstring, consecutive whitespace is regarded as a single separator, and the\nresult will contain no empty strings at the start or end if the string has\nleading or trailing whitespace.\n\nNote that the above mentioned behavior matches python's str.split.", + "attributes": [ + { + "name": "maxsplit", + "type": "int64", + "description": "An `int`. 
If `maxsplit > 0`, limits the number of splits in the result.", + "default": -1 + } + ], + "inputs": [ + { + "name": "input", + "description": "`1-D` string `Tensor`, the strings to split.", + "type": 7 + }, + { + "name": "sep", + "description": "`0-D` string `Tensor`, the delimiter character.", + "type": 7 + } + ], + "outputs": [ + { + "name": "indices", + "type": 9 + }, + { + "name": "values", + "type": 7 + }, + { + "name": "shape", + "type": 9 + } + ] + }, + { + "name": "StringStrip", + "summary": "Strip leading and trailing whitespace from the Tensor.", + "description": "Examples:\n\n>>> tf.strings.strip([\"\\nTensorFlow\", \" The python library \"]).numpy()\narray([b'TensorFlow', b'The python library'], dtype=object)", + "inputs": [ + { + "name": "input", + "description": "A string `Tensor` of any shape.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "A string `Tensor` of the same shape as the input.", + "type": 7 + } + ] + }, + { + "name": "StringToHashBucket", + "summary": "Converts each string in the input Tensor to its hash mod by a number of buckets.", + "description": "The hash function is deterministic on the content of the string within the\nprocess.\n\nNote that the hash function may change from time to time.\nThis functionality will be deprecated and it's recommended to use\n`tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.", + "attributes": [ + { + "name": "num_buckets", + "type": "int64", + "description": "The number of buckets.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "string_tensor", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "A Tensor of the same shape as the input `string_tensor`.", + "type": 9 + } + ] + }, + { + "name": "StringToHashBucketFast", + "summary": "Converts each string in the input Tensor to its hash mod by a number of buckets.", + "description": "The hash function is deterministic on the content of the string within the\nprocess and will never change. However, it is not suitable for cryptography.\nThis function may be used when CPU time is scarce and inputs are trusted or\nunimportant. There is a risk of adversaries constructing inputs that all hash\nto the same bucket. To prevent this problem, use a strong hash function with\n`tf.string_to_hash_bucket_strong`.\n\nExamples:\n\n>>> tf.strings.to_hash_bucket_fast([\"Hello\", \"TensorFlow\", \"2.x\"], 3).numpy()\narray([0, 2, 2])", + "attributes": [ + { + "name": "num_buckets", + "type": "int64", + "description": "The number of buckets.", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input", + "description": "The strings to assign a hash bucket.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "A Tensor of the same shape as the input `string_tensor`.", + "type": 9 + } + ] + }, + { + "name": "StringToHashBucketStrong", + "summary": "Converts each string in the input Tensor to its hash mod by a number of buckets.", + "description": "The hash function is deterministic on the content of the string within the\nprocess. The hash function is a keyed hash function, where attribute `key`\ndefines the key of the hash function. `key` is an array of 2 elements.\n\nA strong hash is important when inputs may be malicious, e.g. URLs with\nadditional components. Adversaries could try to make their inputs hash to the\nsame bucket for a denial-of-service attack or to skew the results. 
A strong\nhash can be used to make it difficult to find inputs with a skewed hash value\ndistribution over buckets. This requires that the hash function is\nseeded by a high-entropy (random) \"key\" unknown to the adversary.\n\nThe additional robustness comes at a cost of roughly 4x higher compute\ntime than `tf.string_to_hash_bucket_fast`.\n\nExamples:\n\n>>> tf.strings.to_hash_bucket_strong([\"Hello\", \"TF\"], 3, [1, 2]).numpy()\narray([2, 0])", + "attributes": [ + { + "name": "num_buckets", + "type": "int64", + "description": "The number of buckets.", + "minimum": 1 + }, + { + "name": "key", + "type": "int64[]", + "description": "The key used to seed the hash function, passed as a list of two uint64\nelements." + } + ], + "inputs": [ + { + "name": "input", + "description": "The strings to assign a hash bucket.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "A Tensor of the same shape as the input `string_tensor`.", + "type": 9 + } + ] + }, + { + "name": "StringToNumber", + "summary": "Converts each string in the input Tensor to the specified numeric type.", + "description": "(Note that int32 overflow results in an error while float overflow\nresults in a rounded value.)\n\nExample:\n\n>>> strings = [\"5.0\", \"3.0\", \"7.0\"]\n>>> tf.strings.to_number(strings)\n\n", + "attributes": [ + { + "name": "out_type", + "type": "type", + "description": "The numeric type to interpret each string in `string_tensor` as. Must be one of the following: `float32`, `float64`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "string_tensor", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "A Tensor of the same shape as the input `string_tensor`.", + "typeAttr": "out_type" + } + ] + }, + { + "name": "StringUpper", + "summary": "Converts all lowercase characters into their respective uppercase replacements.", + "description": "Example:\n\n>>> tf.strings.upper(\"CamelCase string and ALL CAPS\")\n\n", + "attributes": [ + { + "name": "encoding", + "type": "string", + "description": "Character encoding of `input`. Allowed values are '' and 'utf-8'.\nValue '' is interpreted as ASCII.", + "default": "" + } + ], + "inputs": [ + { + "name": "input", + "description": "The input to be upper-cased.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "type": 7 + } + ] + }, + { + "name": "Sub", + "summary": "Returns x - y element-wise.", + "description": "*NOTE*: `Sub` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `uint32`, `uint64`." 
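A two-line sketch of the broadcasting behavior noted for `Sub` above (nothing here beyond the standard `tf.subtract` wrapper):

```python
import tensorflow as tf

# A [2, 3] tensor minus a [3] vector: the vector broadcasts across rows.
x = tf.constant([[1., 2., 3.], [4., 5., 6.]])
y = tf.constant([1., 1., 1.])
z = tf.subtract(x, y)   # lowers to the Sub op
# z == [[0., 1., 2.], [3., 4., 5.]]
```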
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "Substr", + "summary": "Return substrings from `Tensor` of strings.", + "description": "For each string in the input `Tensor`, creates a substring starting at index\n`pos` with a total length of `len`.\n\nIf `len` defines a substring that would extend beyond the length of the input\nstring, or if `len` is negative, then as many characters as possible are used.\n\nA negative `pos` indicates distance within the string backwards from the end.\n\nIf `pos` specifies an index which is out of range for any of the input strings,\nthen an `InvalidArgumentError` is thrown.\n\n`pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on\nOp creation.\n\n*NOTE*: `Substr` supports broadcasting up to two dimensions. More about\nbroadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n---\n\nExamples\n\nUsing scalar `pos` and `len`:\n\n```python\ninput = [b'Hello', b'World']\nposition = 1\nlength = 3\n\noutput = [b'ell', b'orl']\n```\n\nUsing `pos` and `len` with same shape as `input`:\n\n```python\ninput = [[b'ten', b'eleven', b'twelve'],\n [b'thirteen', b'fourteen', b'fifteen'],\n [b'sixteen', b'seventeen', b'eighteen']]\nposition = [[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]\nlength = [[2, 3, 4],\n [4, 3, 2],\n [5, 5, 5]]\n\noutput = [[b'en', b'eve', b'lve'],\n [b'hirt', b'urt', b'te'],\n [b'ixtee', b'vente', b'hteen']]\n```\n\nBroadcasting `pos` and `len` onto `input`:\n\n```\ninput = [[b'ten', b'eleven', b'twelve'],\n [b'thirteen', b'fourteen', b'fifteen'],\n [b'sixteen', b'seventeen', b'eighteen'],\n [b'nineteen', b'twenty', b'twentyone']]\nposition = [1, 2, 3]\nlength = [1, 2, 3]\n\noutput = [[b'e', b'ev', b'lve'],\n [b'h', b'ur', b'tee'],\n [b'i', b've', b'hte'],\n [b'i', b'en', b'nty']]\n```\n\nBroadcasting `input` onto `pos` and `len`:\n\n```\ninput = b'thirteen'\nposition = [1, 5, 7]\nlength = [3, 2, 1]\n\noutput = [b'hir', b'ee', b'n']\n```\n\nRaises:\n\n * `ValueError`: If the first argument cannot be converted to a\n Tensor of `dtype string`.\n * `InvalidArgumentError`: If indices are out of range.\n * `ValueError`: If `pos` and `len` are not the same shape.\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "unit", + "type": "string", + "description": "The unit that is used to create the substring. One of: `\"BYTE\"` (for\ndefining position and length by bytes) or `\"UTF8_CHAR\"` (for the UTF-8\nencoded Unicode code points). The default is `\"BYTE\"`. Results are undefined if\n`unit=UTF8_CHAR` and the `input` strings do not contain structurally valid\nUTF-8. 
Must be one of the following: `BYTE`, `UTF8_CHAR`.", + "default": "BYTE" + } + ], + "inputs": [ + { + "name": "input", + "description": "Tensor of strings", + "type": 7 + }, + { + "name": "pos", + "description": "Scalar defining the position of first character in each substring", + "typeAttr": "T" + }, + { + "name": "len", + "description": "Scalar defining the number of characters to include in each substring", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Tensor of substrings", + "type": 7 + } + ] + }, + { + "name": "Sum", + "summary": "Computes the sum of elements across dimensions of a tensor.", + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "attributes": [ + { + "name": "keep_dims", + "type": "boolean", + "description": "If true, retain reduced dimensions with length 1.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "The tensor to reduce.", + "typeAttr": "T" + }, + { + "name": "reduction_indices", + "description": "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "The reduced tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "SummaryWriter", + "attributes": [ + { + "name": "shared_name", + "type": "string", + "default": "" + }, + { + "name": "container", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "writer", + "type": 20 + } + ] + }, + { + "name": "Svd", + "summary": "Computes the singular value decompositions of one or more matrices.", + "description": "Computes the SVD of each inner matrix in `input` such that\n`input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`\n\n```python\n# a is a tensor containing a batch of matrices.\n# s is a tensor of singular values for each matrix.\n# u is the tensor containing the left singular vectors for each matrix.\n# v is the tensor containing the right singular vectors for each matrix.\ns, u, v = svd(a)\ns, _, _ = svd(a, compute_uv=False)\n```", + "attributes": [ + { + "name": "compute_uv", + "type": "boolean", + "description": "If true, left and right singular vectors will be\ncomputed and returned in `u` and `v`, respectively.\nIf false, `u` and `v` are not set and should never be referenced.", + "default": true + }, + { + "name": "full_matrices", + "type": "boolean", + "description": "If true, compute full-sized `u` and `v`. If false\n(the default), compute only the leading `P` singular vectors.\nIgnored if `compute_uv` is `False`.", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`." 
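To make the `Sum`/`keep_dims` semantics above concrete, a minimal sketch via the standard `tf.reduce_sum` wrapper:

```python
import tensorflow as tf

x = tf.constant([[1, 2, 3], [4, 5, 6]])

# Reducing along axis 1 drops that dimension...
tf.reduce_sum(x, axis=1)                 # shape (2,),   values [6, 15]
# ...unless keepdims retains it with length 1, as the attribute describes.
tf.reduce_sum(x, axis=1, keepdims=True)  # shape (2, 1), values [[6], [15]]
```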
+ } + ], + "inputs": [ + { + "name": "input", + "description": "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "s", + "description": "Singular values. Shape is `[..., P]`.", + "typeAttr": "T" + }, + { + "name": "u", + "description": "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., M, P]`; if `full_matrices` is `True` then shape is\n`[..., M, M]`. Undefined if `compute_uv` is `False`.", + "typeAttr": "T" + }, + { + "name": "v", + "description": "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.\nUndefined if `compute_uv` is false.", + "typeAttr": "T" + } + ] + }, + { + "name": "Switch", + "summary": "Forwards `data` to the output port determined by `pred`.", + "description": "If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,\nthe data goes to `output_false`.\n\nSee also `RefSwitch` and `Merge`.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "data", + "description": "The tensor to be forwarded to the appropriate output.", + "typeAttr": "T" + }, + { + "name": "pred", + "description": "A scalar that specifies which output port will receive data.", + "type": 10 + } + ], + "outputs": [ + { + "name": "output_false", + "description": "If `pred` is false, data will be forwarded to this output.", + "typeAttr": "T" + }, + { + "name": "output_true", + "description": "If `pred` is true, data will be forwarded to this output.", + "typeAttr": "T" + } + ] + }, + { + "name": "SymbolicGradient", + "summary": "Computes the gradient function for function f via backpropagation.", + "attributes": [ + { + "name": "Tin", + "type": "type[]", + "description": "the type list for the input list.", + "minimum": 1 + }, + { + "name": "Tout", + "type": "type[]", + "description": "the type list for the input list.", + "minimum": 1 + }, + { + "name": "f", + "type": "function", + "description": "The function we want to compute the gradient for.\n\nThe function 'f' must be a numerical function which takes N inputs and\nproduces M outputs. Its gradient function 'g', which is computed by\nthis SymbolicGradient op is a function taking N + M inputs and\nproduces N outputs.\n\nI.e. if we have\n (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),\nthen, g is\n (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,\n dL/dy1, dL/dy2, ..., dL/dy_M),\n\nwhere L is a scalar-value function of (x1, x2, ..., xN) (e.g., the\nloss function). dL/dx_i is the partial derivative of L with respect\nto x_i.\n\n(Needs some math expert to say the comment above better.)" + } + ], + "inputs": [ + { + "name": "input", + "description": "a list of input tensors of size N + M;", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "description": "a list of output tensors of size N;", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "SyncDevice", + "summary": "Synchronizes the device this op is run on.", + "description": "Only GPU ops are asynchrous in TensorFlow, and so this only has an effect when\nrun on GPUs. On GPUs, this op synchronizes the GPU's compute stream." 
+ }, + { + "name": "TFRecordDataset", + "summary": "Creates a dataset that emits the records from one or more TFRecord files.", + "attributes": [ + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "filenames", + "description": "A scalar or vector containing the name(s) of the file(s) to be\nread.", + "type": 7 + }, + { + "name": "compression_type", + "description": "A scalar containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\".", + "type": 7 + }, + { + "name": "buffer_size", + "description": "A scalar representing the number of bytes to buffer. A value of\n0 means no buffering will be performed.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "TFRecordDatasetV2", + "summary": "Creates a dataset that emits the records from one or more TFRecord files.", + "attributes": [ + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "filenames", + "description": "A scalar or vector containing the name(s) of the file(s) to be\nread.", + "type": 7 + }, + { + "name": "compression_type", + "description": "A scalar containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\".", + "type": 7 + }, + { + "name": "buffer_size", + "description": "A scalar representing the number of bytes to buffer. A value of\n0 means no buffering will be performed.", + "type": 9 + }, + { + "name": "byte_offsets", + "description": "A scalar or vector containing the number of bytes for each file\nthat will be skipped prior to reading.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "TFRecordReader", + "summary": "A Reader that outputs the records from a TensorFlow Records file.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "default": "" + }, + { + "name": "compression_type", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "reader_handle", + "description": "The handle to reference the Reader.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "TFRecordReaderV2", + "summary": "A Reader that outputs the records from a TensorFlow Records file.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. 
Otherwise, the node name is used instead.", + "default": "" + }, + { + "name": "compression_type", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "reader_handle", + "description": "The handle to reference the Reader.", + "type": 20 + } + ] + }, + { + "name": "TPUAnnotateTensorsWithDynamicShape", + "attributes": [ + { + "name": "T", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "tensors", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "name": "tpu_tensors", + "typeListAttr": "T" + } + ] + }, + { + "name": "TPUCompilationResult", + "summary": "Returns the result of a TPU compilation.", + "description": "This operation returns the result of a TPU compilation as a serialized\nCompilationResultProto, which holds a status and an error message if an error\noccurred during compilation.", + "outputs": [ + { + "name": "output", + "type": 7 + } + ] + }, + { + "name": "TPUCompile", + "summary": "Compiles computations for execution on one or more TPU devices.", + "description": "For the internal use of the distributed TPU compiler.\n\n'num_computations' is the number of computations to be compiled.\n'function' is a function containing the computation to compile.\n'dynamic_shapes' contains dynamic shapes of arguments whose shapes were not\nknown statically at TPUReplication rewrite time.\n'guaranteed_constants' is a list of tensors which have been guaranteed to not\nchange their values during the session lifetime. These contain tensors marked as\nconstant using the GuaranteeConstOp.\n'metadata' is a serialized TPUCompileMetadataProto describing\nthe shapes and types of the inputs to the computation, as well as a mapping onto\nthe TPU pod topology.\nEach 'program' output is a string key that is passed to the _TPUExecute op and\nused to look up the program in the compilation cache.\n'may_modify_variables' indicates whether variables may be modified.", + "attributes": [ + { + "name": "num_computations", + "type": "int64", + "minimum": 0 + }, + { + "name": "function", + "type": "function" + }, + { + "name": "metadata", + "type": "string" + }, + { + "name": "NumDynamicShapes", + "type": "int64", + "minimum": 0 + }, + { + "name": "Tguaranteed_constants", + "type": "type[]", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "dynamic_shapes", + "numberAttr": "NumDynamicShapes", + "type": 9 + }, + { + "name": "guaranteed_constants", + "typeListAttr": "Tguaranteed_constants" + } + ], + "outputs": [ + { + "name": "compilation_status", + "type": 7 + }, + { + "name": "program", + "numberAttr": "num_computations", + "type": 7 + }, + { + "name": "may_modify_variables", + "numberAttr": "num_computations", + "type": 10 + } + ] + }, + { + "name": "TPUCompileSucceededAssert", + "summary": "Asserts that compilation succeeded.", + "description": "This op produces no output and closes the device during failure to ensure all\npending device interactions fail.\n\n'compilation_status' is a serialized CompilationResultProto.", + "inputs": [ + { + "name": "compilation_status", + "type": 7 + } + ] + }, + { + "name": "TPUCopyWithDynamicShape", + "summary": "Op that copies host tensor to device with dynamic shape support.\nFor internal use only.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 0 + }, + { + "name": "T", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "tensors", + "typeListAttr": "T" + }, + { + "name": "unpadded_sizes", + "numberAttr": "N", + "type": 3 + } + ], + "outputs": [ + { + "name": "tpu_tensors", + 
"typeListAttr": "T" + } + ] + }, + { + "name": "TPUEmbeddingActivations", + "summary": "An op enabling differentiation of TPU Embeddings.", + "description": "This op simply returns its first input, which is assumed to have been sliced\nfrom the Tensors returned by TPUEmbeddingDequeueActivations. The presence of\nthis op, and its first argument being a trainable Variable, enables automatic\ndifferentiation of graphs containing embeddings via the TPU Embedding Python\nlibraries.", + "attributes": [ + { + "name": "table_id", + "type": "int64", + "description": "The id of the table in the embedding layer configuration from which\nthese activations were computed.", + "minimum": 0 + }, + { + "name": "lookup_id", + "type": "int64", + "description": "Identifier of the set of embedding indices which produced these\nactivations.", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "embedding_variable", + "description": "A trainable variable, enabling optimizers to find this op.", + "type": 1 + }, + { + "name": "sliced_activations", + "description": "The embedding activations Tensor to return.", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "type": 1 + } + ] + }, + { + "name": "TPUExecute", + "summary": "Op that loads and executes a TPU program on a TPU device.", + "description": "For the internal use of the distributed TPU compiler.", + "attributes": [ + { + "name": "Targs", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Tresults", + "type": "type[]", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "args", + "typeListAttr": "Targs" + }, + { + "name": "key", + "type": 7 + } + ], + "outputs": [ + { + "name": "results", + "typeListAttr": "Tresults" + } + ] + }, + { + "name": "TPUExecuteAndUpdateVariables", + "summary": "Op that executes a program with optional in-place variable updates.", + "description": "It (optionally) reads device variables, loads and executes a TPU program on a\nTPU device, and then (optionally) in-place updates variables using the program\noutputs, as specified in attributes device_var_reads_indices (program input\nindices from directly reading variables) and device_var_updates_indices (program\noutput indices used to update variables, -1 means no-update/read-only). Such\nprogram outputs are consumed by these variables will not appear in the op\noutput. For the internal use of the distributed TPU compiler.", + "attributes": [ + { + "name": "Targs", + "type": "type[]", + "minimum": 0 + }, + { + "name": "Tresults", + "type": "type[]", + "minimum": 0 + }, + { + "name": "device_var_reads_indices", + "type": "int64[]", + "minimum": 0 + }, + { + "name": "device_var_updates_indices", + "type": "int64[]", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "args", + "typeListAttr": "Targs" + }, + { + "name": "key", + "type": 7 + } + ], + "outputs": [ + { + "name": "results", + "typeListAttr": "Tresults" + } + ] + }, + { + "name": "TPUOrdinalSelector", + "summary": "A TPU core selector Op.", + "description": "This Op produces a set of TPU cores (for warm-up) or a single TPU core\n(for regular inference) to execute the TPU program on. 
The output is\nconsumed by TPUPartitionedCall.", + "outputs": [ + { + "name": "device_ordinals", + "description": "A vector of 1 or more TPU cores.", + "type": 3 + } + ] + }, + { + "name": "TPUPartitionedCall", + "summary": "Calls a function placed on a specified TPU device.", + "attributes": [ + { + "name": "Tin", + "type": "type[]", + "description": "The types of the arguments to the function.", + "minimum": 0 + }, + { + "name": "Tout", + "type": "type[]", + "description": "The types of the outputs of the function.", + "minimum": 0 + }, + { + "name": "f", + "type": "function", + "description": "The function to call." + }, + { + "name": "autotuner_thresh", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "args", + "description": "The arguments to the function.", + "typeListAttr": "Tin" + }, + { + "name": "device_ordinal", + "description": "The TPU device ordinal to run the function on.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output of the function call.", + "typeListAttr": "Tout" + } + ] + }, + { + "name": "TPUPartitionedInput", + "summary": "An op that groups a list of partitioned inputs together.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "partition_dim", + "type": "int64", + "description": "An integer describing which dimension is partitioned. -1 means\nthose inputs are replicated.", + "default": 0 + } + ], + "inputs": [ + { + "name": "inputs", + "description": "A list of partitioned inputs which must have the same shape.", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A handle which represents the full shape of partitioned tensors.", + "typeAttr": "T" + } + ] + }, + { + "name": "TPUPartitionedInputV2", + "summary": "An op that groups a list of partitioned inputs together. Supports ND sharding.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "partition_dims", + "type": "int64[]", + "description": "A list of integers describing how each dimension is partitioned. Emptiness\nindicates the inputs are replicated." 
+ }, + { + "name": "is_packed", + "type": "boolean", + "description": "Indicates whether the input is a packed resource.", + "default": false + } + ], + "inputs": [ + { + "name": "inputs", + "description": "A list of partitioned inputs which must have the same shape.", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A handle which represents the full shape of partitioned tensors.", + "typeAttr": "T" + } + ] + }, + { + "name": "TPUPartitionedOutput", + "summary": "An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned", + "description": "outputs outside the XLA computation.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "num_splits", + "type": "int64", + "minimum": 1 + }, + { + "name": "partition_dim", + "type": "int64", + "description": "An integer describles which dimension is partitioned.", + "default": 0 + } + ], + "inputs": [ + { + "name": "inputs", + "description": "A tensor which represents the full shape of partitioned tensors.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of partitioned inputs which must have the same shape.", + "numberAttr": "num_splits", + "typeAttr": "T" + } + ] + }, + { + "name": "TPUPartitionedOutputV2", + "summary": "An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned", + "description": "outputs outside the XLA computation. Supports ND sharding.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "num_splits", + "type": "int64", + "minimum": 1 + }, + { + "name": "partition_dims", + "type": "int64[]", + "description": "A list of integers describing how each dimension is partitioned. Emptiness\nindicates the inputs are replicated." + } + ], + "inputs": [ + { + "name": "inputs", + "description": "A tensor which represents the full shape of partitioned tensors.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of partitioned outputs which have the same shape.", + "numberAttr": "num_splits", + "typeAttr": "T" + } + ] + }, + { + "name": "TPUReplicateMetadata", + "summary": "Metadata indicating how the TPU computation should be replicated.", + "description": "This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.", + "attributes": [ + { + "name": "num_replicas", + "type": "int64", + "description": "Number of replicas of the computation", + "minimum": 0 + }, + { + "name": "num_cores_per_replica", + "type": "int64", + "description": "Number of cores per replica. Used for model parallelism.", + "default": 1 + }, + { + "name": "topology", + "type": "string", + "description": "TopologyProto indicating the topology of the TPU pod slice.", + "default": "" + }, + { + "name": "use_tpu", + "type": "boolean", + "description": "Whether to place the computation on the TPU.", + "default": true + }, + { + "name": "device_assignment", + "type": "int64[]", + "description": "The assignment of devices for the computation.", + "default": [] + }, + { + "name": "computation_shape", + "type": "int64[]", + "description": "DEPRECATED. 
Use num_cores_per_replica instead.", + "default": [] + }, + { + "name": "host_compute_core", + "type": "string[]", + "default": [] + }, + { + "name": "padding_map", + "type": "string[]", + "default": [] + }, + { + "name": "step_marker_location", + "type": "string", + "default": "STEP_MARK_AT_ENTRY" + }, + { + "name": "allow_soft_placement", + "type": "boolean", + "default": false + }, + { + "name": "use_spmd_for_xla_partitioning", + "type": "boolean", + "default": false + }, + { + "name": "tpu_compile_options_proto", + "type": "string", + "default": "" + } + ] + }, + { + "name": "TPUReplicatedInput", + "summary": "Connects N inputs to an N-way replicated TPU computation.", + "description": "This operation holds a replicated input to a `tpu.replicate()` computation subgraph.\nEach replicated input has the same shape and type alongside the output.\n\nFor example:\n```\n%a = \"tf.opA\"()\n%b = \"tf.opB\"()\n%replicated_input = \"tf.TPUReplicatedInput\"(%a, %b)\n%computation = \"tf.Computation\"(%replicated_input)\n```\nThe above computation has a replicated input of two replicas.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "is_mirrored_variable", + "type": "boolean", + "default": false + }, + { + "name": "index", + "type": "int64", + "default": -1 + }, + { + "name": "is_packed", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "inputs", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "TPUReplicatedOutput", + "summary": "Connects N outputs from an N-way replicated TPU computation.", + "description": "This operation holds a replicated output from a `tpu.replicate()` computation subgraph.\nEach replicated output has the same shape and type alongside the input.\n\nFor example:\n```\n%computation = \"tf.Computation\"()\n%replicated_output:2 = \"tf.TPUReplicatedOutput\"(%computation)\n```\nThe above computation has a replicated output of two replicas.", + "attributes": [ + { + "name": "num_replicas", + "type": "int64", + "minimum": 1 + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "outputs", + "numberAttr": "num_replicas", + "typeAttr": "T" + } + ] + }, + { + "name": "TPUReshardVariables", + "summary": "Op that reshards on-device TPU variables to specified state.", + "description": "Op that reshards on-device TPU variables to specified state. Internal use only.\n\nThe sharding state is represented as the key of the compilation that generated\nthe sharding/unsharding programs along with the main program. new_format_key\nspecifies the desired state, and format_state_var is the current state of the\nvariables.", + "attributes": [ + { + "name": "N", + "type": "int64", + "minimum": 0 + } + ], + "inputs": [ + { + "name": "vars", + "numberAttr": "N", + "type": 20 + }, + { + "name": "new_format_key", + "type": 7 + }, + { + "name": "format_state_var", + "type": 20 + } + ] + }, + { + "name": "TPURoundRobin", + "summary": "Round-robin load balancing on TPU cores.", + "description": "A load balancing op that round-robins among TPU cores.\n\nThis op round-robins between the integers in [0, NumTPUCoresVisiblePerHost]. 
It\nis useful for interfacing with TensorFlow ops that take as input a TPU core on\nwhich to execute computations, such as `TPUPartitionedCall`.\n\ndevice_ordinal: An integer in [0, NumTPUCoresVisiblePerHost].", + "outputs": [ + { + "name": "device_ordinal", + "type": 3 + } + ] + }, + { + "name": "TakeDataset", + "summary": "Creates a dataset that contains `count` elements from the `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "count", + "description": "A scalar representing the number of elements from the `input_dataset`\nthat should be taken. A value of `-1` indicates that all of `input_dataset`\nis taken.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "TakeManySparseFromTensorsMap", + "summary": "Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.", + "description": "The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where\n`N` is the minibatch size and the rows correspond to the output handles of\n`AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the\noriginal `SparseTensor` objects that went into the given input ops must all\nmatch. When the final `SparseTensor` is created, it has rank one\nhigher than the ranks of the incoming `SparseTensor` objects\n(they have been concatenated along a new row dimension on the left).\n\nThe output `SparseTensor` object's shape values for all dimensions but the\nfirst are the max across the input `SparseTensor` objects' shape values\nfor the corresponding dimensions. Its first shape value is `N`, the minibatch\nsize.\n\nThe input `SparseTensor` objects' indices are assumed ordered in\nstandard lexicographic order. If this is not the case, after this\nstep run `SparseReorder` to restore index ordering.\n\nFor example, if the handles represent an input, which is a `[2, 3]` matrix\nrepresenting two original `SparseTensor` objects:\n\n```\n index = [ 0]\n [10]\n [20]\n values = [1, 2, 3]\n shape = [50]\n```\n\nand\n\n```\n index = [ 2]\n [10]\n values = [4, 5]\n shape = [30]\n```\n\nthen the final `SparseTensor` will be:\n\n```\n index = [0 0]\n [0 10]\n [0 20]\n [1 2]\n [1 10]\n values = [1, 2, 3, 4, 5]\n shape = [2 50]\n```", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The `dtype` of the `SparseTensor` objects stored in the\n`SparseTensorsMap`." + }, + { + "name": "container", + "type": "string", + "description": "The container name for the `SparseTensorsMap` read by this op.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "The shared name for the `SparseTensorsMap` read by this op.\nIt should not be blank; rather the `shared_name` or unique Operation name\nof the Op that created the original `SparseTensorsMap` should be used.", + "default": "" + } + ], + "inputs": [ + { + "name": "sparse_handles", + "description": "1-D, The `N` serialized `SparseTensor` objects.\nShape: `[N]`.", + "type": 9 + } + ], + "outputs": [ + { + "name": "sparse_indices", + "description": "2-D. The `indices` of the minibatch `SparseTensor`.", + "type": 9 + }, + { + "name": "sparse_values", + "description": "1-D. 
The `values` of the minibatch `SparseTensor`.", + "typeAttr": "dtype" + }, + { + "name": "sparse_shape", + "description": "1-D. The `shape` of the minibatch `SparseTensor`.", + "type": 9 + } + ] + }, + { + "name": "TakeWhileDataset", + "summary": "Creates a dataset that stops iteration when `predicate` is false.", + "description": "The `predicate` function must return a scalar boolean and accept the\nfollowing arguments:\n\n* One tensor for each component of an element of `input_dataset`.\n* One tensor for each value in `other_arguments`.", + "attributes": [ + { + "name": "predicate", + "type": "function", + "description": "A function returning a scalar boolean." + }, + { + "name": "Targuments", + "type": "type[]", + "minimum": 0 + }, + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `predicate`.", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "Tan", + "summary": "Computes tan of x element-wise.", + "description": " Given an input tensor, this function computes tangent of every\n element in the tensor. Input range is `(-inf, inf)` and\n output range is `(-inf, inf)`. If input lies outside the boundary, `nan`\n is returned.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\n tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]\n ```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "Tanh", + "summary": "Computes hyperbolic tangent of `x` element-wise.", + "description": " Given an input tensor, this function computes hyperbolic tangent of every\n element in the tensor. Input range is `[-inf, inf]` and\n output range is `[-1,1]`.\n\n >>> x = tf.constant([-float(\"inf\"), -5, -0.5, 1, 1.2, 2, 3, float(\"inf\")])\n >>> tf.math.tanh(x)\n <tf.Tensor: shape=(8,), dtype=float32, numpy=\n array([-1., -0.99990916, -0.46211717, 0.7615942, 0.8336546, 0.9640276,\n 0.9950547, 1.], dtype=float32)>\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "TanhGrad", + "summary": "Computes the gradient for the tanh of `x` wrt its input.", + "description": "Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`\nis the corresponding input gradient.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`." 
+ } + ], + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "TemporaryVariable", + "summary": "Returns a tensor that may be mutated, but only persists within a single step.", + "description": "This is an experimental op for internal use only and it is possible to use this\nop in unsafe ways. DO NOT USE unless you fully understand the risks.\n\nIt is the caller's responsibility to ensure that 'ref' is eventually passed to a\nmatching 'DestroyTemporaryVariable' op after all other uses have completed.\n\nOutputs a ref to the tensor state so it may be read or modified.\n\n E.g.\n var = state_ops._temporary_variable([1, 2], types.float_)\n var_name = var.op.name\n var = state_ops.assign(var, [[4.0, 5.0]])\n var = state_ops.assign_add(var, [[6.0, 7.0]])\n final = state_ops._destroy_temporary_variable(var, var_name=var_name)", + "attributes": [ + { + "name": "shape", + "type": "shape", + "description": "The shape of the variable tensor." + }, + { + "name": "dtype", + "type": "type", + "description": "The type of elements in the variable tensor." + }, + { + "name": "var_name", + "type": "string", + "description": "Overrides the name used for the temporary variable resource. Default\nvalue is the name of the 'TemporaryVariable' op (which is guaranteed unique).", + "default": "" + } + ], + "outputs": [ + { + "name": "ref", + "description": "A reference to the variable tensor.", + "typeAttr": "dtype", + "isRef": true + } + ] + }, + { + "name": "TensorArray", + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "name": "dynamic_size", + "type": "boolean", + "default": false + }, + { + "name": "clear_after_read", + "type": "boolean", + "default": true + }, + { + "name": "tensor_array_name", + "type": "string", + "default": "" + }, + { + "name": "element_shape", + "type": "shape", + "default": { + "type": "shape", + "value": "?" + } + } + ], + "inputs": [ + { + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "TensorArrayClose", + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "TensorArrayCloseV2", + "summary": "Deprecated. Use TensorArrayCloseV3", + "inputs": [ + { + "name": "handle", + "type": 7 + } + ] + }, + { + "name": "TensorArrayCloseV3", + "summary": "Delete the TensorArray from its resource container.", + "description": "This enables the user to close and release the resource in the middle\nof a step/run.", + "inputs": [ + { + "name": "handle", + "description": "The handle to a TensorArray (output of TensorArray or TensorArrayGrad).", + "type": 20 + } + ] + }, + { + "name": "TensorArrayConcat", + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "name": "element_shape_except0", + "type": "shape", + "default": { + "type": "shape", + "value": "?" + } + } + ], + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + }, + { + "name": "lengths", + "type": 9 + } + ] + }, + { + "name": "TensorArrayConcatV2", + "summary": "Deprecated. Use TensorArrayConcatV3", + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "name": "element_shape_except0", + "type": "shape", + "default": { + "type": "shape", + "value": "?" 
+ } + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + }, + { + "name": "lengths", + "type": 9 + } + ] + }, + { + "name": "TensorArrayConcatV3", + "summary": "Concat the elements from the TensorArray into value `value`.", + "description": "Takes `T` elements of shapes\n\n ```\n (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)\n ```\n\nand concatenates them into a Tensor of shape:\n\n ```\n (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)\n ```\n\nAll elements must have the same shape (excepting the first dimension).", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the elem that is returned." + }, + { + "name": "element_shape_except0", + "type": "shape", + "description": "The expected shape of an element, if known,\nexcluding the first dimension. Used to validate the shapes of\nTensorArray elements. If this shape is not fully specified, concatenating\nzero-size TensorArrays is an error.", + "default": { + "type": "shape", + "value": "?" + } + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a TensorArray.", + "type": 20 + }, + { + "name": "flow_in", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "description": "All of the elements in the TensorArray, concatenated along the first\naxis.", + "typeAttr": "dtype" + }, + { + "name": "lengths", + "description": "A vector of the row sizes of the original T elements in the\nvalue output. In the example above, this would be the values:\n`(n1, n2, ..., n(T-1))`.", + "type": 9 + } + ] + }, + { + "name": "TensorArrayGather", + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "name": "element_shape", + "type": "shape", + "default": { + "type": "shape", + "value": "?" + } + } + ], + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ] + }, + { + "name": "TensorArrayGatherV2", + "summary": "Deprecated. Use TensorArrayGatherV3", + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "name": "element_shape", + "type": "shape", + "default": { + "type": "shape", + "value": "?" + } + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ] + }, + { + "name": "TensorArrayGatherV3", + "summary": "Gather specific elements from the TensorArray into output `value`.", + "description": "All elements selected by `indices` must have the same shape.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the elem that is returned." + }, + { + "name": "element_shape", + "type": "shape", + "description": "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error.", + "default": { + "type": "shape", + "value": "?" 
+ } + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a TensorArray.", + "type": 20 + }, + { + "name": "indices", + "description": "The locations in the TensorArray from which to read tensor elements.", + "type": 3 + }, + { + "name": "flow_in", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "description": "All of the elements in the TensorArray, concatenated along a new\naxis (the new dimension 0).", + "typeAttr": "dtype" + } + ] + }, + { + "name": "TensorArrayGrad", + "attributes": [ + { + "name": "source", + "type": "string" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "grad_handle", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "TensorArrayGradV2", + "summary": "Deprecated. Use TensorArrayGradV3", + "attributes": [ + { + "name": "source", + "type": "string" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "grad_handle", + "type": 7 + } + ] + }, + { + "name": "TensorArrayGradV3", + "summary": "Creates a TensorArray for storing the gradients of values in the given handle.", + "description": "If the given TensorArray gradient already exists, returns a reference to it.\n\nLocks the size of the original TensorArray by disabling its dynamic size flag.\n\n**A note about the input flow_in:**\n\nThe handle flow_in forces the execution of the gradient lookup to occur\nonly after certain other operations have occurred. For example, when\nthe forward TensorArray is dynamically sized, writes to this TensorArray\nmay resize the object. The gradient TensorArray is statically sized based\non the size of the forward TensorArray when this operation executes.\nFurthermore, the size of the forward TensorArray is frozen by this call.\nAs a result, the flow is used to ensure that the call to generate the gradient\nTensorArray only happens after all writes are executed.\n\nIn the case of dynamically sized TensorArrays, gradient computation should\nonly be performed on read operations that have themselves been chained via\nflow to occur only after all writes have executed. That way the final size\nof the forward TensorArray is known when this operation is called.\n\n**A note about the source attribute:**\n\nTensorArray gradient calls use an accumulator TensorArray object. If\nmultiple gradients are calculated and run in the same session, the multiple\ngradient nodes may accidentally flow through the same accumulator TensorArray.\nThis double counts and generally breaks the TensorArray gradient flow.\n\nThe solution is to identify which gradient call this particular\nTensorArray gradient is being called in. This is performed by identifying\na unique string (e.g. \"gradients\", \"gradients_1\", ...) from the input\ngradient Tensor's name. This string is used as a suffix when creating\nthe TensorArray gradient object here (the attribute `source`).\n\nThe attribute `source` is added as a suffix to the forward TensorArray's\nname when performing the creation / lookup, so that each separate gradient\ncalculation gets its own TensorArray accumulator.", + "attributes": [ + { + "name": "source", + "type": "string", + "description": "The gradient source string, used to decide which gradient TensorArray\nto return." 
+ } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to the forward TensorArray.", + "type": 20 + }, + { + "name": "flow_in", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + } + ], + "outputs": [ + { + "name": "grad_handle", + "type": 20 + }, + { + "name": "flow_out", + "type": 1 + } + ] + }, + { + "name": "TensorArrayGradWithShape", + "summary": "Creates a TensorArray for storing multiple gradients of values in the given handle.", + "description": "Similar to TensorArrayGradV3. However it creates an accumulator with an\nexpanded shape compared to the input TensorArray whose gradient is being\ncomputed. This enables multiple gradients for the same TensorArray to be\ncalculated using the same accumulator.", + "attributes": [ + { + "name": "source", + "type": "string", + "description": "The gradient source string, used to decide which gradient TensorArray\nto return." + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to the forward TensorArray.", + "type": 20 + }, + { + "name": "flow_in", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + }, + { + "name": "shape_to_prepend", + "description": "An int32 vector representing a shape. Elements in the gradient accumulator will\nhave shape which is this shape_to_prepend value concatenated with shape of the\nelements in the TensorArray corresponding to the input handle.", + "type": 3 + } + ], + "outputs": [ + { + "name": "grad_handle", + "type": 20 + }, + { + "name": "flow_out", + "type": 1 + } + ] + }, + { + "name": "TensorArrayPack", + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "name": "element_shape", + "type": "shape", + "default": { + "type": "shape", + "value": "?" + } + } + ], + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ] + }, + { + "name": "TensorArrayRead", + "attributes": [ + { + "name": "dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + }, + { + "name": "index", + "type": 3 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ] + }, + { + "name": "TensorArrayReadV2", + "summary": "Deprecated. Use TensorArrayReadV3", + "attributes": [ + { + "name": "dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "index", + "type": 3 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ] + }, + { + "name": "TensorArrayReadV3", + "summary": "Read an element from the TensorArray into output `value`.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the elem that is returned." 
+ } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a TensorArray.", + "type": 20 + }, + { + "name": "index", + "type": 3 + }, + { + "name": "flow_in", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "description": "The tensor that is read from the TensorArray.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "TensorArrayScatter", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ] + }, + { + "name": "TensorArrayScatterV2", + "summary": "Deprecated. Use TensorArrayScatterV3", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ] + }, + { + "name": "TensorArrayScatterV3", + "summary": "Scatter the data from the input value into specific TensorArray elements.", + "description": "`indices` must be a vector, its length must match the first dim of `value`.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a TensorArray.", + "type": 20 + }, + { + "name": "indices", + "description": "The locations at which to write the tensor elements.", + "type": 3 + }, + { + "name": "value", + "description": "The concatenated tensor to write to the TensorArray.", + "typeAttr": "T" + }, + { + "name": "flow_in", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + } + ] + }, + { + "name": "TensorArraySize", + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ] + }, + { + "name": "TensorArraySizeV2", + "summary": "Deprecated. Use TensorArraySizeV3", + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ] + }, + { + "name": "TensorArraySizeV3", + "summary": "Get the current size of the TensorArray.", + "inputs": [ + { + "name": "handle", + "description": "The handle to a TensorArray (output of TensorArray or TensorArrayGrad).", + "type": 20 + }, + { + "name": "flow_in", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + } + ], + "outputs": [ + { + "name": "size", + "description": "The current size of the TensorArray.", + "type": 3 + } + ] + }, + { + "name": "TensorArraySplit", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "lengths", + "type": 9 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ] + }, + { + "name": "TensorArraySplitV2", + "summary": "Deprecated. 
Use TensorArraySplitV3", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "lengths", + "type": 9 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ] + }, + { + "name": "TensorArraySplitV3", + "summary": "Split the data from the input value into TensorArray elements.", + "description": "Assuming that `lengths` takes on values\n\n ```\n (n0, n1, ..., n(T-1))\n ```\n\nand that `value` has shape\n\n ```\n (n0 + n1 + ... + n(T-1) x d0 x d1 x ...),\n ```\n\nthis splits values into a TensorArray with T tensors.\n\nTensorArray index t will be the subtensor of values with starting position\n\n ```\n (n0 + n1 + ... + n(t-1), 0, 0, ...)\n ```\n\nand having size\n\n ```\n nt x d0 x d1 x ...\n ```", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a TensorArray.", + "type": 20 + }, + { + "name": "value", + "description": "The concatenated tensor to write to the TensorArray.", + "typeAttr": "T" + }, + { + "name": "lengths", + "description": "The vector of lengths, how to split the rows of value into the\nTensorArray.", + "type": 9 + }, + { + "name": "flow_in", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + } + ] + }, + { + "name": "TensorArrayUnpack", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ] + }, + { + "name": "TensorArrayV2", + "summary": "Deprecated. Use TensorArrayV3", + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "name": "element_shape", + "type": "shape", + "default": { + "type": "shape", + "value": "?" + } + }, + { + "name": "dynamic_size", + "type": "boolean", + "default": false + }, + { + "name": "clear_after_read", + "type": "boolean", + "default": true + }, + { + "name": "tensor_array_name", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "name": "handle", + "type": 7 + } + ] + }, + { + "name": "TensorArrayV3", + "summary": "An array of Tensors of given size.", + "description": "Write data via Write and read via Read or Pack.", + "attributes": [ + { + "name": "dtype", + "type": "type", + "description": "The type of the elements on the tensor_array." + }, + { + "name": "element_shape", + "type": "shape", + "description": "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error.", + "default": { + "type": "shape", + "value": "?" + } + }, + { + "name": "dynamic_size", + "type": "boolean", + "description": "A boolean that determines whether writes to the TensorArray\nare allowed to grow the size. By default, this is not allowed.", + "default": false + }, + { + "name": "clear_after_read", + "type": "boolean", + "description": "If true (default), Tensors in the TensorArray are cleared\nafter being read. 
This disables multiple read semantics but allows early\nrelease of memory.", + "default": true + }, + { + "name": "identical_element_shapes", + "type": "boolean", + "description": "If true (default is false), then all\nelements in the TensorArray will be expected to have identical shapes.\nThis allows certain behaviors, like dynamically checking for\nconsistent shapes on write, and being able to fill in properly\nshaped zero tensors on stack -- even if the element_shape attribute\nis not fully defined.", + "default": false + }, + { + "name": "tensor_array_name", + "type": "string", + "description": "Overrides the name used for the temporary tensor_array\nresource. Default value is the name of the 'TensorArray' op (which\nis guaranteed unique).", + "default": "" + } + ], + "inputs": [ + { + "name": "size", + "description": "The size of the array.", + "type": 3 + } + ], + "outputs": [ + { + "name": "handle", + "description": "The handle to the TensorArray.", + "type": 20 + }, + { + "name": "flow", + "description": "A scalar used to control gradient flow.", + "type": 1 + } + ] + }, + { + "name": "TensorArrayWrite", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7, + "isRef": true + }, + { + "name": "index", + "type": 3 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ] + }, + { + "name": "TensorArrayWriteV2", + "summary": "Deprecated. Use TensorArrayWriteV3", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "index", + "type": 3 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ] + }, + { + "name": "TensorArrayWriteV3", + "summary": "Push an element onto the tensor_array.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "description": "The handle to a TensorArray.", + "type": 20 + }, + { + "name": "index", + "description": "The position to write to inside the TensorArray.", + "type": 3 + }, + { + "name": "value", + "description": "The tensor to write to the TensorArray.", + "typeAttr": "T" + }, + { + "name": "flow_in", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "description": "A float scalar that enforces proper chaining of operations.", + "type": 1 + } + ] + }, + { + "name": "TensorDataset", + "summary": "Creates a dataset that emits `components` as a tuple of tensors once.", + "attributes": [ + { + "name": "Toutput_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "components", + "typeListAttr": "Toutput_types" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "TensorListConcat", + "summary": "Concats all tensors in the list along the 0th dimension.", + "description": "Requires that all tensors have the same shape except the first dimension.\n\ninput_handle: The input list.\ntensor: The concated result.\nlengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.\n", + "attributes": [ + { + "name": "element_dtype", + "type": 
"type" + }, + { + "name": "element_shape", + "type": "shape", + "default": { + "type": "shape", + "value": "?" + } + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "lengths", + "type": 9 + } + ] + }, + { + "name": "TensorListConcatLists", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_a", + "type": 21 + }, + { + "name": "input_b", + "type": 21 + } + ], + "outputs": [ + { + "name": "output", + "type": 21 + } + ] + }, + { + "name": "TensorListConcatV2", + "summary": "Concats all tensors in the list along the 0th dimension.", + "description": "Requires that all tensors have the same shape except the first dimension.\n\ninput_handle: The input list.\nelement_shape: The shape of the uninitialized elements in the list. If the first\n dimension is not -1, it is assumed that all list elements have the same\n leading dim.\nleading_dims: The list of leading dims of uninitialized list elements. Used if\n the leading dim of input_handle.element_shape or the element_shape input arg\n is not already set.\ntensor: The concated result.\nlengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.\n", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "name": "shape_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "element_shape", + "typeAttr": "shape_type" + }, + { + "name": "leading_dims", + "type": 9 + } + ], + "outputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "lengths", + "type": 9 + } + ] + }, + { + "name": "TensorListElementShape", + "summary": "The shape of the elements of the given list, as a tensor.", + "description": " input_handle: the list\n element_shape: the shape of elements of the list", + "attributes": [ + { + "name": "shape_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "element_shape", + "typeAttr": "shape_type" + } + ] + }, + { + "name": "TensorListFromTensor", + "summary": "Creates a TensorList which, when stacked, has the value of `tensor`.", + "description": "Each tensor in the result list corresponds to one row of the input tensor.\n\ntensor: The input tensor.\noutput_handle: The list.", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "name": "shape_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
+ } + ], + "inputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "element_shape", + "typeAttr": "shape_type" + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "TensorListGather", + "summary": "Creates a Tensor by indexing into the TensorList.", + "description": "Each row in the produced Tensor corresponds to the element in the TensorList\nspecified by the given index (see `tf.gather`).\n\ninput_handle: The input tensor list.\nindices: The indices used to index into the list.\nvalues: The tensor.", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "element_shape", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeAttr": "element_dtype" + } + ] + }, + { + "name": "TensorListGetItem", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "index", + "type": 3 + }, + { + "name": "element_shape", + "type": 3 + } + ], + "outputs": [ + { + "name": "item", + "typeAttr": "element_dtype" + } + ] + }, + { + "name": "TensorListLength", + "summary": "Returns the number of tensors in the input tensor list.", + "description": "input_handle: the input list\nlength: the number of tensors in the list", + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "length", + "type": 3 + } + ] + }, + { + "name": "TensorListPopBack", + "summary": "Returns the last element of the input list as well as a list with all but that element.", + "description": "Fails if the list is empty.\n\ninput_handle: the input list\ntensor: the withdrawn last element of the list\nelement_dtype: the type of elements in the list\nelement_shape: the shape of the output tensor", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "element_shape", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + }, + { + "name": "tensor", + "typeAttr": "element_dtype" + } + ] + }, + { + "name": "TensorListPushBack", + "summary": "Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`.", + "description": "tensor: The tensor to put on the list.\ninput_handle: The old list.\noutput_handle: A list with the elements of the old list followed by tensor.\nelement_dtype: the type of elements in the list.\nelement_shape: a shape compatible with that of elements in the list.", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "tensor", + "typeAttr": "element_dtype" + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "TensorListPushBackBatch", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handles", + "type": 21 + }, + { + "name": "tensor", + "typeAttr": "element_dtype" + } + ], + "outputs": [ + { + "name": "output_handles", + "type": 21 + } + ] + }, + { + "name": "TensorListReserve", + "summary": "List of the given size with empty elements.", + "description": "element_shape: the shape of the future elements of the list\nnum_elements: the number of elements to reserve\nhandle: the output 
list\nelement_dtype: the desired type of elements in the list.", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "name": "shape_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "element_shape", + "typeAttr": "shape_type" + }, + { + "name": "num_elements", + "type": 3 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "TensorListResize", + "summary": "Resizes the list.", + "description": "\ninput_handle: the input list\nsize: size of the output list\n", + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "TensorListScatter", + "summary": "Creates a TensorList by indexing into a Tensor.", + "description": "Each member of the TensorList corresponds to one row of the input tensor,\nspecified by the given index (see `tf.gather`).\n\ntensor: The input tensor.\nindices: The indices used to index into the list.\nelement_shape: The shape of the elements in the list (can be less specified than\n the shape of the tensor).\noutput_handle: The TensorList.", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "name": "shape_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "element_shape", + "typeAttr": "shape_type" + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "TensorListScatterIntoExistingList", + "summary": "Scatters tensor at indices in an input list.", + "description": "Each member of the TensorList corresponds to one row of the input tensor,\nspecified by the given index (see `tf.gather`).\n\ninput_handle: The list to scatter into.\ntensor: The input tensor.\nindices: The indices used to index into the list.\noutput_handle: The TensorList.", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "TensorListScatterV2", + "summary": "Creates a TensorList by indexing into a Tensor.", + "description": "Each member of the TensorList corresponds to one row of the input tensor,\nspecified by the given index (see `tf.gather`).\n\ntensor: The input tensor.\nindices: The indices used to index into the list.\nelement_shape: The shape of the elements in the list (can be less specified than\n the shape of the tensor).\nnum_elements: The size of the output list. Must be large enough to accommodate\n the largest index in indices. If -1, the list is just large enough to include\n the largest index in indices.\noutput_handle: The TensorList.", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "name": "shape_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
+ } + ], + "inputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "element_shape", + "typeAttr": "shape_type" + }, + { + "name": "num_elements", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "TensorListSetItem", + "summary": "Sets the index-th position of the list to contain the given tensor.", + "description": "input_handle: the list\nindex: the position in the list to which the tensor will be assigned\nitem: the element to be assigned to that position\noutput_handle: the new list, with the element in the proper position\n", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "name": "resize_if_index_out_of_bounds", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "index", + "type": 3 + }, + { + "name": "item", + "typeAttr": "element_dtype" + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "TensorListSplit", + "summary": "Splits a tensor into a list.", + "description": "list[i] corresponds to lengths[i] tensors from the input tensor.\nThe tensor must have rank at least 1 and contain exactly sum(lengths) elements.\n\ntensor: The input tensor.\nelement_shape: A shape compatible with that of elements in the tensor.\nlengths: Vector of sizes of the 0th dimension of tensors in the list.\noutput_handle: The list.", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "name": "shape_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "element_shape", + "typeAttr": "shape_type" + }, + { + "name": "lengths", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "TensorListStack", + "summary": "Stacks all tensors in the list.", + "description": "Requires that all tensors have the same shape.\n\ninput_handle: the input list\ntensor: the gathered result\nnum_elements: optional. 
If not -1, the number of elements in the list.\n", + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "name": "num_elements", + "type": "int64", + "default": -1 + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "element_shape", + "type": 3 + } + ], + "outputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + } + ] + }, + { + "name": "TensorMapErase", + "summary": "Returns a tensor map with item from given key erased.", + "description": "input_handle: the original map\noutput_handle: the map with value from given key removed\nkey: the key of the value to be erased", + "attributes": [ + { + "name": "key_dtype", + "type": "type" + }, + { + "name": "value_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "key", + "typeAttr": "key_dtype" + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "TensorMapHasKey", + "summary": "Returns whether the given key exists in the map.", + "description": "input_handle: the input map\nkey: the key to check\nhas_key: whether the key is already in the map or not", + "attributes": [ + { + "name": "key_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "key", + "typeAttr": "key_dtype" + } + ], + "outputs": [ + { + "name": "has_key", + "type": 10 + } + ] + }, + { + "name": "TensorMapInsert", + "summary": "Returns a map that is the 'input_handle' with the given key-value pair inserted.", + "description": "input_handle: the original map\noutput_handle: the map with key and value inserted\nkey: the key to be inserted\nvalue: the value to be inserted", + "attributes": [ + { + "name": "key_dtype", + "type": "type" + }, + { + "name": "value_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "key", + "typeAttr": "key_dtype" + }, + { + "name": "value", + "typeAttr": "value_dtype" + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "TensorMapLookup", + "summary": "Returns the value from a given key in a tensor map.", + "description": "input_handle: the input map\nkey: the key to be looked up\nvalue: the value found from the given key", + "attributes": [ + { + "name": "key_dtype", + "type": "type" + }, + { + "name": "value_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "key", + "typeAttr": "key_dtype" + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "value_dtype" + } + ] + }, + { + "name": "TensorMapSize", + "summary": "Returns the number of tensors in the input tensor map.", + "description": "input_handle: the input map\nsize: the number of tensors in the map", + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ] + }, + { + "name": "TensorMapStackKeys", + "summary": "Returns a Tensor stack of all keys in a tensor map.", + "description": "input_handle: the input map\nkeys: the returned Tensor of all keys in the map", + "attributes": [ + { + "name": "key_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "keys", + "typeAttr": "key_dtype" + } + ] + }, + { + "name": "TensorScatterAdd", + "summary": "Adds sparse `updates` to an existing tensor according to `indices`.", + "description": "This operation creates a new tensor by adding 
sparse `updates` to the passed\nin `tensor`.\nThis operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the\nupdates are added onto an existing tensor (as opposed to a variable). If the\nmemory for the existing tensor cannot be re-used, a copy is made and updated.\n\n`indices` is an integer tensor containing indices into a new tensor of shape\n`tensor.shape`. The last dimension of `indices` can be at most the rank of\n`tensor.shape`:\n\n```\nindices.shape[-1] <= tensor.shape.rank\n```\n\nThe last dimension of `indices` corresponds to indices into elements\n(if `indices.shape[-1] = tensor.shape.rank`) or slices\n(if `indices.shape[-1] < tensor.shape.rank`) along dimension\n`indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape\n\n```\nindices.shape[:-1] + tensor.shape[indices.shape[-1]:]\n```\n\nThe simplest form of `tensor_scatter_nd_add` is to add individual elements to a\ntensor by index. For example, say we want to add 4 elements in a rank-1\ntensor with 8 elements.\n\nIn Python, this scatter add operation would look like this:\n\n>>> indices = tf.constant([[4], [3], [1], [7]])\n>>> updates = tf.constant([9, 10, 11, 12])\n>>> tensor = tf.ones([8], dtype=tf.int32)\n>>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates)\n>>> updated\n<tf.Tensor: shape=(8,), dtype=int32, numpy=array([ 1, 12, 1, 11, 10, 1, 1, 13], dtype=int32)>\n\nWe can also insert entire slices of a higher rank tensor all at once. For\nexample, say we want to insert two slices in the first dimension of a\nrank-3 tensor with two matrices of new values.\n\nIn Python, this scatter add operation would look like this:\n\n>>> indices = tf.constant([[0], [2]])\n>>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n... [7, 7, 7, 7], [8, 8, 8, 8]],\n... [[5, 5, 5, 5], [6, 6, 6, 6],\n... [7, 7, 7, 7], [8, 8, 8, 8]]])\n>>> tensor = tf.ones([4, 4, 4], dtype=tf.int32)\n>>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates)\n>>> updated\n<tf.Tensor: shape=(4, 4, 4), dtype=int32, numpy=\narray([[[6, 6, 6, 6],\n [7, 7, 7, 7],\n [8, 8, 8, 8],\n [9, 9, 9, 9]],\n [[1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1]],\n [[6, 6, 6, 6],\n [7, 7, 7, 7],\n [8, 8, 8, 8],\n [9, 9, 9, 9]],\n [[1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1]]], dtype=int32)>\n\nNote: on CPU, if an out of bound index is found, an error is returned.\nOn GPU, if an out of bound index is found, the index is ignored.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "tensor", + "description": "Tensor to copy/update.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "Index tensor.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "Updates to scatter into output.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A new tensor copied from tensor and updates added according to the indices.", + "typeAttr": "T" + } + ] + }, + { + "name": "TensorScatterMax", + "summary": "Apply a sparse update to a tensor taking the element-wise maximum.", + "description": "Returns a new tensor copied from `tensor` whose values are element-wise maximum between\ntensor and updates according to the indices.\n\n>>> tensor = [0, 0, 0, 0, 0, 0, 0, 0]\n>>> indices = [[1], [4], [5]]\n>>> updates = [1, -1, 1]\n>>> tf.tensor_scatter_nd_max(tensor, indices, updates).numpy()\narray([0, 1, 0, 0, 0, 1, 0, 0], dtype=int32)\n\nRefer to `tf.tensor_scatter_nd_update` for more details.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
+ } + ], + "inputs": [ + { + "name": "tensor", + "description": "Tensor to update.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "Index tensor.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "Updates to scatter into output.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A new tensor copied from tensor whose values are element-wise maximum between tensor and updates according to the indices.", + "typeAttr": "T" + } + ] + }, + { + "name": "TensorScatterMin", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "tensor", + "description": "Tensor to update.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "Index tensor.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "Updates to scatter into output.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A new tensor copied from tensor whose values are element-wise minimum between tensor and updates according to the indices.", + "typeAttr": "T" + } + ] + }, + { + "name": "TensorScatterSub", + "summary": "Subtracts sparse `updates` from an existing tensor according to `indices`.", + "description": "This operation creates a new tensor by subtracting sparse `updates` from the\npassed in `tensor`.\nThis operation is very similar to `tf.scatter_nd_sub`, except that the updates\nare subtracted from an existing tensor (as opposed to a variable). If the memory\nfor the existing tensor cannot be re-used, a copy is made and updated.\n\n`indices` is an integer tensor containing indices into a new tensor of shape\n`shape`. The last dimension of `indices` can be at most the rank of `shape`:\n\n indices.shape[-1] <= shape.rank\n\nThe last dimension of `indices` corresponds to indices into elements\n(if `indices.shape[-1] = shape.rank`) or slices\n(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of\n`shape`. `updates` is a tensor with shape\n\n indices.shape[:-1] + shape[indices.shape[-1]:]\n\nThe simplest form of tensor_scatter_sub is to subtract individual elements\nfrom a tensor by index. For example, say we want to insert 4 scattered elements\nin a rank-1 tensor with 8 elements.\n\nIn Python, this scatter subtract operation would look like this:\n\n```python\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n tensor = tf.ones([8], dtype=tf.int32)\n updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)\n print(updated)\n```\n\nThe resulting tensor would look like this:\n\n [1, -10, 1, -9, -8, 1, 1, -11]\n\nWe can also, insert entire slices of a higher rank tensor all at once. 
For\nexample, say we want to insert two slices in the first dimension of a\nrank-3 tensor with two matrices of new values.\n\nIn Python, this scatter subtract operation would look like this:\n\n```python\n indices = tf.constant([[0], [2]])\n updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]],\n [[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]]])\n tensor = tf.ones([4, 4, 4], dtype=tf.int32)\n updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)\n print(updated)\n```\n\nThe resulting tensor would look like this:\n\n [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],\n [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],\n [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],\n [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]\n\nNote that on CPU, if an out of bound index is found, an error is returned.\nOn GPU, if an out of bound index is found, the index is ignored.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + } + ], + "inputs": [ + { + "name": "tensor", + "description": "Tensor to copy/update.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "Index tensor.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "Updates to scatter into output.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A new tensor copied from tensor and updates subtracted according to the indices.", + "typeAttr": "T" + } + ] + }, + { + "name": "TensorScatterUpdate", + "summary": "Scatter `updates` into an existing tensor according to `indices`.", + "description": "This operation creates a new tensor by applying sparse `updates` to the passed\nin `tensor`.\nThis operation is very similar to `tf.scatter_nd`, except that the updates are\nscattered onto an existing tensor (as opposed to a zero-tensor). If the memory\nfor the existing tensor cannot be re-used, a copy is made and updated.\n\nIf `indices` contains duplicates, then we pick the last update for the index.\n\nIf an out of bound index is found on CPU, an error is returned.\n\n**WARNING**: There are some GPU specific semantics for this operation.\n- If an out of bound index is found, the index is ignored.\n- The order in which updates are applied is nondeterministic, so the output\nwill be nondeterministic if `indices` contains duplicates.\n\n`indices` is an integer tensor containing indices into a new tensor of shape\n`shape`.\n\n* `indices` must have at least 2 axes: `(num_updates, index_depth)`.\n* The last axis of `indices` is how deep to index into `tensor` so this index\n depth must be less than the rank of `tensor`: `indices.shape[-1] <= tensor.ndim`\n\nif `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements.\nif `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input\n`tensor`.\n\nEach `update` has a rank of `tensor.rank - indices.shape[-1]`.\nThe overall shape of `updates` is:\n\n```\nindices.shape[:-1] + tensor.shape[indices.shape[-1]:]\n```\n\nFor usage examples see the python [tf.tensor_scatter_nd_update](\nhttps://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function\n", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int16`, `int32`, `int64`, `uint16`." 
+ } + ], + "inputs": [ + { + "name": "tensor", + "description": "Tensor to copy/update.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "Index tensor.", + "typeAttr": "Tindices" + }, + { + "name": "updates", + "description": "Updates to scatter into output.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A new tensor with the given shape and updates applied according\nto the indices.", + "typeAttr": "T" + } + ] + }, + { + "name": "TensorSliceDataset", + "summary": "Creates a dataset that emits each dim-0 slice of `components` once.", + "attributes": [ + { + "name": "Toutput_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "is_files", + "type": "boolean", + "default": false + }, + { + "name": "metadata", + "type": "string", + "default": "" + }, + { + "name": "replicate_on_split", + "type": "boolean", + "default": false + } + ], + "inputs": [ + { + "name": "components", + "typeListAttr": "Toutput_types" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "TensorStridedSliceUpdate", + "summary": "Assign `value` to the sliced l-value reference of `input`.", + "description": "The values of `value` are assigned to the positions in the tensor `input` that\nare selected by the slice parameters. The slice parameters `begin` `end`\n`strides` etc. work exactly as in `StridedSlice`.\n\nNOTE this op currently does not support broadcasting and so `value`'s shape\nmust be exactly the shape produced by the slice of `input`.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Index", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "begin_mask", + "type": "int64", + "default": 0 + }, + { + "name": "end_mask", + "type": "int64", + "default": 0 + }, + { + "name": "ellipsis_mask", + "type": "int64", + "default": 0 + }, + { + "name": "new_axis_mask", + "type": "int64", + "default": 0 + }, + { + "name": "shrink_axis_mask", + "type": "int64", + "default": 0 + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "begin", + "typeAttr": "Index" + }, + { + "name": "end", + "typeAttr": "Index" + }, + { + "name": "strides", + "typeAttr": "Index" + }, + { + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "TensorSummary", + "summary": "Outputs a `Summary` protocol buffer with a tensor.", + "description": "This op is being phased out in favor of TensorSummaryV2, which lets callers pass\na tag as well as a serialized SummaryMetadata proto string that contains\nplugin-specific data. 
We will keep this op to maintain backwards compatibility.", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "description", + "type": "string", + "description": "A json-encoded SummaryDescription proto.", + "default": "" + }, + { + "name": "labels", + "type": "string[]", + "description": "An unused list of strings.", + "default": [] + }, + { + "name": "display_name", + "type": "string", + "description": "An unused string.", + "default": "" + } + ], + "inputs": [ + { + "name": "tensor", + "description": "A tensor to serialize.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "summary", + "type": 7 + } + ] + }, + { + "name": "TensorSummaryV2", + "summary": "Outputs a `Summary` protocol buffer with a tensor and per-plugin data.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "tag", + "description": "A string attached to this summary. Used for organization in TensorBoard.", + "type": 7 + }, + { + "name": "tensor", + "description": "A tensor to serialize.", + "typeAttr": "T" + }, + { + "name": "serialized_summary_metadata", + "description": "A serialized SummaryMetadata proto. Contains plugin\ndata.", + "type": 7 + } + ], + "outputs": [ + { + "name": "summary", + "type": 7 + } + ] + }, + { + "name": "TextLineDataset", + "summary": "Creates a dataset that emits the lines of one or more text files.", + "attributes": [ + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "filenames", + "description": "A scalar or a vector containing the name(s) of the file(s) to be\nread.", + "type": 7 + }, + { + "name": "compression_type", + "description": "A scalar containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\".", + "type": 7 + }, + { + "name": "buffer_size", + "description": "A scalar containing the number of bytes to buffer.", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "TextLineReader", + "summary": "A Reader that outputs the lines of a file delimited by '\\n'.", + "attributes": [ + { + "name": "skip_header_lines", + "type": "int64", + "description": "Number of lines to skip from the beginning of every file.", + "default": 0 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "default": "" + } + ], + "outputs": [ + { + "name": "reader_handle", + "description": "The handle to reference the Reader.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "TextLineReaderV2", + "summary": "A Reader that outputs the lines of a file delimited by '\\n'.", + "attributes": [ + { + "name": "skip_header_lines", + "type": "int64", + "description": "Number of lines to skip from the beginning of every file.", + "default": 0 + }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. 
Otherwise, the node name is used instead.", + "default": "" + } + ], + "outputs": [ + { + "name": "reader_handle", + "description": "The handle to reference the Reader.", + "type": 20 + } + ] + }, + { + "name": "ThreadPoolDataset", + "summary": "Creates a dataset that uses a custom thread pool to compute `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "thread_pool", + "description": "A resource produced by the ThreadPoolHandle op.", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "ThreadPoolHandle", + "summary": "Creates a custom thread pool resource.", + "attributes": [ + { + "name": "num_threads", + "type": "int64", + "description": "The number of threads in the thread pool." + }, + { + "name": "max_intra_op_parallelism", + "type": "int64", + "description": "The maximum degree of parallelism to use within operations that execute on this\nthreadpool.", + "default": 1 + }, + { + "name": "display_name", + "type": "string", + "description": "A human-readable name for the threads that may be visible in some\nvisualizations." + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "handle", + "description": "A resource that can be consumed by one or more ExperimentalThreadPoolDataset\nops.", + "type": 20 + } + ] + }, + { + "name": "ThreadUnsafeUnigramCandidateSampler", + "summary": "Generates labels for candidate sampling with a learned unigram distribution.", + "description": "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.", + "attributes": [ + { + "name": "num_true", + "type": "int64", + "description": "Number of true labels per context.", + "minimum": 1 + }, + { + "name": "num_sampled", + "type": "int64", + "description": "Number of candidates to randomly sample.", + "minimum": 1 + }, + { + "name": "unique", + "type": "boolean", + "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." + }, + { + "name": "range_max", + "type": "int64", + "description": "The sampler will sample integers from the interval [0, range_max).", + "minimum": 1 + }, + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed.
Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + } + ], + "inputs": [ + { + "name": "true_classes", + "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.", + "type": 9 + } + ], + "outputs": [ + { + "name": "sampled_candidates", + "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.", + "type": 9 + }, + { + "name": "true_expected_count", + "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.", + "type": 1 + }, + { + "name": "sampled_expected_count", + "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability.", + "type": 1 + } + ] + }, + { + "name": "Tile", + "summary": "Constructs a tensor by tiling a given tensor.", + "description": "This operation creates a new tensor by replicating `input` `multiples` times.\nThe output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,\nand the values of `input` are replicated `multiples[i]` times along the 'i'th\ndimension. For example, tiling `[a b c d]` by `[2]` produces\n`[a b c d a b c d]`.\n\n>>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)\n>>> b = tf.constant([1,2], tf.int32)\n>>> tf.tile(a, b)\n\n>>> c = tf.constant([2,1], tf.int32)\n>>> tf.tile(a, c)\n\n>>> d = tf.constant([2,2], tf.int32)\n>>> tf.tile(a, d)\n", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tmultiples", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "Can be of any rank.", + "typeAttr": "T" + }, + { + "name": "multiples", + "description": "1-D. Length must be the same as the number of dimensions in `input`.", + "typeAttr": "Tmultiples" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "TileGrad", + "summary": "Returns the gradient of `Tile`.", + "description": "Since `Tile` takes an input and repeats the input `multiples` times\nalong each dimension, `TileGrad` takes in `multiples` and aggregates\neach repeated tile of `input` into `output`.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "multiples", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + }, + { + "name": "Timestamp", + "summary": "Provides the time since epoch in seconds.", + "description": "Returns the timestamp as a `float64` for seconds since the Unix epoch.\n\nCommon usages include:\n* Logging\n* Providing a random number seed\n* Debugging graph execution\n* Generating timing information, mainly through comparison of timestamps\n\nNote: In graph mode, the timestamp is computed when the op is executed,\nnot when it is added to the graph.
In eager mode, the timestamp is computed\nwhen the op is eagerly executed.", + "outputs": [ + { + "name": "ts", + "type": 2 + } + ] + }, + { + "name": "ToBool", + "summary": "Converts a tensor to a scalar predicate.", + "description": "Converts a tensor to a scalar predicate with the following rules:\n\n- For 0D tensors, truthiness is determined by comparing against a \"zero\"\n value. For numerical types it is the obvious zero. For strings it is the\n empty string.\n\n- For >0D tensors, truthiness is determined by looking at the number of\n elements. If it has zero elements, then the result is false. Otherwise the\n result is true.\n\nThis matches the behavior of If and While for determining if a tensor counts\nas true/false for a branch condition.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": 10 + } + ] + }, + { + "name": "TopK", + "summary": "Finds values and indices of the `k` largest elements for the last dimension.", + "description": "If the input is a vector (rank-1), finds the `k` largest entries in the vector\nand outputs their values and indices as vectors. Thus `values[j]` is the\n`j`-th largest entry in `input`, and its index is `indices[j]`.\n\nFor matrices (resp. higher rank input), computes the top `k` entries in each\nrow (resp. vector along the last dimension). Thus,\n\n values.shape = indices.shape = input.shape[:-1] + [k]\n\nIf two elements are equal, the lower-index element appears first.\n\nIf `k` varies dynamically, use `TopKV2` below.", + "attributes": [ + { + "name": "k", + "type": "int64", + "description": "Number of top elements to look for along the last dimension (along each\nrow for matrices).", + "minimum": 0 + }, + { + "name": "sorted", + "type": "boolean", + "description": "If true the resulting `k` elements will be sorted by the values in\ndescending order.", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "input", + "description": "1-D or higher with last dimension at least `k`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "values", + "description": "The `k` largest elements along each last dimensional slice.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "The indices of `values` within the last dimension of `input`.", + "type": 3 + } + ] + }, + { + "name": "TopKUnique", + "summary": "Returns the TopK unique values in the array in sorted order.", + "description": "The running time is proportional to the product of K and the input\nsize. Sorting the whole array is more efficient for sufficiently large\nvalues of K. The median-of-medians algorithm is probably faster, but\ndifficult to implement efficiently in XLA. If there are fewer than K\nunique numbers (not NaNs), the results are padded with negative\ninfinity. NaNs are never returned. Subnormal numbers are flushed to\nzero. If an element appears at multiple indices, the highest index is\nreturned. If a TopK element never appears in the input due to padding\nvalues, the indices are padded with negative one. If a padding value\nappears in the input and padding is needed, the highest index of the\npadding value will be returned.
The semantics are not the same as\nkth_order_statistic.", + "attributes": [ + { + "name": "k", + "type": "int64" + } + ], + "inputs": [ + { + "name": "input", + "type": 1 + } + ], + "outputs": [ + { + "name": "topk", + "type": 1 + }, + { + "name": "topk_indices", + "type": 3 + } + ] + }, + { + "name": "TopKV2", + "summary": "Finds values and indices of the `k` largest elements for the last dimension.", + "description": "If the input is a vector (rank-1), finds the `k` largest entries in the vector\nand outputs their values and indices as vectors. Thus `values[j]` is the\n`j`-th largest entry in `input`, and its index is `indices[j]`.\n\nFor matrices (resp. higher rank input), computes the top `k` entries in each\nrow (resp. vector along the last dimension). Thus,\n\n values.shape = indices.shape = input.shape[:-1] + [k]\n\nIf two elements are equal, the lower-index element appears first.", + "attributes": [ + { + "name": "sorted", + "type": "boolean", + "description": "If true the resulting `k` elements will be sorted by the values in\ndescending order.", + "default": true + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tk", + "type": "type", + "description": "Must be one of the following: `int16`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + }, + { + "name": "index_type", + "type": "type", + "description": "Must be one of the following: `int16`, `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "1-D or higher with last dimension at least `k`.", + "typeAttr": "T" + }, + { + "name": "k", + "description": "0-D. Number of top elements to look for along the last dimension (along each\nrow for matrices).", + "typeAttr": "Tk" + } + ], + "outputs": [ + { + "name": "values", + "description": "The `k` largest elements along each last dimensional slice.", + "typeAttr": "T" + }, + { + "name": "indices", + "description": "The indices of `values` within the last dimension of `input`.", + "typeAttr": "index_type" + } + ] + }, + { + "name": "TopKWithUnique", + "summary": "Returns the TopK values in the array in sorted order.", + "description": "This is a combination of MakeUnique and TopKUnique. The returned top-K will\nhave its lower bits replaced by iota, thus it will be close to the original\nvalue but not exactly the same. The running time is proportional to the product\nof K and the input size. NaNs are never returned. Subnormal numbers are flushed\nto zero.", + "attributes": [ + { + "name": "k", + "type": "int64" + } + ], + "inputs": [ + { + "name": "input", + "type": 1 + } + ], + "outputs": [ + { + "name": "topk", + "type": 1 + }, + { + "name": "topk_indices", + "type": 3 + } + ] + }, + { + "name": "TpuHandleToProtoKey", + "summary": "Converts XRT's uid handles to TensorFlow-friendly input format.", + "description": "Converts a uid handle for a compiled program into a vector of proto keys.\n\nXRT compile ops return uids, and the TensorFlow execute op takes a proto\nkey. 
This op enables a client to compile on TPU using XRT and execute using the\nstandard TensorFlow execute op.\n\n'uid' is the input handle.\n'proto_keys' is a vector of proto keys, one for each core program.", + "inputs": [ + { + "name": "uid", + "type": 9 + } + ], + "outputs": [ + { + "name": "proto_keys", + "type": 7 + } + ] + }, + { + "name": "Transpose", + "summary": "Shuffle dimensions of x according to a permutation.", + "description": "The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:\n `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Tperm", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "perm", + "typeAttr": "Tperm" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + }, + { + "name": "TridiagonalMatMul", + "summary": "Calculate product with tridiagonal matrix.", + "description": "Calculates the product of two matrices, where the left matrix is a tridiagonal matrix.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "superdiag", + "description": "Tensor of shape `[..., 1, M]`, representing superdiagonals of\ntri-diagonal matrices to the left of multiplication. Last element is ignored.", + "typeAttr": "T" + }, + { + "name": "maindiag", + "description": "Tensor of shape `[..., 1, M]`, representing main diagonals of tri-diagonal\nmatrices to the left of multiplication.", + "typeAttr": "T" + }, + { + "name": "subdiag", + "description": "Tensor of shape `[..., 1, M]`, representing subdiagonals of tri-diagonal\nmatrices to the left of multiplication. First element is ignored.", + "typeAttr": "T" + }, + { + "name": "rhs", + "description": "Tensor of shape `[..., M, N]`, representing MxN matrices to the right of\nmultiplication.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Tensor of shape `[..., M, N]` containing the product.", + "typeAttr": "T" + } + ] + }, + { + "name": "TridiagonalSolve", + "summary": "Solves tridiagonal systems of equations.", + "description": " Solves tridiagonal systems of equations.\n Supports batch dimensions and multiple right-hand sides per each left-hand\n side.\n On CPU, solution is computed via Gaussian elimination with or without partial\n pivoting, depending on `partial_pivoting` attribute. On GPU, Nvidia's cuSPARSE\n library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv\n Partial pivoting is not yet supported by XLA backends.", + "attributes": [ + { + "name": "partial_pivoting", + "type": "boolean", + "description": "Whether to apply partial pivoting. Partial pivoting makes the procedure more\nstable, but slower.", + "default": true + }, + { + "name": "perturb_singular", + "type": "boolean", + "default": false + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float64`, `float32`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "diagonals", + "description": "Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the\ntridiagonal matrices with three rows being the superdiagonal, diagonals, and\nsubdiagonals, in order.
The last element of the superdiagonal and the first\nelement of the subdiagonal are ignored.", + "typeAttr": "T" + }, + { + "name": "rhs", + "description": "Tensor of shape `[..., M, K]`, representing K right-hand sides per each\nleft-hand side.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "Tensor of shape `[..., M, K]` containing the solutions.", + "typeAttr": "T" + } + ] + }, + { + "name": "TruncateDiv", + "summary": "Returns x / y element-wise, rounded towards zero.", + "description": "Truncation designates that negative numbers will round fractional quantities\ntoward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different\nfrom Python semantics. See `FloorDiv` for a division function that matches\nPython semantics.\n\n*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "TruncateMod", + "summary": "Returns element-wise remainder of division. This emulates C semantics in that", + "description": "the result here is consistent with a truncating divide. E.g. `truncate(x / y) *\ny + truncate_mod(x, y) = x`.\n\n*NOTE*: `TruncateMod` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`, `bfloat16`, `float16`, `float32`, `float64`." + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "TruncatedNormal", + "summary": "Outputs random values from a truncated normal distribution.", + "description": "The generated values follow a normal distribution with mean 0 and standard\ndeviation 1, except that values whose magnitude is more than 2 standard\ndeviations from the mean are dropped and re-picked.", + "attributes": [ + { + "name": "seed", + "type": "int64", + "description": "If either `seed` or `seed2` is set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + }, + { + "name": "dtype", + "type": "type", + "description": "The type of the output. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`." + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`."
+ } + ], + "inputs": [ + { + "name": "shape", + "description": "The shape of the output tensor.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A tensor of the specified shape filled with random truncated normal\nvalues.", + "typeAttr": "dtype" + } + ] + }, + { + "name": "Unbatch", + "summary": "Reverses the operation of Batch for a single output Tensor.", + "description": "An instance of Unbatch either receives an empty batched_tensor, in which case it\nasynchronously waits until the values become available from a concurrently\nrunning instance of Unbatch with the same container and shared_name, or receives\na non-empty batched_tensor in which case it finalizes all other concurrently\nrunning instances and outputs its own element from the batch.\n\nbatched_tensor: The possibly transformed output of Batch. The size of the first\n dimension should remain unchanged by the transformations for the operation to\n work.\nbatch_index: The matching batch_index obtained from Batch.\nid: The id scalar emitted by Batch.\nunbatched_tensor: The Tensor corresponding to this execution.\ntimeout_micros: Maximum amount of time (in microseconds) to wait to receive the\n batched input tensor associated with a given invocation of the op.\ncontainer: Container to control resource sharing.\nshared_name: Instances of Unbatch with the same container and shared_name are\n assumed to possibly belong to the same batch. If left empty, the op name will\n be used as the shared name.", + "attributes": [ + { + "name": "timeout_micros", + "type": "int64" + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "batched_tensor", + "typeAttr": "T" + }, + { + "name": "batch_index", + "type": 9 + }, + { + "name": "id", + "type": 9 + } + ], + "outputs": [ + { + "name": "unbatched_tensor", + "typeAttr": "T" + } + ] + }, + { + "name": "UnbatchDataset", + "summary": "A dataset that splits the elements of its input into multiple elements.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "UnbatchGrad", + "summary": "Gradient of Unbatch.", + "description": "Acts like Batch but using the given batch_index index of batching things as they\nbecome available. This ensures that the gradients are propagated back in the\nsame session which did the forward pass.\n\noriginal_input: The input to the Unbatch operation this is the gradient of.\nbatch_index: The batch_index given to the Unbatch operation this is the gradient\nof.\ngrad: The downstream gradient.\nid: The id scalar emitted by Batch.\nbatched_grad: The return value, either an empty tensor or the batched gradient.\ncontainer: Container to control resource sharing.\nshared_name: Instances of UnbatchGrad with the same container and shared_name\n are assumed to possibly belong to the same batch. 
If left empty, the op name\n will be used as the shared name.", + "attributes": [ + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "original_input", + "typeAttr": "T" + }, + { + "name": "batch_index", + "type": 9 + }, + { + "name": "grad", + "typeAttr": "T" + }, + { + "name": "id", + "type": 9 + } + ], + "outputs": [ + { + "name": "batched_grad", + "typeAttr": "T" + } + ] + }, + { + "name": "UncompressElement", + "summary": "Uncompresses a compressed dataset element.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "compressed", + "type": 21 + } + ], + "outputs": [ + { + "name": "components", + "typeListAttr": "output_types" + } + ] + }, + { + "name": "UnicodeDecode", + "summary": "Decodes each string in `input` into a sequence of Unicode code points.", + "description": "The character codepoints for all strings are returned using a single vector\n`char_values`, with strings expanded to characters in row-major order.\n\nThe `row_splits` tensor indicates where the codepoints for\neach input string begin and end within the `char_values` tensor.\nIn particular, the values for the `i`th\nstring (in row-major order) are stored in the slice\n`[row_splits[i]:row_splits[i+1]]`. Thus:\n\n* `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th\n character in the `i`th string (in row-major order).\n* `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th\n string (in row-major order).", + "attributes": [ + { + "name": "input_encoding", + "type": "string", + "description": "Text encoding of the input strings. This is any of the encodings supported\nby ICU ucnv algorithmic converters. Examples: `\"UTF-16\", \"US ASCII\", \"UTF-8\"`." + }, + { + "name": "errors", + "type": "string", + "description": "Error handling policy when there is invalid formatting found in the input.\nThe value of 'strict' will cause the operation to produce an InvalidArgument\nerror on any invalid input formatting. A value of 'replace' (the default) will\ncause the operation to replace any invalid formatting in the input with the\n`replacement_char` codepoint. A value of 'ignore' will cause the operation to\nskip any invalid formatting in the input and produce no corresponding output\ncharacter. Must be one of the following: `strict`, `replace`, `ignore`.", + "default": "replace" + }, + { + "name": "replacement_char", + "type": "int64", + "description": "The replacement character codepoint to be used in place of any invalid\nformatting in the input when `errors='replace'`. Any valid unicode codepoint may\nbe used. The default value is the default Unicode replacement character,\n0xFFFD (U+FFFD).", + "default": 65533 + }, + { + "name": "replace_control_characters", + "type": "boolean", + "description": "Whether to replace the C0 control characters (00-1F) with the\n`replacement_char`. Default is false.", + "default": false + }, + { + "name": "Tsplits", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "The text to be decoded. Can have any shape.
Note that the output is flattened\nto a vector of char values.", + "type": 7 + } + ], + "outputs": [ + { + "name": "row_splits", + "description": "A 1D tensor containing the row splits.", + "typeAttr": "Tsplits" + }, + { + "name": "char_values", + "description": "A 1D int32 Tensor containing the decoded codepoints.", + "type": 3 + } + ] + }, + { + "name": "UnicodeDecodeWithOffsets", + "summary": "Decodes each string in `input` into a sequence of Unicode code points.", + "description": "The character codepoints for all strings are returned using a single vector\n`char_values`, with strings expanded to characters in row-major order.\nSimilarly, the character start byte offsets are returned using a single vector\n`char_to_byte_starts`, with strings expanded in row-major order.\n\nThe `row_splits` tensor indicates where the codepoints and start offsets for\neach input string begin and end within the `char_values` and\n`char_to_byte_starts` tensors. In particular, the values for the `i`th\nstring (in row-major order) are stored in the slice\n`[row_splits[i]:row_splits[i+1]]`. Thus:\n\n* `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th\n character in the `i`th string (in row-major order).\n* `char_to_byte_starts[row_splits[i]+j]` is the start byte offset for the `j`th\n character in the `i`th string (in row-major order).\n* `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th\n string (in row-major order).", + "attributes": [ + { + "name": "input_encoding", + "type": "string", + "description": "Text encoding of the input strings. This is any of the encodings supported\nby ICU ucnv algorithmic converters. Examples: `\"UTF-16\", \"US ASCII\", \"UTF-8\"`." + }, + { + "name": "errors", + "type": "string", + "description": "Error handling policy when there is invalid formatting found in the input.\nThe value of 'strict' will cause the operation to produce an InvalidArgument\nerror on any invalid input formatting. A value of 'replace' (the default) will\ncause the operation to replace any invalid formatting in the input with the\n`replacement_char` codepoint. A value of 'ignore' will cause the operation to\nskip any invalid formatting in the input and produce no corresponding output\ncharacter. Must be one of the following: `strict`, `replace`, `ignore`.", + "default": "replace" + }, + { + "name": "replacement_char", + "type": "int64", + "description": "The replacement character codepoint to be used in place of any invalid\nformatting in the input when `errors='replace'`. Any valid unicode codepoint may\nbe used. The default value is the default Unicode replacement character,\n0xFFFD (U+FFFD).", + "default": 65533 + }, + { + "name": "replace_control_characters", + "type": "boolean", + "description": "Whether to replace the C0 control characters (00-1F) with the\n`replacement_char`. Default is false.", + "default": false + }, + { + "name": "Tsplits", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "input", + "description": "The text to be decoded. Can have any shape.
Note that the output is flattened\nto a vector of char values.", + "type": 7 + } + ], + "outputs": [ + { + "name": "row_splits", + "description": "A 1D tensor containing the row splits.", + "typeAttr": "Tsplits" + }, + { + "name": "char_values", + "description": "A 1D int32 Tensor containing the decoded codepoints.", + "type": 3 + }, + { + "name": "char_to_byte_starts", + "description": "A 1D int64 Tensor containing the byte index in the input string where each\ncharacter in `char_values` starts.", + "type": 9 + } + ] + }, + { + "name": "UnicodeEncode", + "summary": "Encode a tensor of ints into unicode strings.", + "description": "Returns a vector of strings, where `output[i]` is constructed by encoding the\nUnicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]`\nusing `output_encoding`.\n\n---\n\nExample:\n\n```\ninput_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100]\ninput_splits = [0, 5, 10]\noutput_encoding = 'UTF-8'\n\noutput = ['Hello', 'World']\n```", + "attributes": [ + { + "name": "errors", + "type": "string", + "description": "Error handling policy when there is invalid formatting found in the input.\nThe value of 'strict' will cause the operation to produce an InvalidArgument\nerror on any invalid input formatting. A value of 'replace' (the default) will\ncause the operation to replace any invalid formatting in the input with the\n`replacement_char` codepoint. A value of 'ignore' will cause the operation to\nskip any invalid formatting in the input and produce no corresponding output\ncharacter. Must be one of the following: `ignore`, `replace`, `strict`.", + "default": "replace" + }, + { + "name": "output_encoding", + "type": "string", + "description": "Unicode encoding of the output strings. Valid encodings are: `\"UTF-8\",\n\"UTF-16-BE\", and \"UTF-32-BE\"`. Must be one of the following: `UTF-8`, `UTF-16-BE`, `UTF-32-BE`." + }, + { + "name": "replacement_char", + "type": "int64", + "description": "The replacement character codepoint to be used in place of any invalid\nformatting in the input when `errors='replace'`. Any valid unicode codepoint may\nbe used. The default value is the default Unicode replacement character,\n0xFFFD (U+FFFD).", + "default": 65533 + }, + { + "name": "Tsplits", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + } + ], + "inputs": [ + { + "name": "input_values", + "description": "A 1D tensor containing the unicode codepoints that should be encoded.", + "type": 3 + }, + { + "name": "input_splits", + "description": "A 1D tensor specifying how the unicode codepoints should be split into strings.\nIn particular, `output[i]` is constructed by encoding the codepoints in the\nslice `input_values[input_splits[i]:input_splits[i+1]]`.", + "typeAttr": "Tsplits" + } + ], + "outputs": [ + { + "name": "output", + "description": "The 1-D Tensor of strings encoded from the provided unicode codepoints.", + "type": 7 + } + ] + }, + { + "name": "UnicodeScript", + "summary": "Determine the script codes of a given tensor of Unicode integer code points.", + "description": "This operation converts Unicode code points to script codes corresponding to\neach code point.
Script codes correspond to International Components for\nUnicode (ICU) UScriptCode values.\n\nSee\n[ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html)\nfor more details on script codes.\n\nFor an example, see the unicode strings guide on [unicode scripts]\n(https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode).\n\nReturns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will\nmatch input shape.\n\nExamples:\n\n>>> tf.strings.unicode_script([1, 31, 38])\n", + "inputs": [ + { + "name": "input", + "description": "A Tensor of int32 Unicode code points.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "A Tensor of int32 script codes corresponding to each input code point.", + "type": 3 + } + ] + }, + { + "name": "UnicodeTranscode", + "summary": "Transcode the input text from a source encoding to a destination encoding.", + "description": "The input is a string tensor of any shape. The output is a string tensor of\nthe same shape containing the transcoded strings. Output strings are always\nvalid unicode. If the input contains invalid encoding positions, the\n`errors` attribute sets the policy for how to deal with them. If the default\nerror-handling policy is used, invalid formatting will be substituted in the\noutput by the `replacement_char`. If the errors policy is to `ignore`, any\ninvalid encoding positions in the input are skipped and not included in the\noutput. If it is set to `strict` then any invalid formatting will result in an\nInvalidArgument error.\n\nThis operation can be used with `output_encoding = input_encoding` to enforce\ncorrect formatting for inputs even if they are already in the desired encoding.\n\nIf the input is prefixed by a Byte Order Mark needed to determine encoding\n(e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that\nBOM will be consumed and not emitted into the output. If the input encoding\nis marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is\ninterpreted as a non-breaking-space and is preserved in the output (including\nalways for UTF-8).\n\nThe end result is that if the input is marked as an explicit endianness the\ntranscoding is faithful to all codepoints in the source. If it is not marked\nwith an explicit endianness, the BOM is not considered part of the string itself\nbut as metadata, and so is not preserved in the output.\n\nExamples:\n\n>>> tf.strings.unicode_transcode([\"Hello\", \"TensorFlow\", \"2.x\"], \"UTF-8\", \"UTF-16-BE\")\n\n>>> tf.strings.unicode_transcode([\"A\", \"B\", \"C\"], \"US ASCII\", \"UTF-8\").numpy()\narray([b'A', b'B', b'C'], dtype=object)", + "attributes": [ + { + "name": "input_encoding", + "type": "string", + "description": "Text encoding of the input strings. This is any of the encodings supported\nby ICU ucnv algorithmic converters. Examples: `\"UTF-16\", \"US ASCII\", \"UTF-8\"`." + }, + { + "name": "output_encoding", + "type": "string", + "description": "The unicode encoding to use in the output. Must be one of\n`\"UTF-8\", \"UTF-16-BE\", \"UTF-32-BE\"`. Multi-byte encodings will be big-endian. Must be one of the following: `UTF-8`, `UTF-16-BE`, `UTF-32-BE`." + }, + { + "name": "errors", + "type": "string", + "description": "Error handling policy when there is invalid formatting found in the input.\nThe value of 'strict' will cause the operation to produce an InvalidArgument\nerror on any invalid input formatting.
A value of 'replace' (the default) will\ncause the operation to replace any invalid formatting in the input with the\n`replacement_char` codepoint. A value of 'ignore' will cause the operation to\nskip any invalid formatting in the input and produce no corresponding output\ncharacter. Must be one of the following: `strict`, `replace`, `ignore`.", + "default": "replace" + }, + { + "name": "replacement_char", + "type": "int64", + "description": "The replacement character codepoint to be used in place of any invalid\nformatting in the input when `errors='replace'`. Any valid unicode codepoint may\nbe used. The default value is the default Unicode replacement character,\n0xFFFD (U+FFFD).\n\nNote that for UTF-8, passing a replacement character expressible in 1 byte, such\nas ' ', will preserve string alignment to the source since invalid bytes will be\nreplaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte\nreplacement character will preserve byte alignment to the source.", + "default": 65533 + }, + { + "name": "replace_control_characters", + "type": "boolean", + "description": "Whether to replace the C0 control characters (00-1F) with the\n`replacement_char`. Default is false.", + "default": false + } + ], + "inputs": [ + { + "name": "input", + "description": "The text to be processed. Can have any shape.", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "description": "A string tensor containing unicode text encoded using `output_encoding`.", + "type": 7 + } + ] + }, + { + "name": "UniformCandidateSampler", + "summary": "Generates labels for candidate sampling with a uniform distribution.", + "description": "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.", + "attributes": [ + { + "name": "num_true", + "type": "int64", + "description": "Number of true labels per context.", + "minimum": 1 + }, + { + "name": "num_sampled", + "type": "int64", + "description": "Number of candidates to randomly sample.", + "minimum": 1 + }, + { + "name": "unique", + "type": "boolean", + "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." + }, + { + "name": "range_max", + "type": "int64", + "description": "The sampler will sample integers from the interval [0, range_max).", + "minimum": 1 + }, + { + "name": "seed", + "type": "int64", + "description": "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed.
Otherwise, it is seeded by a\nrandom seed.", + "default": 0 + }, + { + "name": "seed2", + "type": "int64", + "description": "A second seed to avoid seed collision.", + "default": 0 + } + ], + "inputs": [ + { + "name": "true_classes", + "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.", + "type": 9 + } + ], + "outputs": [ + { + "name": "sampled_candidates", + "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.", + "type": 9 + }, + { + "name": "true_expected_count", + "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.", + "type": 1 + }, + { + "name": "sampled_expected_count", + "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability.", + "type": 1 + } + ] + }, + { + "name": "UniformDequantize", + "summary": "Perform dequantization on the quantized Tensor `input`.", + "description": "Given quantized `input` which was quantized using `scales` and `zero_points`, performs dequantization using the formula:\ndequantized_data = (quantized_data - zero_point) * scale.", + "attributes": [ + { + "name": "Tin", + "type": "type", + "description": "The type of input Tensor. A tf.DType from: tf.qint8, tf.qint32 Must be one of the following: `qint8`, `qint32`." + }, + { + "name": "Tout", + "type": "type", + "description": "The type of output Tensor. A tf.DType from: tf.float32 Must be one of the following: `float32`." + }, + { + "name": "quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()).", + "default": -1 + }, + { + "name": "quantization_min_val", + "type": "int64", + "description": "The quantization min value that was used when input was quantized.\nThe purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to:\n`(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise.\nFor example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "quantization_max_val", + "type": "int64", + "description": "The quantization max value that was used when input was quantized.\nThe purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to:\n`(Tin max)` for both narrow range and not narrow range.\nFor example, if Tin is qint8, this is set to 127."
+ } + ], + "inputs": [ + { + "name": "input", + "description": "Must be a Tensor of Tin.", + "typeAttr": "Tin" + }, + { + "name": "scales", + "description": "The float value(s) used as scale(s) when quantizing original data that input represents.\nMust be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).", + "type": 1 + }, + { + "name": "zero_points", + "description": "The int32 value(s) used as zero_point(s) when quantizing original data that input represents.\nSame shape condition as scales.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output dequantized Tensor of Tout, whose shape is same as input.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "UniformQuantize", + "summary": "Perform quantization on Tensor `input`.", + "description": "Given `input`, `scales` and `zero_points`, performs quantization using the formula:\nquantized_data = floor(input_data * (1.0f / scale) + 0.5f) + zero_point", + "attributes": [ + { + "name": "Tin", + "type": "type", + "description": "The type of input Tensor. A tf.DType from: tf.qint8, tf.qint32 Must be one of the following: `float32`." + }, + { + "name": "Tout", + "type": "type", + "description": "The type of output Tensor. A tf.DType from: tf.float32 Must be one of the following: `qint8`, `qint32`." + }, + { + "name": "quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()).", + "default": -1 + }, + { + "name": "quantization_min_val", + "type": "int64", + "description": "The quantization min value to quantize `input`.\nThe purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to:\n`(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise.\nFor example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "quantization_max_val", + "type": "int64", + "description": "The quantization max value to quantize `input`.\nThe purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to:\n`(Tout max)` for both narrow range and not narrow range.\nFor example, if Tin is qint8, this is set to 127." 
+ } + ], + "inputs": [ + { + "name": "input", + "description": "Must be a Tensor of Tin.", + "typeAttr": "Tin" + }, + { + "name": "scales", + "description": "The float value(s) to use as scale(s) to quantize `input`.\nMust be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).", + "type": 1 + }, + { + "name": "zero_points", + "description": "The int32 value(s) to use as zero_point(s) to quantize `input`.\nSame shape condition as scales.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output quantized Tensor of Tout, whose shape is same as input.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "UniformQuantizedAdd", + "summary": "Perform quantized add of quantized Tensor `lhs` and quantized Tensor `rhs` to make quantized `output`.", + "description": "Given quantized `lhs` and quantized `rhs`, performs quantized add on `lhs` and `rhs` to make quantized `output`.\n\n`UniformQuantizedAdd` follows Numpy broadcasting rules.\nThe two input array shapes are compared element-wise.\nStarting with the trailing dimensions, the two dimensions either have to be equal or one of them needs to be 1.\n\n`lhs` and `rhs` must be quantized Tensor, where data value is quantized using the formula:\n```\nquantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val)\n```\n`output` is also quantized, using the same formula.\n\nIf `lhs` and `output` is both per-axis quantized, the quantization axis must match.\nAlso, if `rhs` and `output` is both per-axis quantized, the quantization axis must match.\n*Match* means the axis must match when adding, regarding the broadcasting.\ni.e. For both operands `lhs` and `rhs`,\nif `operand.quantization_axis` >= 0 and `output.quantization_axis` >= 0,\n`operand.dims` - `operand.quantization_axis` must be equal to `output.dims` - `output.quantization_axis`.", + "attributes": [ + { + "name": "lhs_quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization.\nFor the `lhs`, only per-tensor quantization is supported.\nThus, this must be set to -1.\nOther values will raise error at OpKernel construction.", + "default": -1 + }, + { + "name": "lhs_quantization_min_val", + "type": "int64", + "description": "The min value of the quantized data stored in `lhs`.\nFor example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "lhs_quantization_max_val", + "type": "int64", + "description": "The max value of the quantized data stored in `lhs`.\nFor example, if `Tin` is `qint8`, this must be set to 127." 
+ }, + { + "name": "rhs_quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization.\nFor the `rhs`, only per-tensor quantization\nor per-channel quantization along `kernel_output_feature_dimension` is supported.\nThus, this must be set to -1 or `dimension_numbers.kernel_output_feature_dimension`.\nOther values will raise error at OpKernel construction.", + "default": -1 + }, + { + "name": "rhs_quantization_min_val", + "type": "int64", + "description": "The min value of the quantized data stored in `rhs`.\nFor example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "rhs_quantization_max_val", + "type": "int64", + "description": "The max value of the quantized data stored in `rhs`.\nFor example, if `Tin` is `qint8`, this must be set to 127." + }, + { + "name": "output_quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization.\nFor the `output`, only per-tensor quantization or per-channel quantization along `output_feature_dimension` is supported.\nThus, this must be set to -1 or `dimension_numbers.output_feature_dimension`.\nOther values will raise error at OpKernel construction.", + "default": -1 + }, + { + "name": "output_quantization_min_val", + "type": "int64", + "description": "The min value of the quantized data stored in `output`.\nFor example, if `Tout` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "output_quantization_max_val", + "type": "int64", + "description": "The max value of the quantized data stored in `output`.\nFor example, if `Tout` is `qint8`, this must be set to 127." + }, + { + "name": "T", + "type": "type", + "description": "The type of `lhs`, `rhs`, and `output`. Must be one of the following: `qint32`." 
+ } + ], + "inputs": [ + { + "name": "lhs", + "description": "Must be a quantized tensor.", + "typeAttr": "T" + }, + { + "name": "rhs", + "description": "Must be a quantized tensor.", + "typeAttr": "T" + }, + { + "name": "lhs_scales", + "description": "The float value(s) used as scale factors when quantizing the original data that `lhs` represents.", + "type": 1 + }, + { + "name": "lhs_zero_points", + "description": "The int32 value(s) used as zero points when quantizing original data that `lhs` represents.\nMust have same shape with `lhs_scales`.", + "type": 3 + }, + { + "name": "rhs_scales", + "description": "The float value(s) used as scale factors when quantizing the original data that `rhs` represents.", + "type": 1 + }, + { + "name": "rhs_zero_points", + "description": "The int32 value(s) used as zero points when quantizing original data that `rhs` represents.\nMust have same shape with `rhs_scales`.", + "type": 3 + }, + { + "name": "output_scales", + "description": "The float value(s) to use as scale factors when quantizing original data that `output` represents.", + "type": 1 + }, + { + "name": "output_zero_points", + "description": "The int32 value(s) used as zero points when quantizing original data that output represents.\nMust have same shape with `output_scales`.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output quantized tensor.", + "typeAttr": "T" + } + ] + }, + { + "name": "UniformQuantizedClipByValue", + "summary": "Perform clip by value on the quantized Tensor `operand`.", + "description": "Given quantized `operand` which was quantized using `scales` and `zero_points`, performs clip by value using `min` and `max` values.\nIf quantization_axis is -1 (per-tensor quantized), the entire operand is clipped using scalar min, max.\nOtherwise (per-channel quantized), the clipping is also done per-channel.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "The type of operand, min, max, and output. A tf.DType from: tf.qint32 Must be one of the following: `qint32`." + }, + { + "name": "quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, operand.dims()).", + "default": -1 + }, + { + "name": "quantization_min_val", + "type": "int64", + "description": "The quantization min value that was used when operand was quantized." + }, + { + "name": "quantization_max_val", + "type": "int64", + "description": "The quantization max value that was used when operand was quantized." + } + ], + "inputs": [ + { + "name": "operand", + "description": "Must be a Tensor of T.", + "typeAttr": "T" + }, + { + "name": "min", + "description": "The min value(s) to clip operand. Must be a Tensor of T.\nMust be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization).", + "typeAttr": "T" + }, + { + "name": "max", + "description": "The min value(s) to clip operand. 
Must be a Tensor of T.\nMust be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization).", + "typeAttr": "T" + }, + { + "name": "scales", + "description": "The float value(s) used as scale(s) when quantizing `operand`, `min` and `max`.\nMust be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization).", + "type": 1 + }, + { + "name": "zero_points", + "description": "The int32 value(s) used as zero_point(s) when quantizing `operand`, `min` and `max`.\nSame shape condition as scales.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output clipped Tensor of T, whose shape is same as operand.", + "typeAttr": "T" + } + ] + }, + { + "name": "UniformQuantizedConvolution", + "summary": "Perform quantized convolution of quantized Tensor `lhs` and quantized Tensor `rhs` to make quantized `output`.", + "description": "Given quantized `lhs` and quantized `rhs`, performs quantized convolution on `lhs` and `rhs` to make quantized `output`.\n\n`lhs` and `rhs` must be Tensors of the same rank, and meet the following shape conditions.\n- `lhs_feature` % `feature_group_count` == 0\n- `lhs_feature` % `rhs_input_feature` == 0\n- `lhs_feature` / `feature_group_count` == `rhs_input_feature`\n- `rhs_output_feature` % `feature_group_count` == 0\n- `lhs_batch` % `batch_group_count` == 0\n- `rhs_output_feature` % `batch_group_count` == 0\n\n`lhs` and `rhs` must be quantized Tensors, where data values are quantized using the formula:\n```\nquantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val)\n```\n`output` is also quantized, using the same formula.\nIf `rhs` is per-tensor quantized, `output` must also be per-tensor quantized.", + "attributes": [ + { + "name": "Tin", + "type": "type", + "description": "The type of `lhs` and `rhs` input `Tensor`. Must be one of the following: `qint8`." + }, + { + "name": "Tout", + "type": "type", + "description": "The type of `output` `Tensor`. Must be one of the following: `qint32`." + }, + { + "name": "window_strides", + "type": "int64[]", + "description": "The stride of the sliding window for each spatial dimension of `lhs`.\nMust be an empty list (default) or a list of size (number of spatial dimensions).\nIf an empty list is provided, the stride for each spatial dimension is set to 1.", + "default": [] + }, + { + "name": "padding", + "type": "string", + "description": "string from: `\"SAME\"`, `\"VALID\"`, or `\"EXPLICIT\"`, indicating the type of padding algorithm to use."
+ }, + { + "name": "explicit_padding", + "type": "int64[]", + "description": "If `padding` is `\"EXPLICIT\"`, must be set as a list indicating\nthe explicit paddings at the start and end of each `lhs` spatial dimension.\nOtherwise, this must be empty.\n\n(If used,) Must be a list of size `2 * (number of lhs spatial dimensions)`,\nwhere `(explicit_padding[2 * i], explicit_padding[2 * i + 1])` indicates\n`(start_padding, end_padding)` of `spatial_dimensions[i]`.", + "default": [] + }, + { + "name": "lhs_dilation", + "type": "int64[]", + "description": "The dilation factor to apply in each spatial dimension of `lhs`.\nMust be an empty list (default) or a list of size (number of `lhs` spatial dimensions).\nIf empty list, the dilation for each `lhs` spatial dimension is set to 1.", + "default": [] + }, + { + "name": "rhs_dilation", + "type": "int64[]", + "description": "The dilation factor to apply in each spatial dimension of `rhs`.\nMust be an empty list (default) or a list of size (number of `rhs` spatial dimensions).\nIf empty list, the dilation for each `rhs` spatial dimension is set to 1.", + "default": [] + }, + { + "name": "batch_group_count", + "type": "int64", + "description": "The number of batch groups. Used for grouped filters.\nMust be a divisor of `output_feature`.", + "default": 1 + }, + { + "name": "feature_group_count", + "type": "int64", + "description": "The number of feature groups. Used for grouped convolutions.\nMust be a divisor of both `lhs_feature` and `output_feature`.", + "default": 1 + }, + { + "name": "dimension_numbers", + "type": "string", + "description": "Structure of dimension information for the convolution op.\nMust be an empty string (default) or a serialized string of `tensorflow.UniformQuantizedConvolutionDimensionNumbersAttr` proto.\nIf empty string, the default is `(\"NCHW\", \"OIHW\", \"NCHW\")` (for a 2D convolution).", + "default": "" + }, + { + "name": "lhs_quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization.\nFor the `lhs`, only per-tensor quantization is supported.\nThus, this must be set to -1.\nOther values will raise error at OpKernel construction.", + "default": -1 + }, + { + "name": "lhs_quantization_min_val", + "type": "int64", + "description": "The min value of the quantized data stored in `lhs`.\nFor example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "lhs_quantization_max_val", + "type": "int64", + "description": "The max value of the quantized data stored in `lhs`.\nFor example, if `Tin` is `qint8`, this must be set to 127." 
+ }, + { + "name": "rhs_quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization.\nFor the `rhs`, only per-tensor quantization\nor per-channel quantization along `kernel_output_feature_dimension` is supported.\nThus, this must be set to -1 or `dimension_numbers.kernel_output_feature_dimension`.\nOther values will raise error at OpKernel construction.", + "default": -1 + }, + { + "name": "rhs_quantization_min_val", + "type": "int64", + "description": "The min value of the quantized data stored in `rhs`.\nFor example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "rhs_quantization_max_val", + "type": "int64", + "description": "The max value of the quantized data stored in `rhs`.\nFor example, if `Tin` is `qint8`, this must be set to 127." + }, + { + "name": "output_quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization.\nFor the `output`, only per-tensor quantization or per-channel quantization along `output_feature_dimension` is supported.\nThus, this must be set to -1 or `dimension_numbers.output_feature_dimension`.\nOther values will raise error at OpKernel construction.", + "default": -1 + }, + { + "name": "output_quantization_min_val", + "type": "int64", + "description": "The min value of the quantized data stored in `output`.\nFor example, if `Tout` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "output_quantization_max_val", + "type": "int64", + "description": "The max value of the quantized data stored in `output`.\nFor example, if `Tout` is `qint8`, this must be set to 127." 
+ } + ], + "inputs": [ + { + "name": "lhs", + "description": "Must be a quantized tensor, rank >= 3.", + "typeAttr": "Tin" + }, + { + "name": "rhs", + "description": "Must be a quantized tensor, same rank as `lhs`.", + "typeAttr": "Tin" + }, + { + "name": "lhs_scales", + "description": "The float value(s) used as scale factors when quantizing the original data that `lhs` represents.\nMust be a scalar `Tensor` (`lhs` supports only per-tensor quantization).", + "type": 1 + }, + { + "name": "lhs_zero_points", + "description": "The int32 value(s) used as zero points when quantizing original data that `lhs` represents.\nSame shape condition as `lhs_scales`.", + "type": 3 + }, + { + "name": "rhs_scales", + "description": "The float value(s) used as scale factors when quantizing the original data that `rhs` represents.\nMust be a scalar `Tensor` for per-tensor quantization,\nor 1D `Tensor` of size `rhs.dim_size(kernel_output_feature_dimension)`, for per-channel quantization.", + "type": 1 + }, + { + "name": "rhs_zero_points", + "description": "The int32 value(s) used as zero points when quantizing original data that `rhs` represents.\nSame shape condition as `rhs_scales`.", + "type": 3 + }, + { + "name": "output_scales", + "description": "The float value(s) to use as scale factors when quantizing original data that `output` represents.\nMust be a scalar `Tensor` for per-tensor quantization,\nor 1D `Tensor` of size `rhs.dim_size(kernel_output_feature_dimension)`\n- which is equal to `output.dim_size(output_feature_dimension)`,\nfor per-channel quantization.\nIf `rhs` is per-tensor quantized, output must be also per-tensor quantized.\nThis means that if `rhs_scales` and `rhs_zero_points` are scalar `Tensor`s, `output_scales` and `output_zero_points` must be scalar `Tensor`s as well.", + "type": 1 + }, + { + "name": "output_zero_points", + "description": "The int32 value(s) used as zero points when quantizing original data that output represents.\nSame shape condition as `output_scales`.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output quantized tensor of `Tout`, same rank as `lhs` and `rhs`.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "UniformQuantizedConvolutionHybrid", + "summary": "Perform hybrid quantized convolution of float Tensor `lhs` and quantized Tensor `rhs`.", + "description": "Given float `lhs` and quantized `rhs`, internally performs quantization on `lhs`,\nand then performs quantized convolution on quantized `lhs` and `rhs`.\n\nThe internal quantization on `lhs` is a quantization to `Trhs`, dynamic range,\nper-batch (per-axis along axis `dimension_numbers.input_batch_dimension`), asymmetric,\nand not narrow range (the range is [Trhs_MIN, Trhs_MAX]).\n\n`lhs` and `rhs` must be Tensors of same rank, and meet following shape conditions.\n- lhs_feature % feature_group_count == 0\n- lhs_feature % rhs_input_feature == 0\n- lhs_feature / feature_group_count == rhs_input_feature\n- rhs_output_feature % feature_group_count == 0\n- lhs_batch % batch_group_count == 0\n- rhs_output_feature % batch_group_count == 0\n\n`rhs` must be quantized Tensor, where its data value is quantized using the formula:\nquantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val).", + "attributes": [ + { + "name": "Tlhs", + "type": "type", + "description": "The type of `lhs` input Tensor. Must be one of the following: `float32`." 
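The six shape conditions in the `UniformQuantizedConvolution` description (repeated for the hybrid variant) read more easily as executable asserts. A sketch assuming the default `("NCHW", "OIHW", "NCHW")` dimension numbers, so `lhs` is `[batch, feature, H, W]` and `rhs` is `[output_feature, input_feature, H, W]`; the helper is hypothetical, not part of the op:

```python
# Illustrative check of the grouped-convolution shape conditions, assuming the
# default NCHW/OIHW layouts described above.
def check_conv_shapes(lhs_shape, rhs_shape, feature_group_count=1, batch_group_count=1):
    lhs_batch, lhs_feature = lhs_shape[0], lhs_shape[1]
    rhs_output_feature, rhs_input_feature = rhs_shape[0], rhs_shape[1]
    assert lhs_feature % feature_group_count == 0
    assert lhs_feature % rhs_input_feature == 0
    assert lhs_feature // feature_group_count == rhs_input_feature
    assert rhs_output_feature % feature_group_count == 0
    assert lhs_batch % batch_group_count == 0
    assert rhs_output_feature % batch_group_count == 0

# Grouped example: 8 input features split into 4 groups of 2.
check_conv_shapes(lhs_shape=(1, 8, 32, 32), rhs_shape=(16, 2, 3, 3), feature_group_count=4)
```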
+ }, + { + "name": "Trhs", + "type": "type", + "description": "The type of `rhs` (quantized) input Tensor. Must be one of the following: `qint8`." + }, + { + "name": "Tout", + "type": "type", + "description": "The type of output Tensor. Must be one of the following: `float32`." + }, + { + "name": "window_strides", + "type": "int64[]", + "description": "The stride of the sliding window for each spatial dimension of `lhs`.\nMust be an empty list (default) or a list of size (number of spatial dimensions).\nIf an empty list is provided, the stride for each spatial dimension is set to 1.", + "default": [] + }, + { + "name": "padding", + "type": "string", + "description": "string from: `\"SAME\"`, `\"VALID\"`, or `\"EXPLICIT\"`, indicating the type of padding algorithm to use." + }, + { + "name": "explicit_padding", + "type": "int64[]", + "description": "If `padding` Attr is `\"EXPLICIT\"`, must be set as a list indicating\nthe explicit paddings at the start and end of each lhs spatial dimension.\nOtherwise, this Attr is must be empty.\n\n(If used,) Must be a list of size 2 * (number of lhs spatial dimensions),\nwhere (explicit_padding[2 * i], explicit_padding[2 * i + 1]) indicates\nspatial_dimensions[i] (start_padding, end_padding).", + "default": [] + }, + { + "name": "lhs_dilation", + "type": "int64[]", + "description": "The dilation factor to apply in each spatial dimension of `lhs`.\nMust be an empty list (default) or a list of size (number of lhs spatial dimensions).\nIf empty list, the dilation for each lhs spatial dimension is set to 1.", + "default": [] + }, + { + "name": "rhs_dilation", + "type": "int64[]", + "description": "The dilation factor to apply in each spatial dimension of `rhs`.\nMust be an empty list (default) or a list of size (number of rhs spatial dimensions).\nIf empty list, the dilation for each rhs spatial dimension is set to 1.", + "default": [] + }, + { + "name": "batch_group_count", + "type": "int64", + "description": "The number of batch groups. Used for grouped filters.\nMust be a divisor of output_feature.", + "default": 1 + }, + { + "name": "feature_group_count", + "type": "int64", + "description": "The number of feature groups. Used for grouped convolutions.\nMust be a divisor of both lhs_feature and output_feature.", + "default": 1 + }, + { + "name": "dimension_numbers", + "type": "string", + "description": "Structure of dimension information for the convolution op.\nMust be an empty string (default) or a serialized string of tensorflow.UniformQuantizedConvolutionDimensionNumbersAttr proto.\nIf empty string, the default is `(\"NCHW\", \"OIHW\", \"NCHW\")` (for a 2D convolution).", + "default": "" + }, + { + "name": "rhs_quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization.\nFor the `rhs`, only per-tensor quantization\nor per-channel quantization along kernel_output_feature_dimension is supported.\nThus, this attribute must be set to -1 or `dimension_numbers.kernel_output_feature_dimension`.\nOther values will raise error at OpKernel construction.", + "default": -1 + }, + { + "name": "rhs_quantization_min_val", + "type": "int64", + "description": "The min value of the quantized data stored in `rhs`.\nFor example, if `Trhs` is qint8, this must be set to -127 if narrow range quantized or -128 if not." 
+ }, + { + "name": "rhs_quantization_max_val", + "type": "int64", + "description": "The max value of the quantized data stored in `rhs`.\nFor example, if `Trhs` is qint8, this must be set to 127." + } + ], + "inputs": [ + { + "name": "lhs", + "description": "Must be a non-quantized Tensor of `Tlhs`, rank >= 3.", + "typeAttr": "Tlhs" + }, + { + "name": "rhs", + "description": "Must be a quantized Tensor of `Trhs`, same rank as `lhs`.", + "typeAttr": "Trhs" + }, + { + "name": "rhs_scales", + "description": "The float value(s) used as scale factors when quantizing the original data that `rhs` represents.\nMust be a scalar Tensor for per-tensor quantization,\nor 1D Tensor of size `rhs.dim_size(kernel_output_feature_dimension)`, for per-channel quantization.", + "type": 1 + }, + { + "name": "rhs_zero_points", + "description": "The int32 value(s) used as zero_point when quantizing original data that `rhs` represents.\nSame shape condition as `rhs_scales`.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output Tensor of `Tout`, same rank as `lhs` and `rhs`.\nThe output data is the non-quantized output data.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "UniformQuantizedDot", + "summary": "Perform quantized dot of quantized Tensor `lhs` and quantized Tensor `rhs` to make quantized `output`.", + "description": "Given quantized `lhs` and quantized `rhs`, performs quantized dot on `lhs` and `rhs` to make quantized `output`.\n`lhs` and `rhs` must be 2D Tensors and the lhs.dim_size(1) must match rhs.dim_size(0).\n`lhs` and `rhs` must be quantized Tensor, where data value is quantized using the formula:\nquantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val).\n`output` is also quantized, using the same formula.\nIf `rhs` is per-tensor quantized, `output` must be also per-tensor quantized.", + "attributes": [ + { + "name": "Tin", + "type": "type", + "description": "The type of lhs and rhs input Tensor. Must be one of the following: `qint8`." + }, + { + "name": "Tout", + "type": "type", + "description": "The type of output Tensor. Must be one of the following: `qint32`." + }, + { + "name": "lhs_quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization.\nFor dot op lhs, only per-tensor quantization is supported.\nThus, this attribute must be set to -1. Other values are rejected.", + "default": -1 + }, + { + "name": "lhs_quantization_min_val", + "type": "int64", + "description": "The min value of the quantized data stored in lhs.\nFor example, if Tin is qint8, this must be set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "lhs_quantization_max_val", + "type": "int64", + "description": "The max value of the quantized data stored in rhs.\nFor example, if Tin is qint8, this must be set to 127." + }, + { + "name": "rhs_quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization.\nFor dot op rhs, only per-tensor quantization or per-channel quantization along dimension 1 is supported.\nThus, this attribute must be set to -1 or 1. 
Other values are rejected.", + "default": -1 + }, + { + "name": "rhs_quantization_min_val", + "type": "int64", + "description": "The min value of the quantized data stored in rhs.\nFor example, if Trhs is qint8, this must be set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "rhs_quantization_max_val", + "type": "int64", + "description": "The max value of the quantized data stored in rhs.\nFor example, if Trhs is qint8, this must be set to 127." + }, + { + "name": "output_quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization.\nFor dot op output, only per-tensor quantization or per-channel quantization along dimension 1 is supported.\nThus, this attribute must be set to -1 or 1. Other values are rejected.", + "default": -1 + }, + { + "name": "output_quantization_min_val", + "type": "int64", + "description": "The min value of the quantized data stored in output.\nFor example, if Tout is qint8, this must be set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "output_quantization_max_val", + "type": "int64", + "description": "The max value of the quantized data stored in output.\nFor example, if Tout is qint8, this must be set to 127." + } + ], + "inputs": [ + { + "name": "lhs", + "description": "Must be a 2D Tensor of Tin.", + "typeAttr": "Tin" + }, + { + "name": "rhs", + "description": "Must be a 2D Tensor of Tin.", + "typeAttr": "Tin" + }, + { + "name": "lhs_scales", + "description": "The float value(s) used as scale when quantizing original data that lhs represents.\nMust be a scalar Tensor (lhs supports only per-tensor quantization).", + "type": 1 + }, + { + "name": "lhs_zero_points", + "description": "The int32 value(s) used as zero_point when quantizing original data that lhs represents.\nSame shape condition as lhs_scales.", + "type": 3 + }, + { + "name": "rhs_scales", + "description": "The float value(s) used as scale when quantizing original data that rhs represents.\nMust be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (rhs.dim_size(1),) (per-channel quantization).", + "type": 1 + }, + { + "name": "rhs_zero_points", + "description": "The int32 value(s) used as zero_point when quantizing original data that rhs represents.\nSame shape condition as rhs_scales.", + "type": 3 + }, + { + "name": "output_scales", + "description": "The float value(s) to use as scales when quantizing original data that output represents.\nMust be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (output.dim_size(1),) (per-channel quantization).\nIf rhs is per-tensor quantized, output must be also per-tensor quantized.\nThis means that if rhs_scales and rhs_zero_points are scalar Tensors, output_scales and output_zero_points must be scalar Tensors as well.", + "type": 1 + }, + { + "name": "output_zero_points", + "description": "The int32 value(s) used as zero_point when quantizing original data that output represents.\nSame shape condition as output_scales.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output 2D Tensor of Tout, whose shape is (lhs.dim_size(0), rhs.dim_size(1)).", + "typeAttr": "Tout" + } + ] + }, + { + "name": "UniformQuantizedDotHybrid", + "summary": "Perform hybrid quantized dot of float Tensor `lhs` and quantized Tensor `rhs`.", + "description": "Given float `lhs` and quantized `rhs`, internally 
performs quantization on `lhs`, and then performs quantized dot on quantized lhs and `rhs`.\nThe internal quantization on `lhs` is a quantization to qint8, dynamic range, per-batch (per-axis along axis 0), asymmetric, and not narrow range (the range is [-128, 127]).\n`lhs` and `rhs` must be 2D Tensors and the lhs.dim_size(1) must match rhs.dim_size(0).\n`rhs` must be quantized Tensor, where its data value is quantized using the formula:\nquantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val).", + "attributes": [ + { + "name": "Tlhs", + "type": "type", + "description": "The type of lhs input Tensor. Must be one of the following: `float32`." + }, + { + "name": "Trhs", + "type": "type", + "description": "The type of rhs (quantized) input Tensor. Must be one of the following: `qint8`." + }, + { + "name": "Tout", + "type": "type", + "description": "The type of output Tensor. Must be one of the following: `float32`." + }, + { + "name": "rhs_quantization_axis", + "type": "int64", + "description": "Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization.\nFor dot op rhs, only per-tensor quantization or per-channel quantization along dimension 1 is supported.\nThus, this attribute must be set to -1 or 1. Other values are rejected.", + "default": -1 + }, + { + "name": "rhs_quantization_min_val", + "type": "int64", + "description": "The min value of the quantized data stored in rhs.\nFor example, if Trhs is qint8, this must be set to -127 if narrow range quantized or -128 if not." + }, + { + "name": "rhs_quantization_max_val", + "type": "int64", + "description": "The max value of the quantized data stored in rhs.\nFor example, if Trhs is qint8, this must be set to 127." 
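With the affine mapping x ~ (q - zero_point) * scale, the dot product the `UniformQuantizedDot*` entries describe reduces to an integer matmul followed by a rescale. A reference sketch under those definitions (shapes as in the op docs: `lhs` is `[B, K]`, quantized per-batch row; `rhs` is `[K, N]`, per-tensor quantized; the helper name is illustrative):

```python
import numpy as np

# Float result of lhs @ rhs recovered from quantized operands:
# subtract zero points, matmul in int32, then rescale by lhs_scale * rhs_scale.
def quantized_dot(lhs_q, lhs_scales, lhs_zps, rhs_q, rhs_scale, rhs_zp):
    acc = (lhs_q.astype(np.int32) - lhs_zps[:, None]) @ (rhs_q.astype(np.int32) - rhs_zp)
    return acc.astype(np.float32) * lhs_scales[:, None] * rhs_scale
```

Fed with the output of the `dynamic_range_quantize` sketch above and a pre-quantized `rhs`, this reproduces the hybrid op's float output up to rounding error.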
+ } + ], + "inputs": [ + { + "name": "lhs", + "description": "Must be a 2D Tensor of Tlhs.", + "typeAttr": "Tlhs" + }, + { + "name": "rhs", + "description": "Must be a 2D Tensor of Trhs.", + "typeAttr": "Trhs" + }, + { + "name": "rhs_scales", + "description": "The float value(s) used as scale when quantizing original data that rhs represents.\nMust be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (rhs.dim_size(1),) (per-channel quantization).", + "type": 1 + }, + { + "name": "rhs_zero_points", + "description": "The int32 value(s) used as zero_point when quantizing original data that rhs represents.\nSame shape condition as rhs_scales.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output 2D Tensor of Tout, whose shape is (lhs.dim_size(0), rhs.dim_size(1)).\nThe output data is the original output data itself (Not quantized).", + "typeAttr": "Tout" + } + ] + }, + { + "name": "UniformRequantize", + "summary": "Given quantized tensor `input`, requantize it with new quantization parameters.", + "description": "Given quantized tensor `input`, which was quantized using {input_scales, input_zero_points, input_quantization_axis, input_quantization_min_val, input_quantization_max_val},\nrequantize it to a tensor, which is quantized using {output_scales, output_zero_points, output_quantization_axis, output_quantization_min_val, output_quantization_max_val}.\nThe requantization is done by using the formula:\noutput_quantized_data = clip(\n (input_quantized_data - input_zero_point) * (input_scale / output_scale) + output_zero_point,\n output_quantization_min_val,\n output_quantization_max_val)\n\nThe supported per-tensor and per-axis quantization cases are the following:\n* per-tensor -> per-tensor\n* per-tensor -> per-axis\n* per-axis -> per-axis where input_quantization_axis equals output_quantization_axis.\ni.e. At least one among input_quantization_axis and output_quantization_axis must be -1, or the two must be equal.", + "attributes": [ + { + "name": "Tin", + "type": "type", + "description": "The type of input Tensor. A tf.DType from: tf.qint8, tf.qint32 Must be one of the following: `qint8`, `qint32`." + }, + { + "name": "Tout", + "type": "type", + "description": "The type of output Tensor. A tf.DType from: tf.qint8, tf.qint32 Must be one of the following: `qint8`, `qint32`." + }, + { + "name": "input_quantization_axis", + "type": "int64", + "description": "The quantization axis that was used when quantizing original data that `input` represents.\nIndicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.\nIf set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()).", + "default": -1 + }, + { + "name": "input_quantization_min_val", + "type": "int64", + "description": "The quantization min value that was used when quantizing original data that `input` represents.\nThe purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to:\n`(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise.\nFor example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not." 
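The requantization formula quoted in the `UniformRequantize` description renders directly into NumPy. A per-tensor sketch, with `np.round` standing in for the unspecified rounding mode and illustrative sample values:

```python
import numpy as np

# output_q = clip((input_q - input_zp) * (input_scale / output_scale) + output_zp,
#                 out_min, out_max), per-tensor case.
def requantize(input_q, input_scale, input_zp, output_scale, output_zp, out_min, out_max):
    scaled = (input_q.astype(np.int32) - input_zp) * (input_scale / output_scale)
    return np.clip(np.round(scaled) + output_zp, out_min, out_max).astype(np.int8)

q32 = np.array([-5000, 0, 12000], dtype=np.int32)  # e.g. qint32 accumulator values
print(requantize(q32, input_scale=0.0001, input_zp=0,
                 output_scale=0.01, output_zp=3, out_min=-128, out_max=127))
# [-47   3 123]
```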
+ }, + { + "name": "input_quantization_max_val", + "type": "int64", + "description": "The quantization max value that was used when quantizing original data that `input` represents.\nThe purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to:\n`(Tout max)` for both narrow range and not narrow range.\nFor example, if Tin is qint8, this is set to 127." + }, + { + "name": "output_quantization_axis", + "type": "int64", + "description": "The new quantization axis to use to quantize original data that `input` represents.", + "default": -1 + }, + { + "name": "output_quantization_min_val", + "type": "int64", + "description": "The new quantization min value to quantize original data that `input` represents." + }, + { + "name": "output_quantization_max_val", + "type": "int64", + "description": "The new quantization max value to quantize original data that `input` represents." + } + ], + "inputs": [ + { + "name": "input", + "description": "Must be a Tensor of Tin.", + "typeAttr": "Tin" + }, + { + "name": "input_scales", + "description": "The float value(s) used as scale(s) when quantizing original data that `input` represents.\nMust be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).", + "type": 1 + }, + { + "name": "input_zero_points", + "description": "The int32 value(s) used as zero_point(s) when quantizing original data that `input` represents.\nSame shape condition as scales.", + "type": 3 + }, + { + "name": "output_scales", + "description": "The float value(s) to use as new scale(s) to quantize original data that `input` represents.\nMust be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).", + "type": 1 + }, + { + "name": "output_zero_points", + "description": "The int32 value(s) to use as new zero_point(s) to quantize original data that `input` represents.\nSame shape condition as scales.", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "description": "The output quantized Tensor of Tout, whose shape is same as input.", + "typeAttr": "Tout" + } + ] + }, + { + "name": "Unique", + "summary": "Finds unique elements in a 1-D tensor.", + "description": "This operation returns a tensor `y` containing all of the unique elements of `x`\nsorted in the same order that they occur in `x`; `x` does not need to be sorted.\nThis operation also returns a tensor `idx` the same size as `x` that contains\nthe index of each value of `x` in the unique output `y`. 
In other words:\n\n`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\nExamples:\n\n```\n# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\ny, idx = unique(x)\ny ==> [1, 2, 4, 7, 8]\nidx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\n```\n\n```\n# tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]\ny, idx = unique(x)\ny ==> [4, 5, 1, 2, 3]\nidx ==> [0, 1, 2, 3, 4, 4, 0, 1]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "out_idx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "description": "1-D.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "description": "1-D.", + "typeAttr": "T" + }, + { + "name": "idx", + "description": "1-D.", + "typeAttr": "out_idx" + } + ] + }, + { + "name": "UniqueDataset", + "summary": "Creates a dataset that contains the unique elements of `input_dataset`.", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "UniqueV2", + "summary": "Finds unique elements along an axis of a tensor.", + "description": "This operation returns a tensor `y` containing the unique elements\nalong the `axis` of a tensor. The returned unique elements are sorted\nin the same order as they occur along `axis` in `x`.\nThis operation also returns a tensor `idx` that is the same size as\nthe number of elements in `x` along the `axis` dimension. It\ncontains the index in the unique output `y`.\nIn other words, for a `1-D` tensor `x` with `axis = None`:\n\n`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\nFor example:\n\n```\n# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\ny, idx = unique(x)\ny ==> [1, 2, 4, 7, 8]\nidx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\n```\n\nFor a `2-D` tensor `x` with `axis = 0`:\n\n```\n# tensor 'x' is [[1, 0, 0],\n# [1, 0, 0],\n# [2, 0, 0]]\ny, idx = unique(x, axis=0)\ny ==> [[1, 0, 0],\n [2, 0, 0]]\nidx ==> [0, 0, 1]\n```\n\nFor a `2-D` tensor `x` with `axis = 1`:\n\n```\n# tensor 'x' is [[1, 0, 0],\n# [1, 0, 0],\n# [2, 0, 0]]\ny, idx = unique(x, axis=1)\ny ==> [[1, 0],\n [1, 0],\n [2, 0]]\nidx ==> [0, 1, 1]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Taxis", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + }, + { + "name": "out_idx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "description": "A `Tensor`.", + "typeAttr": "T" + }, + { + "name": "axis", + "description": "A `Tensor` of type `int32` (default: None). The axis of the Tensor to\nfind the unique elements.", + "typeAttr": "Taxis" + } + ], + "outputs": [ + { + "name": "y", + "description": "A `Tensor`. Unique elements along the `axis` of `Tensor` x.", + "typeAttr": "T" + }, + { + "name": "idx", + "description": "A 1-D Tensor. 
Has the same type as x that contains the index of each\nvalue of x in the output y.", + "typeAttr": "out_idx" + } + ] + }, + { + "name": "UniqueWithCounts", + "summary": "Finds unique elements in a 1-D tensor.", + "description": "This operation returns a tensor `y` containing all of the unique elements of `x`\nsorted in the same order that they occur in `x`. This operation also returns a\ntensor `idx` the same size as `x` that contains the index of each value of `x`\nin the unique output `y`. Finally, it returns a third tensor `count` that\ncontains the count of each element of `y` in `x`. In other words:\n\n`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\nFor example:\n\n```\n# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\ny, idx, count = unique_with_counts(x)\ny ==> [1, 2, 4, 7, 8]\nidx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\ncount ==> [2, 1, 3, 1, 2]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "out_idx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "description": "1-D.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "description": "1-D.", + "typeAttr": "T" + }, + { + "name": "idx", + "description": "1-D.", + "typeAttr": "out_idx" + }, + { + "name": "count", + "description": "1-D.", + "typeAttr": "out_idx" + } + ] + }, + { + "name": "UniqueWithCountsV2", + "summary": "Finds unique elements along an axis of a tensor.", + "description": "This operation returns a tensor `y` containing the unique elements\nalong the `axis` of a tensor. The returned unique elements are sorted\nin the same order as they occur along `axis` in `x`.\nThis operation also returns a tensor `idx` and a tensor `count`\nthat are the same size as the number of elements in `x` along the\n`axis` dimension. The `idx` contains the index in the unique output `y`\nand the `count` contains the count in the unique output `y`.\nIn other words, for a `1-D` tensor `x` with `axis = None`:\n\n`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\nFor example:\n\n```\nx = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])\ny, idx, count = tf.raw_ops.UniqueWithCountsV2(x=x, axis = [0])\ny ==> [1, 2, 4, 7, 8]\nidx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\ncount ==> [2, 1, 3, 1, 2]\n```\n\nFor a `2-D` tensor `x` with `axis = 0`:\n\n```\nx = tf.constant([[1, 0, 0],\n [1, 0, 0],\n [2, 0, 0]])\ny, idx, count = tf.raw_ops.UniqueWithCountsV2(x=x, axis=[0])\ny ==> [[1, 0, 0],\n [2, 0, 0]]\nidx ==> [0, 0, 1]\ncount ==> [2, 1]\n```\n\nFor a `2-D` tensor `x` with `axis = 1`:\n\n```\nx = tf.constant([[1, 0, 0],\n [1, 0, 0],\n [2, 0, 0]])\ny, idx, count = tf.raw_ops.UniqueWithCountsV2(x=x, axis=[1])\ny ==> [[1, 0],\n [1, 0],\n [2, 0]]\nidx ==> [0, 1, 1]\ncount ==> [1, 2]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "Taxis", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 9 + } + }, + { + "name": "out_idx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "x", + "description": "A `Tensor`.", + "typeAttr": "T" + }, + { + "name": "axis", + "description": "A `Tensor` of type `int32` (default: None). The axis of the Tensor to\nfind the unique elements.", + "typeAttr": "Taxis" + } + ], + "outputs": [ + { + "name": "y", + "description": "A `Tensor`. 
Unique elements along the `axis` of `Tensor` x.", + "typeAttr": "T" + }, + { + "name": "idx", + "description": "A 1-D Tensor. Has the same type as x that contains the index of each\nvalue of x in the output y.", + "typeAttr": "out_idx" + }, + { + "name": "count", + "description": "A 1-D Tensor. The count of each value of x in the output y.", + "typeAttr": "out_idx" + } + ] + }, + { + "name": "Unpack", + "summary": "Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.", + "description": "Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.\nFor example, given a tensor of shape `(A, B, C, D)`;\n\nIf `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`\n and each tensor in `output` will have shape `(B, C, D)`. (Note that the\n dimension unpacked along is gone, unlike `split`).\n\nIf `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`\n and each tensor in `output` will have shape `(A, C, D)`.\nEtc.\n\nThis is the opposite of `pack`.", + "attributes": [ + { + "name": "num", + "type": "int64", + "minimum": 0 + }, + { + "name": "T", + "type": "type" + }, + { + "name": "axis", + "type": "int64", + "description": "Dimension along which to unpack. Negative values wrap around, so the\nvalid range is `[-R, R)`.", + "default": 0 + } + ], + "inputs": [ + { + "name": "value", + "description": "1-D or higher, with `axis` dimension size equal to `num`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "The list of tensors unpacked from `value`.", + "numberAttr": "num", + "typeAttr": "T" + } + ] + }, + { + "name": "UnravelIndex", + "summary": "Converts an array of flat indices into a tuple of coordinate arrays.", + "description": "\nExample:\n\n```\ny = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])\n# 'dims' represent a hypothetical (3, 3) tensor of indices:\n# [[0, 1, *2*],\n# [3, 4, *5*],\n# [6, *7*, 8]]\n# For each entry from 'indices', this operation returns\n# its coordinates (marked with '*'), such as\n# 2 ==> (0, 2)\n# 5 ==> (1, 2)\n# 7 ==> (2, 1)\ny ==> [[0, 1, 2], [2, 2, 1]]\n```\n\n@compatibility(numpy)\nEquivalent to np.unravel_index\n@end_compatibility", + "attributes": [ + { + "name": "Tidx", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "indices", + "description": "An 0-D or 1-D `int` Tensor whose elements are indices into the\nflattened version of an array of dimensions dims.", + "typeAttr": "Tidx" + }, + { + "name": "dims", + "description": "An 1-D `int` Tensor. The shape of the array to use for unraveling\nindices.", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "description": "An 2-D (or 1-D if indices is 0-D) tensor where each row has the\nsame shape as the indices array.", + "typeAttr": "Tidx" + } + ] + }, + { + "name": "UnsortedSegmentJoin", + "attributes": [ + { + "name": "separator", + "type": "string", + "default": "" + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
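The `Unique*` entries above map closely onto `np.unique`, except that `np.unique` sorts its output while these ops keep first-occurrence order. A NumPy sketch reproducing the second `Unique` example (the reordering trick is illustrative, not how TF implements it):

```python
import numpy as np

# np.unique sorts its result; re-order by first occurrence to match tf Unique.
x = np.array([4, 5, 1, 2, 3, 3, 4, 5])
_, first, inv, count = np.unique(x, return_index=True, return_inverse=True,
                                 return_counts=True)
order = np.argsort(first)                  # sorted-unique positions, in input order
y = x[np.sort(first)]                      # unique values, first-occurrence order
rank = np.empty_like(order)
rank[order] = np.arange(len(order))        # sorted-unique index -> position in y
idx = rank[inv]                            # index of each x[i] in y
print(y)             # [4 5 1 2 3]
print(idx)           # [0 1 2 3 4 4 0 1]
print(count[order])  # counts aligned with y: [2 2 1 2 2]
```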
+ }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "inputs", + "type": 7 + }, + { + "name": "segment_ids", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "type": 7 + } + ] + }, + { + "name": "UnsortedSegmentMax", + "summary": "Computes the maximum along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nThis operator is similar to `tf.math.unsorted_segment_sum`,\nInstead of computing the sum over segments, it computes the maximum such that:\n\n\\\\(output_i = \\max_{j...} data[j...]\\\\) where max is over tuples `j...` such\nthat `segment_ids[j...] == i`.\n\nIf the maximum is empty for a given segment ID `i`, it outputs the smallest\npossible value for the specific numeric type,\n`output[i] = numeric_limits::lowest()`.\n\nIf the given segment ID `i` is negative, then the corresponding value is\ndropped, and will not be included in the result.\n\nCaution: On CPU, values in `segment_ids` are always validated to be less than\n`num_segments`, and an error is thrown for out-of-bound indices. On GPU, this\ndoes not throw an error for out-of-bound indices. On Gpu, out-of-bound indices\nresult in safe but unspecified behavior, which may include ignoring\nout-of-bound indices or outputting a tensor with a 0 stored in the first\ndimension of its shape if `num_segments` is 0.\n\n
\n\nFor example:\n\n>>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])\n>>> tf.math.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2).numpy()\narray([[4, 3, 3, 4],\n [5, 6, 7, 8]], dtype=int32)\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." + }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A tensor whose shape is a prefix of `data.shape`.\nThe values must be less than `num_segments`.\n\nCaution: The values are always validated to be in range on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`.", + "typeAttr": "T" + } + ] + }, + { + "name": "UnsortedSegmentMin", + "summary": "Computes the minimum along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nThis operator is similar to `tf.math.unsorted_segment_sum`,\nInstead of computing the sum over segments, it computes the minimum such that:\n\n\\\\(output_i = \\min_{j...} data_[j...]\\\\) where min is over tuples `j...` such\nthat `segment_ids[j...] == i`.\n\nIf the minimum is empty for a given segment ID `i`, it outputs the largest\npossible value for the specific numeric type,\n`output[i] = numeric_limits::max()`.\n\nFor example:\n\n>>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])\n>>> tf.math.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2).numpy()\narray([[1, 2, 2, 1],\n [5, 6, 7, 8]], dtype=int32)\n\nIf the given segment ID `i` is negative, then the corresponding value is\ndropped, and will not be included in the result.\n\nCaution: On CPU, values in `segment_ids` are always validated to be less than\n`num_segments`, and an error is thrown for out-of-bound indices. On GPU, this\ndoes not throw an error for out-of-bound indices. On Gpu, out-of-bound indices\nresult in safe but unspecified behavior, which may include ignoring\nout-of-bound indices or outputting a tensor with a 0 stored in the first\ndimension of its shape if `num_segments` is 0.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
+ }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A tensor whose shape is a prefix of `data.shape`.\nThe values must be less than `num_segments`.\n\nCaution: The values are always validated to be in range on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`.", + "typeAttr": "T" + } + ] + }, + { + "name": "UnsortedSegmentProd", + "summary": "Computes the product along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nThis operator is similar to `tf.math.unsorted_segment_sum`,\nInstead of computing the sum over segments, it computes the product of all\nentries belonging to a segment such that:\n\n\\\\(output_i = \\prod_{j...} data[j...]\\\\) where the product is over tuples\n`j...` such that `segment_ids[j...] == i`.\n\nFor example:\n\n>>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])\n>>> tf.math.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2).numpy()\narray([[4, 6, 6, 4],\n [5, 6, 7, 8]], dtype=int32)\n\nIf there is no entry for a given segment ID `i`, it outputs 1.\n\nIf the given segment ID `i` is negative, then the corresponding value is\ndropped, and will not be included in the result.\nCaution: On CPU, values in `segment_ids` are always validated to be less than\n`num_segments`, and an error is thrown for out-of-bound indices. On GPU, this\ndoes not throw an error for out-of-bound indices. On Gpu, out-of-bound indices\nresult in safe but unspecified behavior, which may include ignoring\nout-of-bound indices or outputting a tensor with a 0 stored in the first\ndimension of its shape if `num_segments` is 0.\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`." 
+ }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A tensor whose shape is a prefix of `data.shape`.\nThe values must be less than `num_segments`.\n\nCaution: The values are always validated to be in range on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`.", + "typeAttr": "T" + } + ] + }, + { + "name": "UnsortedSegmentSum", + "summary": "Computes the sum along segments of a tensor.", + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output[i] = \\sum_{j...} data[j...]\\\\) where the sum is over tuples `j...` such\nthat `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`\nneed not be sorted and need not cover all values in the full\nrange of valid values.\n\nIf the sum is empty for a given segment ID `i`, `output[i] = 0`.\nIf the given segment ID `i` is negative, the value is dropped and will not be\nadded to the sum of the segment.\n\n`num_segments` should equal the number of distinct segment IDs.\n\nCaution: On CPU, values in `segment_ids` are always validated to be less than\n`num_segments`, and an error is thrown for out-of-bound indices. On GPU, this\ndoes not throw an error for out-of-bound indices. On Gpu, out-of-bound indices\nresult in safe but unspecified behavior, which may include ignoring\nout-of-bound indices or outputting a tensor with a 0 stored in the first\ndimension of its shape if `num_segments` is 0.\n\n
\n\n>>> c = [[1,2,3,4], [5,6,7,8], [4,3,2,1]]\n>>> tf.math.unsorted_segment_sum(c, [0, 1, 0], num_segments=2).numpy()\narray([[5, 5, 5, 5],\n [5, 6, 7, 8]], dtype=int32)\n\n", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`." + }, + { + "name": "Tindices", + "type": "type", + "description": "Must be one of the following: `int16`, `int32`, `int64`." + }, + { + "name": "Tnumsegments", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "name": "segment_ids", + "description": "A tensor whose shape is a prefix of `data.shape`.\nThe values must be less than `num_segments`.\n\nCaution: The values are always validated to be in range on CPU, never validated\non GPU.", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`.", + "typeAttr": "T" + } + ] + }, + { + "name": "Unstage", + "summary": "Op is similar to a lightweight Dequeue.", + "description": "The basic functionality is similar to dequeue with many fewer\ncapabilities and options. This Op is optimized for performance.", + "attributes": [ + { + "name": "capacity", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "memory_limit", + "type": "int64", + "minimum": 0, + "default": 0 + }, + { + "name": "dtypes", + "type": "type[]", + "minimum": 1 + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ] + }, + { + "name": "UnwrapDatasetVariant", + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "UpperBound", + "summary": "Applies upper_bound(sorted_search_values, values) along each row.", + "description": "Each set of rows with the same index in (sorted_inputs, values) is treated\nindependently. The resulting row is the equivalent of calling\n`np.searchsorted(sorted_inputs, values, side='right')`.\n\nThe result is not a global index to the entire\n`Tensor`, but rather just the index in the last dimension.\n\nA 2-D example:\n sorted_sequence = [[0, 3, 9, 9, 10],\n [1, 2, 3, 4, 5]]\n values = [[2, 4, 9],\n [0, 2, 6]]\n\n result = UpperBound(sorted_sequence, values)\n\n result == [[1, 2, 4],\n [0, 2, 5]]", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "sorted_inputs", + "description": "2-D Tensor where each row is ordered.", + "typeAttr": "T" + }, + { + "name": "values", + "description": "2-D Tensor with the same numbers of rows as `sorted_search_values`. 
Contains\nthe values that will be searched for in `sorted_search_values`.", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A `Tensor` with the same shape as `values`. It contains the last scalar index\ninto the last dimension where values can be inserted without changing the\nordered property.", + "typeAttr": "out_type" + } + ] + }, + { + "name": "VarHandleOp", + "summary": "Creates a handle to a Variable resource.", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "the container this variable is placed in.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "the name by which this variable is referred to.", + "default": "" + }, + { + "name": "debug_name", + "type": "string", + "description": "the user-given name, which still applies in anonymous mode.", + "default": "" + }, + { + "name": "dtype", + "type": "type", + "description": "the type of this variable. Must agree with the dtypes\nof all ops using this variable." + }, + { + "name": "shape", + "type": "shape", + "description": "The (possibly partially specified) shape of this variable." + }, + { + "name": "allowed_devices", + "type": "string[]", + "description": "DEPRECATED. The allowed devices containing the resource variable. Set when the\noutput ResourceHandle represents a per-replica/partitioned resource variable.", + "default": [] + } + ], + "outputs": [ + { + "name": "resource", + "type": 20 + } + ] + }, + { + "name": "VarIsInitializedOp", + "summary": "Checks whether a resource handle-based variable has been initialized.", + "inputs": [ + { + "name": "resource", + "description": "the input resource handle.", + "type": 20 + } + ], + "outputs": [ + { + "name": "is_initialized", + "description": "a scalar boolean which is true if the variable has been\ninitialized.", + "type": 10 + } + ] + }, + { + "name": "Variable", + "category": "Control", + "summary": "Use VariableV2 instead.", + "attributes": [ + { + "name": "shape", + "type": "shape" + }, + { + "name": "dtype", + "type": "type" + }, + { + "name": "container", + "type": "string", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "default": "" + } + ], + "outputs": [ + { + "name": "ref", + "typeAttr": "dtype", + "isRef": true + } + ] + }, + { + "name": "VariableShape", + "summary": "Returns the shape of the variable pointed to by `resource`.", + "description": "This operation returns a 1-D integer tensor representing the shape of `input`.\n\nFor example:\n\n```\n# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\nshape(t) ==> [2, 2, 3]\n```", + "attributes": [ + { + "name": "out_type", + "type": "type", + "description": "Must be one of the following: `int32`, `int64`.", + "default": { + "type": "type", + "value": 3 + } + } + ], + "inputs": [ + { + "name": "input", + "type": 20 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + } + ] + }, + { + "name": "VariableV2", + "category": "Control", + "summary": "Holds state in the form of a tensor that persists across steps.", + "description": "Outputs a ref to the tensor state so it may be read or modified.\nTODO(zhifengc/mrry): Adds a pointer to a more detail document\nabout sharing states in tensorflow.", + "attributes": [ + { + "name": "shape", + "type": "shape", + "description": "The shape of the variable tensor." + }, + { + "name": "dtype", + "type": "type", + "description": "The type of elements in the variable tensor." 
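For `UpperBound` (a few entries above), the description's 2-D example is exactly row-wise `np.searchsorted(..., side='right')`, which makes for a quick check; the snippet below is a sketch of that equivalence, not the op's implementation:

```python
import numpy as np

sorted_inputs = np.array([[0, 3, 9, 9, 10],
                          [1, 2, 3, 4, 5]])
values = np.array([[2, 4, 9],
                   [0, 2, 6]])
# Each (row, vals) pair is searched independently, matching the op's contract.
result = np.stack([np.searchsorted(row, vals, side='right')
                   for row, vals in zip(sorted_inputs, values)])
print(result)  # [[1 2 4] [0 2 5]]
```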
+ }, + { + "name": "container", + "type": "string", + "description": "If non-empty, this variable is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this variable is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "default": "" + } + ], + "outputs": [ + { + "name": "ref", + "description": "A reference to the variable tensor.", + "typeAttr": "dtype", + "isRef": true + } + ] + }, + { + "name": "Where", + "summary": "Returns locations of nonzero / true values in a tensor.", + "description": "This operation returns the coordinates of true elements in `input`. The\ncoordinates are returned in a 2-D tensor where the first dimension (rows)\nrepresents the number of true elements, and the second dimension (columns)\nrepresents the coordinates of the true elements. Keep in mind, the shape of\nthe output tensor can vary depending on how many true values there are in\n`input`. Indices are output in row-major order.\n\nFor example:\n\n```\n# 'input' tensor is [[True, False]\n# [True, False]]\n# 'input' has two true values, so output has two coordinates.\n# 'input' has rank of 2, so coordinates have two indices.\nwhere(input) ==> [[0, 0],\n [1, 0]]\n\n# `input` tensor is [[[True, False]\n# [True, False]]\n# [[False, True]\n# [False, True]]\n# [[False, False]\n# [False, True]]]\n# 'input' has 5 true values, so output has 5 coordinates.\n# 'input' has rank of 3, so coordinates have three indices.\nwhere(input) ==> [[0, 0, 0],\n [0, 1, 0],\n [1, 0, 1],\n [1, 1, 1],\n [2, 1, 1]]\n\n# `input` tensor is [[[1.5, 0.0]\n# [-0.5, 0.0]]\n# [[0.0, 0.25]\n# [0.0, 0.75]]\n# [[0.0, 0.0]\n# [0.0, 0.01]]]\n# 'input' has 5 nonzero values, so output has 5 coordinates.\n# 'input' has rank of 3, so coordinates have three indices.\nwhere(input) ==> [[0, 0, 0],\n [0, 1, 0],\n [1, 0, 1],\n [1, 1, 1],\n [2, 1, 1]]\n\n# `input` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j]\n# [0.0 + 0.5j, 0.0 + 0.0j]]\n# [[0.0 + 0.0j, 0.25 + 1.5j]\n# [0.0 + 0.0j, 0.75 + 0.0j]]\n# [[0.0 + 0.0j, 0.0 + 0.0j]\n# [0.0 + 0.0j, 0.01 + 0.0j]]]\n# 'input' has 5 nonzero magnitude values, so output has 5 coordinates.\n# 'input' has rank of 3, so coordinates have three indices.\nwhere(input) ==> [[0, 0, 0],\n [0, 1, 0],\n [1, 0, 1],\n [1, 1, 1],\n [2, 1, 1]]\n```", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`, `bool`.", + "default": { + "type": "type", + "value": 10 + } + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "index", + "type": 9 + } + ] + }, + { + "name": "While", + "summary": "output = input; While (Cond(output)) { output = Body(output) }", + "attributes": [ + { + "name": "T", + "type": "type[]", + "description": "dtype in use.", + "minimum": 0 + }, + { + "name": "cond", + "type": "function", + "description": " A function takes 'input' and returns a tensor. If the tensor is\n a scalar of non-boolean, the scalar is converted to a boolean\n according to the following rule: if the scalar is a numerical\n value, non-zero means True and zero means False; if the scalar is\n a string, non-empty means True and empty means False. 
If the\n tensor is not a scalar, non-emptiness means True and False\n otherwise." + }, + { + "name": "body", + "type": "function", + "description": " A function that takes a list of tensors and returns another\n list of tensors. Both lists have the same types as specified\n by T." + }, + { + "name": "output_shapes", + "type": "shape[]", + "default": [] + }, + { + "name": "parallel_iterations", + "type": "int64", + "default": 10 + } + ], + "inputs": [ + { + "name": "input", + "description": "A list of input tensors whose types are T.", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "description": "A list of output tensors whose types are T.", + "typeListAttr": "T" + } + ] + }, + { + "name": "WholeFileReader", + "summary": "A Reader that outputs the entire contents of a file as a value.", + "description": "To use, enqueue filenames in a Queue. The output of ReaderRead will\nbe a filename (key) and the contents of that file (value).", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "default": "" + } + ], + "outputs": [ + { + "name": "reader_handle", + "description": "The handle to reference the Reader.", + "type": 7, + "isRef": true + } + ] + }, + { + "name": "WholeFileReaderV2", + "summary": "A Reader that outputs the entire contents of a file as a value.", + "description": "To use, enqueue filenames in a Queue. The output of ReaderRead will\nbe a filename (key) and the contents of that file (value).", + "attributes": [ + { + "name": "container", + "type": "string", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "default": "" + }, + { + "name": "shared_name", + "type": "string", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "default": "" + } + ], + "outputs": [ + { + "name": "reader_handle", + "description": "The handle to reference the Reader.", + "type": 20 + } + ] + }, + { + "name": "WindowDataset", + "summary": " Combines (nests of) input elements into a dataset of (nests of) windows.\n\n A \"window\" is a finite dataset of flat elements of size `size` (or possibly\n fewer if there are not enough input elements to fill the window and\n `drop_remainder` evaluates to false).\n\n The `shift` argument determines the number of input elements by which\n the window moves on each iteration. The first element in the `k`th window\n will be element\n\n ```\n 1 + (k-1) * shift\n ```\n\n of the input dataset. In particular, the first element of the first window\n will always be the first element of the input dataset. \n\n If the `stride` parameter is greater than 1, then each window will skip\n `(stride - 1)` input elements between each element that appears in the\n window. 
Output windows will still contain `size` elements regardless of\n the value of `stride`.\n\n In short, the `shift` argument sets how far the window advances on each\n iteration, and the `stride` argument sets the spacing between the input\n elements that appear in a window.\n\n For example, letting `{...}` represent a Dataset:\n\n - `tf.data.Dataset.range(7).window(2)` produces\n `{{0, 1}, {2, 3}, {4, 5}, {6}}`\n - `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces\n `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`\n - `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces\n `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`\n\n Note that when the `window` transformation is applied to a dataset of\n nested elements, it produces a dataset of nested windows.\n\n For example:\n\n - `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`\n produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`\n - `tf.data.Dataset.from_tensor_slices({\"a\": range(4)}).window(2)`\n produces `{{\"a\": {0, 1}}, {\"a\": {2, 3}}}`", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "metadata", + "type": "string", + "default": "" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "size", + "description": "An integer scalar, representing the number of elements\nof the input dataset to combine into a window. Must be positive.", + "type": 9 + }, + { + "name": "shift", + "description": "An integer scalar, representing the number of input elements\nby which the window moves in each iteration. Defaults to `size`.\nMust be positive.", + "type": 9 + }, + { + "name": "stride", + "description": "An integer scalar, representing the stride of the input elements\nin the sliding window. Must be positive. 
The default value of 1 means\n\"retain every input element\".", + "type": 9 + }, + { + "name": "drop_remainder", + "description": "A Boolean scalar, representing whether the last window should be\ndropped if its size is smaller than `size`.", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "WindowOp", + "attributes": [ + { + "name": "output_types", + "type": "type[]", + "minimum": 1 + }, + { + "name": "output_shapes", + "type": "shape[]", + "minimum": 1 + }, + { + "name": "Tinputs", + "type": "type[]", + "minimum": 1 + } + ], + "inputs": [ + { + "name": "inputs", + "typeListAttr": "Tinputs" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + }, + { + "name": "WorkerHeartbeat", + "summary": "Worker heartbeat op.", + "description": "Heartbeats may be sent periodically to indicate the coordinator is still active,\nto retrieve the current worker status and to expedite shutdown when necessary.", + "inputs": [ + { + "name": "request", + "description": "A string tensor containing a serialized WorkerHeartbeatRequest", + "type": 7 + } + ], + "outputs": [ + { + "name": "response", + "description": "A string tensor containing a serialized WorkerHeartbeatResponse", + "type": 7 + } + ] + }, + { + "name": "WrapDatasetVariant", + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + }, + { + "name": "WriteAudioSummary", + "summary": "Writes an audio summary.", + "description": "Writes encoded audio summary `tensor` at `step` with `tag` using summary `writer`.\n`sample_rate` is the audio sample rate in Hz.", + "attributes": [ + { + "name": "max_outputs", + "type": "int64", + "minimum": 1, + "default": 3 + } + ], + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "tensor", + "type": 1 + }, + { + "name": "sample_rate", + "type": 1 + } + ] + }, + { + "name": "WriteFile", + "summary": "Writes `contents` to the file at input `filename`.", + "description": "Creates the file and recursively creates the directory if it does not exist.", + "inputs": [ + { + "name": "filename", + "description": "scalar. The name of the file to which we write the contents.", + "type": 7 + }, + { + "name": "contents", + "description": "scalar. 
The content to be written to the output file.", + "type": 7 + } + ] + }, + { + "name": "WriteGraphSummary", + "summary": "Writes a graph summary.", + "description": "Writes TensorFlow graph `tensor` at `step` using summary `writer`.", + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tensor", + "type": 7 + } + ] + }, + { + "name": "WriteHistogramSummary", + "summary": "Writes a histogram summary.", + "description": "Writes histogram `values` at `step` with `tag` using summary `writer`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`, `bool`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "values", + "typeAttr": "T" + } + ] + }, + { + "name": "WriteImageSummary", + "summary": "Writes an image summary.", + "description": "Writes image `tensor` at `step` with `tag` using summary `writer`.\n`tensor` is an image with shape [height, width, channels].", + "attributes": [ + { + "name": "max_images", + "type": "int64", + "minimum": 1, + "default": 3 + }, + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `uint8`, `float64`, `float32`, `float16`.", + "default": { + "type": "type", + "value": 1 + } + } + ], + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "tensor", + "typeAttr": "T" + }, + { + "name": "bad_color", + "type": 4 + } + ] + }, + { + "name": "WriteRawProtoSummary", + "summary": "Writes a serialized proto summary.", + "description": "Writes `tensor`, a serialized proto, at `step` using summary `writer`.", + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tensor", + "type": 7 + } + ] + }, + { + "name": "WriteScalarSummary", + "summary": "Writes a scalar summary.", + "description": "Writes scalar `value` at `step` with `tag` using summary `writer`.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`." + } + ], + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "value", + "typeAttr": "T" + } + ] + }, + { + "name": "WriteSummary", + "summary": "Writes a tensor summary.", + "description": "Writes `tensor` at `step` with `tag` using summary `writer`.", + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tensor", + "typeAttr": "T" + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "summary_metadata", + "type": 7 + } + ] + }, + { + "name": "Xdivy", + "summary": "Returns 0 if x == 0, and x / y otherwise, elementwise.", + "attributes": [ + { + "name": "T", + "type": "type", + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `complex64`, `complex128`." 
+ } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ] + }, + { + "name": "XlaConcatND", + "summary": "Concats input tensors across all dimensions.", + "description": "An op which merges slices of the input tensor based on the given num_concats\nattribute, strips paddings optionally, and returns the merged tensor without\npaddings.\n\nThis op may be generated via the TPU bridge.\n\nFor example, with `input` tensor:\n```\n[[0, 1],\n [4, 5]]\n[[2, 3],\n [6, 7]]\n[[8, 9],\n [12, 13]]\n[[10, 11],\n [14, 15]]\n```\n`num_concats`:\n```\n[2, 2]\n```\nand `paddings`:\n```\n[1, 1]\n```\nthe expected `outputs` is:\n```\n[[0, 1, 2],\n [4, 5, 6],\n [8, 9, 10]]\n```", + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "name": "N", + "type": "int64", + "minimum": 1 + }, + { + "name": "num_concats", + "type": "int64[]", + "description": "Number of ways to merge per dimension." + }, + { + "name": "paddings", + "type": "int64[]", + "description": "Optional list of right paddings per dimension to strip from the final merged\ntensor. These paddings must not exceed the dimension size of the merged result\nprior to stripping paddings.", + "default": [] + } + ], + "inputs": [ + { + "name": "inputs", + "description": "Input tensor slices in row-major order to merge across all dimensions. All\ninputs must have the same shape." +$root.tensorflow.SavedModel = class SavedModel { + + constructor() { + this.meta_graphs = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedModel(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.saved_model_schema_version = reader.int64(); + break; + case 2: + message.meta_graphs.push($root.tensorflow.MetaGraphDef.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedModel(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "saved_model_schema_version": + message.saved_model_schema_version = reader.int64(); + break; + case "meta_graphs": + message.meta_graphs.push($root.tensorflow.MetaGraphDef.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedModel.prototype.saved_model_schema_version = protobuf.Int64.create(0); + +$root.tensorflow.MetaGraphDef = class MetaGraphDef { + + constructor() { + this.collection_def = {}; + this.signature_def = {}; + this.asset_file_def = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.MetaGraphDef(); + const end = length !== undefined ? 
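+ // A note on the decode pattern used throughout this file: every binary
+ // decode() loop reads a varint "tag" that packs the proto field number and
+ // wire type together, which is why each switch below dispatches on
+ // tag >>> 3 and skips unknown fields with reader.skipType(tag & 7).
+ // Illustrative sketch of the split (reader API as used in this file):
+ //   const tag = reader.uint32();    // tag = (fieldNumber << 3) | wireType
+ //   const fieldNumber = tag >>> 3;  // selects the case label
+ //   const wireType = tag & 7;       // 0=varint, 1=64-bit, 2=length-delimited, 5=32-bit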
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.meta_info_def = $root.tensorflow.MetaGraphDef.MetaInfoDef.decode(reader, reader.uint32()); + break; + case 2: + message.graph_def = $root.tensorflow.GraphDef.decode(reader, reader.uint32()); + break; + case 3: + message.saver_def = $root.tensorflow.SaverDef.decode(reader, reader.uint32()); + break; + case 4: + reader.entry(message.collection_def, () => reader.string(), () => $root.tensorflow.CollectionDef.decode(reader, reader.uint32())); + break; + case 5: + reader.entry(message.signature_def, () => reader.string(), () => $root.tensorflow.SignatureDef.decode(reader, reader.uint32())); + break; + case 6: + message.asset_file_def.push($root.tensorflow.AssetFileDef.decode(reader, reader.uint32())); + break; + case 7: + message.object_graph_def = $root.tensorflow.SavedObjectGraph.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.MetaGraphDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "meta_info_def": + message.meta_info_def = $root.tensorflow.MetaGraphDef.MetaInfoDef.decodeText(reader); + break; + case "graph_def": + message.graph_def = $root.tensorflow.GraphDef.decodeText(reader); + break; + case "saver_def": + message.saver_def = $root.tensorflow.SaverDef.decodeText(reader); + break; + case "collection_def": + reader.entry(message.collection_def, () => reader.string(), () => $root.tensorflow.CollectionDef.decodeText(reader)); + break; + case "signature_def": + reader.entry(message.signature_def, () => reader.string(), () => $root.tensorflow.SignatureDef.decodeText(reader)); + break; + case "asset_file_def": + message.asset_file_def.push($root.tensorflow.AssetFileDef.decodeText(reader)); + break; + case "object_graph_def": + message.object_graph_def = $root.tensorflow.SavedObjectGraph.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.MetaGraphDef.prototype.meta_info_def = null; +$root.tensorflow.MetaGraphDef.prototype.graph_def = null; +$root.tensorflow.MetaGraphDef.prototype.saver_def = null; +$root.tensorflow.MetaGraphDef.prototype.object_graph_def = null; + +$root.tensorflow.MetaGraphDef.MetaInfoDef = class MetaInfoDef { + + constructor() { + this.tags = []; + this.function_aliases = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.MetaGraphDef.MetaInfoDef(); + const end = length !== undefined ? 
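+ // Map-valued fields such as MetaGraphDef.collection_def and signature_def
+ // above are wire-encoded as repeated key/value pairs; reader.entry() (as
+ // used in this file) decodes one pair and assigns it into the target
+ // object. Illustrative sketch with a string -> uint32 map:
+ //   const counts = {};
+ //   reader.entry(counts, () => reader.string(), () => reader.uint32());
+ //   // after one call, counts holds one decoded { key: value } entry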
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.meta_graph_version = reader.string(); + break; + case 2: + message.stripped_op_list = $root.tensorflow.OpList.decode(reader, reader.uint32()); + break; + case 3: + message.any_info = $root.google.protobuf.Any.decode(reader, reader.uint32()); + break; + case 4: + message.tags.push(reader.string()); + break; + case 5: + message.tensorflow_version = reader.string(); + break; + case 6: + message.tensorflow_git_version = reader.string(); + break; + case 7: + message.stripped_default_attrs = reader.bool(); + break; + case 8: + reader.entry(message.function_aliases, () => reader.string(), () => reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.MetaGraphDef.MetaInfoDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "meta_graph_version": + message.meta_graph_version = reader.string(); + break; + case "stripped_op_list": + message.stripped_op_list = $root.tensorflow.OpList.decodeText(reader); + break; + case "any_info": + message.any_info = $root.google.protobuf.Any.decodeText(reader); + break; + case "tags": + reader.array(message.tags, () => reader.string()); + break; + case "tensorflow_version": + message.tensorflow_version = reader.string(); + break; + case "tensorflow_git_version": + message.tensorflow_git_version = reader.string(); + break; + case "stripped_default_attrs": + message.stripped_default_attrs = reader.bool(); + break; + case "function_aliases": + reader.entry(message.function_aliases, () => reader.string(), () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.MetaGraphDef.MetaInfoDef.prototype.meta_graph_version = ""; +$root.tensorflow.MetaGraphDef.MetaInfoDef.prototype.stripped_op_list = null; +$root.tensorflow.MetaGraphDef.MetaInfoDef.prototype.any_info = null; +$root.tensorflow.MetaGraphDef.MetaInfoDef.prototype.tensorflow_version = ""; +$root.tensorflow.MetaGraphDef.MetaInfoDef.prototype.tensorflow_git_version = ""; +$root.tensorflow.MetaGraphDef.MetaInfoDef.prototype.stripped_default_attrs = false; + +$root.tensorflow.CollectionDef = class CollectionDef { + + constructor() { + } + + get kind() { + $root.tensorflow.CollectionDef.kindSet = $root.tensorflow.CollectionDef.kindSet || new Set([ "node_list", "bytes_list", "int64_list", "float_list", "any_list"]); + return Object.keys(this).find((key) => $root.tensorflow.CollectionDef.kindSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.tensorflow.CollectionDef(); + const end = length !== undefined ? 
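+ // The `kind` getter on CollectionDef above emulates a proto oneof: decoded
+ // fields become own properties of the instance, while defaults live on the
+ // prototype, so Object.keys(this) reports only fields that were actually
+ // set. Sketch of consuming it (names as defined in this file):
+ //   const collection = $root.tensorflow.CollectionDef.decode(reader, size);
+ //   switch (collection.kind) {
+ //     case "node_list": /* collection.node_list.value: string[] */ break;
+ //     case "bytes_list": /* collection.bytes_list.value: bytes[] */ break;
+ //   }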
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.node_list = $root.tensorflow.CollectionDef.NodeList.decode(reader, reader.uint32()); + break; + case 2: + message.bytes_list = $root.tensorflow.CollectionDef.BytesList.decode(reader, reader.uint32()); + break; + case 3: + message.int64_list = $root.tensorflow.CollectionDef.Int64List.decode(reader, reader.uint32()); + break; + case 4: + message.float_list = $root.tensorflow.CollectionDef.FloatList.decode(reader, reader.uint32()); + break; + case 5: + message.any_list = $root.tensorflow.CollectionDef.AnyList.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CollectionDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "node_list": + message.node_list = $root.tensorflow.CollectionDef.NodeList.decodeText(reader); + break; + case "bytes_list": + message.bytes_list = $root.tensorflow.CollectionDef.BytesList.decodeText(reader); + break; + case "int64_list": + message.int64_list = $root.tensorflow.CollectionDef.Int64List.decodeText(reader); + break; + case "float_list": + message.float_list = $root.tensorflow.CollectionDef.FloatList.decodeText(reader); + break; + case "any_list": + message.any_list = $root.tensorflow.CollectionDef.AnyList.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CollectionDef.NodeList = class NodeList { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.CollectionDef.NodeList(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CollectionDef.NodeList(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CollectionDef.BytesList = class BytesList { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.CollectionDef.BytesList(); + const end = length !== undefined ? 
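+ // Each message class carries two readers: decode() consumes binary wire
+ // data keyed by numeric field number, while decodeText() consumes the
+ // text-format proto (e.g. a *.pbtxt graph) keyed by field name. Sketch of
+ // the split, with openBinaryReader/openTextReader as hypothetical helpers
+ // not defined in this file:
+ //   const list = isBinary
+ //     ? $root.tensorflow.CollectionDef.BytesList.decode(openBinaryReader(data))
+ //     : $root.tensorflow.CollectionDef.BytesList.decodeText(openTextReader(data));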
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value.push(reader.bytes()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CollectionDef.BytesList(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.bytes()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CollectionDef.Int64List = class Int64List { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.CollectionDef.Int64List(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.array(message.value, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CollectionDef.Int64List(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CollectionDef.FloatList = class FloatList { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.CollectionDef.FloatList(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.floats(message.value, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CollectionDef.FloatList(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.float()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CollectionDef.AnyList = class AnyList { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.CollectionDef.AnyList(); + const end = length !== undefined ? 
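+ // Repeated scalars such as Int64List.value and FloatList.value can arrive
+ // packed (one length-delimited block) or unpacked (one tagged value at a
+ // time); passing the current tag into reader.array()/reader.floats() lets
+ // the reader pick the right path, roughly:
+ //   // wire type 2: read a length, then values back-to-back until it is used up
+ //   // wire type 0/5: read a single value and append it to the array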
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value.push($root.google.protobuf.Any.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CollectionDef.AnyList(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.anyarray(message.value, () => new $root.google.protobuf.Any()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorInfo = class TensorInfo { + + constructor() { + } + + get encoding() { + $root.tensorflow.TensorInfo.encodingSet = $root.tensorflow.TensorInfo.encodingSet || new Set([ "name", "coo_sparse", "composite_tensor"]); + return Object.keys(this).find((key) => $root.tensorflow.TensorInfo.encodingSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.tensorflow.TensorInfo(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 4: + message.coo_sparse = $root.tensorflow.TensorInfo.CooSparse.decode(reader, reader.uint32()); + break; + case 5: + message.composite_tensor = $root.tensorflow.TensorInfo.CompositeTensor.decode(reader, reader.uint32()); + break; + case 2: + message.dtype = reader.int32(); + break; + case 3: + message.tensor_shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TensorInfo(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "coo_sparse": + message.coo_sparse = $root.tensorflow.TensorInfo.CooSparse.decodeText(reader); + break; + case "composite_tensor": + message.composite_tensor = $root.tensorflow.TensorInfo.CompositeTensor.decodeText(reader); + break; + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "tensor_shape": + message.tensor_shape = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorInfo.prototype.dtype = 0; +$root.tensorflow.TensorInfo.prototype.tensor_shape = null; + +$root.tensorflow.TensorInfo.CooSparse = class CooSparse { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.TensorInfo.CooSparse(); + const end = length !== undefined ? 
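+ // TensorInfo above names a tensor in one of three encodings: a plain graph
+ // tensor name, a COO sparse triple of tensor names, or a composite tensor
+ // (a TypeSpec plus component TensorInfos). Sketch of dispatching on it,
+ // with lookupTensor() as a hypothetical graph lookup:
+ //   const info = signatureDef.inputs["images"];
+ //   if (info.encoding === "name") {
+ //     const tensor = lookupTensor(info.name);
+ //   } else if (info.encoding === "coo_sparse") {
+ //     // info.coo_sparse.{values,indices,dense_shape}_tensor_name
+ //   }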
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values_tensor_name = reader.string(); + break; + case 2: + message.indices_tensor_name = reader.string(); + break; + case 3: + message.dense_shape_tensor_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TensorInfo.CooSparse(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values_tensor_name": + message.values_tensor_name = reader.string(); + break; + case "indices_tensor_name": + message.indices_tensor_name = reader.string(); + break; + case "dense_shape_tensor_name": + message.dense_shape_tensor_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorInfo.CooSparse.prototype.values_tensor_name = ""; +$root.tensorflow.TensorInfo.CooSparse.prototype.indices_tensor_name = ""; +$root.tensorflow.TensorInfo.CooSparse.prototype.dense_shape_tensor_name = ""; + +$root.tensorflow.TensorInfo.CompositeTensor = class CompositeTensor { + + constructor() { + this.components = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.TensorInfo.CompositeTensor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type_spec = $root.tensorflow.TypeSpecProto.decode(reader, reader.uint32()); + break; + case 2: + message.components.push($root.tensorflow.TensorInfo.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TensorInfo.CompositeTensor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type_spec": + message.type_spec = $root.tensorflow.TypeSpecProto.decodeText(reader); + break; + case "components": + message.components.push($root.tensorflow.TensorInfo.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorInfo.CompositeTensor.prototype.type_spec = null; + +$root.tensorflow.SignatureDef = class SignatureDef { + + constructor() { + this.inputs = {}; + this.outputs = {}; + this.defaults = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.SignatureDef(); + const end = length !== undefined ? 
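+ // A SignatureDef maps logical input/output names to TensorInfo entries and
+ // records the serving method name. Sketch of walking a decoded signature
+ // (sig assumed to come from SignatureDef.decode):
+ //   for (const [name, info] of Object.entries(sig.inputs)) {
+ //     console.log(name, info.dtype, info.tensor_shape);
+ //   }
+ //   console.log(sig.method_name); // e.g. "tensorflow/serving/predict"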
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.inputs, () => reader.string(), () => $root.tensorflow.TensorInfo.decode(reader, reader.uint32())); + break; + case 2: + reader.entry(message.outputs, () => reader.string(), () => $root.tensorflow.TensorInfo.decode(reader, reader.uint32())); + break; + case 3: + message.method_name = reader.string(); + break; + case 4: + reader.entry(message.defaults, () => reader.string(), () => $root.tensorflow.TensorProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SignatureDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "inputs": + reader.entry(message.inputs, () => reader.string(), () => $root.tensorflow.TensorInfo.decodeText(reader)); + break; + case "outputs": + reader.entry(message.outputs, () => reader.string(), () => $root.tensorflow.TensorInfo.decodeText(reader)); + break; + case "method_name": + message.method_name = reader.string(); + break; + case "defaults": + reader.entry(message.defaults, () => reader.string(), () => $root.tensorflow.TensorProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SignatureDef.prototype.method_name = ""; + +$root.tensorflow.AssetFileDef = class AssetFileDef { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.AssetFileDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor_info = $root.tensorflow.TensorInfo.decode(reader, reader.uint32()); + break; + case 2: + message.filename = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.AssetFileDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "tensor_info": + message.tensor_info = $root.tensorflow.TensorInfo.decodeText(reader); + break; + case "filename": + message.filename = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.AssetFileDef.prototype.tensor_info = null; +$root.tensorflow.AssetFileDef.prototype.filename = ""; + +$root.tensorflow.GraphDef = class GraphDef { + + constructor() { + this.node = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.GraphDef(); + const end = length !== undefined ? 
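+ // GraphDef is the core graph container: repeated NodeDef entries plus the
+ // function library and version info decoded below. Sketch of a pass over a
+ // decoded graph (field names as decoded in this file):
+ //   for (const node of graphDef.node) {
+ //     console.log(node.name, node.op, node.input.join(", "));
+ //   }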
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.node.push($root.tensorflow.NodeDef.decode(reader, reader.uint32())); + break; + case 4: + message.versions = $root.tensorflow.VersionDef.decode(reader, reader.uint32()); + break; + case 3: + message.version = reader.int32(); + break; + case 2: + message.library = $root.tensorflow.FunctionDefLibrary.decode(reader, reader.uint32()); + break; + case 5: + message.debug_info = $root.tensorflow.GraphDebugInfo.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.GraphDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "node": + message.node.push($root.tensorflow.NodeDef.decodeText(reader)); + break; + case "versions": + message.versions = $root.tensorflow.VersionDef.decodeText(reader); + break; + case "version": + message.version = reader.int32(); + break; + case "library": + message.library = $root.tensorflow.FunctionDefLibrary.decodeText(reader); + break; + case "debug_info": + message.debug_info = $root.tensorflow.GraphDebugInfo.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.GraphDef.prototype.versions = null; +$root.tensorflow.GraphDef.prototype.version = 0; +$root.tensorflow.GraphDef.prototype.library = null; +$root.tensorflow.GraphDef.prototype.debug_info = null; + +$root.tensorflow.FunctionDefLibrary = class FunctionDefLibrary { + + constructor() { + this["function"] = []; + this.gradient = []; + this.registered_gradients = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.FunctionDefLibrary(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message["function"].push($root.tensorflow.FunctionDef.decode(reader, reader.uint32())); + break; + case 2: + message.gradient.push($root.tensorflow.GradientDef.decode(reader, reader.uint32())); + break; + case 3: + message.registered_gradients.push($root.tensorflow.RegisteredGradient.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.FunctionDefLibrary(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "function": + message["function"].push($root.tensorflow.FunctionDef.decodeText(reader)); + break; + case "gradient": + message.gradient.push($root.tensorflow.GradientDef.decodeText(reader)); + break; + case "registered_gradients": + message.registered_gradients.push($root.tensorflow.RegisteredGradient.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.FunctionDef = class FunctionDef { + + constructor() { + this.attr = {}; + this.arg_attr = {}; + this.resource_arg_unique_id = {}; + this.node_def = []; + this.ret = {}; + this.control_ret = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.FunctionDef(); + const end = length !== undefined ? 
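+ // A FunctionDef packages a subgraph: `signature` is an OpDef describing its
+ // interface, `node_def` is the body, and `ret` maps each output argument
+ // name to the internal "node:output" tensor that produces it. Sketch (note
+ // the bracket access for the "function" field):
+ //   const fn = library["function"][0];
+ //   for (const [outName, tensorName] of Object.entries(fn.ret)) {
+ //     console.log(outName, "<-", tensorName); // e.g. y <- add_node:0
+ //   }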
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.signature = $root.tensorflow.OpDef.decode(reader, reader.uint32()); + break; + case 5: + reader.entry(message.attr, () => reader.string(), () => $root.tensorflow.AttrValue.decode(reader, reader.uint32())); + break; + case 7: + reader.entry(message.arg_attr, () => reader.uint32(), () => $root.tensorflow.FunctionDef.ArgAttrs.decode(reader, reader.uint32())); + break; + case 8: + reader.entry(message.resource_arg_unique_id, () => reader.uint32(), () => reader.uint32()); + break; + case 3: + message.node_def.push($root.tensorflow.NodeDef.decode(reader, reader.uint32())); + break; + case 4: + reader.entry(message.ret, () => reader.string(), () => reader.string()); + break; + case 6: + reader.entry(message.control_ret, () => reader.string(), () => reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.FunctionDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "signature": + message.signature = $root.tensorflow.OpDef.decodeText(reader); + break; + case "attr": + reader.entry(message.attr, () => reader.string(), () => $root.tensorflow.AttrValue.decodeText(reader)); + break; + case "arg_attr": + reader.entry(message.arg_attr, () => reader.uint32(), () => $root.tensorflow.FunctionDef.ArgAttrs.decodeText(reader)); + break; + case "resource_arg_unique_id": + reader.entry(message.resource_arg_unique_id, () => reader.uint32(), () => reader.uint32()); + break; + case "node_def": + message.node_def.push($root.tensorflow.NodeDef.decodeText(reader)); + break; + case "ret": + reader.entry(message.ret, () => reader.string(), () => reader.string()); + break; + case "control_ret": + reader.entry(message.control_ret, () => reader.string(), () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.FunctionDef.prototype.signature = null; + +$root.tensorflow.FunctionDef.ArgAttrs = class ArgAttrs { + + constructor() { + this.attr = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.FunctionDef.ArgAttrs(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.attr, () => reader.string(), () => $root.tensorflow.AttrValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.FunctionDef.ArgAttrs(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "attr": + reader.entry(message.attr, () => reader.string(), () => $root.tensorflow.AttrValue.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.GradientDef = class GradientDef { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.GradientDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.function_name = reader.string(); + break; + case 2: + message.gradient_func = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.GradientDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "function_name": + message.function_name = reader.string(); + break; + case "gradient_func": + message.gradient_func = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.GradientDef.prototype.function_name = ""; +$root.tensorflow.GradientDef.prototype.gradient_func = ""; + +$root.tensorflow.RegisteredGradient = class RegisteredGradient { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.RegisteredGradient(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gradient_func = reader.string(); + break; + case 2: + message.registered_op_type = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.RegisteredGradient(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "gradient_func": + message.gradient_func = reader.string(); + break; + case "registered_op_type": + message.registered_op_type = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RegisteredGradient.prototype.gradient_func = ""; +$root.tensorflow.RegisteredGradient.prototype.registered_op_type = ""; + +$root.tensorflow.AttrValue = class AttrValue { + + constructor() { + } + + get value() { + $root.tensorflow.AttrValue.valueSet = $root.tensorflow.AttrValue.valueSet || new Set([ "s", "i", "f", "b", "type", "shape", "tensor", "list", "func", "placeholder"]); + return Object.keys(this).find((key) => $root.tensorflow.AttrValue.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.tensorflow.AttrValue(); + const end = length !== undefined ? 
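+ // AttrValue is the catch-all node-attribute container: exactly one of
+ // s/i/f/b/type/shape/tensor/list/func/placeholder is set, and the `value`
+ // getter above reports which one. Sketch of reading a dtype attribute from
+ // a decoded NodeDef:
+ //   const attr = node.attr["dtype"];
+ //   if (attr && attr.value === "type") {
+ //     console.log(attr.type); // numeric $root.tensorflow.DataType value
+ //   }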
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.s = reader.bytes(); + break; + case 3: + message.i = reader.int64(); + break; + case 4: + message.f = reader.float(); + break; + case 5: + message.b = reader.bool(); + break; + case 6: + message.type = reader.int32(); + break; + case 7: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 8: + message.tensor = $root.tensorflow.TensorProto.decode(reader, reader.uint32()); + break; + case 1: + message.list = $root.tensorflow.AttrValue.ListValue.decode(reader, reader.uint32()); + break; + case 10: + message.func = $root.tensorflow.NameAttrList.decode(reader, reader.uint32()); + break; + case 9: + message.placeholder = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.AttrValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "s": + message.s = reader.bytes(); + break; + case "i": + message.i = reader.int64(); + break; + case "f": + message.f = reader.float(); + break; + case "b": + message.b = reader.bool(); + break; + case "type": + message.type = reader.enum($root.tensorflow.DataType); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + case "tensor": + message.tensor = $root.tensorflow.TensorProto.decodeText(reader); + break; + case "list": + message.list = $root.tensorflow.AttrValue.ListValue.decodeText(reader); + break; + case "func": + message.func = $root.tensorflow.NameAttrList.decodeText(reader); + break; + case "placeholder": + message.placeholder = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.AttrValue.ListValue = class ListValue { + + constructor() { + this.s = []; + this.i = []; + this.f = []; + this.b = []; + this.type = []; + this.shape = []; + this.tensor = []; + this.func = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.AttrValue.ListValue(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.s.push(reader.bytes()); + break; + case 3: + message.i = reader.array(message.i, () => reader.int64(), tag); + break; + case 4: + message.f = reader.floats(message.f, tag); + break; + case 5: + message.b = reader.array(message.b, () => reader.bool(), tag); + break; + case 6: + message.type = reader.array(message.type, () => reader.int32(), tag); + break; + case 7: + message.shape.push($root.tensorflow.TensorShapeProto.decode(reader, reader.uint32())); + break; + case 8: + message.tensor.push($root.tensorflow.TensorProto.decode(reader, reader.uint32())); + break; + case 9: + message.func.push($root.tensorflow.NameAttrList.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.AttrValue.ListValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "s": + reader.array(message.s, () => reader.bytes()); + break; + case "i": + reader.array(message.i, () => reader.int64()); + break; + case "f": + reader.array(message.f, () => reader.float()); + break; + case "b": + reader.array(message.b, () => reader.bool()); + break; + case "type": + reader.array(message.type, () => reader.enum($root.tensorflow.DataType)); + break; + case "shape": + message.shape.push($root.tensorflow.TensorShapeProto.decodeText(reader)); + break; + case "tensor": + message.tensor.push($root.tensorflow.TensorProto.decodeText(reader)); + break; + case "func": + message.func.push($root.tensorflow.NameAttrList.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.NameAttrList = class NameAttrList { + + constructor() { + this.attr = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.NameAttrList(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + reader.entry(message.attr, () => reader.string(), () => $root.tensorflow.AttrValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.NameAttrList(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "attr": + reader.entry(message.attr, () => reader.string(), () => $root.tensorflow.AttrValue.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.NameAttrList.prototype.name = ""; + +$root.tensorflow.TensorProto = class TensorProto { + + constructor() { + this.half_val = []; + this.float_val = []; + this.double_val = []; + this.int_val = []; + this.string_val = []; + this.scomplex_val = []; + this.int64_val = []; + this.bool_val = []; + this.dcomplex_val = []; + this.resource_handle_val = []; + this.variant_val = []; + this.uint32_val = []; + this.uint64_val = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.TensorProto(); + const end = length !== undefined ? 
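+ // TensorProto stores payloads either as raw bytes in tensor_content or in
+ // one of the typed *_val arrays initialized above, depending on dtype and
+ // how the producer serialized it. Sketch of recovering float32 data
+ // (assumes little-endian layout and 4-byte alignment of tensor_content):
+ //   const floats = t.float_val.length > 0
+ //     ? t.float_val
+ //     : Array.from(new Float32Array(
+ //         t.tensor_content.buffer, t.tensor_content.byteOffset,
+ //         t.tensor_content.byteLength / 4));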
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dtype = reader.int32(); + break; + case 2: + message.tensor_shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.version_number = reader.int32(); + break; + case 4: + message.tensor_content = reader.bytes(); + break; + case 13: + message.half_val = reader.array(message.half_val, () => reader.int32(), tag); + break; + case 5: + message.float_val = reader.floats(message.float_val, tag); + break; + case 6: + message.double_val = reader.doubles(message.double_val, tag); + break; + case 7: + message.int_val = reader.array(message.int_val, () => reader.int32(), tag); + break; + case 8: + message.string_val.push(reader.bytes()); + break; + case 9: + message.scomplex_val = reader.floats(message.scomplex_val, tag); + break; + case 10: + message.int64_val = reader.array(message.int64_val, () => reader.int64(), tag); + break; + case 11: + message.bool_val = reader.array(message.bool_val, () => reader.bool(), tag); + break; + case 12: + message.dcomplex_val = reader.doubles(message.dcomplex_val, tag); + break; + case 14: + message.resource_handle_val.push($root.tensorflow.ResourceHandleProto.decode(reader, reader.uint32())); + break; + case 15: + message.variant_val.push($root.tensorflow.VariantTensorDataProto.decode(reader, reader.uint32())); + break; + case 16: + message.uint32_val = reader.array(message.uint32_val, () => reader.uint32(), tag); + break; + case 17: + message.uint64_val = reader.array(message.uint64_val, () => reader.uint64(), tag); + break; + case 18: + message.float8_val = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TensorProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "tensor_shape": + message.tensor_shape = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + case "version_number": + message.version_number = reader.int32(); + break; + case "tensor_content": + message.tensor_content = reader.bytes(); + break; + case "half_val": + reader.array(message.half_val, () => reader.int32()); + break; + case "float_val": + reader.array(message.float_val, () => reader.float()); + break; + case "double_val": + reader.array(message.double_val, () => reader.double()); + break; + case "int_val": + reader.array(message.int_val, () => reader.int32()); + break; + case "string_val": + reader.array(message.string_val, () => reader.bytes()); + break; + case "scomplex_val": + reader.array(message.scomplex_val, () => reader.float()); + break; + case "int64_val": + reader.array(message.int64_val, () => reader.int64()); + break; + case "bool_val": + reader.array(message.bool_val, () => reader.bool()); + break; + case "dcomplex_val": + reader.array(message.dcomplex_val, () => reader.double()); + break; + case "resource_handle_val": + message.resource_handle_val.push($root.tensorflow.ResourceHandleProto.decodeText(reader)); + break; + case "variant_val": + message.variant_val.push($root.tensorflow.VariantTensorDataProto.decodeText(reader)); + break; + case "uint32_val": + reader.array(message.uint32_val, () => reader.uint32()); + break; + case "uint64_val": + reader.array(message.uint64_val, () => reader.uint64()); + break; + case 
"float8_val": + message.float8_val = reader.bytes(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorProto.prototype.dtype = 0; +$root.tensorflow.TensorProto.prototype.tensor_shape = null; +$root.tensorflow.TensorProto.prototype.version_number = 0; +$root.tensorflow.TensorProto.prototype.tensor_content = new Uint8Array([]); +$root.tensorflow.TensorProto.prototype.float8_val = new Uint8Array([]); + +$root.tensorflow.VariantTensorDataProto = class VariantTensorDataProto { + + constructor() { + this.tensors = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.VariantTensorDataProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type_name = reader.string(); + break; + case 2: + message.metadata = reader.bytes(); + break; + case 3: + message.tensors.push($root.tensorflow.TensorProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.VariantTensorDataProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type_name": + message.type_name = reader.string(); + break; + case "metadata": + message.metadata = reader.bytes(); + break; + case "tensors": + message.tensors.push($root.tensorflow.TensorProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.VariantTensorDataProto.prototype.type_name = ""; +$root.tensorflow.VariantTensorDataProto.prototype.metadata = new Uint8Array([]); + +$root.tensorflow.ResourceHandleProto = class ResourceHandleProto { + + constructor() { + this.dtypes_and_shapes = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.ResourceHandleProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.device = reader.string(); + break; + case 2: + message.container = reader.string(); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.hash_code = reader.uint64(); + break; + case 5: + message.maybe_type_name = reader.string(); + break; + case 6: + message.dtypes_and_shapes.push($root.tensorflow.ResourceHandleProto.DtypeAndShape.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.ResourceHandleProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "device": + message.device = reader.string(); + break; + case "container": + message.container = reader.string(); + break; + case "name": + message.name = reader.string(); + break; + case "hash_code": + message.hash_code = reader.uint64(); + break; + case "maybe_type_name": + message.maybe_type_name = reader.string(); + break; + case "dtypes_and_shapes": + message.dtypes_and_shapes.push($root.tensorflow.ResourceHandleProto.DtypeAndShape.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.ResourceHandleProto.prototype.device = ""; +$root.tensorflow.ResourceHandleProto.prototype.container = ""; +$root.tensorflow.ResourceHandleProto.prototype.name = ""; +$root.tensorflow.ResourceHandleProto.prototype.hash_code = protobuf.Uint64.create(0); +$root.tensorflow.ResourceHandleProto.prototype.maybe_type_name = ""; + +$root.tensorflow.ResourceHandleProto.DtypeAndShape = class DtypeAndShape { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.ResourceHandleProto.DtypeAndShape(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dtype = reader.int32(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.ResourceHandleProto.DtypeAndShape(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.ResourceHandleProto.DtypeAndShape.prototype.dtype = 0; +$root.tensorflow.ResourceHandleProto.DtypeAndShape.prototype.shape = null; + +$root.tensorflow.TensorShapeProto = class TensorShapeProto { + + constructor() { + this.dim = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.TensorShapeProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.dim.push($root.tensorflow.TensorShapeProto.Dim.decode(reader, reader.uint32())); + break; + case 3: + message.unknown_rank = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TensorShapeProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dim": + message.dim.push($root.tensorflow.TensorShapeProto.Dim.decodeText(reader)); + break; + case "unknown_rank": + message.unknown_rank = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorShapeProto.prototype.unknown_rank = false; + +$root.tensorflow.TensorShapeProto.Dim = class Dim { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.TensorShapeProto.Dim(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = reader.int64(); + break; + case 2: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TensorShapeProto.Dim(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "size": + message.size = reader.int64(); + break; + case "name": + message.name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorShapeProto.Dim.prototype.size = protobuf.Int64.create(0); +$root.tensorflow.TensorShapeProto.Dim.prototype.name = ""; + +$root.tensorflow.DataType = { + "DT_INVALID": 0, + "DT_FLOAT": 1, + "DT_DOUBLE": 2, + "DT_INT32": 3, + "DT_UINT8": 4, + "DT_INT16": 5, + "DT_INT8": 6, + "DT_STRING": 7, + "DT_COMPLEX64": 8, + "DT_INT64": 9, + "DT_BOOL": 10, + "DT_QINT8": 11, + "DT_QUINT8": 12, + "DT_QINT32": 13, + "DT_BFLOAT16": 14, + "DT_QINT16": 15, + "DT_QUINT16": 16, + "DT_UINT16": 17, + "DT_COMPLEX128": 18, + "DT_HALF": 19, + "DT_RESOURCE": 20, + "DT_VARIANT": 21, + "DT_UINT32": 22, + "DT_UINT64": 23, + "DT_FLOAT8_E5M2": 24, + "DT_FLOAT8_E4M3FN": 25, + "DT_INT4": 29, + "DT_UINT4": 30, + "DT_FLOAT_REF": 101, + "DT_DOUBLE_REF": 102, + "DT_INT32_REF": 103, + "DT_UINT8_REF": 104, + "DT_INT16_REF": 105, + "DT_INT8_REF": 106, + "DT_STRING_REF": 107, + "DT_COMPLEX64_REF": 108, + "DT_INT64_REF": 109, + "DT_BOOL_REF": 110, + "DT_QINT8_REF": 111, + "DT_QUINT8_REF": 112, + "DT_QINT32_REF": 113, + "DT_BFLOAT16_REF": 114, + "DT_QINT16_REF": 115, + "DT_QUINT16_REF": 116, + "DT_UINT16_REF": 117, + "DT_COMPLEX128_REF": 118, + "DT_HALF_REF": 119, + "DT_RESOURCE_REF": 120, + "DT_VARIANT_REF": 121, + "DT_UINT32_REF": 122, + "DT_UINT64_REF": 123, + "DT_FLOAT8_E5M2_REF": 124, + "DT_FLOAT8_E4M3FN_REF": 125, + "DT_INT4_REF": 129, + "DT_UINT4_REF": 130 +}; + +$root.tensorflow.SerializedDType = class SerializedDType { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SerializedDType(); + const end = length !== undefined ? 
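+ // Shape and dtype conventions above: TensorShapeProto.unknown_rank marks a
+ // fully unknown shape, a Dim.size of -1 marks an unknown dimension, and the
+ // DT_*_REF values are the base DataType plus 100. Sketch of formatting a
+ // decoded shape (assumes the protobuf.Int64 size stringifies):
+ //   const text = shape.unknown_rank
+ //     ? "?"
+ //     : shape.dim.map((d) => d.size.toString()).join("x"); // "-1" = unknown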
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.datatype = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SerializedDType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "datatype": + message.datatype = reader.enum($root.tensorflow.DataType); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SerializedDType.prototype.datatype = 0; + +$root.tensorflow.NodeDef = class NodeDef { + + constructor() { + this.input = []; + this.attr = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.NodeDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.op = reader.string(); + break; + case 3: + message.input.push(reader.string()); + break; + case 4: + message.device = reader.string(); + break; + case 5: + reader.entry(message.attr, () => reader.string(), () => $root.tensorflow.AttrValue.decode(reader, reader.uint32())); + break; + case 6: + message.experimental_debug_info = $root.tensorflow.NodeDef.ExperimentalDebugInfo.decode(reader, reader.uint32()); + break; + case 7: + message.experimental_type = $root.tensorflow.FullTypeDef.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.NodeDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "op": + message.op = reader.string(); + break; + case "input": + reader.array(message.input, () => reader.string()); + break; + case "device": + message.device = reader.string(); + break; + case "attr": + reader.entry(message.attr, () => reader.string(), () => $root.tensorflow.AttrValue.decodeText(reader)); + break; + case "experimental_debug_info": + message.experimental_debug_info = $root.tensorflow.NodeDef.ExperimentalDebugInfo.decodeText(reader); + break; + case "experimental_type": + message.experimental_type = $root.tensorflow.FullTypeDef.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.NodeDef.prototype.name = ""; +$root.tensorflow.NodeDef.prototype.op = ""; +$root.tensorflow.NodeDef.prototype.device = ""; +$root.tensorflow.NodeDef.prototype.experimental_debug_info = null; +$root.tensorflow.NodeDef.prototype.experimental_type = null; + +$root.tensorflow.NodeDef.ExperimentalDebugInfo = class ExperimentalDebugInfo { + + constructor() { + this.original_node_names = []; + this.original_func_names = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.NodeDef.ExperimentalDebugInfo(); + const end = length !== undefined ? 
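+ // NodeDef.input entries are plain strings using TensorFlow's conventions:
+ // "node" or "node:k" for the k-th data output of a producer, and "^node"
+ // for a control dependency. Sketch of classifying one:
+ //   const ref = "conv/Relu:1";
+ //   const isControl = ref.startsWith("^");
+ //   const [producer, index = "0"] = ref.split(":"); // -> "conv/Relu", "1"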
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.original_node_names.push(reader.string()); + break; + case 2: + message.original_func_names.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.NodeDef.ExperimentalDebugInfo(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "original_node_names": + reader.array(message.original_node_names, () => reader.string()); + break; + case "original_func_names": + reader.array(message.original_func_names, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.FullTypeId = { + "TFT_UNSET": 0, + "TFT_VAR": 1, + "TFT_ANY": 2, + "TFT_PRODUCT": 3, + "TFT_NAMED": 4, + "TFT_FOR_EACH": 20, + "TFT_CALLABLE": 100, + "TFT_TENSOR": 1000, + "TFT_ARRAY": 1001, + "TFT_OPTIONAL": 1002, + "TFT_LITERAL": 1003, + "TFT_ENCODED": 1004, + "TFT_SHAPE_TENSOR": 1005, + "TFT_BOOL": 200, + "TFT_UINT8": 201, + "TFT_UINT16": 202, + "TFT_UINT32": 203, + "TFT_UINT64": 204, + "TFT_INT8": 205, + "TFT_INT16": 206, + "TFT_INT32": 207, + "TFT_INT64": 208, + "TFT_HALF": 209, + "TFT_FLOAT": 210, + "TFT_DOUBLE": 211, + "TFT_BFLOAT16": 215, + "TFT_COMPLEX64": 212, + "TFT_COMPLEX128": 213, + "TFT_STRING": 214, + "TFT_DATASET": 10102, + "TFT_RAGGED": 10103, + "TFT_ITERATOR": 10104, + "TFT_MUTEX_LOCK": 10202, + "TFT_LEGACY_VARIANT": 10203 +}; + +$root.tensorflow.FullTypeDef = class FullTypeDef { + + constructor() { + this.args = []; + } + + get attr() { + $root.tensorflow.FullTypeDef.attrSet = $root.tensorflow.FullTypeDef.attrSet || new Set([ "s", "i"]); + return Object.keys(this).find((key) => $root.tensorflow.FullTypeDef.attrSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.tensorflow.FullTypeDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type_id = reader.int32(); + break; + case 2: + message.args.push($root.tensorflow.FullTypeDef.decode(reader, reader.uint32())); + break; + case 3: + message.s = reader.string(); + break; + case 4: + message.i = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.FullTypeDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type_id": + message.type_id = reader.enum($root.tensorflow.FullTypeId); + break; + case "args": + message.args.push($root.tensorflow.FullTypeDef.decodeText(reader)); + break; + case "s": + message.s = reader.string(); + break; + case "i": + message.i = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.FullTypeDef.prototype.type_id = 0; + +$root.tensorflow.OpDef = class OpDef { + + constructor() { + this.input_arg = []; + this.output_arg = []; + this.control_output = []; + this.attr = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.OpDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.input_arg.push($root.tensorflow.OpDef.ArgDef.decode(reader, reader.uint32())); + break; + case 3: + message.output_arg.push($root.tensorflow.OpDef.ArgDef.decode(reader, reader.uint32())); + break; + case 20: + message.control_output.push(reader.string()); + break; + case 4: + message.attr.push($root.tensorflow.OpDef.AttrDef.decode(reader, reader.uint32())); + break; + case 8: + message.deprecation = $root.tensorflow.OpDeprecation.decode(reader, reader.uint32()); + break; + case 5: + message.summary = reader.string(); + break; + case 6: + message.description = reader.string(); + break; + case 18: + message.is_commutative = reader.bool(); + break; + case 16: + message.is_aggregate = reader.bool(); + break; + case 17: + message.is_stateful = reader.bool(); + break; + case 19: + message.allows_uninitialized_input = reader.bool(); + break; + case 21: + message.is_distributed_communication = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.OpDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "input_arg": + message.input_arg.push($root.tensorflow.OpDef.ArgDef.decodeText(reader)); + break; + case "output_arg": + message.output_arg.push($root.tensorflow.OpDef.ArgDef.decodeText(reader)); + break; + case "control_output": + reader.array(message.control_output, () => reader.string()); + break; + case "attr": + message.attr.push($root.tensorflow.OpDef.AttrDef.decodeText(reader)); + break; + case "deprecation": + message.deprecation = $root.tensorflow.OpDeprecation.decodeText(reader); + break; + case "summary": + message.summary = reader.string(); + break; + case "description": + message.description = reader.string(); + break; + case "is_commutative": + message.is_commutative = reader.bool(); + break; + case "is_aggregate": + message.is_aggregate = reader.bool(); + break; + case "is_stateful": + message.is_stateful = reader.bool(); + break; + case "allows_uninitialized_input": + message.allows_uninitialized_input = reader.bool(); + break; + case "is_distributed_communication": + message.is_distributed_communication = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.OpDef.prototype.name = ""; +$root.tensorflow.OpDef.prototype.deprecation = null; +$root.tensorflow.OpDef.prototype.summary = ""; +$root.tensorflow.OpDef.prototype.description = ""; +$root.tensorflow.OpDef.prototype.is_commutative = false; +$root.tensorflow.OpDef.prototype.is_aggregate = false; +$root.tensorflow.OpDef.prototype.is_stateful = false; +$root.tensorflow.OpDef.prototype.allows_uninitialized_input = false; +$root.tensorflow.OpDef.prototype.is_distributed_communication = false; + +$root.tensorflow.OpDef.ArgDef = class ArgDef { + + constructor() { + this.handle_data = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.OpDef.ArgDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.description = reader.string(); + break; + case 3: + message.type = reader.int32(); + break; + case 4: + message.type_attr = reader.string(); + break; + case 5: + message.number_attr = reader.string(); + break; + case 6: + message.type_list_attr = reader.string(); + break; + case 7: + message.handle_data.push($root.tensorflow.ResourceHandleProto.DtypeAndShape.decode(reader, reader.uint32())); + break; + case 16: + message.is_ref = reader.bool(); + break; + case 17: + message.experimental_full_type = $root.tensorflow.FullTypeDef.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.OpDef.ArgDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "description": + message.description = reader.string(); + break; + case "type": + message.type = reader.enum($root.tensorflow.DataType); + break; + case "type_attr": + message.type_attr = reader.string(); + break; + case "number_attr": + message.number_attr = reader.string(); + break; + case "type_list_attr": + message.type_list_attr = reader.string(); + break; + case "handle_data": + message.handle_data.push($root.tensorflow.ResourceHandleProto.DtypeAndShape.decodeText(reader)); + break; + case "is_ref": + message.is_ref = reader.bool(); + break; + case "experimental_full_type": + message.experimental_full_type = $root.tensorflow.FullTypeDef.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.OpDef.ArgDef.prototype.name = ""; +$root.tensorflow.OpDef.ArgDef.prototype.description = ""; +$root.tensorflow.OpDef.ArgDef.prototype.type = 0; +$root.tensorflow.OpDef.ArgDef.prototype.type_attr = ""; +$root.tensorflow.OpDef.ArgDef.prototype.number_attr = ""; +$root.tensorflow.OpDef.ArgDef.prototype.type_list_attr = ""; +$root.tensorflow.OpDef.ArgDef.prototype.is_ref = false; +$root.tensorflow.OpDef.ArgDef.prototype.experimental_full_type = null; + +$root.tensorflow.OpDef.AttrDef = class AttrDef { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.OpDef.AttrDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.default_value = $root.tensorflow.AttrValue.decode(reader, reader.uint32()); + break; + case 4: + message.description = reader.string(); + break; + case 5: + message.has_minimum = reader.bool(); + break; + case 6: + message.minimum = reader.int64(); + break; + case 7: + message.allowed_values = $root.tensorflow.AttrValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.OpDef.AttrDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "default_value": + message.default_value = $root.tensorflow.AttrValue.decodeText(reader); + break; + case "description": + message.description = reader.string(); + break; + case "has_minimum": + message.has_minimum = reader.bool(); + break; + case "minimum": + message.minimum = reader.int64(); + break; + case "allowed_values": + message.allowed_values = $root.tensorflow.AttrValue.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.OpDef.AttrDef.prototype.name = ""; +$root.tensorflow.OpDef.AttrDef.prototype.type = ""; +$root.tensorflow.OpDef.AttrDef.prototype.default_value = null; +$root.tensorflow.OpDef.AttrDef.prototype.description = ""; +$root.tensorflow.OpDef.AttrDef.prototype.has_minimum = false; +$root.tensorflow.OpDef.AttrDef.prototype.minimum = protobuf.Int64.create(0); +$root.tensorflow.OpDef.AttrDef.prototype.allowed_values = null; + +$root.tensorflow.OpDeprecation = class OpDeprecation { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.OpDeprecation(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.int32(); + break; + case 2: + message.explanation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.OpDeprecation(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "version": + message.version = reader.int32(); + break; + case "explanation": + message.explanation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.OpDeprecation.prototype.version = 0; +$root.tensorflow.OpDeprecation.prototype.explanation = ""; + +$root.tensorflow.OpList = class OpList { + + constructor() { + this.op = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.OpList(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.op.push($root.tensorflow.OpDef.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.OpList(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "op": + message.op.push($root.tensorflow.OpDef.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.GraphDebugInfo = class GraphDebugInfo { + + constructor() { + this.files = []; + this.frames_by_id = {}; + this.traces_by_id = {}; + this.traces = {}; + this.name_to_trace_id = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.GraphDebugInfo(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.files.push(reader.string()); + break; + case 4: + reader.entry(message.frames_by_id, () => reader.fixed64(), () => $root.tensorflow.GraphDebugInfo.FileLineCol.decode(reader, reader.uint32())); + break; + case 6: + reader.entry(message.traces_by_id, () => reader.fixed64(), () => $root.tensorflow.GraphDebugInfo.StackTrace.decode(reader, reader.uint32())); + break; + case 2: + reader.entry(message.traces, () => reader.string(), () => $root.tensorflow.GraphDebugInfo.StackTrace.decode(reader, reader.uint32())); + break; + case 5: + reader.entry(message.name_to_trace_id, () => reader.string(), () => reader.fixed64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.GraphDebugInfo(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "files": + reader.array(message.files, () => reader.string()); + break; + case "frames_by_id": + reader.entry(message.frames_by_id, () => reader.fixed64(), () => $root.tensorflow.GraphDebugInfo.FileLineCol.decodeText(reader)); + break; + case "traces_by_id": + reader.entry(message.traces_by_id, () => reader.fixed64(), () => $root.tensorflow.GraphDebugInfo.StackTrace.decodeText(reader)); + break; + case "traces": + reader.entry(message.traces, () => reader.string(), () => $root.tensorflow.GraphDebugInfo.StackTrace.decodeText(reader)); + break; + case "name_to_trace_id": + reader.entry(message.name_to_trace_id, () => reader.string(), () => reader.fixed64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.GraphDebugInfo.FileLineCol = class FileLineCol { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.GraphDebugInfo.FileLineCol(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.file_index = reader.int32(); + break; + case 2: + message.line = reader.int32(); + break; + case 3: + message.col = reader.int32(); + break; + case 4: + message.func = reader.string(); + break; + case 5: + message.code = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.GraphDebugInfo.FileLineCol(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "file_index": + message.file_index = reader.int32(); + break; + case "line": + message.line = reader.int32(); + break; + case "col": + message.col = reader.int32(); + break; + case "func": + message.func = reader.string(); + break; + case "code": + message.code = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.GraphDebugInfo.FileLineCol.prototype.file_index = 0; +$root.tensorflow.GraphDebugInfo.FileLineCol.prototype.line = 0; +$root.tensorflow.GraphDebugInfo.FileLineCol.prototype.col = 0; +$root.tensorflow.GraphDebugInfo.FileLineCol.prototype.func = ""; +$root.tensorflow.GraphDebugInfo.FileLineCol.prototype.code = ""; + +$root.tensorflow.GraphDebugInfo.StackTrace = class StackTrace { + + constructor() { + this.file_line_cols = []; + this.frame_id = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.GraphDebugInfo.StackTrace(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.file_line_cols.push($root.tensorflow.GraphDebugInfo.FileLineCol.decode(reader, reader.uint32())); + break; + case 2: + message.frame_id = reader.array(message.frame_id, () => reader.fixed64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.GraphDebugInfo.StackTrace(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "file_line_cols": + message.file_line_cols.push($root.tensorflow.GraphDebugInfo.FileLineCol.decodeText(reader)); + break; + case "frame_id": + reader.array(message.frame_id, () => reader.fixed64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.VersionDef = class VersionDef { + + constructor() { + this.bad_consumers = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.VersionDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.producer = reader.int32(); + break; + case 2: + message.min_consumer = reader.int32(); + break; + case 3: + message.bad_consumers = reader.array(message.bad_consumers, () => reader.int32(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.VersionDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "producer": + message.producer = reader.int32(); + break; + case "min_consumer": + message.min_consumer = reader.int32(); + break; + case "bad_consumers": + reader.array(message.bad_consumers, () => reader.int32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.VersionDef.prototype.producer = 0; +$root.tensorflow.VersionDef.prototype.min_consumer = 0; + +$root.tensorflow.SavedObjectGraph = class SavedObjectGraph { + + constructor() { + this.nodes = []; + this.concrete_functions = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedObjectGraph(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.nodes.push($root.tensorflow.SavedObject.decode(reader, reader.uint32())); + break; + case 2: + reader.entry(message.concrete_functions, () => reader.string(), () => $root.tensorflow.SavedConcreteFunction.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedObjectGraph(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "nodes": + message.nodes.push($root.tensorflow.SavedObject.decodeText(reader)); + break; + case "concrete_functions": + reader.entry(message.concrete_functions, () => reader.string(), () => $root.tensorflow.SavedConcreteFunction.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedObject = class SavedObject { + + constructor() { + this.children = []; + this.dependencies = []; + this.slot_variables = []; + this.saveable_objects = {}; + } + + get kind() { + $root.tensorflow.SavedObject.kindSet = $root.tensorflow.SavedObject.kindSet || new Set([ "user_object", "asset", "function", "variable", "bare_concrete_function", "constant", "resource", "captured_tensor"]); + return Object.keys(this).find((key) => $root.tensorflow.SavedObject.kindSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedObject(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.children.push($root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.decode(reader, reader.uint32())); + break; + case 15: + message.dependencies.push($root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.decode(reader, reader.uint32())); + break; + case 3: + message.slot_variables.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference.decode(reader, reader.uint32())); + break; + case 4: + message.user_object = $root.tensorflow.SavedUserObject.decode(reader, reader.uint32()); + break; + case 5: + message.asset = $root.tensorflow.SavedAsset.decode(reader, reader.uint32()); + break; + case 6: + message["function"] = $root.tensorflow.SavedFunction.decode(reader, reader.uint32()); + break; + case 7: + message.variable = $root.tensorflow.SavedVariable.decode(reader, reader.uint32()); + break; + case 8: + message.bare_concrete_function = $root.tensorflow.SavedBareConcreteFunction.decode(reader, reader.uint32()); + break; + case 9: + message.constant = $root.tensorflow.SavedConstant.decode(reader, reader.uint32()); + break; + case 10: + message.resource = $root.tensorflow.SavedResource.decode(reader, reader.uint32()); + break; + case 12: + message.captured_tensor = $root.tensorflow.CapturedTensor.decode(reader, reader.uint32()); + break; + case 11: + reader.entry(message.saveable_objects, () => reader.string(), () => $root.tensorflow.SaveableObject.decode(reader, reader.uint32())); + break; + case 13: + message.registered_name = reader.string(); + break; + case 14: + message.serialized_user_proto = $root.google.protobuf.Any.decode(reader, reader.uint32()); + break; + case 16: + message.registered_saver = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedObject(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "children": + message.children.push($root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.decodeText(reader)); + break; + case "dependencies": + message.dependencies.push($root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.decodeText(reader)); + break; + case "slot_variables": + message.slot_variables.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference.decodeText(reader)); + break; + case "user_object": + message.user_object = $root.tensorflow.SavedUserObject.decodeText(reader); + break; + case "asset": + message.asset = $root.tensorflow.SavedAsset.decodeText(reader); + break; + case "function": + message["function"] = $root.tensorflow.SavedFunction.decodeText(reader); + break; + case "variable": + message.variable = $root.tensorflow.SavedVariable.decodeText(reader); + break; + case "bare_concrete_function": + message.bare_concrete_function = $root.tensorflow.SavedBareConcreteFunction.decodeText(reader); + break; + case "constant": + message.constant = $root.tensorflow.SavedConstant.decodeText(reader); + break; + case "resource": + message.resource = $root.tensorflow.SavedResource.decodeText(reader); + break; + case "captured_tensor": + message.captured_tensor = $root.tensorflow.CapturedTensor.decodeText(reader); + break; + case "saveable_objects": + reader.entry(message.saveable_objects, () => reader.string(), () => 
$root.tensorflow.SaveableObject.decodeText(reader)); + break; + case "registered_name": + message.registered_name = reader.string(); + break; + case "serialized_user_proto": + message.serialized_user_proto = $root.google.protobuf.Any.decodeText(reader); + break; + case "registered_saver": + message.registered_saver = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedObject.prototype.registered_name = ""; +$root.tensorflow.SavedObject.prototype.serialized_user_proto = null; +$root.tensorflow.SavedObject.prototype.registered_saver = ""; + +$root.tensorflow.SavedUserObject = class SavedUserObject { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedUserObject(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.identifier = reader.string(); + break; + case 2: + message.version = $root.tensorflow.VersionDef.decode(reader, reader.uint32()); + break; + case 3: + message.metadata = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedUserObject(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "identifier": + message.identifier = reader.string(); + break; + case "version": + message.version = $root.tensorflow.VersionDef.decodeText(reader); + break; + case "metadata": + message.metadata = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedUserObject.prototype.identifier = ""; +$root.tensorflow.SavedUserObject.prototype.version = null; +$root.tensorflow.SavedUserObject.prototype.metadata = ""; + +$root.tensorflow.SavedAsset = class SavedAsset { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedAsset(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.asset_file_def_index = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedAsset(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "asset_file_def_index": + message.asset_file_def_index = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedAsset.prototype.asset_file_def_index = 0; + +$root.tensorflow.SavedFunction = class SavedFunction { + + constructor() { + this.concrete_functions = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedFunction(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.concrete_functions.push(reader.string()); + break; + case 2: + message.function_spec = $root.tensorflow.FunctionSpec.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedFunction(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "concrete_functions": + reader.array(message.concrete_functions, () => reader.string()); + break; + case "function_spec": + message.function_spec = $root.tensorflow.FunctionSpec.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedFunction.prototype.function_spec = null; + +$root.tensorflow.CapturedTensor = class CapturedTensor { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.CapturedTensor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.concrete_function = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CapturedTensor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "concrete_function": + message.concrete_function = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CapturedTensor.prototype.name = ""; +$root.tensorflow.CapturedTensor.prototype.concrete_function = ""; + +$root.tensorflow.SavedConcreteFunction = class SavedConcreteFunction { + + constructor() { + this.bound_inputs = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedConcreteFunction(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.bound_inputs = reader.array(message.bound_inputs, () => reader.int32(), tag); + break; + case 3: + message.canonicalized_input_signature = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + case 4: + message.output_signature = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedConcreteFunction(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "bound_inputs": + reader.array(message.bound_inputs, () => reader.int32()); + break; + case "canonicalized_input_signature": + message.canonicalized_input_signature = $root.tensorflow.StructuredValue.decodeText(reader); + break; + case "output_signature": + message.output_signature = $root.tensorflow.StructuredValue.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedConcreteFunction.prototype.canonicalized_input_signature = null; +$root.tensorflow.SavedConcreteFunction.prototype.output_signature = null; + +$root.tensorflow.SavedBareConcreteFunction = class SavedBareConcreteFunction { + + constructor() { + this.argument_keywords = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedBareConcreteFunction(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.concrete_function_name = reader.string(); + break; + case 2: + message.argument_keywords.push(reader.string()); + break; + case 3: + message.allowed_positional_arguments = reader.int64(); + break; + case 4: + message.function_spec = $root.tensorflow.FunctionSpec.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedBareConcreteFunction(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "concrete_function_name": + message.concrete_function_name = reader.string(); + break; + case "argument_keywords": + reader.array(message.argument_keywords, () => reader.string()); + break; + case "allowed_positional_arguments": + message.allowed_positional_arguments = reader.int64(); + break; + case "function_spec": + message.function_spec = $root.tensorflow.FunctionSpec.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedBareConcreteFunction.prototype.concrete_function_name = ""; +$root.tensorflow.SavedBareConcreteFunction.prototype.allowed_positional_arguments = protobuf.Int64.create(0); +$root.tensorflow.SavedBareConcreteFunction.prototype.function_spec = null; + +$root.tensorflow.SavedConstant = class SavedConstant { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedConstant(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedConstant(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "operation": + message.operation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedConstant.prototype.operation = ""; + +$root.tensorflow.SavedVariable = class SavedVariable { + + constructor() { + this.experimental_distributed_variable_components = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedVariable(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dtype = reader.int32(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.trainable = reader.bool(); + break; + case 4: + message.synchronization = reader.int32(); + break; + case 5: + message.aggregation = reader.int32(); + break; + case 6: + message.name = reader.string(); + break; + case 7: + message.device = reader.string(); + break; + case 8: + message.experimental_distributed_variable_components.push($root.tensorflow.SavedVariable.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedVariable(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + case "trainable": + message.trainable = reader.bool(); + break; + case "synchronization": + message.synchronization = reader.enum($root.tensorflow.VariableSynchronization); + break; + case "aggregation": + message.aggregation = reader.enum($root.tensorflow.VariableAggregation); + break; + case "name": + message.name = reader.string(); + break; + case "device": + message.device = reader.string(); + break; + case "experimental_distributed_variable_components": + message.experimental_distributed_variable_components.push($root.tensorflow.SavedVariable.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedVariable.prototype.dtype = 0; +$root.tensorflow.SavedVariable.prototype.shape = null; +$root.tensorflow.SavedVariable.prototype.trainable = false; +$root.tensorflow.SavedVariable.prototype.synchronization = 0; +$root.tensorflow.SavedVariable.prototype.aggregation = 0; +$root.tensorflow.SavedVariable.prototype.name = ""; +$root.tensorflow.SavedVariable.prototype.device = ""; + +$root.tensorflow.FunctionSpec = class FunctionSpec { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.FunctionSpec(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.fullargspec = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + case 2: + message.is_method = reader.bool(); + break; + case 5: + message.input_signature = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + case 6: + message.jit_compile = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.FunctionSpec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "fullargspec": + message.fullargspec = $root.tensorflow.StructuredValue.decodeText(reader); + break; + case "is_method": + message.is_method = reader.bool(); + break; + case "input_signature": + message.input_signature = $root.tensorflow.StructuredValue.decodeText(reader); + break; + case "jit_compile": + message.jit_compile = reader.enum($root.tensorflow.FunctionSpec.JitCompile); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.FunctionSpec.prototype.fullargspec = null; +$root.tensorflow.FunctionSpec.prototype.is_method = false; +$root.tensorflow.FunctionSpec.prototype.input_signature = null; +$root.tensorflow.FunctionSpec.prototype.jit_compile = 0; + +$root.tensorflow.FunctionSpec.JitCompile = { + "DEFAULT": 0, + "ON": 1, + "OFF": 2 +}; + +$root.tensorflow.SavedResource = class SavedResource { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedResource(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.device = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedResource(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "device": + message.device = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedResource.prototype.device = ""; + +$root.tensorflow.SaveableObject = class SaveableObject { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SaveableObject(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.save_function = reader.int32(); + break; + case 3: + message.restore_function = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SaveableObject(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "save_function": + message.save_function = reader.int32(); + break; + case "restore_function": + message.restore_function = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SaveableObject.prototype.save_function = 0; +$root.tensorflow.SaveableObject.prototype.restore_function = 0; + +$root.tensorflow.VariableSynchronization = { + "VARIABLE_SYNCHRONIZATION_AUTO": 0, + "VARIABLE_SYNCHRONIZATION_NONE": 1, + "VARIABLE_SYNCHRONIZATION_ON_WRITE": 2, + "VARIABLE_SYNCHRONIZATION_ON_READ": 3 +}; + +$root.tensorflow.VariableAggregation = { + "VARIABLE_AGGREGATION_NONE": 0, + "VARIABLE_AGGREGATION_SUM": 1, + "VARIABLE_AGGREGATION_MEAN": 2, + "VARIABLE_AGGREGATION_ONLY_FIRST_REPLICA": 3 +}; + +$root.tensorflow.VariableDef = class VariableDef { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.VariableDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.variable_name = reader.string(); + break; + case 6: + message.initial_value_name = reader.string(); + break; + case 2: + message.initializer_name = reader.string(); + break; + case 3: + message.snapshot_name = reader.string(); + break; + case 4: + message.save_slice_info_def = $root.tensorflow.SaveSliceInfoDef.decode(reader, reader.uint32()); + break; + case 5: + message.is_resource = reader.bool(); + break; + case 7: + message.trainable = reader.bool(); + break; + case 8: + message.synchronization = reader.int32(); + break; + case 9: + message.aggregation = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.VariableDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "variable_name": + message.variable_name = reader.string(); + break; + case "initial_value_name": + message.initial_value_name = reader.string(); + break; + case "initializer_name": + message.initializer_name = reader.string(); + break; + case "snapshot_name": + message.snapshot_name = reader.string(); + break; + case "save_slice_info_def": + message.save_slice_info_def = $root.tensorflow.SaveSliceInfoDef.decodeText(reader); + break; + case "is_resource": + message.is_resource = reader.bool(); + break; + case "trainable": + message.trainable = reader.bool(); + break; + case "synchronization": + message.synchronization = reader.enum($root.tensorflow.VariableSynchronization); + break; + case "aggregation": + message.aggregation = reader.enum($root.tensorflow.VariableAggregation); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.VariableDef.prototype.variable_name = ""; +$root.tensorflow.VariableDef.prototype.initial_value_name = ""; 
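+// Decoding pattern used by every message class in this file: decode() walks
+// the protobuf binary wire format, where each varint tag carries the field
+// number in its upper bits (tag >>> 3) and the wire type in its low three
+// bits (tag & 7); length-delimited fields (strings, submessages, packed
+// arrays) are read behind a uint32 length prefix, and unknown fields are
+// skipped with skipType(tag & 7) so the decoders stay forward-compatible.
+// decodeText() mirrors the same schema for the protobuf text format,
+// dispatching on field names instead of field numbers. A minimal usage
+// sketch, assuming the companion protobuf module's reader API (the names
+// below are illustrative and not defined in this file):
+//   const reader = protobuf.BinaryReader.open(bytes);
+//   const variable = $root.tensorflow.VariableDef.decode(reader);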
+$root.tensorflow.VariableDef.prototype.initializer_name = ""; +$root.tensorflow.VariableDef.prototype.snapshot_name = ""; +$root.tensorflow.VariableDef.prototype.save_slice_info_def = null; +$root.tensorflow.VariableDef.prototype.is_resource = false; +$root.tensorflow.VariableDef.prototype.trainable = false; +$root.tensorflow.VariableDef.prototype.synchronization = 0; +$root.tensorflow.VariableDef.prototype.aggregation = 0; + +$root.tensorflow.SaveSliceInfoDef = class SaveSliceInfoDef { + + constructor() { + this.full_shape = []; + this.var_offset = []; + this.var_shape = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.SaveSliceInfoDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.full_name = reader.string(); + break; + case 2: + message.full_shape = reader.array(message.full_shape, () => reader.int64(), tag); + break; + case 3: + message.var_offset = reader.array(message.var_offset, () => reader.int64(), tag); + break; + case 4: + message.var_shape = reader.array(message.var_shape, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SaveSliceInfoDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "full_name": + message.full_name = reader.string(); + break; + case "full_shape": + reader.array(message.full_shape, () => reader.int64()); + break; + case "var_offset": + reader.array(message.var_offset, () => reader.int64()); + break; + case "var_shape": + reader.array(message.var_shape, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SaveSliceInfoDef.prototype.full_name = ""; + +$root.tensorflow.StructuredValue = class StructuredValue { + + constructor() { + } + + get kind() { + $root.tensorflow.StructuredValue.kindSet = $root.tensorflow.StructuredValue.kindSet || new Set([ "none_value", "float64_value", "int64_value", "string_value", "bool_value", "tensor_shape_value", "tensor_dtype_value", "tensor_spec_value", "type_spec_value", "bounded_tensor_spec_value", "list_value", "tuple_value", "dict_value", "named_tuple_value", "tensor_value", "numpy_value"]); + return Object.keys(this).find((key) => $root.tensorflow.StructuredValue.kindSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.tensorflow.StructuredValue(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.none_value = $root.tensorflow.NoneValue.decode(reader, reader.uint32()); + break; + case 11: + message.float64_value = reader.double(); + break; + case 12: + message.int64_value = reader.sint64(); + break; + case 13: + message.string_value = reader.string(); + break; + case 14: + message.bool_value = reader.bool(); + break; + case 31: + message.tensor_shape_value = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 32: + message.tensor_dtype_value = reader.int32(); + break; + case 33: + message.tensor_spec_value = $root.tensorflow.TensorSpecProto.decode(reader, reader.uint32()); + break; + case 34: + message.type_spec_value = $root.tensorflow.TypeSpecProto.decode(reader, reader.uint32()); + break; + case 35: + message.bounded_tensor_spec_value = $root.tensorflow.BoundedTensorSpecProto.decode(reader, reader.uint32()); + break; + case 51: + message.list_value = $root.tensorflow.ListValue.decode(reader, reader.uint32()); + break; + case 52: + message.tuple_value = $root.tensorflow.TupleValue.decode(reader, reader.uint32()); + break; + case 53: + message.dict_value = $root.tensorflow.DictValue.decode(reader, reader.uint32()); + break; + case 54: + message.named_tuple_value = $root.tensorflow.NamedTupleValue.decode(reader, reader.uint32()); + break; + case 55: + message.tensor_value = $root.tensorflow.TensorProto.decode(reader, reader.uint32()); + break; + case 56: + message.numpy_value = $root.tensorflow.TensorProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.StructuredValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "none_value": + message.none_value = $root.tensorflow.NoneValue.decodeText(reader); + break; + case "float64_value": + message.float64_value = reader.double(); + break; + case "int64_value": + message.int64_value = reader.sint64(); + break; + case "string_value": + message.string_value = reader.string(); + break; + case "bool_value": + message.bool_value = reader.bool(); + break; + case "tensor_shape_value": + message.tensor_shape_value = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + case "tensor_dtype_value": + message.tensor_dtype_value = reader.enum($root.tensorflow.DataType); + break; + case "tensor_spec_value": + message.tensor_spec_value = $root.tensorflow.TensorSpecProto.decodeText(reader); + break; + case "type_spec_value": + message.type_spec_value = $root.tensorflow.TypeSpecProto.decodeText(reader); + break; + case "bounded_tensor_spec_value": + message.bounded_tensor_spec_value = $root.tensorflow.BoundedTensorSpecProto.decodeText(reader); + break; + case "list_value": + message.list_value = $root.tensorflow.ListValue.decodeText(reader); + break; + case "tuple_value": + message.tuple_value = $root.tensorflow.TupleValue.decodeText(reader); + break; + case "dict_value": + message.dict_value = $root.tensorflow.DictValue.decodeText(reader); + break; + case "named_tuple_value": + message.named_tuple_value = $root.tensorflow.NamedTupleValue.decodeText(reader); + break; + case "tensor_value": + message.tensor_value = $root.tensorflow.TensorProto.decodeText(reader); + break; + case "numpy_value": + message.numpy_value = $root.tensorflow.TensorProto.decodeText(reader); + break; + 
default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.NoneValue = class NoneValue { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.NoneValue(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.NoneValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.ListValue = class ListValue { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.ListValue(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values.push($root.tensorflow.StructuredValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.ListValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + message.values.push($root.tensorflow.StructuredValue.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TupleValue = class TupleValue { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.TupleValue(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values.push($root.tensorflow.StructuredValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TupleValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "values": + message.values.push($root.tensorflow.StructuredValue.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.DictValue = class DictValue { + + constructor() { + this.fields = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.DictValue(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.fields, () => reader.string(), () => $root.tensorflow.StructuredValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.DictValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "fields": + reader.entry(message.fields, () => reader.string(), () => $root.tensorflow.StructuredValue.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.PairValue = class PairValue { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.PairValue(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.PairValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.string(); + break; + case "value": + message.value = $root.tensorflow.StructuredValue.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.PairValue.prototype.key = ""; +$root.tensorflow.PairValue.prototype.value = null; + +$root.tensorflow.NamedTupleValue = class NamedTupleValue { + + constructor() { + this.values = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.NamedTupleValue(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.values.push($root.tensorflow.PairValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.NamedTupleValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "values": + message.values.push($root.tensorflow.PairValue.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.NamedTupleValue.prototype.name = ""; + +$root.tensorflow.TensorSpecProto = class TensorSpecProto { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.TensorSpecProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.dtype = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TensorSpecProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorSpecProto.prototype.name = ""; +$root.tensorflow.TensorSpecProto.prototype.shape = null; +$root.tensorflow.TensorSpecProto.prototype.dtype = 0; + +$root.tensorflow.BoundedTensorSpecProto = class BoundedTensorSpecProto { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.BoundedTensorSpecProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.dtype = reader.int32(); + break; + case 4: + message.minimum = $root.tensorflow.TensorProto.decode(reader, reader.uint32()); + break; + case 5: + message.maximum = $root.tensorflow.TensorProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.BoundedTensorSpecProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "minimum": + message.minimum = $root.tensorflow.TensorProto.decodeText(reader); + break; + case "maximum": + message.maximum = $root.tensorflow.TensorProto.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.BoundedTensorSpecProto.prototype.name = ""; +$root.tensorflow.BoundedTensorSpecProto.prototype.shape = null; +$root.tensorflow.BoundedTensorSpecProto.prototype.dtype = 0; +$root.tensorflow.BoundedTensorSpecProto.prototype.minimum = null; +$root.tensorflow.BoundedTensorSpecProto.prototype.maximum = null; + +$root.tensorflow.TypeSpecProto = class TypeSpecProto { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.TypeSpecProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type_spec_class = reader.int32(); + break; + case 2: + message.type_state = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + case 3: + message.type_spec_class_name = reader.string(); + break; + case 4: + message.num_flat_components = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TypeSpecProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type_spec_class": + message.type_spec_class = reader.enum($root.tensorflow.TypeSpecProto.TypeSpecClass); + break; + case "type_state": + message.type_state = $root.tensorflow.StructuredValue.decodeText(reader); + break; + case "type_spec_class_name": + message.type_spec_class_name = reader.string(); + break; + case "num_flat_components": + message.num_flat_components = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TypeSpecProto.prototype.type_spec_class = 0; +$root.tensorflow.TypeSpecProto.prototype.type_state = null; +$root.tensorflow.TypeSpecProto.prototype.type_spec_class_name = ""; +$root.tensorflow.TypeSpecProto.prototype.num_flat_components = 0; + +$root.tensorflow.TypeSpecProto.TypeSpecClass = { + "UNKNOWN": 0, + "SPARSE_TENSOR_SPEC": 1, + "INDEXED_SLICES_SPEC": 2, + "RAGGED_TENSOR_SPEC": 3, + "TENSOR_ARRAY_SPEC": 4, + "DATA_DATASET_SPEC": 5, + "DATA_ITERATOR_SPEC": 6, + "OPTIONAL_SPEC": 7, + "PER_REPLICA_SPEC": 8, + "VARIABLE_SPEC": 9, + "ROW_PARTITION_SPEC": 10, + "REGISTERED_TYPE_SPEC": 12, + "EXTENSION_TYPE_SPEC": 13 +}; + +$root.tensorflow.TrackableObjectGraph = class TrackableObjectGraph { + + constructor() { + this.nodes = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.TrackableObjectGraph(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.nodes.push($root.tensorflow.TrackableObjectGraph.TrackableObject.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TrackableObjectGraph(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "nodes": + message.nodes.push($root.tensorflow.TrackableObjectGraph.TrackableObject.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TrackableObjectGraph.TrackableObject = class TrackableObject { + + constructor() { + this.children = []; + this.attributes = []; + this.slot_variables = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.TrackableObjectGraph.TrackableObject(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.children.push($root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.decode(reader, reader.uint32())); + break; + case 2: + message.attributes.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor.decode(reader, reader.uint32())); + break; + case 3: + message.slot_variables.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference.decode(reader, reader.uint32())); + break; + case 4: + message.registered_saver = $root.tensorflow.RegisteredSaver.decode(reader, reader.uint32()); + break; + case 5: + message.has_checkpoint_values = $root.google.protobuf.BoolValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TrackableObjectGraph.TrackableObject(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "children": + message.children.push($root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.decodeText(reader)); + break; + case "attributes": + message.attributes.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor.decodeText(reader)); + break; + case "slot_variables": + message.slot_variables.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference.decodeText(reader)); + break; + case "registered_saver": + message.registered_saver = $root.tensorflow.RegisteredSaver.decodeText(reader); + break; + case "has_checkpoint_values": + message.has_checkpoint_values = $root.google.protobuf.BoolValue.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TrackableObjectGraph.TrackableObject.prototype.registered_saver = null; +$root.tensorflow.TrackableObjectGraph.TrackableObject.prototype.has_checkpoint_values = null; + +$root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference = class ObjectReference { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.node_id = reader.int32(); + break; + case 2: + message.local_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "node_id": + message.node_id = reader.int32(); + break; + case "local_name": + message.local_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.prototype.node_id = 0; +$root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.prototype.local_name = ""; + +$root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor = class SerializedTensor { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.full_name = reader.string(); + break; + case 3: + message.checkpoint_key = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "full_name": + message.full_name = reader.string(); + break; + case "checkpoint_key": + message.checkpoint_key = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor.prototype.name = ""; +$root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor.prototype.full_name = ""; +$root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor.prototype.checkpoint_key = ""; + +$root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference = class SlotVariableReference { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.original_variable_node_id = reader.int32(); + break; + case 2: + message.slot_name = reader.string(); + break; + case 3: + message.slot_variable_node_id = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "original_variable_node_id": + message.original_variable_node_id = reader.int32(); + break; + case "slot_name": + message.slot_name = reader.string(); + break; + case "slot_variable_node_id": + message.slot_variable_node_id = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference.prototype.original_variable_node_id = 0; +$root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference.prototype.slot_name = ""; +$root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference.prototype.slot_variable_node_id = 0; + +$root.tensorflow.RegisteredSaver = class RegisteredSaver { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.RegisteredSaver(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.object_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.RegisteredSaver(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "object_name": + message.object_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RegisteredSaver.prototype.name = ""; +$root.tensorflow.RegisteredSaver.prototype.object_name = ""; + +$root.tensorflow.SaverDef = class SaverDef { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SaverDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.filename_tensor_name = reader.string(); + break; + case 2: + message.save_tensor_name = reader.string(); + break; + case 3: + message.restore_op_name = reader.string(); + break; + case 4: + message.max_to_keep = reader.int32(); + break; + case 5: + message.sharded = reader.bool(); + break; + case 6: + message.keep_checkpoint_every_n_hours = reader.float(); + break; + case 7: + message.version = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SaverDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "filename_tensor_name": + message.filename_tensor_name = reader.string(); + break; + case "save_tensor_name": + message.save_tensor_name = reader.string(); + break; + case "restore_op_name": + message.restore_op_name = reader.string(); + break; + case "max_to_keep": + message.max_to_keep = reader.int32(); + break; + case "sharded": + message.sharded = reader.bool(); + break; + case "keep_checkpoint_every_n_hours": + message.keep_checkpoint_every_n_hours = reader.float(); + break; + case "version": + message.version = reader.enum($root.tensorflow.SaverDef.CheckpointFormatVersion); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SaverDef.prototype.filename_tensor_name = ""; +$root.tensorflow.SaverDef.prototype.save_tensor_name = ""; +$root.tensorflow.SaverDef.prototype.restore_op_name = ""; +$root.tensorflow.SaverDef.prototype.max_to_keep = 0; +$root.tensorflow.SaverDef.prototype.sharded = false; +$root.tensorflow.SaverDef.prototype.keep_checkpoint_every_n_hours = 0; +$root.tensorflow.SaverDef.prototype.version = 0; + +$root.tensorflow.SaverDef.CheckpointFormatVersion = { + "LEGACY": 0, + "V1": 1, + "V2": 2 +}; + +$root.tensorflow.BundleHeaderProto = class BundleHeaderProto { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.BundleHeaderProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_shards = reader.int32(); + break; + case 2: + message.endianness = reader.int32(); + break; + case 3: + message.version = $root.tensorflow.VersionDef.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.BundleHeaderProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "num_shards": + message.num_shards = reader.int32(); + break; + case "endianness": + message.endianness = reader.enum($root.tensorflow.BundleHeaderProto.Endianness); + break; + case "version": + message.version = $root.tensorflow.VersionDef.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.BundleHeaderProto.prototype.num_shards = 0; +$root.tensorflow.BundleHeaderProto.prototype.endianness = 0; +$root.tensorflow.BundleHeaderProto.prototype.version = null; + +$root.tensorflow.BundleHeaderProto.Endianness = { + "LITTLE": 0, + "BIG": 1 +}; + +$root.tensorflow.BundleEntryProto = class BundleEntryProto { + + constructor() { + this.slices = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.BundleEntryProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dtype = reader.int32(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.shard_id = reader.int32(); + break; + case 4: + message.offset = reader.int64(); + break; + case 5: + message.size = reader.int64(); + break; + case 6: + message.crc32c = reader.fixed32(); + break; + case 7: + message.slices.push($root.tensorflow.TensorSliceProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.BundleEntryProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + case "shard_id": + message.shard_id = reader.int32(); + break; + case "offset": + message.offset = reader.int64(); + break; + case "size": + message.size = reader.int64(); + break; + case "crc32c": + message.crc32c = reader.fixed32(); + break; + case "slices": + message.slices.push($root.tensorflow.TensorSliceProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.BundleEntryProto.prototype.dtype = 0; +$root.tensorflow.BundleEntryProto.prototype.shape = null; +$root.tensorflow.BundleEntryProto.prototype.shard_id = 0; +$root.tensorflow.BundleEntryProto.prototype.offset = protobuf.Int64.create(0); +$root.tensorflow.BundleEntryProto.prototype.size = protobuf.Int64.create(0); +$root.tensorflow.BundleEntryProto.prototype.crc32c = 0; + +$root.tensorflow.TensorSliceProto = class TensorSliceProto { + + constructor() { + this.extent = []; + } + + static decode(reader, length) { + const message = new 
$root.tensorflow.TensorSliceProto(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.extent.push($root.tensorflow.TensorSliceProto.Extent.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TensorSliceProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "extent": + message.extent.push($root.tensorflow.TensorSliceProto.Extent.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorSliceProto.Extent = class Extent { + + constructor() { + } + + get has_length() { + $root.tensorflow.TensorSliceProto.Extent.has_lengthSet = $root.tensorflow.TensorSliceProto.Extent.has_lengthSet || new Set([ "length"]); + return Object.keys(this).find((key) => $root.tensorflow.TensorSliceProto.Extent.has_lengthSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.tensorflow.TensorSliceProto.Extent(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.start = reader.int64(); + break; + case 2: + message.length = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TensorSliceProto.Extent(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "start": + message.start = reader.int64(); + break; + case "length": + message.length = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorSliceProto.Extent.prototype.start = protobuf.Int64.create(0); + +$root.tensorflow.SavedSliceMeta = class SavedSliceMeta { + + constructor() { + this.slice = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedSliceMeta(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.type = reader.int32(); + break; + case 4: + message.slice.push($root.tensorflow.TensorSliceProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedSliceMeta(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + case "type": + message.type = reader.enum($root.tensorflow.DataType); + break; + case "slice": + message.slice.push($root.tensorflow.TensorSliceProto.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedSliceMeta.prototype.name = ""; +$root.tensorflow.SavedSliceMeta.prototype.shape = null; +$root.tensorflow.SavedSliceMeta.prototype.type = 0; + +$root.tensorflow.SavedTensorSliceMeta = class SavedTensorSliceMeta { + + constructor() { + this.tensor = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedTensorSliceMeta(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor.push($root.tensorflow.SavedSliceMeta.decode(reader, reader.uint32())); + break; + case 2: + message.versions = $root.tensorflow.VersionDef.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedTensorSliceMeta(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "tensor": + message.tensor.push($root.tensorflow.SavedSliceMeta.decodeText(reader)); + break; + case "versions": + message.versions = $root.tensorflow.VersionDef.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedTensorSliceMeta.prototype.versions = null; + +$root.tensorflow.SavedSlice = class SavedSlice { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedSlice(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.slice = $root.tensorflow.TensorSliceProto.decode(reader, reader.uint32()); + break; + case 3: + message.data = $root.tensorflow.TensorProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedSlice(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "slice": + message.slice = $root.tensorflow.TensorSliceProto.decodeText(reader); + break; + case "data": + message.data = $root.tensorflow.TensorProto.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedSlice.prototype.name = ""; +$root.tensorflow.SavedSlice.prototype.slice = null; +$root.tensorflow.SavedSlice.prototype.data = null; + +$root.tensorflow.SavedTensorSlices = class SavedTensorSlices { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SavedTensorSlices(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.meta = $root.tensorflow.SavedTensorSliceMeta.decode(reader, reader.uint32()); + break; + case 2: + message.data = $root.tensorflow.SavedSlice.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SavedTensorSlices(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "meta": + message.meta = $root.tensorflow.SavedTensorSliceMeta.decodeText(reader); + break; + case "data": + message.data = $root.tensorflow.SavedSlice.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SavedTensorSlices.prototype.meta = null; +$root.tensorflow.SavedTensorSlices.prototype.data = null; + +$root.tensorflow.Event = class Event { + + constructor() { + } + + get what() { + $root.tensorflow.Event.whatSet = $root.tensorflow.Event.whatSet || new Set([ "file_version", "graph_def", "summary", "log_message", "session_log", "tagged_run_metadata", "meta_graph_def"]); + return Object.keys(this).find((key) => $root.tensorflow.Event.whatSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.tensorflow.Event(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wall_time = reader.double(); + break; + case 2: + message.step = reader.int64(); + break; + case 3: + message.file_version = reader.string(); + break; + case 4: + message.graph_def = reader.bytes(); + break; + case 5: + message.summary = $root.tensorflow.Summary.decode(reader, reader.uint32()); + break; + case 6: + message.log_message = $root.tensorflow.LogMessage.decode(reader, reader.uint32()); + break; + case 7: + message.session_log = $root.tensorflow.SessionLog.decode(reader, reader.uint32()); + break; + case 8: + message.tagged_run_metadata = $root.tensorflow.TaggedRunMetadata.decode(reader, reader.uint32()); + break; + case 9: + message.meta_graph_def = reader.bytes(); + break; + case 10: + message.source_metadata = $root.tensorflow.SourceMetadata.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.Event(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "wall_time": + message.wall_time = reader.double(); + break; + case "step": + message.step = reader.int64(); + break; + case "file_version": + message.file_version = reader.string(); + break; + case "graph_def": + message.graph_def = reader.bytes(); + break; + case "summary": + message.summary = $root.tensorflow.Summary.decodeText(reader); + break; + case "log_message": + message.log_message = $root.tensorflow.LogMessage.decodeText(reader); + break; + case "session_log": + message.session_log = $root.tensorflow.SessionLog.decodeText(reader); + break; + case "tagged_run_metadata": + message.tagged_run_metadata = $root.tensorflow.TaggedRunMetadata.decodeText(reader); + break; + case "meta_graph_def": + message.meta_graph_def = reader.bytes(); + break; + case "source_metadata": + message.source_metadata = $root.tensorflow.SourceMetadata.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.Event.prototype.wall_time = 0; +$root.tensorflow.Event.prototype.step = protobuf.Int64.create(0); +$root.tensorflow.Event.prototype.source_metadata = null; + +$root.tensorflow.SourceMetadata = class SourceMetadata { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SourceMetadata(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.writer = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SourceMetadata(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "writer": + message.writer = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SourceMetadata.prototype.writer = ""; + +$root.tensorflow.LogMessage = class LogMessage { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.LogMessage(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.level = reader.int32(); + break; + case 2: + message.message = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.LogMessage(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "level": + message.level = reader.enum($root.tensorflow.LogMessage.Level); + break; + case "message": + message.message = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.LogMessage.prototype.level = 0; +$root.tensorflow.LogMessage.prototype.message = ""; + +$root.tensorflow.LogMessage.Level = { + "UNKNOWN": 0, + "DEBUGGING": 10, + "INFO": 20, + "WARN": 30, + "ERROR": 40, + "FATAL": 50 +}; + +$root.tensorflow.SessionLog = class SessionLog { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SessionLog(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.status = reader.int32(); + break; + case 2: + message.checkpoint_path = reader.string(); + break; + case 3: + message.msg = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SessionLog(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "status": + message.status = reader.enum($root.tensorflow.SessionLog.SessionStatus); + break; + case "checkpoint_path": + message.checkpoint_path = reader.string(); + break; + case "msg": + message.msg = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SessionLog.prototype.status = 0; +$root.tensorflow.SessionLog.prototype.checkpoint_path = ""; +$root.tensorflow.SessionLog.prototype.msg = ""; + +$root.tensorflow.SessionLog.SessionStatus = { + "STATUS_UNSPECIFIED": 0, + "START": 1, + "STOP": 2, + "CHECKPOINT": 3 +}; + +$root.tensorflow.TaggedRunMetadata = class TaggedRunMetadata { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.TaggedRunMetadata(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tag = reader.string(); + break; + case 2: + message.run_metadata = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TaggedRunMetadata(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "tag": + message.tag = reader.string(); + break; + case "run_metadata": + message.run_metadata = reader.bytes(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TaggedRunMetadata.prototype.tag = ""; +$root.tensorflow.TaggedRunMetadata.prototype.run_metadata = new Uint8Array([]); + +$root.tensorflow.WorkerHealth = { + "OK": 0, + "RECEIVED_SHUTDOWN_SIGNAL": 1, + "INTERNAL_ERROR": 2, + "SHUTTING_DOWN": 3 +}; + +$root.tensorflow.WorkerShutdownMode = { + "DEFAULT": 0, + "NOT_CONFIGURED": 1, + "WAIT_FOR_COORDINATOR": 2, + "SHUTDOWN_AFTER_TIMEOUT": 3 +}; + +$root.tensorflow.WatchdogConfig = class WatchdogConfig { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.WatchdogConfig(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.timeout_ms = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.WatchdogConfig(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "timeout_ms": + message.timeout_ms = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.WatchdogConfig.prototype.timeout_ms = protobuf.Int64.create(0); + +$root.tensorflow.RequestedExitCode = class RequestedExitCode { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.RequestedExitCode(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.exit_code = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.RequestedExitCode(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "exit_code": + message.exit_code = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RequestedExitCode.prototype.exit_code = 0; + +$root.tensorflow.WorkerHeartbeatRequest = class WorkerHeartbeatRequest { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.WorkerHeartbeatRequest(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shutdown_mode = reader.int32(); + break; + case 2: + message.watchdog_config = $root.tensorflow.WatchdogConfig.decode(reader, reader.uint32()); + break; + case 3: + message.exit_code = $root.tensorflow.RequestedExitCode.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.WorkerHeartbeatRequest(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "shutdown_mode": + message.shutdown_mode = reader.enum($root.tensorflow.WorkerShutdownMode); + break; + case "watchdog_config": + message.watchdog_config = $root.tensorflow.WatchdogConfig.decodeText(reader); + break; + case "exit_code": + message.exit_code = $root.tensorflow.RequestedExitCode.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.WorkerHeartbeatRequest.prototype.shutdown_mode = 0; +$root.tensorflow.WorkerHeartbeatRequest.prototype.watchdog_config = null; +$root.tensorflow.WorkerHeartbeatRequest.prototype.exit_code = null; + +$root.tensorflow.WorkerHeartbeatResponse = class WorkerHeartbeatResponse { + + constructor() { + this.worker_log = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.WorkerHeartbeatResponse(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.health_status = reader.int32(); + break; + case 2: + message.worker_log.push($root.tensorflow.Event.decode(reader, reader.uint32())); + break; + case 3: + message.hostname = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.WorkerHeartbeatResponse(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "health_status": + message.health_status = reader.enum($root.tensorflow.WorkerHealth); + break; + case "worker_log": + message.worker_log.push($root.tensorflow.Event.decodeText(reader)); + break; + case "hostname": + message.hostname = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.WorkerHeartbeatResponse.prototype.health_status = 0; +$root.tensorflow.WorkerHeartbeatResponse.prototype.hostname = ""; + +$root.tensorflow.SummaryDescription = class SummaryDescription { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SummaryDescription(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type_hint = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SummaryDescription(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "type_hint": + message.type_hint = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SummaryDescription.prototype.type_hint = ""; + +$root.tensorflow.SummaryMetadata = class SummaryMetadata { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SummaryMetadata(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.plugin_data = $root.tensorflow.SummaryMetadata.PluginData.decode(reader, reader.uint32()); + break; + case 2: + message.display_name = reader.string(); + break; + case 3: + message.summary_description = reader.string(); + break; + case 4: + message.data_class = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SummaryMetadata(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "plugin_data": + message.plugin_data = $root.tensorflow.SummaryMetadata.PluginData.decodeText(reader); + break; + case "display_name": + message.display_name = reader.string(); + break; + case "summary_description": + message.summary_description = reader.string(); + break; + case "data_class": + message.data_class = reader.enum($root.tensorflow.DataClass); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SummaryMetadata.prototype.plugin_data = null; +$root.tensorflow.SummaryMetadata.prototype.display_name = ""; +$root.tensorflow.SummaryMetadata.prototype.summary_description = ""; +$root.tensorflow.SummaryMetadata.prototype.data_class = 0; + +$root.tensorflow.SummaryMetadata.PluginData = class PluginData { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SummaryMetadata.PluginData(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.plugin_name = reader.string(); + break; + case 2: + message.content = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SummaryMetadata.PluginData(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "plugin_name": + message.plugin_name = reader.string(); + break; + case "content": + message.content = reader.bytes(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SummaryMetadata.PluginData.prototype.plugin_name = ""; +$root.tensorflow.SummaryMetadata.PluginData.prototype.content = new Uint8Array([]); + +$root.tensorflow.DataClass = { + "DATA_CLASS_UNKNOWN": 0, + "DATA_CLASS_SCALAR": 1, + "DATA_CLASS_TENSOR": 2, + "DATA_CLASS_BLOB_SEQUENCE": 3 +}; + +$root.tensorflow.Summary = class Summary { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.Summary(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value.push($root.tensorflow.Summary.Value.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.Summary(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + message.value.push($root.tensorflow.Summary.Value.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.Summary.Image = class Image { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.Summary.Image(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.height = reader.int32(); + break; + case 2: + message.width = reader.int32(); + break; + case 3: + message.colorspace = reader.int32(); + break; + case 4: + message.encoded_image_string = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.Summary.Image(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "height": + message.height = reader.int32(); + break; + case "width": + message.width = reader.int32(); + break; + case "colorspace": + message.colorspace = reader.int32(); + break; + case "encoded_image_string": + message.encoded_image_string = reader.bytes(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.Summary.Image.prototype.height = 0; +$root.tensorflow.Summary.Image.prototype.width = 0; +$root.tensorflow.Summary.Image.prototype.colorspace = 0; +$root.tensorflow.Summary.Image.prototype.encoded_image_string = new Uint8Array([]); + +$root.tensorflow.Summary.Audio = class Audio { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.Summary.Audio(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sample_rate = reader.float(); + break; + case 2: + message.num_channels = reader.int64(); + break; + case 3: + message.length_frames = reader.int64(); + break; + case 4: + message.encoded_audio_string = reader.bytes(); + break; + case 5: + message.content_type = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.Summary.Audio(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "sample_rate": + message.sample_rate = reader.float(); + break; + case "num_channels": + message.num_channels = reader.int64(); + break; + case "length_frames": + message.length_frames = reader.int64(); + break; + case "encoded_audio_string": + message.encoded_audio_string = reader.bytes(); + break; + case "content_type": + message.content_type = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.Summary.Audio.prototype.sample_rate = 0; +$root.tensorflow.Summary.Audio.prototype.num_channels = protobuf.Int64.create(0); +$root.tensorflow.Summary.Audio.prototype.length_frames = protobuf.Int64.create(0); +$root.tensorflow.Summary.Audio.prototype.encoded_audio_string = new Uint8Array([]); +$root.tensorflow.Summary.Audio.prototype.content_type = ""; + +$root.tensorflow.Summary.Value = class Value { + + constructor() { + } + + get value() { + $root.tensorflow.Summary.Value.valueSet = $root.tensorflow.Summary.Value.valueSet || new Set([ "simple_value", "obsolete_old_style_histogram", "image", "histo", "audio", "tensor"]); + return Object.keys(this).find((key) => $root.tensorflow.Summary.Value.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.tensorflow.Summary.Value(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 7: + message.node_name = reader.string(); + break; + case 1: + message.tag = reader.string(); + break; + case 9: + message.metadata = $root.tensorflow.SummaryMetadata.decode(reader, reader.uint32()); + break; + case 2: + message.simple_value = reader.float(); + break; + case 3: + message.obsolete_old_style_histogram = reader.bytes(); + break; + case 4: + message.image = $root.tensorflow.Summary.Image.decode(reader, reader.uint32()); + break; + case 5: + message.histo = $root.tensorflow.HistogramProto.decode(reader, reader.uint32()); + break; + case 6: + message.audio = $root.tensorflow.Summary.Audio.decode(reader, reader.uint32()); + break; + case 8: + message.tensor = $root.tensorflow.TensorProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.Summary.Value(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "node_name": + message.node_name = reader.string(); + break; + case "tag": + message.tag = reader.string(); + break; + case "metadata": + message.metadata = $root.tensorflow.SummaryMetadata.decodeText(reader); + break; + case "simple_value": + message.simple_value = reader.float(); + break; + case "obsolete_old_style_histogram": + message.obsolete_old_style_histogram = reader.bytes(); + break; + case "image": + message.image = $root.tensorflow.Summary.Image.decodeText(reader); + break; + case "histo": + message.histo = $root.tensorflow.HistogramProto.decodeText(reader); + break; + case "audio": + message.audio = $root.tensorflow.Summary.Audio.decodeText(reader); + break; + case "tensor": + message.tensor = $root.tensorflow.TensorProto.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.Summary.Value.prototype.node_name = ""; +$root.tensorflow.Summary.Value.prototype.tag = ""; +$root.tensorflow.Summary.Value.prototype.metadata = null; + +$root.tensorflow.HistogramProto = class HistogramProto { + + constructor() { + this.bucket_limit = []; + this.bucket = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.HistogramProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.min = reader.double(); + break; + case 2: + message.max = reader.double(); + break; + case 3: + message.num = reader.double(); + break; + case 4: + message.sum = reader.double(); + break; + case 5: + message.sum_squares = reader.double(); + break; + case 6: + message.bucket_limit = reader.doubles(message.bucket_limit, tag); + break; + case 7: + message.bucket = reader.doubles(message.bucket, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.HistogramProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "min": + message.min = reader.double(); + break; + case "max": + message.max = reader.double(); + break; + case "num": + message.num = reader.double(); + break; + case "sum": + message.sum = reader.double(); + break; + case "sum_squares": + message.sum_squares = reader.double(); + break; + case "bucket_limit": + reader.array(message.bucket_limit, () => reader.double()); + break; + case "bucket": + reader.array(message.bucket, () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.HistogramProto.prototype.min = 0; +$root.tensorflow.HistogramProto.prototype.max = 0; +$root.tensorflow.HistogramProto.prototype.num = 0; +$root.tensorflow.HistogramProto.prototype.sum = 0; +$root.tensorflow.HistogramProto.prototype.sum_squares = 0; + +$root.tensorflow.GPUOptions = class GPUOptions { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.GPUOptions(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.per_process_gpu_memory_fraction = reader.double(); + break; + case 4: + message.allow_growth = reader.bool(); + break; + case 2: + message.allocator_type = reader.string(); + break; + case 3: + message.deferred_deletion_bytes = reader.int64(); + break; + case 5: + message.visible_device_list = reader.string(); + break; + case 6: + message.polling_active_delay_usecs = reader.int32(); + break; + case 7: + message.polling_inactive_delay_msecs = reader.int32(); + break; + case 8: + message.force_gpu_compatible = reader.bool(); + break; + case 9: + message.experimental = $root.tensorflow.GPUOptions.Experimental.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.GPUOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "per_process_gpu_memory_fraction": + message.per_process_gpu_memory_fraction = reader.double(); + break; + case "allow_growth": + message.allow_growth = reader.bool(); + break; + case "allocator_type": + message.allocator_type = reader.string(); + break; + case "deferred_deletion_bytes": + message.deferred_deletion_bytes = reader.int64(); + break; + case "visible_device_list": + message.visible_device_list = reader.string(); + break; + case "polling_active_delay_usecs": + message.polling_active_delay_usecs = reader.int32(); + break; + case "polling_inactive_delay_msecs": + message.polling_inactive_delay_msecs = reader.int32(); + break; + case "force_gpu_compatible": + message.force_gpu_compatible = reader.bool(); + break; + case "experimental": + message.experimental = $root.tensorflow.GPUOptions.Experimental.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.GPUOptions.prototype.per_process_gpu_memory_fraction = 0; +$root.tensorflow.GPUOptions.prototype.allow_growth = false; +$root.tensorflow.GPUOptions.prototype.allocator_type = ""; +$root.tensorflow.GPUOptions.prototype.deferred_deletion_bytes = protobuf.Int64.create(0); +$root.tensorflow.GPUOptions.prototype.visible_device_list = ""; +$root.tensorflow.GPUOptions.prototype.polling_active_delay_usecs = 0; +$root.tensorflow.GPUOptions.prototype.polling_inactive_delay_msecs = 0; +$root.tensorflow.GPUOptions.prototype.force_gpu_compatible = false; +$root.tensorflow.GPUOptions.prototype.experimental = null; + +$root.tensorflow.GPUOptions.Experimental = class Experimental { + + constructor() { + this.virtual_devices = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.GPUOptions.Experimental(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.virtual_devices.push($root.tensorflow.GPUOptions.Experimental.VirtualDevices.decode(reader, reader.uint32())); + break; + case 15: + message.num_virtual_devices_per_gpu = reader.int32(); + break; + case 2: + message.use_unified_memory = reader.bool(); + break; + case 3: + message.num_dev_to_dev_copy_streams = reader.int32(); + break; + case 4: + message.collective_ring_order = reader.string(); + break; + case 5: + message.timestamped_allocator = reader.bool(); + break; + case 7: + message.kernel_tracker_max_interval = reader.int32(); + break; + case 8: + message.kernel_tracker_max_bytes = reader.int32(); + break; + case 9: + message.kernel_tracker_max_pending = reader.int32(); + break; + case 10: + message.internal_fragmentation_fraction = reader.double(); + break; + case 11: + message.use_cuda_malloc_async = reader.bool(); + break; + case 12: + message.disallow_retry_on_allocation_failure = reader.bool(); + break; + case 13: + message.gpu_host_mem_limit_in_mb = reader.float(); + break; + case 14: + message.gpu_host_mem_disallow_growth = reader.bool(); + break; + case 16: + message.gpu_system_memory_size_in_mb = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.GPUOptions.Experimental(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "virtual_devices": + message.virtual_devices.push($root.tensorflow.GPUOptions.Experimental.VirtualDevices.decodeText(reader)); + break; + case "num_virtual_devices_per_gpu": + message.num_virtual_devices_per_gpu = reader.int32(); + break; + case "use_unified_memory": + message.use_unified_memory = reader.bool(); + break; + case "num_dev_to_dev_copy_streams": + message.num_dev_to_dev_copy_streams = reader.int32(); + break; + case "collective_ring_order": + message.collective_ring_order = reader.string(); + break; + case "timestamped_allocator": + message.timestamped_allocator = reader.bool(); + break; + case "kernel_tracker_max_interval": + message.kernel_tracker_max_interval = reader.int32(); + break; + case "kernel_tracker_max_bytes": + message.kernel_tracker_max_bytes = reader.int32(); + break; + case "kernel_tracker_max_pending": + message.kernel_tracker_max_pending = reader.int32(); + break; + case "internal_fragmentation_fraction": + message.internal_fragmentation_fraction = reader.double(); + break; + case "use_cuda_malloc_async": + message.use_cuda_malloc_async = reader.bool(); + break; + case "disallow_retry_on_allocation_failure": + message.disallow_retry_on_allocation_failure = reader.bool(); + break; + case "gpu_host_mem_limit_in_mb": + message.gpu_host_mem_limit_in_mb = reader.float(); + break; + case "gpu_host_mem_disallow_growth": + message.gpu_host_mem_disallow_growth = reader.bool(); + break; + case "gpu_system_memory_size_in_mb": + message.gpu_system_memory_size_in_mb = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.GPUOptions.Experimental.prototype.num_virtual_devices_per_gpu = 0; +$root.tensorflow.GPUOptions.Experimental.prototype.use_unified_memory = false; +$root.tensorflow.GPUOptions.Experimental.prototype.num_dev_to_dev_copy_streams = 0; +$root.tensorflow.GPUOptions.Experimental.prototype.collective_ring_order = ""; 
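+// Editorial note (illustrative sketch, not generated output): every binary
+// decode() in this file follows the same protobuf wire-format pattern. The
+// name decodeExample below is hypothetical; the reader interface (position,
+// length, uint32(), string(), skipType()) is the one already used throughout
+// this file. Each tag is a varint encoding (field_number << 3) | wire_type:
+// the field number selects a setter, and unknown fields are skipped by wire
+// type. Length-delimited submessages receive their byte length, so nested
+// decoding stops at position + length.
+const decodeExample = (reader, length) => {
+    const message = {};
+    // Submessage: stop after `length` bytes; top-level: read to buffer end.
+    const end = length !== undefined ? reader.position + length : reader.length;
+    while (reader.position < end) {
+        const tag = reader.uint32();
+        switch (tag >>> 3) {
+            case 1: // field 1, e.g. a string field such as `name`
+                message.name = reader.string();
+                break;
+            default: // skip unrecognized fields by their wire type (tag & 7)
+                reader.skipType(tag & 7);
+                break;
+        }
+    }
+    return message;
+};
+// For example, SaverDef.decode above maps field 1 to filename_tensor_name in
+// exactly this way, while its decodeText() variant dispatches on field names
+// ("filename_tensor_name") instead of numeric tags.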
+$root.tensorflow.GPUOptions.Experimental.prototype.timestamped_allocator = false; +$root.tensorflow.GPUOptions.Experimental.prototype.kernel_tracker_max_interval = 0; +$root.tensorflow.GPUOptions.Experimental.prototype.kernel_tracker_max_bytes = 0; +$root.tensorflow.GPUOptions.Experimental.prototype.kernel_tracker_max_pending = 0; +$root.tensorflow.GPUOptions.Experimental.prototype.internal_fragmentation_fraction = 0; +$root.tensorflow.GPUOptions.Experimental.prototype.use_cuda_malloc_async = false; +$root.tensorflow.GPUOptions.Experimental.prototype.disallow_retry_on_allocation_failure = false; +$root.tensorflow.GPUOptions.Experimental.prototype.gpu_host_mem_limit_in_mb = 0; +$root.tensorflow.GPUOptions.Experimental.prototype.gpu_host_mem_disallow_growth = false; +$root.tensorflow.GPUOptions.Experimental.prototype.gpu_system_memory_size_in_mb = 0; + +$root.tensorflow.GPUOptions.Experimental.VirtualDevices = class VirtualDevices { + + constructor() { + this.memory_limit_mb = []; + this.priority = []; + this.device_ordinal = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.GPUOptions.Experimental.VirtualDevices(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.memory_limit_mb = reader.floats(message.memory_limit_mb, tag); + break; + case 2: + message.priority = reader.array(message.priority, () => reader.int32(), tag); + break; + case 3: + message.device_ordinal = reader.array(message.device_ordinal, () => reader.int32(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.GPUOptions.Experimental.VirtualDevices(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "memory_limit_mb": + reader.array(message.memory_limit_mb, () => reader.float()); + break; + case "priority": + reader.array(message.priority, () => reader.int32()); + break; + case "device_ordinal": + reader.array(message.device_ordinal, () => reader.int32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.OptimizerOptions = class OptimizerOptions { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.OptimizerOptions(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.do_common_subexpression_elimination = reader.bool(); + break; + case 2: + message.do_constant_folding = reader.bool(); + break; + case 6: + message.max_folded_constant_in_bytes = reader.int64(); + break; + case 4: + message.do_function_inlining = reader.bool(); + break; + case 3: + message.opt_level = reader.int32(); + break; + case 5: + message.global_jit_level = reader.int32(); + break; + case 7: + message.cpu_global_jit = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.OptimizerOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "do_common_subexpression_elimination": + message.do_common_subexpression_elimination = reader.bool(); + break; + case "do_constant_folding": + message.do_constant_folding = reader.bool(); + break; + case "max_folded_constant_in_bytes": + message.max_folded_constant_in_bytes = reader.int64(); + break; + case "do_function_inlining": + message.do_function_inlining = reader.bool(); + break; + case "opt_level": + message.opt_level = reader.enum($root.tensorflow.OptimizerOptions.Level); + break; + case "global_jit_level": + message.global_jit_level = reader.enum($root.tensorflow.OptimizerOptions.GlobalJitLevel); + break; + case "cpu_global_jit": + message.cpu_global_jit = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.OptimizerOptions.prototype.do_common_subexpression_elimination = false; +$root.tensorflow.OptimizerOptions.prototype.do_constant_folding = false; +$root.tensorflow.OptimizerOptions.prototype.max_folded_constant_in_bytes = protobuf.Int64.create(0); +$root.tensorflow.OptimizerOptions.prototype.do_function_inlining = false; +$root.tensorflow.OptimizerOptions.prototype.opt_level = 0; +$root.tensorflow.OptimizerOptions.prototype.global_jit_level = 0; +$root.tensorflow.OptimizerOptions.prototype.cpu_global_jit = false; + +$root.tensorflow.OptimizerOptions.Level = { + "L1": 0, + "L0": -1 +}; + +$root.tensorflow.OptimizerOptions.GlobalJitLevel = { + "DEFAULT": 0, + "OFF": -1, + "ON_1": 1, + "ON_2": 2 +}; + +$root.tensorflow.GraphOptions = class GraphOptions { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.GraphOptions(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.enable_recv_scheduling = reader.bool(); + break; + case 3: + message.optimizer_options = $root.tensorflow.OptimizerOptions.decode(reader, reader.uint32()); + break; + case 4: + message.build_cost_model = reader.int64(); + break; + case 9: + message.build_cost_model_after = reader.int64(); + break; + case 5: + message.infer_shapes = reader.bool(); + break; + case 6: + message.place_pruned_graph = reader.bool(); + break; + case 7: + message.enable_bfloat16_sendrecv = reader.bool(); + break; + case 8: + message.timeline_step = reader.int32(); + break; + case 10: + message.rewrite_options = $root.tensorflow.RewriterConfig.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.GraphOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "enable_recv_scheduling": + message.enable_recv_scheduling = reader.bool(); + break; + case "optimizer_options": + message.optimizer_options = $root.tensorflow.OptimizerOptions.decodeText(reader); + break; + case "build_cost_model": + message.build_cost_model = reader.int64(); + break; + case "build_cost_model_after": + message.build_cost_model_after = reader.int64(); + break; + case "infer_shapes": + message.infer_shapes = reader.bool(); + break; + case "place_pruned_graph": + message.place_pruned_graph = reader.bool(); + break; + case "enable_bfloat16_sendrecv": + message.enable_bfloat16_sendrecv = reader.bool(); + break; + case "timeline_step": + message.timeline_step = reader.int32(); + break; + case "rewrite_options": + message.rewrite_options = $root.tensorflow.RewriterConfig.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.GraphOptions.prototype.enable_recv_scheduling = false; +$root.tensorflow.GraphOptions.prototype.optimizer_options = null; +$root.tensorflow.GraphOptions.prototype.build_cost_model = protobuf.Int64.create(0); +$root.tensorflow.GraphOptions.prototype.build_cost_model_after = protobuf.Int64.create(0); +$root.tensorflow.GraphOptions.prototype.infer_shapes = false; +$root.tensorflow.GraphOptions.prototype.place_pruned_graph = false; +$root.tensorflow.GraphOptions.prototype.enable_bfloat16_sendrecv = false; +$root.tensorflow.GraphOptions.prototype.timeline_step = 0; +$root.tensorflow.GraphOptions.prototype.rewrite_options = null; + +$root.tensorflow.ThreadPoolOptionProto = class ThreadPoolOptionProto { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.ThreadPoolOptionProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_threads = reader.int32(); + break; + case 2: + message.global_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.ThreadPoolOptionProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "num_threads": + message.num_threads = reader.int32(); + break; + case "global_name": + message.global_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.ThreadPoolOptionProto.prototype.num_threads = 0; +$root.tensorflow.ThreadPoolOptionProto.prototype.global_name = ""; + +$root.tensorflow.SessionMetadata = class SessionMetadata { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.SessionMetadata(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.version = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.SessionMetadata(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "version": + message.version = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.SessionMetadata.prototype.name = ""; +$root.tensorflow.SessionMetadata.prototype.version = protobuf.Int64.create(0); + +$root.tensorflow.ConfigProto = class ConfigProto { + + constructor() { + this.device_count = {}; + this.session_inter_op_thread_pool = []; + this.device_filters = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.ConfigProto(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.device_count, () => reader.string(), () => reader.int32()); + break; + case 2: + message.intra_op_parallelism_threads = reader.int32(); + break; + case 5: + message.inter_op_parallelism_threads = reader.int32(); + break; + case 9: + message.use_per_session_threads = reader.bool(); + break; + case 12: + message.session_inter_op_thread_pool.push($root.tensorflow.ThreadPoolOptionProto.decode(reader, reader.uint32())); + break; + case 3: + message.placement_period = reader.int32(); + break; + case 4: + message.device_filters.push(reader.string()); + break; + case 6: + message.gpu_options = $root.tensorflow.GPUOptions.decode(reader, reader.uint32()); + break; + case 7: + message.allow_soft_placement = reader.bool(); + break; + case 8: + message.log_device_placement = reader.bool(); + break; + case 10: + message.graph_options = $root.tensorflow.GraphOptions.decode(reader, reader.uint32()); + break; + case 11: + message.operation_timeout_in_ms = reader.int64(); + break; + case 13: + message.rpc_options = $root.tensorflow.RPCOptions.decode(reader, reader.uint32()); + break; + case 14: + message.cluster_def = $root.tensorflow.ClusterDef.decode(reader, reader.uint32()); + break; + case 15: + message.isolate_session_state = reader.bool(); + break; + case 17: + message.share_cluster_devices_in_session = reader.bool(); + break; + case 16: + message.experimental = $root.tensorflow.ConfigProto.Experimental.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.ConfigProto(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "device_count": + reader.entry(message.device_count, () => reader.string(), () => reader.int32()); + break; + case "intra_op_parallelism_threads": + message.intra_op_parallelism_threads = reader.int32(); + break; + case "inter_op_parallelism_threads": + message.inter_op_parallelism_threads = reader.int32(); + break; + case "use_per_session_threads": + message.use_per_session_threads = reader.bool(); + break; + case "session_inter_op_thread_pool": + message.session_inter_op_thread_pool.push($root.tensorflow.ThreadPoolOptionProto.decodeText(reader)); + break; + case "placement_period": + message.placement_period = reader.int32(); + break; + case "device_filters": + reader.array(message.device_filters, () => reader.string()); + break; + case "gpu_options": + message.gpu_options = $root.tensorflow.GPUOptions.decodeText(reader); + break; + case "allow_soft_placement": + message.allow_soft_placement = reader.bool(); + break; + case "log_device_placement": + message.log_device_placement = reader.bool(); + break; + case "graph_options": + message.graph_options = $root.tensorflow.GraphOptions.decodeText(reader); + break; + case "operation_timeout_in_ms": + message.operation_timeout_in_ms = reader.int64(); + break; + case "rpc_options": + message.rpc_options = $root.tensorflow.RPCOptions.decodeText(reader); + break; + case "cluster_def": + message.cluster_def = $root.tensorflow.ClusterDef.decodeText(reader); + break; + case "isolate_session_state": + message.isolate_session_state = reader.bool(); + break; + case "share_cluster_devices_in_session": + message.share_cluster_devices_in_session = reader.bool(); + break; + case "experimental": + 
message.experimental = $root.tensorflow.ConfigProto.Experimental.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.ConfigProto.prototype.intra_op_parallelism_threads = 0; +$root.tensorflow.ConfigProto.prototype.inter_op_parallelism_threads = 0; +$root.tensorflow.ConfigProto.prototype.use_per_session_threads = false; +$root.tensorflow.ConfigProto.prototype.placement_period = 0; +$root.tensorflow.ConfigProto.prototype.gpu_options = null; +$root.tensorflow.ConfigProto.prototype.allow_soft_placement = false; +$root.tensorflow.ConfigProto.prototype.log_device_placement = false; +$root.tensorflow.ConfigProto.prototype.graph_options = null; +$root.tensorflow.ConfigProto.prototype.operation_timeout_in_ms = protobuf.Int64.create(0); +$root.tensorflow.ConfigProto.prototype.rpc_options = null; +$root.tensorflow.ConfigProto.prototype.cluster_def = null; +$root.tensorflow.ConfigProto.prototype.isolate_session_state = false; +$root.tensorflow.ConfigProto.prototype.share_cluster_devices_in_session = false; +$root.tensorflow.ConfigProto.prototype.experimental = null; + +$root.tensorflow.ConfigProto.Experimental = class Experimental { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.ConfigProto.Experimental(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.collective_group_leader = reader.string(); + break; + case 3: + message.executor_type = reader.string(); + break; + case 4: + message.recv_buf_max_chunk = reader.int32(); + break; + case 5: + message.use_numa_affinity = reader.bool(); + break; + case 6: + message.collective_deterministic_sequential_execution = reader.bool(); + break; + case 7: + message.collective_nccl = reader.bool(); + break; + case 8: + message.share_session_state_in_clusterspec_propagation = reader.bool(); + break; + case 9: + message.disable_thread_spinning = reader.bool(); + break; + case 10: + message.share_cluster_devices_in_session = reader.bool(); + break; + case 11: + message.session_metadata = $root.tensorflow.SessionMetadata.decode(reader, reader.uint32()); + break; + case 12: + message.optimize_for_static_graph = reader.bool(); + break; + case 13: + message.enable_mlir_bridge = reader.bool(); + break; + case 17: + message.mlir_bridge_rollout = reader.int32(); + break; + case 16: + message.enable_mlir_graph_optimization = reader.bool(); + break; + case 14: + message.disable_output_partition_graphs = reader.bool(); + break; + case 15: + message.xla_fusion_autotuner_thresh = reader.int64(); + break; + case 18: + message.use_tfrt = reader.bool(); + break; + case 27: + message.enable_multi_host = reader.bool(); + break; + case 28: + message.backend_server_port = reader.int32(); + break; + case 29: + message.target_tpu = reader.bool(); + break; + case 30: + message.target_gpu = reader.bool(); + break; + case 31: + message.stream_merge_threshold = reader.int32(); + break; + case 21: + message.disable_functional_ops_lowering = reader.bool(); + break; + case 22: + message.xla_prefer_single_graph_cluster = reader.bool(); + break; + case 23: + message.coordination_config = $root.tensorflow.CoordinationServiceConfig.decode(reader, reader.uint32()); + break; + case 24: + message.disable_optimize_for_static_graph = reader.bool(); + break; + case 26: + message.disable_eager_executor_streaming_enqueue = reader.bool(); + 
break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.ConfigProto.Experimental(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "collective_group_leader": + message.collective_group_leader = reader.string(); + break; + case "executor_type": + message.executor_type = reader.string(); + break; + case "recv_buf_max_chunk": + message.recv_buf_max_chunk = reader.int32(); + break; + case "use_numa_affinity": + message.use_numa_affinity = reader.bool(); + break; + case "collective_deterministic_sequential_execution": + message.collective_deterministic_sequential_execution = reader.bool(); + break; + case "collective_nccl": + message.collective_nccl = reader.bool(); + break; + case "share_session_state_in_clusterspec_propagation": + message.share_session_state_in_clusterspec_propagation = reader.bool(); + break; + case "disable_thread_spinning": + message.disable_thread_spinning = reader.bool(); + break; + case "share_cluster_devices_in_session": + message.share_cluster_devices_in_session = reader.bool(); + break; + case "session_metadata": + message.session_metadata = $root.tensorflow.SessionMetadata.decodeText(reader); + break; + case "optimize_for_static_graph": + message.optimize_for_static_graph = reader.bool(); + break; + case "enable_mlir_bridge": + message.enable_mlir_bridge = reader.bool(); + break; + case "mlir_bridge_rollout": + message.mlir_bridge_rollout = reader.enum($root.tensorflow.ConfigProto.Experimental.MlirBridgeRollout); + break; + case "enable_mlir_graph_optimization": + message.enable_mlir_graph_optimization = reader.bool(); + break; + case "disable_output_partition_graphs": + message.disable_output_partition_graphs = reader.bool(); + break; + case "xla_fusion_autotuner_thresh": + message.xla_fusion_autotuner_thresh = reader.int64(); + break; + case "use_tfrt": + message.use_tfrt = reader.bool(); + break; + case "enable_multi_host": + message.enable_multi_host = reader.bool(); + break; + case "backend_server_port": + message.backend_server_port = reader.int32(); + break; + case "target_tpu": + message.target_tpu = reader.bool(); + break; + case "target_gpu": + message.target_gpu = reader.bool(); + break; + case "stream_merge_threshold": + message.stream_merge_threshold = reader.int32(); + break; + case "disable_functional_ops_lowering": + message.disable_functional_ops_lowering = reader.bool(); + break; + case "xla_prefer_single_graph_cluster": + message.xla_prefer_single_graph_cluster = reader.bool(); + break; + case "coordination_config": + message.coordination_config = $root.tensorflow.CoordinationServiceConfig.decodeText(reader); + break; + case "disable_optimize_for_static_graph": + message.disable_optimize_for_static_graph = reader.bool(); + break; + case "disable_eager_executor_streaming_enqueue": + message.disable_eager_executor_streaming_enqueue = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.ConfigProto.Experimental.prototype.collective_group_leader = ""; +$root.tensorflow.ConfigProto.Experimental.prototype.executor_type = ""; +$root.tensorflow.ConfigProto.Experimental.prototype.recv_buf_max_chunk = 0; +$root.tensorflow.ConfigProto.Experimental.prototype.use_numa_affinity = false; +$root.tensorflow.ConfigProto.Experimental.prototype.collective_deterministic_sequential_execution = false; 
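+// Note (illustrative sketch, not part of the generated schema): defaults
+// are installed on the prototype rather than assigned in the
+// constructor, so a decoded message only carries own-properties for
+// fields actually present on the wire; absent fields fall back to the
+// shared prototype value at zero per-instance cost. `Msg` below is a
+// hypothetical stand-in using only plain JavaScript:
+//
+//   class Msg {}
+//   Msg.prototype.use_tfrt = false;                        // shared default
+//   const m = new Msg();
+//   m.use_tfrt;                                            // false, from prototype
+//   Object.prototype.hasOwnProperty.call(m, 'use_tfrt');   // false
+//   m.use_tfrt = true;                                     // what decode() does
+//   Object.prototype.hasOwnProperty.call(m, 'use_tfrt');   // true
+//
+// Map-typed fields (e.g. ConfigProto.device_count above) have no
+// prototype default; the constructor seeds them with `{}` and
+// `reader.entry(...)` fills them one key/value pair at a time.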
+$root.tensorflow.ConfigProto.Experimental.prototype.collective_nccl = false; +$root.tensorflow.ConfigProto.Experimental.prototype.share_session_state_in_clusterspec_propagation = false; +$root.tensorflow.ConfigProto.Experimental.prototype.disable_thread_spinning = false; +$root.tensorflow.ConfigProto.Experimental.prototype.share_cluster_devices_in_session = false; +$root.tensorflow.ConfigProto.Experimental.prototype.session_metadata = null; +$root.tensorflow.ConfigProto.Experimental.prototype.optimize_for_static_graph = false; +$root.tensorflow.ConfigProto.Experimental.prototype.enable_mlir_bridge = false; +$root.tensorflow.ConfigProto.Experimental.prototype.mlir_bridge_rollout = 0; +$root.tensorflow.ConfigProto.Experimental.prototype.enable_mlir_graph_optimization = false; +$root.tensorflow.ConfigProto.Experimental.prototype.disable_output_partition_graphs = false; +$root.tensorflow.ConfigProto.Experimental.prototype.xla_fusion_autotuner_thresh = protobuf.Int64.create(0); +$root.tensorflow.ConfigProto.Experimental.prototype.use_tfrt = false; +$root.tensorflow.ConfigProto.Experimental.prototype.enable_multi_host = false; +$root.tensorflow.ConfigProto.Experimental.prototype.backend_server_port = 0; +$root.tensorflow.ConfigProto.Experimental.prototype.target_tpu = false; +$root.tensorflow.ConfigProto.Experimental.prototype.target_gpu = false; +$root.tensorflow.ConfigProto.Experimental.prototype.stream_merge_threshold = 0; +$root.tensorflow.ConfigProto.Experimental.prototype.disable_functional_ops_lowering = false; +$root.tensorflow.ConfigProto.Experimental.prototype.xla_prefer_single_graph_cluster = false; +$root.tensorflow.ConfigProto.Experimental.prototype.coordination_config = null; +$root.tensorflow.ConfigProto.Experimental.prototype.disable_optimize_for_static_graph = false; +$root.tensorflow.ConfigProto.Experimental.prototype.disable_eager_executor_streaming_enqueue = false; + +$root.tensorflow.ConfigProto.Experimental.MlirBridgeRollout = { + "MLIR_BRIDGE_ROLLOUT_UNSPECIFIED": 0, + "MLIR_BRIDGE_ROLLOUT_ENABLED": 1, + "MLIR_BRIDGE_ROLLOUT_DISABLED": 2 +}; + +$root.tensorflow.RunOptions = class RunOptions { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.RunOptions(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.trace_level = reader.int32(); + break; + case 2: + message.timeout_in_ms = reader.int64(); + break; + case 3: + message.inter_op_thread_pool = reader.int32(); + break; + case 5: + message.output_partition_graphs = reader.bool(); + break; + case 6: + message.debug_options = $root.tensorflow.DebugOptions.decode(reader, reader.uint32()); + break; + case 7: + message.report_tensor_allocations_upon_oom = reader.bool(); + break; + case 8: + message.experimental = $root.tensorflow.RunOptions.Experimental.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.RunOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "trace_level": + message.trace_level = reader.enum($root.tensorflow.RunOptions.TraceLevel); + break; + case "timeout_in_ms": + message.timeout_in_ms = reader.int64(); + break; + case "inter_op_thread_pool": + message.inter_op_thread_pool = reader.int32(); + break; + case "output_partition_graphs": + message.output_partition_graphs = reader.bool(); + break; + case "debug_options": + message.debug_options = $root.tensorflow.DebugOptions.decodeText(reader); + break; + case "report_tensor_allocations_upon_oom": + message.report_tensor_allocations_upon_oom = reader.bool(); + break; + case "experimental": + message.experimental = $root.tensorflow.RunOptions.Experimental.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RunOptions.prototype.trace_level = 0; +$root.tensorflow.RunOptions.prototype.timeout_in_ms = protobuf.Int64.create(0); +$root.tensorflow.RunOptions.prototype.inter_op_thread_pool = 0; +$root.tensorflow.RunOptions.prototype.output_partition_graphs = false; +$root.tensorflow.RunOptions.prototype.debug_options = null; +$root.tensorflow.RunOptions.prototype.report_tensor_allocations_upon_oom = false; +$root.tensorflow.RunOptions.prototype.experimental = null; + +$root.tensorflow.RunOptions.TraceLevel = { + "NO_TRACE": 0, + "SOFTWARE_TRACE": 1, + "HARDWARE_TRACE": 2, + "FULL_TRACE": 3 +}; + +$root.tensorflow.RunOptions.Experimental = class Experimental { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.RunOptions.Experimental(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.collective_graph_key = reader.int64(); + break; + case 2: + message.use_run_handler_pool = reader.bool(); + break; + case 3: + message.run_handler_pool_options = $root.tensorflow.RunOptions.Experimental.RunHandlerPoolOptions.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.RunOptions.Experimental(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "collective_graph_key": + message.collective_graph_key = reader.int64(); + break; + case "use_run_handler_pool": + message.use_run_handler_pool = reader.bool(); + break; + case "run_handler_pool_options": + message.run_handler_pool_options = $root.tensorflow.RunOptions.Experimental.RunHandlerPoolOptions.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RunOptions.Experimental.prototype.collective_graph_key = protobuf.Int64.create(0); +$root.tensorflow.RunOptions.Experimental.prototype.use_run_handler_pool = false; +$root.tensorflow.RunOptions.Experimental.prototype.run_handler_pool_options = null; + +$root.tensorflow.RunOptions.Experimental.RunHandlerPoolOptions = class RunHandlerPoolOptions { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.RunOptions.Experimental.RunHandlerPoolOptions(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.priority = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.RunOptions.Experimental.RunHandlerPoolOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "priority": + message.priority = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RunOptions.Experimental.RunHandlerPoolOptions.prototype.priority = protobuf.Int64.create(0); + +$root.tensorflow.RunMetadata = class RunMetadata { + + constructor() { + this.partition_graphs = []; + this.function_graphs = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.RunMetadata(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.step_stats = $root.tensorflow.StepStats.decode(reader, reader.uint32()); + break; + case 2: + message.cost_graph = $root.tensorflow.CostGraphDef.decode(reader, reader.uint32()); + break; + case 3: + message.partition_graphs.push($root.tensorflow.GraphDef.decode(reader, reader.uint32())); + break; + case 4: + message.function_graphs.push($root.tensorflow.RunMetadata.FunctionGraphs.decode(reader, reader.uint32())); + break; + case 5: + message.session_metadata = $root.tensorflow.SessionMetadata.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.RunMetadata(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "step_stats": + message.step_stats = $root.tensorflow.StepStats.decodeText(reader); + break; + case "cost_graph": + message.cost_graph = $root.tensorflow.CostGraphDef.decodeText(reader); + break; + case "partition_graphs": + message.partition_graphs.push($root.tensorflow.GraphDef.decodeText(reader)); + break; + case "function_graphs": + message.function_graphs.push($root.tensorflow.RunMetadata.FunctionGraphs.decodeText(reader)); + break; + case "session_metadata": + message.session_metadata = $root.tensorflow.SessionMetadata.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RunMetadata.prototype.step_stats = null; +$root.tensorflow.RunMetadata.prototype.cost_graph = null; +$root.tensorflow.RunMetadata.prototype.session_metadata = null; + +$root.tensorflow.RunMetadata.FunctionGraphs = class FunctionGraphs { + + constructor() { + this.partition_graphs = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.RunMetadata.FunctionGraphs(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.partition_graphs.push($root.tensorflow.GraphDef.decode(reader, reader.uint32())); + break; + case 2: + message.pre_optimization_graph = $root.tensorflow.GraphDef.decode(reader, reader.uint32()); + break; + case 3: + message.post_optimization_graph = $root.tensorflow.GraphDef.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.RunMetadata.FunctionGraphs(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "partition_graphs": + message.partition_graphs.push($root.tensorflow.GraphDef.decodeText(reader)); + break; + case "pre_optimization_graph": + message.pre_optimization_graph = $root.tensorflow.GraphDef.decodeText(reader); + break; + case "post_optimization_graph": + message.post_optimization_graph = $root.tensorflow.GraphDef.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RunMetadata.FunctionGraphs.prototype.pre_optimization_graph = null; +$root.tensorflow.RunMetadata.FunctionGraphs.prototype.post_optimization_graph = null; + +$root.tensorflow.TensorConnection = class TensorConnection { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.TensorConnection(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.from_tensor = reader.string(); + break; + case 2: + message.to_tensor = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TensorConnection(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "from_tensor": + message.from_tensor = reader.string(); + break; + case "to_tensor": + message.to_tensor = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorConnection.prototype.from_tensor = ""; +$root.tensorflow.TensorConnection.prototype.to_tensor = ""; + +$root.tensorflow.CallableOptions = class CallableOptions { + + constructor() { + this.feed = []; + this.fetch = []; + this.target = []; + this.tensor_connection = []; + this.feed_devices = {}; + this.fetch_devices = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.CallableOptions(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.feed.push(reader.string()); + break; + case 2: + message.fetch.push(reader.string()); + break; + case 3: + message.target.push(reader.string()); + break; + case 4: + message.run_options = $root.tensorflow.RunOptions.decode(reader, reader.uint32()); + break; + case 5: + message.tensor_connection.push($root.tensorflow.TensorConnection.decode(reader, reader.uint32())); + break; + case 6: + reader.entry(message.feed_devices, () => reader.string(), () => reader.string()); + break; + case 7: + reader.entry(message.fetch_devices, () => reader.string(), () => reader.string()); + break; + case 8: + message.fetch_skip_sync = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CallableOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "feed": + reader.array(message.feed, () => reader.string()); + break; + case "fetch": + reader.array(message.fetch, () => reader.string()); + break; + case "target": + reader.array(message.target, () => reader.string()); + break; + case "run_options": + message.run_options = $root.tensorflow.RunOptions.decodeText(reader); + break; + case "tensor_connection": + message.tensor_connection.push($root.tensorflow.TensorConnection.decodeText(reader)); + break; + case "feed_devices": + reader.entry(message.feed_devices, () => reader.string(), () => reader.string()); + break; + case "fetch_devices": + reader.entry(message.fetch_devices, () => reader.string(), () => reader.string()); + break; + case "fetch_skip_sync": + message.fetch_skip_sync = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CallableOptions.prototype.run_options = null; +$root.tensorflow.CallableOptions.prototype.fetch_skip_sync = false; + +$root.tensorflow.CostGraphDef = class CostGraphDef { + + constructor() { + this.node = []; + this.cost = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.CostGraphDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.node.push($root.tensorflow.CostGraphDef.Node.decode(reader, reader.uint32())); + break; + case 2: + message.cost.push($root.tensorflow.CostGraphDef.AggregatedCost.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CostGraphDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "node": + message.node.push($root.tensorflow.CostGraphDef.Node.decodeText(reader)); + break; + case "cost": + message.cost.push($root.tensorflow.CostGraphDef.AggregatedCost.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CostGraphDef.Node = class Node { + + constructor() { + this.input_info = []; + this.output_info = []; + this.control_input = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.CostGraphDef.Node(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.device = reader.string(); + break; + case 3: + message.id = reader.int32(); + break; + case 4: + message.input_info.push($root.tensorflow.CostGraphDef.Node.InputInfo.decode(reader, reader.uint32())); + break; + case 5: + message.output_info.push($root.tensorflow.CostGraphDef.Node.OutputInfo.decode(reader, reader.uint32())); + break; + case 6: + message.temporary_memory_size = reader.int64(); + break; + case 12: + message.persistent_memory_size = reader.int64(); + break; + case 10: + message.host_temp_memory_size = reader.int64(); + break; + case 11: + message.device_temp_memory_size = reader.int64(); + break; + case 16: + message.device_persistent_memory_size = reader.int64(); + break; + case 9: + message.compute_cost = reader.int64(); + break; + case 14: + message.compute_time = reader.int64(); + break; + case 15: + message.memory_time = reader.int64(); + break; + case 7: + message.is_final = reader.bool(); + break; + case 8: + message.control_input = reader.array(message.control_input, () => reader.int32(), tag); + break; + case 17: + message.inaccurate = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CostGraphDef.Node(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "device": + message.device = reader.string(); + break; + case "id": + message.id = reader.int32(); + break; + case "input_info": + message.input_info.push($root.tensorflow.CostGraphDef.Node.InputInfo.decodeText(reader)); + break; + case "output_info": + message.output_info.push($root.tensorflow.CostGraphDef.Node.OutputInfo.decodeText(reader)); + break; + case "temporary_memory_size": + message.temporary_memory_size = reader.int64(); + break; + case "persistent_memory_size": + message.persistent_memory_size = reader.int64(); + break; + case "host_temp_memory_size": + message.host_temp_memory_size = reader.int64(); + break; + case "device_temp_memory_size": + message.device_temp_memory_size = reader.int64(); + break; + case "device_persistent_memory_size": + message.device_persistent_memory_size = reader.int64(); + break; + case "compute_cost": + message.compute_cost = reader.int64(); + break; + case "compute_time": + message.compute_time = reader.int64(); + break; + case "memory_time": + message.memory_time = reader.int64(); + break; + case "is_final": + message.is_final = reader.bool(); + break; + case "control_input": + reader.array(message.control_input, () => reader.int32()); + break; + case "inaccurate": + message.inaccurate = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CostGraphDef.Node.prototype.name = ""; +$root.tensorflow.CostGraphDef.Node.prototype.device = ""; +$root.tensorflow.CostGraphDef.Node.prototype.id = 0; +$root.tensorflow.CostGraphDef.Node.prototype.temporary_memory_size = protobuf.Int64.create(0); +$root.tensorflow.CostGraphDef.Node.prototype.persistent_memory_size = protobuf.Int64.create(0); +$root.tensorflow.CostGraphDef.Node.prototype.host_temp_memory_size = protobuf.Int64.create(0); +$root.tensorflow.CostGraphDef.Node.prototype.device_temp_memory_size = protobuf.Int64.create(0); 
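+// Note (illustrative sketch, not part of the generated schema):
+// `control_input` above is a repeated int32. Passing `tag` into
+// `reader.array(...)` lets the runtime accept both encodings proto3
+// allows for repeated scalars: packed (a single length-delimited record
+// holding many varints) and unpacked (one record per element).
+// `readInt32Array` below is a hypothetical re-derivation of that
+// dispatch, assuming a reader exposing `uint32()`, `int32()`, and
+// `position`:
+//
+//   function readInt32Array(reader, values, tag) {
+//     if ((tag & 7) === 2) {                        // packed, wire type 2
+//       const end = reader.position + reader.uint32();
+//       while (reader.position < end) {
+//         values.push(reader.int32());
+//       }
+//     } else {                                      // unpacked, one element
+//       values.push(reader.int32());
+//     }
+//     return values;
+//   }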
+$root.tensorflow.CostGraphDef.Node.prototype.device_persistent_memory_size = protobuf.Int64.create(0); +$root.tensorflow.CostGraphDef.Node.prototype.compute_cost = protobuf.Int64.create(0); +$root.tensorflow.CostGraphDef.Node.prototype.compute_time = protobuf.Int64.create(0); +$root.tensorflow.CostGraphDef.Node.prototype.memory_time = protobuf.Int64.create(0); +$root.tensorflow.CostGraphDef.Node.prototype.is_final = false; +$root.tensorflow.CostGraphDef.Node.prototype.inaccurate = false; + +$root.tensorflow.CostGraphDef.Node.InputInfo = class InputInfo { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.CostGraphDef.Node.InputInfo(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.preceding_node = reader.int32(); + break; + case 2: + message.preceding_port = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CostGraphDef.Node.InputInfo(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "preceding_node": + message.preceding_node = reader.int32(); + break; + case "preceding_port": + message.preceding_port = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CostGraphDef.Node.InputInfo.prototype.preceding_node = 0; +$root.tensorflow.CostGraphDef.Node.InputInfo.prototype.preceding_port = 0; + +$root.tensorflow.CostGraphDef.Node.OutputInfo = class OutputInfo { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.CostGraphDef.Node.OutputInfo(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = reader.int64(); + break; + case 2: + message.alias_input_port = reader.int64(); + break; + case 3: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 4: + message.dtype = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CostGraphDef.Node.OutputInfo(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "size": + message.size = reader.int64(); + break; + case "alias_input_port": + message.alias_input_port = reader.int64(); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CostGraphDef.Node.OutputInfo.prototype.size = protobuf.Int64.create(0); +$root.tensorflow.CostGraphDef.Node.OutputInfo.prototype.alias_input_port = protobuf.Int64.create(0); +$root.tensorflow.CostGraphDef.Node.OutputInfo.prototype.shape = null; +$root.tensorflow.CostGraphDef.Node.OutputInfo.prototype.dtype = 0; + +$root.tensorflow.CostGraphDef.AggregatedCost = class AggregatedCost { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.CostGraphDef.AggregatedCost(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cost = reader.float(); + break; + case 2: + message.dimension = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CostGraphDef.AggregatedCost(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "cost": + message.cost = reader.float(); + break; + case "dimension": + message.dimension = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CostGraphDef.AggregatedCost.prototype.cost = 0; +$root.tensorflow.CostGraphDef.AggregatedCost.prototype.dimension = ""; + +$root.tensorflow.AllocationRecord = class AllocationRecord { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.AllocationRecord(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alloc_micros = reader.int64(); + break; + case 2: + message.alloc_bytes = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.AllocationRecord(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "alloc_micros": + message.alloc_micros = reader.int64(); + break; + case "alloc_bytes": + message.alloc_bytes = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.AllocationRecord.prototype.alloc_micros = protobuf.Int64.create(0); +$root.tensorflow.AllocationRecord.prototype.alloc_bytes = protobuf.Int64.create(0); + +$root.tensorflow.AllocatorMemoryUsed = class AllocatorMemoryUsed { + + constructor() { + this.allocation_records = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.AllocatorMemoryUsed(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.allocator_name = reader.string(); + break; + case 2: + message.total_bytes = reader.int64(); + break; + case 3: + message.peak_bytes = reader.int64(); + break; + case 4: + message.live_bytes = reader.int64(); + break; + case 6: + message.allocation_records.push($root.tensorflow.AllocationRecord.decode(reader, reader.uint32())); + break; + case 5: + message.allocator_bytes_in_use = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.AllocatorMemoryUsed(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "allocator_name": + message.allocator_name = reader.string(); + break; + case "total_bytes": + message.total_bytes = reader.int64(); + break; + case "peak_bytes": + message.peak_bytes = reader.int64(); + break; + case "live_bytes": + message.live_bytes = reader.int64(); + break; + case "allocation_records": + message.allocation_records.push($root.tensorflow.AllocationRecord.decodeText(reader)); + break; + case "allocator_bytes_in_use": + message.allocator_bytes_in_use = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.AllocatorMemoryUsed.prototype.allocator_name = ""; +$root.tensorflow.AllocatorMemoryUsed.prototype.total_bytes = protobuf.Int64.create(0); +$root.tensorflow.AllocatorMemoryUsed.prototype.peak_bytes = protobuf.Int64.create(0); +$root.tensorflow.AllocatorMemoryUsed.prototype.live_bytes = protobuf.Int64.create(0); +$root.tensorflow.AllocatorMemoryUsed.prototype.allocator_bytes_in_use = protobuf.Int64.create(0); + +$root.tensorflow.NodeOutput = class NodeOutput { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.NodeOutput(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.slot = reader.int32(); + break; + case 3: + message.tensor_description = $root.tensorflow.TensorDescription.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.NodeOutput(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "slot": + message.slot = reader.int32(); + break; + case "tensor_description": + message.tensor_description = $root.tensorflow.TensorDescription.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.NodeOutput.prototype.slot = 0; +$root.tensorflow.NodeOutput.prototype.tensor_description = null; + +$root.tensorflow.MemoryStats = class MemoryStats { + + constructor() { + this.persistent_tensor_alloc_ids = []; + this.device_persistent_tensor_alloc_ids = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.MemoryStats(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.temp_memory_size = reader.int64(); + break; + case 3: + message.persistent_memory_size = reader.int64(); + break; + case 5: + message.persistent_tensor_alloc_ids = reader.array(message.persistent_tensor_alloc_ids, () => reader.int64(), tag); + break; + case 2: + message.device_temp_memory_size = reader.int64(); + break; + case 4: + message.device_persistent_memory_size = reader.int64(); + break; + case 6: + message.device_persistent_tensor_alloc_ids = reader.array(message.device_persistent_tensor_alloc_ids, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.MemoryStats(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "temp_memory_size": + message.temp_memory_size = reader.int64(); + break; + case "persistent_memory_size": + message.persistent_memory_size = reader.int64(); + break; + case "persistent_tensor_alloc_ids": + reader.array(message.persistent_tensor_alloc_ids, () => reader.int64()); + break; + case "device_temp_memory_size": + message.device_temp_memory_size = reader.int64(); + break; + case "device_persistent_memory_size": + message.device_persistent_memory_size = reader.int64(); + break; + case "device_persistent_tensor_alloc_ids": + reader.array(message.device_persistent_tensor_alloc_ids, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.MemoryStats.prototype.temp_memory_size = protobuf.Int64.create(0); +$root.tensorflow.MemoryStats.prototype.persistent_memory_size = protobuf.Int64.create(0); +$root.tensorflow.MemoryStats.prototype.device_temp_memory_size = protobuf.Int64.create(0); +$root.tensorflow.MemoryStats.prototype.device_persistent_memory_size = protobuf.Int64.create(0); + +$root.tensorflow.NodeExecStats = class NodeExecStats { + + constructor() { + this.memory = []; + this.output = []; + this.referenced_tensor = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.NodeExecStats(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.node_name = reader.string(); + break; + case 2: + message.all_start_micros = reader.int64(); + break; + case 3: + message.op_start_rel_micros = reader.int64(); + break; + case 4: + message.op_end_rel_micros = reader.int64(); + break; + case 5: + message.all_end_rel_micros = reader.int64(); + break; + case 6: + message.memory.push($root.tensorflow.AllocatorMemoryUsed.decode(reader, reader.uint32())); + break; + case 7: + message.output.push($root.tensorflow.NodeOutput.decode(reader, reader.uint32())); + break; + case 8: + message.timeline_label = reader.string(); + break; + case 9: + message.scheduled_micros = reader.int64(); + break; + case 10: + message.thread_id = reader.uint32(); + break; + case 11: + message.referenced_tensor.push($root.tensorflow.AllocationDescription.decode(reader, reader.uint32())); + break; + case 12: + message.memory_stats = $root.tensorflow.MemoryStats.decode(reader, reader.uint32()); + break; + case 13: + message.all_start_nanos = reader.int64(); + break; + case 14: + message.op_start_rel_nanos = reader.int64(); + break; + case 15: + message.op_end_rel_nanos = reader.int64(); + break; + case 16: + message.all_end_rel_nanos = reader.int64(); + break; + case 17: + message.scheduled_nanos = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.NodeExecStats(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "node_name": + message.node_name = reader.string(); + break; + case "all_start_micros": + message.all_start_micros = reader.int64(); + break; + case "op_start_rel_micros": + message.op_start_rel_micros = reader.int64(); + break; + case "op_end_rel_micros": + message.op_end_rel_micros = reader.int64(); + break; + case "all_end_rel_micros": + message.all_end_rel_micros = reader.int64(); + break; + case "memory": + message.memory.push($root.tensorflow.AllocatorMemoryUsed.decodeText(reader)); + break; + case "output": + message.output.push($root.tensorflow.NodeOutput.decodeText(reader)); + break; + case "timeline_label": + message.timeline_label = reader.string(); + break; + case "scheduled_micros": + message.scheduled_micros = reader.int64(); + break; + case "thread_id": + message.thread_id = reader.uint32(); + break; + case "referenced_tensor": + message.referenced_tensor.push($root.tensorflow.AllocationDescription.decodeText(reader)); + break; + case "memory_stats": + message.memory_stats = $root.tensorflow.MemoryStats.decodeText(reader); + break; + case "all_start_nanos": + message.all_start_nanos = reader.int64(); + break; + case "op_start_rel_nanos": + message.op_start_rel_nanos = reader.int64(); + break; + case "op_end_rel_nanos": + message.op_end_rel_nanos = reader.int64(); + break; + case "all_end_rel_nanos": + message.all_end_rel_nanos = reader.int64(); + break; + case "scheduled_nanos": + message.scheduled_nanos = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.NodeExecStats.prototype.node_name = ""; +$root.tensorflow.NodeExecStats.prototype.all_start_micros = protobuf.Int64.create(0); +$root.tensorflow.NodeExecStats.prototype.op_start_rel_micros = protobuf.Int64.create(0); +$root.tensorflow.NodeExecStats.prototype.op_end_rel_micros = protobuf.Int64.create(0); 
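+// Note (illustrative sketch, not part of the generated schema): the
+// timing fields above are int64, and JavaScript numbers are IEEE-754
+// doubles that only represent integers exactly up to 2^53 - 1, so the
+// runtime wraps 64-bit defaults in `protobuf.Int64.create(0)` instead
+// of a bare 0. Using only standard JavaScript:
+//
+//   Number.MAX_SAFE_INTEGER;                 // 9007199254740991 (2^53 - 1)
+//   9007199254740992 === 9007199254740993;   // true -- precision already lost
+//   BigInt('9007199254740993');              // an exact 64-bit-capable alternative
+//
+// The `protobuf.Int64` helper itself ships with this file's protobuf
+// runtime and is outside the scope of this diff.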
+$root.tensorflow.NodeExecStats.prototype.all_end_rel_micros = protobuf.Int64.create(0); +$root.tensorflow.NodeExecStats.prototype.timeline_label = ""; +$root.tensorflow.NodeExecStats.prototype.scheduled_micros = protobuf.Int64.create(0); +$root.tensorflow.NodeExecStats.prototype.thread_id = 0; +$root.tensorflow.NodeExecStats.prototype.memory_stats = null; +$root.tensorflow.NodeExecStats.prototype.all_start_nanos = protobuf.Int64.create(0); +$root.tensorflow.NodeExecStats.prototype.op_start_rel_nanos = protobuf.Int64.create(0); +$root.tensorflow.NodeExecStats.prototype.op_end_rel_nanos = protobuf.Int64.create(0); +$root.tensorflow.NodeExecStats.prototype.all_end_rel_nanos = protobuf.Int64.create(0); +$root.tensorflow.NodeExecStats.prototype.scheduled_nanos = protobuf.Int64.create(0); + +$root.tensorflow.DeviceStepStats = class DeviceStepStats { + + constructor() { + this.node_stats = []; + this.thread_names = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.DeviceStepStats(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.device = reader.string(); + break; + case 2: + message.node_stats.push($root.tensorflow.NodeExecStats.decode(reader, reader.uint32())); + break; + case 3: + reader.entry(message.thread_names, () => reader.uint32(), () => reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.DeviceStepStats(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "device": + message.device = reader.string(); + break; + case "node_stats": + message.node_stats.push($root.tensorflow.NodeExecStats.decodeText(reader)); + break; + case "thread_names": + reader.entry(message.thread_names, () => reader.uint32(), () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.DeviceStepStats.prototype.device = ""; + +$root.tensorflow.StepStats = class StepStats { + + constructor() { + this.dev_stats = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.StepStats(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dev_stats.push($root.tensorflow.DeviceStepStats.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.StepStats(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dev_stats": + message.dev_stats.push($root.tensorflow.DeviceStepStats.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.AllocationDescription = class AllocationDescription { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.AllocationDescription(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.requested_bytes = reader.int64(); + break; + case 2: + message.allocated_bytes = reader.int64(); + break; + case 3: + message.allocator_name = reader.string(); + break; + case 4: + message.allocation_id = reader.int64(); + break; + case 5: + message.has_single_reference = reader.bool(); + break; + case 6: + message.ptr = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.AllocationDescription(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "requested_bytes": + message.requested_bytes = reader.int64(); + break; + case "allocated_bytes": + message.allocated_bytes = reader.int64(); + break; + case "allocator_name": + message.allocator_name = reader.string(); + break; + case "allocation_id": + message.allocation_id = reader.int64(); + break; + case "has_single_reference": + message.has_single_reference = reader.bool(); + break; + case "ptr": + message.ptr = reader.uint64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.AllocationDescription.prototype.requested_bytes = protobuf.Int64.create(0); +$root.tensorflow.AllocationDescription.prototype.allocated_bytes = protobuf.Int64.create(0); +$root.tensorflow.AllocationDescription.prototype.allocator_name = ""; +$root.tensorflow.AllocationDescription.prototype.allocation_id = protobuf.Int64.create(0); +$root.tensorflow.AllocationDescription.prototype.has_single_reference = false; +$root.tensorflow.AllocationDescription.prototype.ptr = protobuf.Uint64.create(0); + +$root.tensorflow.TensorDescription = class TensorDescription { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.TensorDescription(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dtype = reader.int32(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 4: + message.allocation_description = $root.tensorflow.AllocationDescription.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.TensorDescription(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader); + break; + case "allocation_description": + message.allocation_description = $root.tensorflow.AllocationDescription.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.TensorDescription.prototype.dtype = 0; +$root.tensorflow.TensorDescription.prototype.shape = null; +$root.tensorflow.TensorDescription.prototype.allocation_description = null; + +$root.tensorflow.JobDef = class JobDef { + + constructor() { + this.tasks = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.JobDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + reader.entry(message.tasks, () => reader.int32(), () => reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.JobDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "tasks": + reader.entry(message.tasks, () => reader.int32(), () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.JobDef.prototype.name = ""; + +$root.tensorflow.ClusterDef = class ClusterDef { + + constructor() { + this.job = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.ClusterDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.job.push($root.tensorflow.JobDef.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.ClusterDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "job": + message.job.push($root.tensorflow.JobDef.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.DebugTensorWatch = class DebugTensorWatch { + + constructor() { + this.debug_ops = []; + this.debug_urls = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.DebugTensorWatch(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.node_name = reader.string(); + break; + case 2: + message.output_slot = reader.int32(); + break; + case 3: + message.debug_ops.push(reader.string()); + break; + case 4: + message.debug_urls.push(reader.string()); + break; + case 5: + message.tolerate_debug_op_creation_failures = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.DebugTensorWatch(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "node_name": + message.node_name = reader.string(); + break; + case "output_slot": + message.output_slot = reader.int32(); + break; + case "debug_ops": + reader.array(message.debug_ops, () => reader.string()); + break; + case "debug_urls": + reader.array(message.debug_urls, () => reader.string()); + break; + case "tolerate_debug_op_creation_failures": + message.tolerate_debug_op_creation_failures = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.DebugTensorWatch.prototype.node_name = ""; +$root.tensorflow.DebugTensorWatch.prototype.output_slot = 0; +$root.tensorflow.DebugTensorWatch.prototype.tolerate_debug_op_creation_failures = false; + +$root.tensorflow.DebugOptions = class DebugOptions { + + constructor() { + this.debug_tensor_watch_opts = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.DebugOptions(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 4: + message.debug_tensor_watch_opts.push($root.tensorflow.DebugTensorWatch.decode(reader, reader.uint32())); + break; + case 10: + message.global_step = reader.int64(); + break; + case 11: + message.reset_disk_byte_usage = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.DebugOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "debug_tensor_watch_opts": + message.debug_tensor_watch_opts.push($root.tensorflow.DebugTensorWatch.decodeText(reader)); + break; + case "global_step": + message.global_step = reader.int64(); + break; + case "reset_disk_byte_usage": + message.reset_disk_byte_usage = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.DebugOptions.prototype.global_step = protobuf.Int64.create(0); +$root.tensorflow.DebugOptions.prototype.reset_disk_byte_usage = false; + +$root.tensorflow.DebuggedSourceFile = class DebuggedSourceFile { + + constructor() { + this.lines = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.DebuggedSourceFile(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.host = reader.string(); + break; + case 2: + message.file_path = reader.string(); + break; + case 3: + message.last_modified = reader.int64(); + break; + case 4: + message.bytes = reader.int64(); + break; + case 5: + message.lines.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.DebuggedSourceFile(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "host": + message.host = reader.string(); + break; + case "file_path": + message.file_path = reader.string(); + break; + case "last_modified": + message.last_modified = reader.int64(); + break; + case "bytes": + message.bytes = reader.int64(); + break; + case "lines": + reader.array(message.lines, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.DebuggedSourceFile.prototype.host = ""; +$root.tensorflow.DebuggedSourceFile.prototype.file_path = ""; +$root.tensorflow.DebuggedSourceFile.prototype.last_modified = protobuf.Int64.create(0); +$root.tensorflow.DebuggedSourceFile.prototype.bytes = protobuf.Int64.create(0); + +$root.tensorflow.DebuggedSourceFiles = class DebuggedSourceFiles { + + constructor() { + this.source_files = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.DebuggedSourceFiles(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source_files.push($root.tensorflow.DebuggedSourceFile.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.DebuggedSourceFiles(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "source_files": + message.source_files.push($root.tensorflow.DebuggedSourceFile.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.AutoParallelOptions = class AutoParallelOptions { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.AutoParallelOptions(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enable = reader.bool(); + break; + case 2: + message.num_replicas = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.AutoParallelOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "enable": + message.enable = reader.bool(); + break; + case "num_replicas": + message.num_replicas = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.AutoParallelOptions.prototype.enable = false; +$root.tensorflow.AutoParallelOptions.prototype.num_replicas = 0; + +$root.tensorflow.ScopedAllocatorOptions = class ScopedAllocatorOptions { + + constructor() { + this.enable_op = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.ScopedAllocatorOptions(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enable_op.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.ScopedAllocatorOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "enable_op": + reader.array(message.enable_op, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RewriterConfig = class RewriterConfig { + + constructor() { + this.optimizers = []; + this.custom_optimizers = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.RewriterConfig(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 50: + message.cpu_layout_conversion = reader.int32(); + break; + case 1: + message.layout_optimizer = reader.int32(); + break; + case 3: + message.constant_folding = reader.int32(); + break; + case 13: + message.shape_optimization = reader.int32(); + break; + case 14: + message.remapping = reader.int32(); + break; + case 24: + message.common_subgraph_elimination = reader.int32(); + break; + case 7: + message.arithmetic_optimization = reader.int32(); + break; + case 8: + message.dependency_optimization = reader.int32(); + break; + case 9: + message.loop_optimization = reader.int32(); + break; + case 10: + message.function_optimization = reader.int32(); + break; + case 11: + message.debug_stripper = reader.int32(); + break; + case 2: + message.disable_model_pruning = reader.bool(); + break; + case 15: + message.scoped_allocator_optimization = reader.int32(); + break; + case 18: + message.pin_to_host_optimization = reader.int32(); + break; + case 22: + message.implementation_selector = reader.int32(); + break; + case 23: + message.auto_mixed_precision = reader.int32(); + break; + case 25: + message.auto_mixed_precision_mkl = reader.int32(); + break; + case 31: + message.auto_mixed_precision_onednn_bfloat16 = reader.int32(); + break; + case 29: + message.auto_mixed_precision_cpu = reader.int32(); + break; + case 19: + message.disable_meta_optimizer = reader.bool(); + break; + case 32: + message.disable_tfg_optimizer = reader.bool(); + break; + case 28: + message.use_plugin_optimizers = reader.int32(); + break; + case 30: + message.experimental_conditional_code_motion = reader.int32(); + break; + case 12: + message.meta_optimizer_iterations = reader.int32(); + break; + case 17: + message.min_graph_nodes = reader.int32(); + break; + case 26: + message.experimental_disable_compressed_tensor_optimization = reader.bool(); + break; + case 27: + message.experimental_disable_folding_quantization_emulation = reader.bool(); + break; + case 4: + message.memory_optimization = reader.int32(); + break; + case 6: + message.memory_optimizer_target_node_name_scope = reader.string(); + break; + case 20: + message.meta_optimizer_timeout_ms = reader.int64(); + break; + case 5: + message.auto_parallel = $root.tensorflow.AutoParallelOptions.decode(reader, reader.uint32()); + break; + case 21: + message.fail_on_optimizer_errors = reader.bool(); + break; + case 16: + message.scoped_allocator_opts = $root.tensorflow.ScopedAllocatorOptions.decode(reader, reader.uint32()); + break; + case 100: + message.optimizers.push(reader.string()); + break; + case 200: + message.custom_optimizers.push($root.tensorflow.RewriterConfig.CustomGraphOptimizer.decode(reader, reader.uint32())); + break; + case 300: + message.inter_optimizer_verifier_config = $root.tensorflow.VerifierConfig.decode(reader, reader.uint32()); + break; + case 301: + message.post_optimization_verifier_config = $root.tensorflow.VerifierConfig.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.RewriterConfig(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "cpu_layout_conversion": + message.cpu_layout_conversion = reader.enum($root.tensorflow.RewriterConfig.CpuLayout); + break; + case "layout_optimizer": + message.layout_optimizer = 
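/* the text format carries enums by symbolic name (DEFAULT, ON, OFF, AGGRESSIVE, ...), resolved against the Toggle table defined below */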
reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "constant_folding": + message.constant_folding = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "shape_optimization": + message.shape_optimization = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "remapping": + message.remapping = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "common_subgraph_elimination": + message.common_subgraph_elimination = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "arithmetic_optimization": + message.arithmetic_optimization = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "dependency_optimization": + message.dependency_optimization = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "loop_optimization": + message.loop_optimization = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "function_optimization": + message.function_optimization = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "debug_stripper": + message.debug_stripper = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "disable_model_pruning": + message.disable_model_pruning = reader.bool(); + break; + case "scoped_allocator_optimization": + message.scoped_allocator_optimization = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "pin_to_host_optimization": + message.pin_to_host_optimization = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "implementation_selector": + message.implementation_selector = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "auto_mixed_precision": + message.auto_mixed_precision = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "auto_mixed_precision_mkl": + message.auto_mixed_precision_mkl = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "auto_mixed_precision_onednn_bfloat16": + message.auto_mixed_precision_onednn_bfloat16 = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "auto_mixed_precision_cpu": + message.auto_mixed_precision_cpu = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "disable_meta_optimizer": + message.disable_meta_optimizer = reader.bool(); + break; + case "disable_tfg_optimizer": + message.disable_tfg_optimizer = reader.bool(); + break; + case "use_plugin_optimizers": + message.use_plugin_optimizers = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "experimental_conditional_code_motion": + message.experimental_conditional_code_motion = reader.enum($root.tensorflow.RewriterConfig.Toggle); + break; + case "meta_optimizer_iterations": + message.meta_optimizer_iterations = reader.enum($root.tensorflow.RewriterConfig.NumIterationsType); + break; + case "min_graph_nodes": + message.min_graph_nodes = reader.int32(); + break; + case "experimental_disable_compressed_tensor_optimization": + message.experimental_disable_compressed_tensor_optimization = reader.bool(); + break; + case "experimental_disable_folding_quantization_emulation": + message.experimental_disable_folding_quantization_emulation = reader.bool(); + break; + case "memory_optimization": + message.memory_optimization = reader.enum($root.tensorflow.RewriterConfig.MemOptType); + break; + case "memory_optimizer_target_node_name_scope": + message.memory_optimizer_target_node_name_scope = reader.string(); + break; + case "meta_optimizer_timeout_ms": + message.meta_optimizer_timeout_ms = 
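/* 64-bit fields decode to protobuf.Int64 wrappers, matching the Int64 prototype defaults below */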
reader.int64(); + break; + case "auto_parallel": + message.auto_parallel = $root.tensorflow.AutoParallelOptions.decodeText(reader); + break; + case "fail_on_optimizer_errors": + message.fail_on_optimizer_errors = reader.bool(); + break; + case "scoped_allocator_opts": + message.scoped_allocator_opts = $root.tensorflow.ScopedAllocatorOptions.decodeText(reader); + break; + case "optimizers": + reader.array(message.optimizers, () => reader.string()); + break; + case "custom_optimizers": + message.custom_optimizers.push($root.tensorflow.RewriterConfig.CustomGraphOptimizer.decodeText(reader)); + break; + case "inter_optimizer_verifier_config": + message.inter_optimizer_verifier_config = $root.tensorflow.VerifierConfig.decodeText(reader); + break; + case "post_optimization_verifier_config": + message.post_optimization_verifier_config = $root.tensorflow.VerifierConfig.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RewriterConfig.prototype.cpu_layout_conversion = 0; +$root.tensorflow.RewriterConfig.prototype.layout_optimizer = 0; +$root.tensorflow.RewriterConfig.prototype.constant_folding = 0; +$root.tensorflow.RewriterConfig.prototype.shape_optimization = 0; +$root.tensorflow.RewriterConfig.prototype.remapping = 0; +$root.tensorflow.RewriterConfig.prototype.common_subgraph_elimination = 0; +$root.tensorflow.RewriterConfig.prototype.arithmetic_optimization = 0; +$root.tensorflow.RewriterConfig.prototype.dependency_optimization = 0; +$root.tensorflow.RewriterConfig.prototype.loop_optimization = 0; +$root.tensorflow.RewriterConfig.prototype.function_optimization = 0; +$root.tensorflow.RewriterConfig.prototype.debug_stripper = 0; +$root.tensorflow.RewriterConfig.prototype.disable_model_pruning = false; +$root.tensorflow.RewriterConfig.prototype.scoped_allocator_optimization = 0; +$root.tensorflow.RewriterConfig.prototype.pin_to_host_optimization = 0; +$root.tensorflow.RewriterConfig.prototype.implementation_selector = 0; +$root.tensorflow.RewriterConfig.prototype.auto_mixed_precision = 0; +$root.tensorflow.RewriterConfig.prototype.auto_mixed_precision_mkl = 0; +$root.tensorflow.RewriterConfig.prototype.auto_mixed_precision_onednn_bfloat16 = 0; +$root.tensorflow.RewriterConfig.prototype.auto_mixed_precision_cpu = 0; +$root.tensorflow.RewriterConfig.prototype.disable_meta_optimizer = false; +$root.tensorflow.RewriterConfig.prototype.disable_tfg_optimizer = false; +$root.tensorflow.RewriterConfig.prototype.use_plugin_optimizers = 0; +$root.tensorflow.RewriterConfig.prototype.experimental_conditional_code_motion = 0; +$root.tensorflow.RewriterConfig.prototype.meta_optimizer_iterations = 0; +$root.tensorflow.RewriterConfig.prototype.min_graph_nodes = 0; +$root.tensorflow.RewriterConfig.prototype.experimental_disable_compressed_tensor_optimization = false; +$root.tensorflow.RewriterConfig.prototype.experimental_disable_folding_quantization_emulation = false; +$root.tensorflow.RewriterConfig.prototype.memory_optimization = 0; +$root.tensorflow.RewriterConfig.prototype.memory_optimizer_target_node_name_scope = ""; +$root.tensorflow.RewriterConfig.prototype.meta_optimizer_timeout_ms = protobuf.Int64.create(0); +$root.tensorflow.RewriterConfig.prototype.auto_parallel = null; +$root.tensorflow.RewriterConfig.prototype.fail_on_optimizer_errors = false; +$root.tensorflow.RewriterConfig.prototype.scoped_allocator_opts = null; +$root.tensorflow.RewriterConfig.prototype.inter_optimizer_verifier_config = null; 
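+// Field defaults live on the prototype so decode() only materializes fields
+// present on the wire; enum-typed fields default to 0, which the tables below
+// name (Toggle.DEFAULT, CpuLayout.NO_CONVERSION_ON_CPU, ...).
+// A minimal usage sketch, assuming 'buffer' holds a serialized RewriterConfig:
+//   const reader = protobuf.BinaryReader.open(buffer);
+//   const config = $root.tensorflow.RewriterConfig.decode(reader);
+//   config.layout_optimizer; // 0 (Toggle.DEFAULT) unless field 1 was on the wire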
+$root.tensorflow.RewriterConfig.prototype.post_optimization_verifier_config = null; + +$root.tensorflow.RewriterConfig.Toggle = { + "DEFAULT": 0, + "ON": 1, + "OFF": 2, + "AGGRESSIVE": 3, + "EXPERIMENTAL_MLIR": 4, + "EXPERIMENTAL_BOTH": 5 +}; + +$root.tensorflow.RewriterConfig.CpuLayout = { + "NO_CONVERSION_ON_CPU": 0, + "NCHW_TO_NHWC": 1, + "NHWC_TO_NCHW": 2 +}; + +$root.tensorflow.RewriterConfig.NumIterationsType = { + "DEFAULT_NUM_ITERS": 0, + "ONE": 1, + "TWO": 2 +}; + +$root.tensorflow.RewriterConfig.MemOptType = { + "DEFAULT_MEM_OPT": 0, + "NO_MEM_OPT": 1, + "MANUAL": 2, + "SWAPPING_HEURISTICS": 4, + "RECOMPUTATION_HEURISTICS": 5, + "SCHEDULING_HEURISTICS": 6, + "HEURISTICS": 3 +}; + +$root.tensorflow.RewriterConfig.CustomGraphOptimizer = class CustomGraphOptimizer { + + constructor() { + this.parameter_map = {}; + } + + static decode(reader, length) { + const message = new $root.tensorflow.RewriterConfig.CustomGraphOptimizer(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + reader.entry(message.parameter_map, () => reader.string(), () => $root.tensorflow.AttrValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.RewriterConfig.CustomGraphOptimizer(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "parameter_map": + reader.entry(message.parameter_map, () => reader.string(), () => $root.tensorflow.AttrValue.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RewriterConfig.CustomGraphOptimizer.prototype.name = ""; + +$root.tensorflow.VerifierConfig = class VerifierConfig { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.VerifierConfig(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.verification_timeout_in_ms = reader.int64(); + break; + case 2: + message.structure_verifier = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.VerifierConfig(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "verification_timeout_in_ms": + message.verification_timeout_in_ms = reader.int64(); + break; + case "structure_verifier": + message.structure_verifier = reader.enum($root.tensorflow.VerifierConfig.Toggle); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.VerifierConfig.prototype.verification_timeout_in_ms = protobuf.Int64.create(0); +$root.tensorflow.VerifierConfig.prototype.structure_verifier = 0; + +$root.tensorflow.VerifierConfig.Toggle = { + "DEFAULT": 0, + "ON": 1, + "OFF": 2 +}; + +$root.tensorflow.dummy = {}; + +$root.tensorflow.RPCOptions = class RPCOptions { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.RPCOptions(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.use_rpc_for_inprocess_master = reader.bool(); + break; + case 2: + message.compression_algorithm = reader.string(); + break; + case 3: + message.compression_level = reader.int32(); + break; + case 4: + message.cache_rpc_response = reader.bool(); + break; + case 5: + message.disable_session_connection_sharing = reader.bool(); + break; + case 6: + message.num_channels_per_target = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.RPCOptions(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "use_rpc_for_inprocess_master": + message.use_rpc_for_inprocess_master = reader.bool(); + break; + case "compression_algorithm": + message.compression_algorithm = reader.string(); + break; + case "compression_level": + message.compression_level = reader.int32(); + break; + case "cache_rpc_response": + message.cache_rpc_response = reader.bool(); + break; + case "disable_session_connection_sharing": + message.disable_session_connection_sharing = reader.bool(); + break; + case "num_channels_per_target": + message.num_channels_per_target = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.RPCOptions.prototype.use_rpc_for_inprocess_master = false; +$root.tensorflow.RPCOptions.prototype.compression_algorithm = ""; +$root.tensorflow.RPCOptions.prototype.compression_level = 0; +$root.tensorflow.RPCOptions.prototype.cache_rpc_response = false; +$root.tensorflow.RPCOptions.prototype.disable_session_connection_sharing = false; +$root.tensorflow.RPCOptions.prototype.num_channels_per_target = 0; + +$root.tensorflow.CoordinatedJob = class CoordinatedJob { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.CoordinatedJob(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.num_tasks = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CoordinatedJob(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "num_tasks": + message.num_tasks = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CoordinatedJob.prototype.name = ""; +$root.tensorflow.CoordinatedJob.prototype.num_tasks = 0; + +$root.tensorflow.CoordinationServiceConfig = class CoordinationServiceConfig { + + constructor() { + this.coordinated_job_list = []; + this.recoverable_jobs = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.CoordinationServiceConfig(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.service_type = reader.string(); + break; + case 2: + message.service_leader = reader.string(); + break; + case 3: + message.enable_health_check = reader.bool(); + break; + case 4: + message.cluster_register_timeout_in_ms = reader.int64(); + break; + case 5: + message.heartbeat_timeout_in_ms = reader.int64(); + break; + case 10: + message.coordinated_job_list.push($root.tensorflow.CoordinatedJob.decode(reader, reader.uint32())); + break; + case 7: + message.shutdown_barrier_timeout_in_ms = reader.int64(); + break; + case 8: + message.agent_destruction_without_shutdown = reader.bool(); + break; + case 9: + message.recoverable_jobs.push(reader.string()); + break; + case 11: + message.allow_new_incarnation_to_reconnect = reader.bool(); + break; + case 12: + message.force_disable = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.CoordinationServiceConfig(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "service_type": + message.service_type = reader.string(); + break; + case "service_leader": + message.service_leader = reader.string(); + break; + case "enable_health_check": + message.enable_health_check = reader.bool(); + break; + case "cluster_register_timeout_in_ms": + message.cluster_register_timeout_in_ms = reader.int64(); + break; + case "heartbeat_timeout_in_ms": + message.heartbeat_timeout_in_ms = reader.int64(); + break; + case "coordinated_job_list": + message.coordinated_job_list.push($root.tensorflow.CoordinatedJob.decodeText(reader)); + break; + case "shutdown_barrier_timeout_in_ms": + message.shutdown_barrier_timeout_in_ms = reader.int64(); + break; + case "agent_destruction_without_shutdown": + message.agent_destruction_without_shutdown = reader.bool(); + break; + case "recoverable_jobs": + reader.array(message.recoverable_jobs, () => reader.string()); + break; + case "allow_new_incarnation_to_reconnect": + message.allow_new_incarnation_to_reconnect = reader.bool(); + break; + case "force_disable": + message.force_disable = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.CoordinationServiceConfig.prototype.service_type = ""; +$root.tensorflow.CoordinationServiceConfig.prototype.service_leader = ""; +$root.tensorflow.CoordinationServiceConfig.prototype.enable_health_check = false; +$root.tensorflow.CoordinationServiceConfig.prototype.cluster_register_timeout_in_ms = protobuf.Int64.create(0); +$root.tensorflow.CoordinationServiceConfig.prototype.heartbeat_timeout_in_ms = protobuf.Int64.create(0); +$root.tensorflow.CoordinationServiceConfig.prototype.shutdown_barrier_timeout_in_ms = protobuf.Int64.create(0); +$root.tensorflow.CoordinationServiceConfig.prototype.agent_destruction_without_shutdown = false; +$root.tensorflow.CoordinationServiceConfig.prototype.allow_new_incarnation_to_reconnect = false; +$root.tensorflow.CoordinationServiceConfig.prototype.force_disable = false; + +$root.tensorflow.MemmappedFileSystemDirectoryElement = class MemmappedFileSystemDirectoryElement { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.MemmappedFileSystemDirectoryElement(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.offset = reader.uint64(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.length = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.MemmappedFileSystemDirectoryElement(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "offset": + message.offset = reader.uint64(); + break; + case "name": + message.name = reader.string(); + break; + case "length": + message.length = reader.uint64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.MemmappedFileSystemDirectoryElement.prototype.offset = protobuf.Uint64.create(0); +$root.tensorflow.MemmappedFileSystemDirectoryElement.prototype.name = ""; +$root.tensorflow.MemmappedFileSystemDirectoryElement.prototype.length = protobuf.Uint64.create(0); + +$root.tensorflow.MemmappedFileSystemDirectory = class MemmappedFileSystemDirectory { + + constructor() { + this.element = []; + } + + static decode(reader, length) { + const message = new $root.tensorflow.MemmappedFileSystemDirectory(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.element.push($root.tensorflow.MemmappedFileSystemDirectoryElement.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.MemmappedFileSystemDirectory(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "element": + message.element.push($root.tensorflow.MemmappedFileSystemDirectoryElement.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.FingerprintDef = class FingerprintDef { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.tensorflow.FingerprintDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.saved_model_checksum = reader.uint64(); + break; + case 2: + message.graph_def_program_hash = reader.uint64(); + break; + case 3: + message.signature_def_hash = reader.uint64(); + break; + case 4: + message.saved_object_graph_hash = reader.uint64(); + break; + case 5: + message.checkpoint_hash = reader.uint64(); + break; + case 6: + message.version = $root.tensorflow.VersionDef.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.tensorflow.FingerprintDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "saved_model_checksum": + message.saved_model_checksum = reader.uint64(); + break; + case "graph_def_program_hash": + message.graph_def_program_hash = reader.uint64(); + break; + case "signature_def_hash": + message.signature_def_hash = reader.uint64(); + break; + case "saved_object_graph_hash": + message.saved_object_graph_hash = reader.uint64(); + break; + case "checkpoint_hash": + message.checkpoint_hash = reader.uint64(); + break; + case "version": + message.version = $root.tensorflow.VersionDef.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.tensorflow.FingerprintDef.prototype.saved_model_checksum = protobuf.Uint64.create(0); +$root.tensorflow.FingerprintDef.prototype.graph_def_program_hash = protobuf.Uint64.create(0); +$root.tensorflow.FingerprintDef.prototype.signature_def_hash = protobuf.Uint64.create(0); +$root.tensorflow.FingerprintDef.prototype.saved_object_graph_hash = protobuf.Uint64.create(0); +$root.tensorflow.FingerprintDef.prototype.checkpoint_hash = protobuf.Uint64.create(0); +$root.tensorflow.FingerprintDef.prototype.version = null; + +$root.google = {}; + +$root.google.protobuf = {}; + +$root.google.protobuf.Any = class Any { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.google.protobuf.Any(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type_url = reader.string(); + break; + case 2: + message.value = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + return reader.any(() => new $root.google.protobuf.Any()); + } +}; + +$root.google.protobuf.Any.prototype.type_url = ""; +$root.google.protobuf.Any.prototype.value = new Uint8Array([]); + +$root.google.protobuf.BoolValue = class BoolValue { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.google.protobuf.BoolValue(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.google.protobuf.BoolValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + message.value = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.google.protobuf.BoolValue.prototype.value = false; diff --git a/tf.js b/tf.js new file mode 100644 index 00000000000..2d3be60054b --- /dev/null +++ b/tf.js @@ -0,0 +1,2448 @@ + +// Experimental + +import * as base from './base.js'; +import * as protobuf from './protobuf.js'; +import * as zip from './zip.js'; + +const tf = {}; + +tf.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'pbtxt' || extension === 'prototxt' || extension === 'pt' || extension === 'txt') { + if (identifier.endsWith('predict_net.pbtxt') || identifier.endsWith('predict_net.prototxt') || + identifier.endsWith('init_net.pbtxt') || identifier.endsWith('init_net.prototxt')) { + return undefined; + } + const tags = context.tags('pbtxt'); + if (['input_stream', 'output_stream', 'input_side_packet', 'output_side_packet'].some((key) => tags.has(key) || tags.has(`node.${key}`))) { + return undefined; + } + if (tags.has('saved_model_schema_version') || tags.has('meta_graphs')) { + return 'tf.pbtxt.SavedModel'; + } + if (tags.has('graph_def')) { + return 'tf.pbtxt.MetaGraphDef'; + } + if (tags.has('node')) { + return 'tf.pbtxt.GraphDef'; + } + } + if (extension === 'pb' || extension === 'pbtxt' || extension === 'prototxt' || extension === 'graphdef' || extension === 'meta') { + if (identifier.endsWith('predict_net.pb') || identifier.endsWith('init_net.pb')) { + return undefined; + } + if (identifier == 'tfhub_module.pb') { + const stream = context.stream; + const signature = [ 0x08, 0x03 ]; + if (signature.length === stream.length && stream.peek(signature.length).every((value, index) => value === signature[index])) { + return undefined; + } + } + const tags = context.tags('pb'); + if (tags.size > 0) { + if (Array.from(tags).every(([key, value]) => key < 8 && value !== 5)) { + const match = (tags, schema) => { + for (const [key, inner] of schema) { + const value = tags[key]; + if (value === undefined) { + continue; + } + if (inner === false) { + return false; + } + if (Array.isArray(inner)) { + if (typeof value !== 'object' || !match(value, inner)) { + return false; + } + } else if (inner !== value) { + if (inner === 2 && !Array.isArray(value) && Object(value) === (value) && Object.keys(value).length === 0) { + return true; + } + return false; + } + } + return true; + }; + const signatureGraphDef = [ + [1 /* node */, [ + [1 /* name */, 2], + [2 /* op */, 2], + [3 /* input */, 2], + [4 /* device */,2], + [5 /* attr */, [ + [1,2], + [2,[]] + ]], + [6 /* experimental_debug_info */, []] + ]], + [2 /* library */, []], + [3 /* version */, 0], + [4 /* versions */, [[1,0],[2,0]]] + ]; + const signatureMetaGraphDef = [ + [1 /* meta_info_def */, [[1,2],[2,[]],[3,[]],/* [4,2], */[6,2],[7,0],[8,[]]]], + [2 /* graph_def */, signatureGraphDef], + [3 /* saver_def */, [[1,2],[2,2],[3,2],[4,0],[5,0],[6,5],[7,0]]], + [4 /* collection_def */,[]], + [5 /* 
signature_def */, []], + [6 /* asset_file_def */, []], + [7 /* object_graph_def */, []] + ]; + const signatureSavedModel = [[1,0],[2,signatureMetaGraphDef]]; + // optimization_guide.proto.PageTopicsOverrideList + if (identifier === 'override_list.pb' && tags.size === 1 && tags.get(1) === 2) { + return undefined; + } + if (tags.size === 1 && tags.get(1) === 2) { + const tags = context.tags('pb+'); + // mediapipe.BoxDetectorIndex + if (match(tags, [[1,[[1,[[1,[[1,5],[2,5],[3,5],[4,5],[6,0],[7,5],[8,5],[10,5],[11,0],[12,0]]],[2,5],[3,[]]]],[2,false],[3,false],[4,false],[5,false]]],[2,false],[3,false]])) { + return undefined; + } + // third_party.tensorflow.python.keras.protobuf.SavedMetadata + if (match(tags, [[1,[[1,[[1,0],[2,0]]],[2,0],[3,2],[4,2],[5,2]]]])) { + return undefined; + } + } + if ((!tags.has(1) || tags.get(1) === 0) && tags.get(2) === 2) { + const tags = context.tags('pb+'); + if (match(tags, signatureSavedModel)) { + return 'tf.pb.SavedModel'; + } + } + if ((!tags.has(1) || tags.get(1) === 2) && + (!tags.has(2) || tags.get(2) === 2) && + (!tags.has(3) || tags.get(3) === 2) && + (!tags.has(4) || tags.get(4) === 2)) { + const tags = context.tags('pb+'); + if (match(tags, signatureMetaGraphDef)) { + return 'tf.pb.MetaGraphDef'; + } + } + if (tags.get(1) !== 2) { + const tags = context.tags('pb+'); + if (match(tags, signatureGraphDef)) { + return 'tf.pb.GraphDef'; + } + } + // tensorflow.FingerprintDef + if (identifier === 'fingerprint.pb' && + tags.get(1) === 0 && tags.get(2) === 0 && + tags.get(3) === 0 && tags.get(5) === 0 && tags.get(6) === 2) { + return 'tf.pb.FingerprintDef'; + } + const decode = (buffer, value) => { + const reader = protobuf.BinaryReader.open(buffer); + const length = reader.length; + while (reader.position < length) { + const tag = reader.uint32(); + const number = tag >>> 3; + const type = tag & 7; + if (value === number) { + return type === 2 ? 
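/* wire type 2 (length-delimited) is the only type whose payload can be extracted as bytes; anything else yields null */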
reader.bytes() : null; + } + reader.skipType(type); + } + return null; + }; + const stream = context.stream; + const buffer = stream.peek(); + const nodeBuffer = decode(buffer, 1); + if (nodeBuffer) { + const nameBuffer = decode(nodeBuffer, 1); + if (nameBuffer) { + const decoder = new TextDecoder('utf-8'); + const name = decoder.decode(nameBuffer); + if (Array.from(name).filter((c) => c <= ' ').length < 256) { + return 'tf.pb.GraphDef'; + } + } + } + } + } else { + const tags = context.tags('pbtxt'); + if (['input_stream', 'output_stream', 'input_side_packet', 'output_side_packet'].some((key) => tags.has(key) || tags.has(`node.${key}`))) { + return undefined; + } + if (tags.has('node')) { + return 'tf.pbtxt.GraphDef'; + } + if (tags.has('graph_def')) { + return 'tf.pbtxt.MetaGraphDef'; + } + if (tags.has('saved_model_schema_version') || tags.has('meta_graphs')) { + return 'tf.pbtxt.SavedModel'; + } + } + } + if (extension === 'json') { + for (const type of [ 'json', 'json.gz' ]) { + const obj = context.peek(type); + if (obj && obj.modelTopology && (obj.format === 'graph-model' || Array.isArray(obj.modelTopology.node))) { + return `tf.${type}`; + } + } + } + if (extension === 'index' || extension === 'ckpt') { + const stream = context.stream; + if (stream.length > 8) { + stream.seek(-8); + const buffer = stream.read(8); + stream.seek(0); + const signature = [ 0x57, 0xfb, 0x80, 0x8b, 0x24, 0x75, 0x47, 0xdb ]; + if (buffer.every((value, index) => value === signature[index])) { + return 'tf.bundle'; + } + } + } + if (/.data-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]$/.exec(identifier)) { + return 'tf.data'; + } + if (/^events.out.tfevents./.exec(identifier)) { + const stream = context.stream; + if (tf.EventFileReader.open(stream)) { + return 'tf.events'; + } + } + if (extension === 'pbmm') { + const stream = context.stream; + if (stream.length > 8) { + stream.seek(-8); + const buffer = stream.read(8); + stream.seek(0); + const reader = new base.BinaryReader(buffer); + const offset = reader.uint64(); + if (offset < stream.length) { + return 'tf.pb.mmap'; + } + } + } + return undefined; + } + + async open(context, target) { + await context.require('./tf-proto'); + tf.proto = protobuf.get('tf'); + const openModel = async (saved_model, format, producer, bundle) => { + const metadata = await context.metadata('tf-metadata.json'); + return new tf.Model(metadata, saved_model, format, producer, bundle); + }; + const openSavedModel = async (context, saved_model, format, producer) => { + if (format === '') { + format = 'TensorFlow Saved Model'; + if (saved_model && saved_model.saved_model_schema_version) { + format = `${format} v${saved_model.saved_model_schema_version}`; + } + } + if (saved_model.meta_graphs.length === 1 && + saved_model.meta_graphs[0].object_graph_def && + saved_model.meta_graphs[0].object_graph_def.nodes && + saved_model.meta_graphs[0].object_graph_def.nodes.length > 0) { + const identifier = 'variables/variables.index'; + try { + const content = await context.fetch(identifier); + const stream = content.stream; + const bundle = await tf.TensorBundle.open(stream, identifier, context); + return openModel(saved_model, format, producer, bundle); + } catch (error) { + return openModel(saved_model, format, producer, null); + } + } + if (saved_model && Array.isArray(saved_model.meta_graphs) && saved_model.meta_graphs.length > 0 && + saved_model.meta_graphs[0].meta_info_def && + Object.prototype.hasOwnProperty.call(saved_model.meta_graphs[0].meta_info_def, 'tensorflow_version')) 
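/* record the TensorFlow version captured in meta_info_def as the producer */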
{ + producer = `TensorFlow v${saved_model.meta_graphs[0].meta_info_def.tensorflow_version}`; + } + return openModel(saved_model, format, producer, null); + }; + const openBundle = async (context, stream, identifier) => { + stream = stream || context.stream; + identifier = identifier || context.identifier; + try { + const bundle = await tf.TensorBundle.open(stream, identifier, context); + return openModel(null, `TensorFlow Tensor Bundle v${bundle.format}`, null, bundle); + } catch (error) { + context.exception(error, false); + throw error; + } + }; + const openData = async (context) => { + const identifier = context.identifier; + const base = identifier.split('.'); + base.pop(); + const file = `${base.join('.')}.index`; + try { + const content = await context.fetch(file); + const stream = content.stream; + return openBundle(context, stream, file); + } catch (error) { + const file = `${base.join('.')}.ckpt`; + const content = await context.fetch(file); + const stream = content.stream; + return openBundle(context, stream, file); + } + }; + const openEventFile = async (context) => { + let format = 'TensorFlow Event File'; + let producer = null; + const stream = context.stream; + const eventFileReader = tf.EventFileReader.open(stream); + const saved_model = new tf.proto.tensorflow.SavedModel(); + const run_metadata = []; + const summaries = []; + for (;;) { + const event = eventFileReader.read(); + if (!event) { + break; + } + switch (event.what) { + case 'file_version': { + const formats = new Map([ + [ 'brain.Event:1', 'TensorFlow Event File v1' ], + [ 'brain.Event:2', 'TensorFlow Event File v2' ] + ]); + if (!formats.has(event.file_version)) { + throw new tf.Error(`Unsupported event file version '${event.file_version}'.`); + } + format = formats.get(event.file_version); + break; + } + case 'graph_def': { + const buffer = event.graph_def; + const reader = protobuf.BinaryReader.open(buffer); + const graph_def = tf.proto.tensorflow.GraphDef.decode(reader); + const meta_graph_def = new tf.proto.tensorflow.MetaGraphDef(); + meta_graph_def.meta_info_def = new tf.proto.tensorflow.MetaGraphDef.MetaInfoDef(); + meta_graph_def.meta_info_def.any_info = event.wall_time.toString(); + meta_graph_def.graph_def = graph_def; + saved_model.meta_graphs.push(meta_graph_def); + break; + } + case 'meta_graph_def': { + const buffer = event.meta_graph_def; + const reader = protobuf.BinaryReader.open(buffer); + const meta_graph_def = tf.proto.tensorflow.MetaGraphDef.decode(reader); + saved_model.meta_graphs.push(meta_graph_def); + break; + } + case 'summary': { + for (const value of event.summary.value) { + summaries.push(value); + } + break; + } + case 'tagged_run_metadata': { + const entry = event.tagged_run_metadata; + const buffer = entry.run_metadata; + const reader = protobuf.BinaryReader.open(buffer); + const metadata = tf.proto.tensorflow.RunMetadata.decode(reader); + run_metadata.push(metadata); + break; + } + default: { + throw new tf.Error(`Unsupported event type '${event.what}'.`); + } + } + } + if (saved_model.meta_graphs.every((meta_graph) => meta_graph.graph_def.node.every((node) => node.op.startsWith('aten::') || node.op.startsWith('prim::') || node.op.startsWith('quantized::') || node.op === 'IO Node'))) { + producer = 'PyTorch'; + const openPyTorchMetadata = async (context, saved_model) => { + try { + const data = await context.request('pytorch-metadata.json'); + const metadata = new Map(); + for (const item of JSON.parse(data)) { + const name = item.name; + if (name.indexOf('::') !== -1) { + const 
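/* metadata names may carry an overload suffix after '.' (e.g. 'aten::add.Tensor'); group entries under the bare op name */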
index = name.indexOf('.'); + const key = (index !== -1) ? name.substring(0, index) : name; + if (!metadata.has(key)) { + metadata.set(key, []); + } + metadata.get(key).push(item); + } + } + for (const graph of saved_model.meta_graphs) { + for (const node of graph.graph_def.node) { + node.__metadata__ = Array.from(metadata.get(node.op) || []); + } + } + return saved_model; + } catch (error) { + return saved_model; + } + }; + const updated_saved_model = await openPyTorchMetadata(context, saved_model); + return openModel(updated_saved_model, format, producer, null); + } + return openSavedModel(context, saved_model, format, producer); + }; + const openJson = async (context, type) => { + try { + const obj = context.peek(type); + const format = `TensorFlow.js ${obj.format || 'graph-model'}`; + const producer = obj.convertedBy || obj.generatedBy || ''; + const meta_graph = new tf.proto.tensorflow.MetaGraphDef(); + meta_graph.graph_def = tf.JsonReader.decodeGraphDef(obj.modelTopology); + const saved_model = new tf.proto.tensorflow.SavedModel(); + saved_model.meta_graphs.push(meta_graph); + const nodes = new Map(); + for (const node of meta_graph.graph_def.node) { + node.input = node.input || []; + if (node.op === 'Const') { + nodes.set(node.name, node); + } + } + const shards = new Map(); + const manifests = Array.isArray(obj.weightsManifest) ? obj.weightsManifest : []; + for (const manifest of manifests) { + for (const path of manifest.paths) { + if (!shards.has(path)) { + shards.set(path, context.fetch(path)); + } + } + } + const openShards = (shards) => { + const dtype_size_map = new Map([ + [ 'float16', 2 ], [ 'float32', 4 ], [ 'float64', 8 ], + [ 'int8', 1 ], [ 'int16', 2 ], [ 'int32', 4 ], [ 'int64', 8 ], + [ 'uint8', 1 ], [ 'uint16', 2 ], [ 'uint32', 4 ], [ 'uint64', 8 ], + [ 'bool', 1 ] + ]); + for (const manifest of manifests) { + let buffer = null; + if (Array.isArray(manifest.paths) && manifest.paths.length > 0 && manifest.paths.every((path) => shards.has(path))) { + const list = manifest.paths.map((path) => shards.get(path)); + const size = list.reduce((a, b) => a + b.length, 0); + buffer = new Uint8Array(size); + let offset = 0; + for (const item of list) { + buffer.set(item, offset); + offset += item.length; + } + } + let offset = 0; + for (const weight of manifest.weights) { + const dtype = weight.quantization && weight.quantization.dtype ? weight.quantization.dtype : weight.dtype; + const size = weight.shape.reduce((a, b) => a * b, 1); + switch (dtype) { + case 'string': { + const data = []; + if (buffer && size > 0) { + const reader = new tf.BinaryReader(buffer.subarray(offset)); + for (let i = 0; i < size; i++) { + data[i] = reader.string(); + } + offset += reader.position; + } + if (nodes.has(weight.name)) { + const node = nodes.get(weight.name); + node.attr.value.tensor.dtype = tf.Utility.dataTypeKey(dtype); + node.attr.value.tensor.string_val = data; + } + break; + } + default: { + if (!dtype_size_map.has(dtype)) { + throw new tf.Error(`Unsupported weight data type size '${dtype}'.`); + } + const itemsize = dtype_size_map.get(dtype); + const length = itemsize * size; + const tensor_content = buffer ? 
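/* slice this weight's bytes out of the concatenated shard buffer; offset advances by itemsize * element count */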
buffer.slice(offset, offset + length) : null; + offset += length; + if (nodes.has(weight.name)) { + const node = nodes.get(weight.name); + node.attr.value.tensor.dtype = tf.Utility.dataTypeKey(dtype); + node.attr.value.tensor.tensor_content = tensor_content; + } + break; + } + } + } + } + return openSavedModel(context, saved_model, format, producer); + }; + try { + const contexts = await Promise.all(shards.values()); + for (const key of shards.keys()) { + const context = contexts.shift(); + const buffer = context.stream.peek(); + shards.set(key, buffer); + } + if (type === 'json.gz') { + try { + for (const key of shards.keys()) { + const stream = shards.get(key); + /* eslint-disable no-await-in-loop */ + const archive = zip.Archive.open(stream, 'gzip'); + /* eslint-enable no-await-in-loop */ + if (archive && archive.entries.size === 1) { + const stream = archive.entries.values().next().value; + const buffer = stream.peek(); + shards.set(key, buffer); + } + } + } catch (error) { + // continue regardless of error + } + } + return openShards(shards); + } catch (error) { + shards.clear(); + return openShards(shards); + } + } catch (error) { + throw new tf.Error(`File text format is not TensorFlow.js graph-model (${error.message}).`); + } + }; + const openTextGraphDef = (context) => { + try { + const stream = context.stream; + const reader = protobuf.TextReader.open(stream); + const graph_def = tf.proto.tensorflow.GraphDef.decodeText(reader); + const meta_graph = new tf.proto.tensorflow.MetaGraphDef(); + meta_graph.graph_def = graph_def; + const saved_model = new tf.proto.tensorflow.SavedModel(); + saved_model.meta_graphs.push(meta_graph); + const format = 'TensorFlow Graph'; + return openSavedModel(context, saved_model, format, null); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new tf.Error(`File text format is not tensorflow.GraphDef (${message.replace(/\.$/, '')}).`); + } + }; + const openTextMetaGraphDef = (context) => { + try { + const stream = context.stream; + const reader = protobuf.TextReader.open(stream); + const meta_graph = tf.proto.tensorflow.MetaGraphDef.decodeText(reader); + const saved_model = new tf.proto.tensorflow.SavedModel(); + saved_model.meta_graphs.push(meta_graph); + const format = 'TensorFlow MetaGraph'; + return openSavedModel(context, saved_model, format, null); + } catch (error) { + throw new tf.Error(`File text format is not tensorflow.MetaGraphDef (${error.message}).`); + } + }; + const openTextSavedModel = (stream) => { + try { + const reader = protobuf.TextReader.open(stream); + return tf.proto.tensorflow.SavedModel.decodeText(reader); + } catch (error) { + throw new tf.Error(`File text format is not tensorflow.SavedModel (${error.message}).`); + } + }; + const openBinaryGraphDef = (context) => { + let saved_model = null; + const format = 'TensorFlow Graph'; + try { + const stream = context.stream; + const reader = protobuf.BinaryReader.open(stream); + const graph_def = tf.proto.tensorflow.GraphDef.decode(reader); + const meta_graph = new tf.proto.tensorflow.MetaGraphDef(); + meta_graph.graph_def = graph_def; + saved_model = new tf.proto.tensorflow.SavedModel(); + saved_model.meta_graphs.push(meta_graph); + } catch (error) { + const message = error && error.message ? 
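/* normalize thrown values to a message string */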
error.message : error.toString(); + throw new tf.Error(`File format is not tensorflow.GraphDef (${message.replace(/\.$/, '')}).`); + } + return openSavedModel(context, saved_model, format, null); + }; + const openBinaryMetaGraphDef = (context) => { + let saved_model = null; + const format = 'TensorFlow MetaGraph'; + try { + const stream = context.stream; + const reader = protobuf.BinaryReader.open(stream); + const meta_graph = tf.proto.tensorflow.MetaGraphDef.decode(reader); + saved_model = new tf.proto.tensorflow.SavedModel(); + saved_model.meta_graphs.push(meta_graph); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new tf.Error(`File format is not tensorflow.MetaGraphDef (${message.replace(/\.$/, '')}).`); + } + return openSavedModel(context, saved_model, format, null); + }; + const openBinarySavedModel = (stream) => { + try { + const reader = protobuf.BinaryReader.open(stream); + return tf.proto.tensorflow.SavedModel.decode(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new tf.Error(`File format is not tensorflow.SavedModel (${message.replace(/\.$/, '')}).`); + } + }; + const openFingerprint = async (context) => { + let format = ''; + let saved_model = null; + try { + const identifier = 'saved_model.pb'; + const content = await context.fetch(identifier); + const stream = content.stream; + saved_model = openBinarySavedModel(stream); + + } catch (error) { + format = 'TensorFlow Fingerprint'; + saved_model = new tf.proto.tensorflow.SavedModel(); + } + const stream = context.stream; + const reader = protobuf.BinaryReader.open(stream); + saved_model.fingerprint = tf.proto.tensorflow.FingerprintDef.decode(reader); + return openSavedModel(context, saved_model, format, null); + }; + const openMemmapped = (context) => { + const stream = context.stream; + const readDirectoryOffset = (stream) => { + stream.seek(-8); + const buffer = stream.read(8); + const reader = new base.BinaryReader(buffer); + return reader.uint64(); + }; + const readDirectory = (stream, offset) => { + const end = stream.position - 8; + stream.seek(offset); + const buffer = stream.read(end - offset); + const reader = protobuf.BinaryReader.open(buffer); + return tf.proto.tensorflow.MemmappedFileSystemDirectory.decode(reader); + }; + const offset = readDirectoryOffset(stream); + const directory = readDirectory(stream, offset); + const elements = new Map(); + for (const element of directory.element) { + const name = element.name; + if (elements.has(name)) { + throw new tf.Error(`Memory mapped file directory contains duplicate '${name}'.`); + } + elements.set(name, { + offset: element.offset ? element.offset.toNumber() : 0, + length: element.length ? 
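/* a zero length is back-filled below from the nearest larger element offset */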
element.length.toNumber() : 0 + }); + } + const offsets = Array.from(elements).map(([, value]) => value.offset); + offsets.push(offset); + for (const value of elements.values()) { + if (value.length === 0) { + const min = Math.min.apply(null, offsets.filter((offset) => offset > value.offset)); + if (Number.isInteger(min)) { + value.length = min - value.offset; + } + } + } + for (const [, value] of elements) { + const offset = value.offset; + const length = value.length; + stream.seek(offset); + value.buffer = stream.read(length); + } + if (!elements.has('memmapped_package://.')) { + throw new tf.Error('Memory mapped file directory does not contain tensorflow.GraphDef root.'); + } + const element = elements.get('memmapped_package://.'); + const buffer = element.buffer; + const reader = protobuf.BinaryReader.open(buffer); + const graph_def = tf.proto.tensorflow.GraphDef.decode(reader); + const format = 'TensorFlow GraphDef Memmapped'; + const meta_graph = new tf.proto.tensorflow.MetaGraphDef(); + meta_graph.graph_def = graph_def; + const saved_model = new tf.proto.tensorflow.SavedModel(); + saved_model.meta_graphs.push(meta_graph); + return openSavedModel(context, saved_model, format, null); + }; + switch (target) { + case 'tf.bundle': + return openBundle(context); + case 'tf.data': + return openData(context); + case 'tf.events': + return openEventFile(context); + case 'tf.json': + return openJson(context, 'json'); + case 'tf.json.gz': + return openJson(context, 'json.gz'); + case 'tf.pbtxt.GraphDef': + return openTextGraphDef(context); + case 'tf.pbtxt.MetaGraphDef': + return openTextMetaGraphDef(context); + case 'tf.pbtxt.SavedModel': + return openSavedModel(context, openTextSavedModel(context.stream), '', null); + case 'tf.pb.GraphDef': + return openBinaryGraphDef(context); + case 'tf.pb.MetaGraphDef': + return openBinaryMetaGraphDef(context); + case 'tf.pb.SavedModel': + return openSavedModel(context, openBinarySavedModel(context.stream), '', null); + case 'tf.pb.FingerprintDef': + return openFingerprint(context); + case 'tf.pb.mmap': + return openMemmapped(context); + default: + throw new tf.Error(`Unsupported TensorFlow format '${target}'.`); + } + } +}; + +tf.Model = class { + + constructor(metadata, model, format, producer, bundle) { + this._format = format; + this._producer = producer || ''; + this._graphs = []; + if (model) { + for (let i = 0; i < model.meta_graphs.length; i++) { + const meta_graph = model.meta_graphs[i]; + const name = meta_graph.meta_info_def && meta_graph.meta_info_def.any_info ? meta_graph.meta_info_def.any_info.toString() : model.meta_graphs.length > 1 ? 
i.toString() : ''; + const graph = new tf.Graph(metadata, meta_graph, name, bundle); + this._graphs.push(graph); + } + } else { + const graph = new tf.Graph(metadata, null, '', bundle); + this._graphs.push(graph); + } + } + + get format() { + return this._format; + } + + get producer() { + return this._producer; + } + + get description() { + return null; + } + + get graphs() { + return this._graphs; + } +}; + +tf.Graph = class { + + constructor(metadata, meta_graph, name, bundle) { + this._name = name; + this._inputs = []; + this._outputs = []; + this._nodes = []; + this._version = null; + if (meta_graph && meta_graph.graph_def) { + const graph = meta_graph.graph_def; + if (graph.versions) { + this._version = `v${graph.versions.producer}`; + } else if (graph.version) { + this._version = graph.version; + } else if (meta_graph.meta_info_def && meta_graph.meta_info_def.tensorflow_version) { + this._version = meta_graph.meta_info_def.tensorflow_version; + } + if (meta_graph.meta_info_def && meta_graph.meta_info_def.tags) { + this._tags = meta_graph.meta_info_def.tags.join(', '); + } + metadata = new tf.GraphMetadata(metadata, graph.library); + const nodes = graph.node || []; + const context = new tf.Context(); + context.graph(metadata, nodes); + this._nodes = context.nodes; + this._inputs = context.inputs; + this._outputs = context.outputs; + } else if (bundle) { + const nodes = new Map(); + for (const tensor of bundle.tensors) { + const parts = tensor.name.split('/'); + if (bundle.format === 2) { + if (tensor.name === '_CHECKPOINTABLE_OBJECT_GRAPH' || + tensor.name.startsWith('optimizer/') || + tensor.name.startsWith('keras_api/metrics/') || + tensor.name.endsWith('/ExponentialMovingAverage') || + tensor.name.indexOf('.OPTIMIZER_SLOT') !== -1) { + continue; + } + if (tensor.name.endsWith('/.ATTRIBUTES/VARIABLE_VALUE')) { + parts.pop(); + parts.pop(); + } + } + const tensorName = parts.pop(); + const name = parts.join('/'); + if (!nodes.has(name)) { + nodes.set(name, []); + } + nodes.get(name).push({ name: tensorName, value: tensor }); + } + const namespaces = new Set(); + this._nodes = Array.from(nodes).map(([name, value]) => { + const node = { op: 'Node', name: name }; + return new tf.Node(metadata, node, namespaces, new tf.Context(), value); + }); + } + } + + get name() { + return this._name; + } + + get version() { + return this._version; + } + + get tags() { + return this._tags; + } + + get groups() { + return false; + // TODO return true; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + get metadata() { + return this._metadata; + } +}; + +tf.Argument = class { + + constructor(name, value) { + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } +}; + +tf.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new tf.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +tf.Function = class { + + constructor(metadata, name, func) { + this._name = name; + this._version = null; + this._tags = null; + this._inputs = []; + this._outputs = []; + this._nodes = []; 
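+ // Illustration of the `ret` mapping resolved below, assuming a typical FunctionDef: ret = { "output": "mul_0:z:0" } binds the signature's + // output_arg "output" to node "mul_0" (hypothetical names); splitting on ':' keeps only the producing node name, which is then exposed as a graph output value.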
+ this._description = !func ? 'Function definition not found.' : null; + const context = new tf.Context(); + const input_arg = func && func.signature ? func.signature.input_arg : []; + const output_arg = func && func.signature ? func.signature.output_arg : []; + const ret = func && func.ret ? func.ret : {}; + const nodes = func && func.node_def ? func.node_def : []; + if (input_arg) { + for (const input of input_arg) { + const value = context.value(input.name, new tf.TensorType(input.type, null), null); + this._inputs.push(new tf.Argument(input.name, [ value ])); + } + } + const output_arg_map = new Map(); + if (output_arg) { + const ret_map = new Map(); + for (const key of Object.keys(ret)) { + const value = func.ret[key]; + const split = value.split(':', 2); + ret_map.set(key, split[0]); + } + for (const output of output_arg) { + const name = ret_map.get(output.name); + const type = new tf.TensorType(output.type, null); + const argument = new tf.Argument(output.name, [ context.value(name, type, null) ]); + this._outputs.push(argument); + output_arg_map.set(name, output.name); + } + } + context.graph(metadata, nodes, output_arg_map); + this._nodes = context.nodes; + this._inputs = this._inputs.concat(context.inputs); + this._outputs = this._outputs.concat(context.outputs); + } + + get type() { + return 'function'; + } + + get name() { + return this._name; + } + + get description() { + return this._description || ''; + } + + get version() { + return this._version; + } + + get tags() { + return this._tags; + } + + get groups() { + return false; + // TODO return true; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +tf.Node = class { + + constructor(metadata, node, namespaces, context, tensors) { + this._type = node.metadata || metadata.type(node.op) || { name: node.op }; + this._name = node.name; + this._attributes = []; + this._inputs = []; + this._outputs = []; + this._group = ''; + if (node.name) { + if (namespaces.has(node.name)) { + this._group = node.name; + } else { + const index = node.name.lastIndexOf('/'); + if (index != -1) { + const namespace = node.name.substring(0, index); + if (namespaces.has(namespace)) { + this._group = namespace; + } + } + } + } + if (tensors) { + for (const tensor of tensors) { + const value = context.value(tensor.value.name, null, tensor.value); + const argument = new tf.Argument(tensor.name, [ value ]); + this._inputs.push(argument); + } + } else { + if (node.device !== undefined) { + this._device = node.device; + } + if (node.attr) { + this._attributes = Object.entries(node.attr).map(([name, value]) => { + return new tf.Attribute(metadata, node.op, name, value); + }); + } + let inputIndex = 0; + const inputs = (node.input || []).filter((input) => !input.name.startsWith('^')); + if (this._type && this._type.inputs) { + for (const input of this._type.inputs) { + let count = 1; + if (input.numberAttr) { + const inputNumber = node.attr[input.numberAttr]; + if (inputNumber && inputNumber.i) { + count = inputNumber.i; + } + } else if (input.typeListAttr) { + const inputTypeListAttr = node.attr[input.typeListAttr]; + if (inputTypeListAttr && inputTypeListAttr.list && inputTypeListAttr.list.type) { + count = inputTypeListAttr.list.type.length; + } + } + const values = inputs.slice(inputIndex, inputIndex + count).map((input) => context.value(input.name, null, null)); + const argument = new tf.Argument(input.name, values); + this._inputs.push(argument); + 
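+ // Example of the count resolution above (assuming a variadic op such as ConcatV2, whose "values" input declares numberAttr "N"): a node with + // attr N = 3 consumes three entries of the flat input list for that one argument, and remaining inputs such as "axis" are matched afterwards.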
inputIndex += count; + } + } + this._inputs.push(...inputs.slice(inputIndex).map((input, index) => { + const name = input.label ? input.label : (inputIndex + index).toString(); + return new tf.Argument(name, [ context.value(input.name) ]); + })); + let outputIndex = 0; + const outputs = node.output || []; + if (this._type && this._type.outputs) { + for (const output of this._type.outputs) { + let count = 1; + if (output.numberAttr) { + const outputNumber = node.attr[output.numberAttr]; + if (outputNumber && outputNumber.i) { + count = outputNumber.i; + } + } else if (output.typeListAttr) { + const outputTypeListAttr = node.attr[output.typeListAttr]; + if (outputTypeListAttr && outputTypeListAttr.list && outputTypeListAttr.list.type) { + count = outputTypeListAttr.list.type.length; + } + } + const values = outputs.slice(outputIndex, outputIndex + count).map((output) => { + return context.value(output.name ? output.name : '-', null, null); + }); + const name = output.name ? output.name : `output${this._outputs.length == 0 ? '' : this._outputs.length}`; + const argument = new tf.Argument(name, values); + this._outputs.push(argument); + outputIndex += count; + } + } + this._outputs.push(...outputs.slice(outputIndex).map((output, index) => { + const name = (outputIndex + index).toString(); + const value = context.value(output.name ? output.name : '-', null, null); + return new tf.Argument(name, [ value ]); + })); + const controlDependencies = node.controlDependencies || []; + this._controlDependencies = controlDependencies.map((input) => context.value(input.name)); + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get device() { + return this._device || null; + } + + get group() { + return this._group; + } + + get description() { + return ''; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get controlDependencies() { + return this._controlDependencies; + } + + get attributes() { + return this._attributes; + } +}; + +tf.Attribute = class { + + constructor(metadata, op, name, value) { + this._name = name; + this._value = null; + this._type = null; + const schema = value && value.metadata ? 
value.metadata : metadata.attribute(op, name); + const visible = metadata.visible(op, name); + if (schema && schema.type) { + this._type = schema.type; + } + switch (value.value) { + case undefined: + this._type = ''; + this._value = null; + break; + case 'type': + this._type = 'type'; + this._value = tf.Utility.dataType(value.type); + break; + case 'i': + this._value = value.i; + break; + case 'f': + this._value = value.f; + break; + case 'b': + this._value = value.b; + break; + case 'shape': + this._type = 'shape'; + this._value = new tf.TensorShape(value.shape); + break; + case 's': + this._value = tf.Utility.decodeText(value.s); + break; + case 'tensor': { + this._type = 'tensor'; + this._value = new tf.Tensor(value.tensor); + break; + } + case 'func': { + this._type = 'function'; + this._value = new tf.Node(metadata, { op: value.func.name, attr: value.func.attr }, null, new tf.Context()); + break; + } + case 'placeholder': { + this._type = 'placeholder'; + this._value = value; + break; + } + case 'list': { + const list = value.list; + if (list.s && list.s.length > 0) { + this._value = list.s.map((s) => tf.Utility.decodeText(s)); + } else if (list.i && list.i.length > 0) { + this._value = list.i; + } else if (list.f && list.f.length > 0) { + this._value = list.f; + } else if (list.type && list.type.length > 0) { + this._type = 'type[]'; + this._value = list.type.map((type) => tf.Utility.dataType(type)); + } else if (list.shape && list.shape.length > 0) { + this._type = 'shape[]'; + this._value = list.shape.map((shape) => new tf.TensorShape(shape)); + } else if (list.func && list.func.length > 0) { + this._type = 'function[]'; + this._value = list.func.map((func) => new tf.Node(metadata, { op: func.name, attr: func.attr })); + } else { + this._value = []; + } + break; + } + default: { + throw new tf.Error(`Unsupported attribute value type '${JSON.stringify(value).substring(0, 32)}'.`); + } + } + if (schema) { + if (schema.visible === false) { + this._visible = false; + } else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + const equals = (value, defaultValue) => { + if (!Array.isArray(defaultValue) && defaultValue === Object(defaultValue)) { + switch (defaultValue.type) { + case 'type': + defaultValue = tf.Utility.dataType(defaultValue.value); + break; + case 'shape': + case 'tensor': + defaultValue = defaultValue.value; + break; + default: + throw new tf.Error(JSON.stringify(defaultValue)); + } + } + if (typeof value === 'boolean' || typeof value === 'number' || typeof value === 'string') { + return value === defaultValue; + } + if (value instanceof base.Int64 || value instanceof base.Uint64) { + return value.toNumber() === defaultValue; + } + return false; + }; + const value = this._value; + const defaultValue = schema.default; + if (Array.isArray(value) && Array.isArray(defaultValue)) { + if (value.length === defaultValue.length && value.every((item, index) => equals(item, defaultValue[index]))) { + this._visible = false; + } + } else if (equals(value, defaultValue)) { + this._visible = false; + } + } + } + if (name == '_output_shapes') { + this._visible = false; + } + if (name == '_class') { + this._visible = false; + } + if (visible === false) { + this._visible = false; + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? 
false : true; + } +}; + +tf.Tensor = class { + + constructor(tensor, name, category) { + this._name = name; + this._category = category || null; + if (tensor) { + this._type = new tf.TensorType(tensor.dtype, tensor.tensor_shape || tensor.tensorShape); + this._tensor = tensor; + if (Object.prototype.hasOwnProperty.call(tensor, 'tensor_content')) { + this._values = tensor.tensor_content; + this._encoding = '<'; + } else { + const DataType = tf.proto.tensorflow.DataType; + switch (tensor.dtype) { + case DataType.DT_INVALID: { + break; + } + case DataType.DT_BFLOAT16: { + const values = tensor.half_val || []; + this._values = new Uint8Array(values.length << 2); + const view = new DataView(this._values.buffer, this._values.byteOffset, this._values.byteLength); + for (let i = 0; i < values.length; i++) { + view.setUint32(i << 2, values[i] << 16, true); + } + this._encoding = '<'; + break; + } + case DataType.DT_HALF: { + const values = tensor.half_val || []; + this._values = new Uint8Array(values.length << 1); + const view = new DataView(this._values.buffer, this._values.byteOffset, this._values.byteLength); + for (let i = 0; i < values.length; i++) { + view.setUint16(i << 1, values[i], true); + } + this._encoding = '<'; + break; + } + case DataType.DT_FLOAT: { + this._values = tensor.float_val || null; + this._encoding = '|'; + break; + } + case DataType.DT_DOUBLE: { + this._values = tensor.double_val || null; + this._encoding = '|'; + break; + } + case DataType.DT_UINT8: + case DataType.DT_UINT16: + case DataType.DT_INT8: + case DataType.DT_INT16: + case DataType.DT_INT32: { + this._values = tensor.int_val || null; + this._encoding = '|'; + break; + } + case DataType.DT_UINT32: { + this._values = tensor.uint32_val || null; + this._encoding = '|'; + break; + } + case DataType.DT_INT64: { + this._values = tensor.int64_val || null; + this._encoding = '|'; + break; + } + case DataType.DT_UINT64: { + this._values = tensor.uint64_val || null; + this._encoding = '|'; + break; + } + case DataType.DT_BOOL: { + this._values = tensor.bool_val || null; + this._encoding = '|'; + break; + } + case DataType.DT_STRING: { + this._values = tensor.string_val || null; + this._encoding = '|'; + break; + } + case DataType.DT_COMPLEX64: { + this._encoding = '|'; + const values = tensor.scomplex_val || null; + this._values = new Array(values.length >> 1); + for (let i = 0; i < values.length; i += 2) { + this._values[i >> 1] = base.Complex64.create(values[i], values[i + 1]); + } + break; + } + case DataType.DT_COMPLEX128: { + this._encoding = '|'; + const values = tensor.dcomplex_val || null; + this._values = new Array(values.length >> 1); + for (let i = 0; i < values.length; i += 2) { + this._values[i >> 1] = base.Complex128.create(values[i], values[i + 1]); + } + break; + } + default: { + throw new tf.Error(`Unsupported tensor data type '${tensor.dtype}'.`); + } + } + } + } else { + this._type = new tf.TensorType('?', null); + this._tensor = null; + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get category() { + return this._category; + } + + get encoding() { + return this._encoding; + } + + get values() { + let values = this._values; + if (this._encoding === '|' && Array.isArray(values)) { + if (this._type.dataType === 'string') { + values = values.map((value) => tf.Utility.decodeText(value)); + } + const shape = (this._tensor.tensor_shape || this._tensor.tensorShape).dim.map((dim) => dim.size); + const size = shape.reduce((a, b) => a * b, 1); + if (values.length === 
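+ // A constant-filled tensor is commonly serialized with a single repeated value; the check below expands that one value to the full element count, + // e.g. DT_FLOAT with shape [2, 2] and float_val = [0] yields four zeros.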
1 && size > 1) { + values = new Array(size).fill(values[0]); + } + } + return values; + } +}; + +tf.TensorType = class { + + constructor(dtype, shape) { + this._dtype = dtype; + this._shape = new tf.TensorShape(shape); + } + + get dataType() { + return this._dtype ? tf.Utility.dataType(this._dtype) : '?'; + } + + get shape() { + return this._shape; + } + + equals(obj) { + return obj && this.dataType === obj.dataType && this.shape.equals(obj.shape); + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +tf.TensorShape = class { + + constructor(shape) { + this._dimensions = null; + if (shape) { + if (shape.unknown_rank) { + this._dimensions = null; + } else if (Array.isArray(shape.dim)) { + if (shape.dim.length == 0) { + this._dimensions = []; + } else if (shape.dim.length == 1 && !shape.dim[0].size) { + this._dimensions = [ 0 ]; + } else { + this._dimensions = shape.dim.map((dim) => (dim.size && dim.size != -1) ? dim.size : '?'); + } + } + } + } + + get dimensions() { + return this._unknownRank ? null : this._dimensions; + } + + equals(obj) { + return (this.dimensions === null && obj.dimensions === null) || (Array.isArray(this.dimensions) && Array.isArray(obj.dimensions) && this.dimensions.length === obj.dimensions.length && this.dimensions.every((value, index) => obj.dimensions[index] === value)); + } + + toString() { + if (this._dimensions === null) { + return '[?]'; + } + if (this._dimensions.length === 0) { + return ''; + } + return `[${this._dimensions.map((dim) => (dim && dim != -1) ? dim.toString() : '?').join(',')}]`; + } +}; + +tf.TensorBundle = class { + + static async open(stream, identifier, context) { + const format = !identifier.toLowerCase().endsWith('.index') ? 1 : 2; + const table = new tf.TensorBundle.Table(stream); + if (!table.entries.has('')) { + throw new tf.Error('Bundle header not available.'); + } + if (format === 1) { + return new tf.TensorBundle(format, table.entries, []); + } + const buffer = table.entries.get(''); + const reader = protobuf.BinaryReader.open(buffer); + const header = tf.proto.tensorflow.BundleHeaderProto.decode(reader); + const numShards = header.num_shards; + const promises = []; + for (let i = 0; i < numShards; i++) { + const shardIndex = (`0000${i}`).slice(-5); + const shardCount = (`0000${numShards}`).slice(-5); + const filename = identifier.split('.'); + filename.pop(); + const basename = filename.join('.'); + const name = `${basename}.data-${shardIndex}-of-${shardCount}`; + promises.push(context.fetch(name)); + } + try { + const contexts = await Promise.all(promises); + const streams = contexts.map((context) => context.stream); + return new tf.TensorBundle(format, table.entries, streams); + } catch (error) { + context.exception(error, false); + return new tf.TensorBundle(format, table.entries, null); + } + } + + constructor(format, entries, streams) { + this._format = format; + this._tensors = []; + switch (format) { + case 1: { + const buffer = entries.get(''); + const reader = protobuf.BinaryReader.open(buffer); + const header = tf.proto.tensorflow.SavedTensorSlices.decode(reader); + const data = new Map(); + for (const [name, buffer] of entries) { + if (name !== '' && name !== 'global_step') { + const reader = protobuf.BinaryReader.open(buffer); + const slices = tf.proto.tensorflow.SavedTensorSlices.decode(reader); + const name = slices.data.name; + const tensor = slices.data.data; + if (!data.has(name)) { + if (tensor.tensor_content && tensor.tensor_content.length > 0) { + data.set(name, { key: 
'tensor_content', value: tensor.tensor_content }); + } else { + const keys = Object.keys(tensor).filter((key) => key.endsWith('_val') && tensor[key] && tensor[key].length > 0); + data.set(name, keys.length == 1 ? { key: keys[0], value: tensor[keys[0]] } : null); + } + } else { + const item = data.get(name); + if (item !== null) { + if (tensor[item.key] && tensor[item.key].length > 0) { + item.value = item.value.concat(tensor[item.key]); + } else { + data.set(name, null); + } + } + } + } + } + for (const meta of header.meta.tensor) { + if (meta.name !== 'global_step') { + const tensor = new tf.proto.tensorflow.TensorProto(); + tensor.dtype = meta.type; + tensor.tensor_shape = meta.shape; + const item = data.get(meta.name); + if (item) { + tensor[item.key] = item.value; + } + this._tensors.push(new tf.Tensor(tensor, meta.name, null)); + } + } + break; + } + case 2: { + entries.forEach((buffer, name) => { + if (name !== '') { + const reader = protobuf.BinaryReader.open(buffer); + const entry = tf.proto.tensorflow.BundleEntryProto.decode(reader); + const tensor = new tf.proto.tensorflow.TensorProto(); + tensor.dtype = entry.dtype; + tensor.tensor_shape = entry.shape; + const offset = Number.isInteger(entry.offset) ? entry.offset : entry.offset.toNumber(); + const size = Number.isInteger(entry.size) ? entry.size : entry.size.toNumber(); + if (streams) { + const stream = streams[entry.shard_id]; + stream.seek(offset); + tensor.tensor_content = stream.peek(size); + } + this._tensors.push(new tf.Tensor(tensor, name, null)); + } + }); + break; + } + default: { + throw new tf.Error(`Unsupported Tensor Bundle format '${format}'.`); + } + } + } + + get format() { + return this._format; + } + + get tensors() { + return this._tensors; + } +}; + +tf.TensorBundle.Table = class { + + constructor(stream) { + // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/io/table.cc + this.entries = new Map(); + if (stream.length <= 54) { + throw new tf.Error('Invalid index file size.'); + } + stream.seek(-48); + const buffer = stream.peek(48); + const reader = new tf.BinaryReader(buffer); + reader.seek(-8); + const signature = [ 0x57, 0xfb, 0x80, 0x8b, 0x24, 0x75, 0x47, 0xdb ]; + if (!reader.read(8).every((value, index) => value === signature[index])) { + throw new tf.Error('Invalid table signature.'); + } + reader.seek(-48); // kEncodedLength + reader.varint64(); // metaindex offset + reader.varint64(); // metaindex size + const indexOffset = reader.varint64(); + const indexSize = reader.varint64(); + const indexBlock = new tf.TensorBundle.Table.Block(stream, indexOffset, indexSize); + for (const [, value] of indexBlock.entries) { + const valueReader = new tf.BinaryReader(value); + const offset = valueReader.varint64(); + const size = valueReader.varint64(); + const block = new tf.TensorBundle.Table.Block(stream, offset, size); + for (const [name, value] of block.entries) { + this.entries.set(name, value); + } + } + stream.seek(0); + } +}; + +tf.TensorBundle.Table.Block = class { + + constructor(stream, offset, size) { + // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/io/block.cc + this.entries = new Map(); + stream.seek(offset); + const buffer = stream.read(size); // blockContents + const compression = stream.byte(); + stream.skip(4); // crc32 + let reader = new tf.BinaryReader(buffer); + switch (compression) { + case 0: // kNoCompression + break; + case 1: // kSnappyCompression + reader = new tf.BinaryReader(reader.unsnappy()); + break; + default: + throw new 
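+ // Block layout sketch (LevelDB table format, see link above): each block ends with `num_restarts` uint32 restart offsets plus a uint32 count, + // and keys between restart points are prefix compressed, e.g. after "bark" an entry (shared=3, non_shared=1, "s") reconstructs "bars".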
tf.Error(`Unsupported block compression '${compression}'.`); + } + reader.seek(-4); + const numRestarts = reader.int32(); + reader.seek(-4 - (4 * numRestarts)); + const restartOffsets = []; + for (let i = 0; i < numRestarts; i++) { + restartOffsets.push(reader.int32()); + } + const decoder = new TextDecoder(); + for (let i = 0; i < numRestarts; i++) { + reader.seek(restartOffsets[i]); + let key = ''; + while (reader.position < reader.length) { + const sharedSize = reader.varint32(); // index shared size + const nonSharedSize = reader.varint32(); // index non shared size + const valueSize = reader.varint32(); + if (sharedSize === 0 && nonSharedSize === 0 && valueSize === 0) { + break; + } + key = key.substring(0, sharedSize); + key = key + decoder.decode(reader.read(nonSharedSize)); + const value = reader.read(valueSize); + this.entries.set(key, value); + } + } + } +}; + +tf.BinaryReader = class extends base.BinaryReader { + + constructor(buffer) { + super(buffer); + this._decoder = new TextDecoder('utf-8'); + } + + string() { + const size = this.uint32(); + const buffer = this.read(size); + return this._decoder.decode(buffer); + } + + varint32() { + return this.varint64(); + } + + varint64() { + let result = 0; + for (let shift = 0; shift <= 63; shift += 7) { + const byte = this.byte(); + if (byte & 128) { + result |= (byte & 127) << shift; + } else { + result |= byte << shift; + break; + } + } + return result; + } + + unsnappy() { + const data = new Uint8Array(this.varint64()); + const mask = [0, 0xff, 0xffff, 0xffffff, 0xffffffff]; + let position = 0; + while (this._position < this._length) { + let length = 0; + const c = this.byte(); + switch (c & 0x03) { + case 0: { + length = (c >>> 2) + 1; + if (length > 60) { + const short = length - 60; + length = (this.uint32() & mask[short]) + 1; + this._position += short - 4; + } + data.set(this.read(length), position); + break; + } + case 1: { + length = ((c >>> 2) & 0x07) + 4; + const offset = this.byte() + ((c >>> 5) << 8); + data.set(data.subarray(position - offset, position - offset + length), position); + break; + } + case 2: { + length = (c >>> 2) + 1; + const offset = this.uint16(); + data.set(data.subarray(position - offset, position - offset + length), position); + break; + } + case 3: { + length = (c >>> 2) + 1; + const offset = this.uint32(); + data.set(data.subarray(position - offset, position - offset + length), position); + break; + } + default: { + break; + } + } + position += length; + } + return data; + } +}; + +tf.EventFileReader = class { + + static open(stream) { + if (stream.length < 16) { + return null; + } + const masked_crc32c = (bytes) => { + const poly = 0x82f63b78; + let crc = 0xffffffff; + for (let n = 0; n < bytes.length; n++) { + crc ^= bytes[n]; + crc = crc & 1 ? (crc >>> 1) ^ poly : crc >>> 1; + crc = crc & 1 ? (crc >>> 1) ^ poly : crc >>> 1; + crc = crc & 1 ? (crc >>> 1) ^ poly : crc >>> 1; + crc = crc & 1 ? (crc >>> 1) ^ poly : crc >>> 1; + crc = crc & 1 ? (crc >>> 1) ^ poly : crc >>> 1; + crc = crc & 1 ? (crc >>> 1) ^ poly : crc >>> 1; + crc = crc & 1 ? (crc >>> 1) ^ poly : crc >>> 1; + crc = crc & 1 ? 
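+ // The masking applied below follows the TFRecord convention: masked = rotr(crc, 15) + 0xa282ead8, i.e. ((crc >> 15) | (crc << 17)) plus a constant, + // so a checksum stored alongside data does not itself look like a valid CRC of the bytes it covers.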
(crc >>> 1) ^ poly : crc >>> 1; + crc = crc >>> 0; + } + crc = crc ^ 0xffffffff; + crc = crc >>> 0; + crc = ((crc >> 15) | (crc << 17)) + 0xa282ead8; + crc = crc >>> 0; + return crc; + }; + const buffer = stream.peek(12); + const reader = new tf.BinaryReader(buffer); + const length_bytes = reader.read(8); + const length_crc = reader.uint32(); + if (masked_crc32c(length_bytes) !== length_crc) { + return null; + } + return new tf.EventFileReader(stream); + } + + constructor(stream) { + this._stream = stream; + } + + read() { + if (this._stream.position < this._stream.length) { + const uint64 = (stream) => { + const buffer = stream.read(8); + const view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + return view.getUint64(0, true).toNumber(); + }; + const length = uint64(this._stream); + this._stream.skip(4); // masked crc of length + const buffer = this._stream.read(length); + const reader = protobuf.BinaryReader.open(buffer); + const event = tf.proto.tensorflow.Event.decode(reader); + this._stream.skip(4); // masked crc of data + return event; + } + return null; + } +}; + +tf.GraphMetadata = class { + + constructor(metadata, library) { + this._metadata = metadata; + this._functions = new Map(); + this._attributes = new Map(); + this._visibleCache = new Map(); + + if (library && Array.isArray(library.function)) { + for (const func of library.function) { + const name = func.signature.name; + if (this._functions.has(name)) { + throw new tf.Error(`Duplicate function name '${name}'.`); + } + this._functions.set(name, func); + } + } + } + + type(name) { + if (this._functions.has(name)) { + const func = this._functions.get(name); + if (func instanceof tf.Function) { + return func; + } + this._functions.set(name, new tf.Function(this, func.signature.name, func)); + return this._functions.get(name); + } + const type = this._metadata.type(name); + if (!type) { + this._functions.set(name, new tf.Function(this, name, null)); + return this._functions.get(name); + } + return type; + } + + attribute(type, name) { + const key = `${type}::${name}`; + if (!this._attributes.has(key)) { + const schema = this.type(type); + if (schema && schema.attributes) { + for (const attribute of schema.attributes) { + const key = `${type}::${attribute.name}`; + this._attributes.set(key, attribute); + } + } + } + return this._attributes.get(key); + } + + visible(type, name) { + if (!this._visibleCache.has(type)) { + const set = new Set(); + const schema = this.type(type); + if (schema && schema.inputs) { + for (const input of schema.inputs) { + if (input.typeAttr) { + set.add(input.typeAttr); + } else if (input.typeListAttr) { + set.add(input.typeListAttr); + } + if (input.numberAttr) { + set.add(input.numberAttr); + } + } + } + if (schema && schema.outputs) { + for (const output of schema.outputs) { + if (output.typeAttr) { + set.add(output.typeAttr); + } else if (output.typeListAttr) { + set.add(output.typeListAttr); + } + if (output.numberAttr) { + set.add(output.numberAttr); + } + } + } + this._visibleCache.set(type, set); + } + return !this._visibleCache.get(type).has(name); + } +}; + +tf.Context = class { + + constructor() { + this._values = new Map(); + this.inputs = []; + this.outputs = []; + this.nodes = []; + } + + value(name, type, tensor) { + if (name.length === 0 && tensor) { + return new tf.Value(name, type || null, tensor); + } + if (!this._values.has(name)) { + this._values.set(name, new tf.Value(name, type || null, tensor || null)); + } else if ((type && 
!type.equals(this._values.get(name).type)) || tensor) { + throw new tf.Error(`Duplicate value '${name}'.`); + } + return this._values.get(name); + } + + graph(metadata, nodes, output_arg_map) { + const namespaces = new Set(); + const node_map = new Map(); + for (const node of nodes) { + const nodeName = node.name; + node_map.set(nodeName, node); + if (node.op != 'Const') { + const index = nodeName.lastIndexOf('/'); + if (index != -1) { + const namespace = nodeName.substring(0, index); + namespaces.add(namespace); + } + } + node.output = []; + } + for (const node of nodes) { + const inputs = node.input; + node.input = []; + node.controlDependencies = []; + for (const input of inputs) { + const split = input.split(':', 3); + const [input_name] = split; + const input_index = split.length == 1 ? 0 : parseInt(split[split.length - 1]); + const from_name = input_name.startsWith('^') ? input_name.substring(1) : input_name; + const from = node_map.get(from_name); + const output_name = input_index == 0 ? from_name : `${from_name}:${input_index}`; + const input_arg = from ? { name: output_name, from: from } : { name: output_name }; + if (input_name.startsWith('^')) { + node.controlDependencies.push(input_arg); + } else { + node.input.push(input_arg); + } + if (from) { + for (let i = from.output.length; i <= input_index; i++) { + from.output.push({ name: i === 0 ? from_name : `${from_name}:${i}`, to: [] }); + } + from.output[input_index].to.push(node); + } + } + } + if (output_arg_map) { + for (const node of nodes) { + if (output_arg_map.has(node.name)) { + node.output.push({ name: node.name, to: [] }); + } + } + } + const map_tensor = (name, node, kind) => { + if (node && node.op === 'Const' && node.input.length === 0 && node.output.length === 1 && node.output[0].to.length === 1 && node.controlDependencies.length === 0) { + const value = node.attr.value; + if (value && Object.prototype.hasOwnProperty.call(value, 'tensor')) { + const tensor = new tf.Tensor(value.tensor, name, kind); + return this.value(name, tensor.type, tensor); + } + } + return null; + }; + const map_resource = (name, node, tensor) => { + if (node && node.op === 'Placeholder' && node.input.length === 0 && node.output.length === 1 && node.controlDependencies.length === 0) { + const dtype = node.attr.dtype.type; + if (dtype === tf.proto.tensorflow.DataType.DT_RESOURCE) { + return this.value(name, null, tensor); + } + } + return null; + }; + for (const node of node_map.values()) { + if (node.op === 'Identity' && node.input.length === 1 && node.output.length === 1 && node.output[0].to.length === 1 && node.controlDependencies.length === 0) { + const initializer = map_tensor(node.name, node.input[0].from, 'Identity Constant'); + if (initializer) { + node_map.delete(initializer.name); + node_map.delete(node.input[0].name); + } + const identity = node.input[0].from; + if (identity && identity.op === 'Identity' && identity.input.length === 1 && identity.output.length === 1 && node.output[0].to.length === 1 && node.controlDependencies.length === 0) { + const initializer = map_tensor(node.name, identity.input[0].from, 'Identity Constant'); + if (initializer) { + node_map.delete(initializer.name); + node_map.delete(initializer.name); + node_map.delete(identity.name); + node_map.delete(node.name); + } + } + } + } + for (const node of node_map.values()) { + const initializer = map_tensor(node.name, node, 'Const'); + if (initializer) { + node_map.delete(node.name); + node_map.delete(initializer.name); + } + } + for (const node of 
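+ // Input parsing above: "decoder/lstm:1" (illustrative name) denotes output 1 of node "decoder/lstm", a bare "init" denotes output 0, + // and a leading '^' as in "^init" marks a control dependency rather than a data edge.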
node_map.values()) { + if (node.op === 'ReadVariableOp' && node.input.length === 1 && node.output.length === 1 && node.output[0].to.length === 1 && node.controlDependencies.length === 0) { + if (node.attr && node.attr.dtype && node.attr._output_shapes && node.attr._output_shapes.list && node.attr._output_shapes.list.shape) { + const tensor = new tf.proto.tensorflow.TensorProto(); + tensor.dtype = node.attr.dtype.type; + /* eslint-disable prefer-destructuring */ + tensor.tensor_shape = node.attr._output_shapes.list.shape[0]; + /* eslint-enable prefer-destructuring */ + const name = node.name; + const initializer = map_resource(name, node.input[0].from, new tf.Tensor(tensor, name, 'Resource Variable')); + if (initializer) { + node_map.delete(initializer.name); + node_map.delete(node.input[0].name); + } + } + } + } + const input_map = new Map(); + for (const node of node_map.values()) { + if (node.op == 'Placeholder' && node.input.length === 0 && node.output.length === 1 && node.controlDependencies.length === 0) { + const dtype = node.attr.dtype; + const shape = node.attr.shape; + if (dtype && dtype.type && shape && shape.shape) { + const name = node.name; + const type = new tf.TensorType(dtype.type, shape.shape); + const value = this.value(name, type, null); + input_map.set(name, new tf.Argument(name, [ value ])); + node_map.delete(name); + } + } + } + const updateTorchScript = (node_map) => { + for (const node of node_map.values()) { + if (node.op === 'prim::Constant' && node.input.length === 0 && node.controlDependencies.length === 0 && node.attr && Object.keys(node.attr).length === 1 && node.attr.attr && node.attr.attr.s) { + const value = tf.Utility.decodeText(node.attr.attr.s); + const match = /{\s*value\s*:\s*(.*)\s*}/.exec(value); + if (match) { + node.value = match[1].trim(); + } + const empty = /{\s*}/.exec(value); + if (empty) { + node.value = null; + } + } + if (node.op === 'prim::GetAttr' && node.input.length === 1 && node.controlDependencies.length === 0 && node.attr && Object.keys(node.attr).length === 1 && node.attr.attr && node.attr.attr.s) { + const value = tf.Utility.decodeText(node.attr.attr.s); + const match = /{\s*name\s*:\s*([A-Za-z0-9_]*)\s*}/.exec(value); + if (match) { + node.value = match[1].trim(); + } + } + if (node.op === 'IO Node' && node.controlDependencies.length === 0) { + const shape = node.attr && node.attr._output_shapes && node.attr._output_shapes.list && node.attr._output_shapes.list.shape ? node.attr._output_shapes.list.shape[0] : null; + const type = shape ? 
new tf.TensorType('?', shape) : null; + if (node.input.length === 0 && node.output.length === 1) { + const argument = new tf.Argument(node.name, [ this.value(node.output[0].name, type, null) ]); + this.inputs.push(argument); + node_map.delete(node.name); + } + if (node.input.length === 1 && node.output.length === 0) { + const argument = new tf.Argument(node.name, [ this.value(node.input[0].name, type, null) ]); + this.outputs.push(argument); + node_map.delete(node.name); + } + } + if (Object.keys(node.attr).length === 2 && + node.attr.attr && node.attr.attr.s && node.attr._output_shapes) { + const value = tf.Utility.decodeText(node.attr.attr.s); + if (/^\s*$/.exec(value) || /{\s*}/.exec(value)) { + node.attr = {}; + } + } + } + const remove_input = (input, node) => { + const from = input.from; + if (from) { + for (const output of from.output) { + output.to = output.to.filter((to) => to !== node); + } + if (from.output.every((output) => output.to.length === 0) && from.controlDependencies.length === 0) { + from.remove = true; + } + delete input.from; + } + }; + for (const node of node_map.values()) { + if (node.op === 'prim::ListConstruct' && node.input.every((input) => input.from.value !== undefined) && node.controlDependencies.length === 0) { + node.value = node.input.map((input) => input.from.value); + for (const input of node.input) { + remove_input(input, node); + } + node.input = []; + } + } + for (const node of node_map.values()) { + const remove = new Set(); + for (let i = 0; i < node.input.length; i++) { + const input = node.input[i]; + const from = input.from; + if (from) { + if (from.op === 'prim::GetAttr' && from.input.length === 1 && from.output.length === 1 && from.controlDependencies.length === 0 && from.value !== undefined) { + remove_input(input, node); + input.label = from.value; + const tensor = new tf.Tensor(null, input.name, from.op); + this.value(input.name, null, tensor); + } + if (from.op === 'prim::Constant' && from.input.length === 0 && from.controlDependencies.length === 0 && from.value !== undefined) { + input.constant = from.value; + remove_input(input, node); + remove.add(input.name); + } + if (from.op === 'prim::ListConstruct' && from.output.length === 1 && from.controlDependencies.length === 0 && from.value !== undefined) { + input.list = from.value; + remove_input(input, node); + remove.add(input.name); + } + } + } + if (node.__metadata__) { + const match = (node, schema) => { + const args = schema.inputs || []; + const inputs = node.input || []; + if (inputs.length > args.length) { + return false; + } + for (let i = 0; i < inputs.length; i++) { + const input = inputs[i]; + const arg = args[i]; + switch (arg.type) { + case 'Tensor': { + if ((input.constant === undefined && input.list === undefined) || input.constant === null) { + continue; + } + break; + } + case 'int64': + case 'SymInt': { + if (input.constant !== undefined && Number.isInteger(parseInt(input.constant))) { + continue; + } + break; + } + case 'float32': { + if (input.constant !== undefined && !isNaN(parseFloat(input.constant))) { + continue; + } + break; + } + case 'int64[]': + case 'int64[2]': + case 'SymInt[]': + case 'SymInt[2]': { + if (Array.isArray(input.list)) { + const list = input.list.map((item) => parseInt(item)); + if (list.every((value) => Number.isInteger(value))) { + continue; + } + } + break; + } + case 'boolean': { + if (input.constant === 'false' || + input.constant === 'true' || + input.constant === '0' || + input.constant === '1') { + continue; + 
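+ // Matching sketch (hypothetical TorchScript node): a conv-like call whose trailing inputs were folded to constants, e.g. a stride list ["2", "2"] + // and a constant "1", satisfies a schema expecting (Tensor, Tensor, int64[2], int64); matched inputs are then converted into typed attributes below instead of staying tensor edges.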
} + break; + } + case 'Scalar': { + if (input.constant !== undefined && Number.isInteger(parseInt(input.constant))) { + continue; + } + break; + } + default: { + break; + } + } + return false; + } + return true; + }; + const schema = node.__metadata__.find((schema) => match(node, schema)); + if (schema) { + const args = schema.inputs || []; + const inputs = node.input || []; + for (let i = 0; i < inputs.length; i++) { + const input = inputs[i]; + delete input.metadata; + const arg = args[i]; + switch (arg.type) { + case 'Tensor': { + input.metadata = arg; + break; + } + case 'int64': + case 'SymInt': { + const value = parseInt(input.constant); + input.attr = new tf.proto.tensorflow.AttrValue(); + input.attr.i = value; + input.attr.metadata = arg; + break; + } + case 'float32': { + const value = parseFloat(input.constant); + input.attr = new tf.proto.tensorflow.AttrValue(); + input.attr.f = value; + input.attr.metadata = arg; + break; + } + case 'int64[]': + case 'int64[2]': + case 'SymInt[]': + case 'SymInt[2]': { + const list = input.list.map((item) => parseInt(item)); + input.attr = new tf.proto.tensorflow.AttrValue(); + input.attr.list = new tf.proto.tensorflow.ListValue(); + input.attr.list.i = list; + input.attr.metadata = arg; + break; + } + case 'boolean': { + input.attr = new tf.proto.tensorflow.AttrValue(); + input.attr.b = input.constant === 'true' || input.constant === '1'; + input.attr.metadata = arg; + break; + } + case 'Scalar': { + const value = parseInt(input.constant); + input.attr = new tf.proto.tensorflow.AttrValue(); + input.attr.i = value; + input.attr.metadata = arg; + break; + } + default: { + break; + } + } + } + node.metadata = Object.assign({}, schema); + node.metadata.name = node.op; + } + } + node.input = node.input.filter((input, index) => { + if (input.attr) { + const name = input.attr.metadata ? input.attr.metadata.name : index.toString(); + node.attr[name] = input.attr; + } else if (input.constant !== undefined && input.constant !== null) { + const attr = new tf.proto.tensorflow.AttrValue(); + attr.s = input.constant; + node.attr[index.toString()] = attr; + } else if (input.list !== undefined) { + const attr = new tf.proto.tensorflow.AttrValue(); + attr.list = new tf.proto.tensorflow.ListValue(); + attr.list.s = input.list; + node.attr[index.toString()] = attr; + } + return !remove.has(input.name); + }); + } + for (const node of node_map.values()) { + if (node.op === 'prim::GetAttr' && node.remove) { + node_map.delete(node.name); + } + if (node.op === 'prim::Constant' && node.remove) { + node_map.delete(node.name); + } + if (node.op === 'prim::ListConstruct' && node.remove) { + node_map.delete(node.name); + } + } + }; + updateTorchScript(node_map); + for (const input of input_map.values()) { + this.inputs.push(input); + } + for (const node of node_map.values()) { + this.nodes.push(new tf.Node(metadata, node, namespaces, this)); + } + } +}; + +tf.Utility = class { + + static decodeText(value) { + if (typeof value === 'string') { + return value; + } + if (value.length === 0) { + return ''; + } + tf.Utility._utf8Decoder = tf.Utility._utf8Decoder || new TextDecoder('utf-8'); + return tf.Utility._utf8Decoder.decode(value); + } + + static dataType(type) { + if (!tf.Utility._dataTypes) { + const DataType = tf.proto.tensorflow.DataType; + const dataTypes = new Map(Object.entries(DataType).map(([name, value]) => { + const key = name.startsWith('DT_') ? 
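+ // Derived names strip the DT_ prefix and lowercase the remainder (DT_INT32 -> "int32"); the overrides below substitute the conventional aliases, + // e.g. tf.Utility.dataType(DataType.DT_FLOAT) -> 'float32' rather than 'float'.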
name.substring(3) : name; + return [ value, key.toLowerCase() ]; + })); + dataTypes.set(DataType.DT_HALF, 'float16'); + dataTypes.set(DataType.DT_FLOAT, 'float32'); + dataTypes.set(DataType.DT_DOUBLE, 'float64'); + dataTypes.set(DataType.DT_BOOL, 'boolean'); + tf.Utility._dataTypes = dataTypes; + } + return tf.Utility._dataTypes.has(type) ? tf.Utility._dataTypes.get(type) : '?'; + } + + static dataTypeKey(type) { + if (!tf.Utility._dataTypeKeys) { + tf.Utility.dataType(0); + tf.Utility._dataTypeKeys = new Map(Array.from(tf.Utility._dataTypes).map(([key, value]) => [ value, key ])); + } + return tf.Utility._dataTypeKeys.get(type); + } +}; + +tf.JsonReader = class { + + static decodeGraphDef(json) { + const message = new tf.proto.tensorflow.GraphDef(); + message.node = json.node.map((node) => tf.JsonReader.decodeNodeDef(node)); + message.library = tf.JsonReader.decodeFunctionDefLibrary(json.library); + if (json.versions) { + message.versions = tf.JsonReader.decodeVersionDef(json.versions); + } + return message; + } + + static decodeNodeDef(json) { + const message = new tf.proto.tensorflow.NodeDef(); + message.name = json.name; + message.op = json.op; + message.input = json.input || []; + if (json.device) { + message.device = json.device; + } + message.attr = {}; + if (json.attr) { + for (const [name, value] of Object.entries(json.attr)) { + message.attr[name] = tf.JsonReader.decodeAttrValue(value); + } + } + return message; + } + + static decodeAttrValue(json) { + const message = new tf.proto.tensorflow.AttrValue(); + const keys = Object.keys(json); + if (keys.length !== 1) { + throw new tf.Error(`Unsupported JSON tensorflow.AttrValue '${JSON.stringify(keys)}'.`); + } + const [key] = keys; + const value = json[key]; + switch (key) { + case 'type': + message.type = typeof value === 'number' ? value : tf.proto.tensorflow.DataType[value]; + break; + case 'shape': + message.shape = tf.JsonReader.decodeTensorShapeProto(value); + break; + case 'tensor': + message.tensor = tf.JsonReader.decodeTensorProto(value); + break; + case 'b': + message[key] = value; + break; + case 'f': + message[key] = parseFloat(value); + break; + case 'i': + message[key] = parseInt(value, 10); + break; + case 's': + message[key] = typeof value === 'string' ? atob(value) : tf.Utility.decodeText(Uint8Array.from(value)); + break; + case 'list': + message.list = tf.JsonReader.decodeAttrValueListValue(json.list); + break; + case 'func': + message[key] = value; + break; + default: + throw new tf.Error(`Unsupported JSON 'tensorflow.AttrValue.${key}'.`); + } + return message; + } + + static decodeAttrValueListValue(json) { + const message = new tf.proto.tensorflow.AttrValue.ListValue(); + const entries = Object.entries(json); + if (entries.length > 0) { + const entry = entries.find(([, value]) => Array.isArray(value) && value.length > 0); + if (!entry) { + throw new tf.Error(`Unsupported JSON tensorflow.AttrValue.ListValue '${JSON.stringify(entries.map(([key]) => key))}'.`); + } + const [key, value] = entry; + switch (key) { + case 'i': + message[key] = value.map((value) => parseInt(value, 10)); + break; + case 's': + message[key] = value.map((value) => typeof value === 'string' ? 
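+ // tf.js graph-model JSON carries bytes attributes base64 encoded, e.g. { "s": "Y29udjE=" } decodes via atob to "conv1"; + // numeric arrays of byte values take the Uint8Array path instead.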
atob(value) : tf.Utility.decodeText(Uint8Array.from(value))); + break; + case 'type': + message[key] = value.map((value) => tf.proto.tensorflow.DataType[value]); + break; + case 'shape': + message[key] = value.map((shape) => tf.JsonReader.decodeTensorShapeProto(shape)); + break; + default: + throw new tf.Error(`Unsupported JSON 'tensorflow.AttrValue.ListValue.${key}'.`); + } + } + return message; + } + + static decodeTensorProto(json) { + const message = new tf.proto.tensorflow.TensorProto(); + message.dtype = tf.proto.tensorflow.DataType[json.dtype]; + message.tensor_shape = tf.JsonReader.decodeTensorShapeProto(json.tensorShape); + return message; + } + + static decodeTensorShapeProto(json) { + const message = new tf.proto.tensorflow.TensorShapeProto(); + message.dim = (json.dim || []).map((json) => { + const message = new tf.proto.tensorflow.TensorShapeProto.Dim(); + message.size = typeof json.size === 'string' ? parseInt(json.size, 10) : json.size; + message.name = json.name; + return message; + }); + return message; + } + + static decodeVersionDef(json) { + const message = new tf.proto.tensorflow.VersionDef(); + message.producer = json.producer; + message.min_consumer = json.min_consumer; + message.bad_consumers = json.bad_consumers ? json.bad_consumers : []; + return message; + } + + static decodeFunctionDefLibrary(json) { + const message = new tf.proto.tensorflow.FunctionDefLibrary(); + message.function = json ? (json.function || []).map((json) => tf.JsonReader.decodeFunctionDef(json)) : []; + return message; + } + + static decodeFunctionDef(json) { + const message = new tf.proto.tensorflow.FunctionDef(); + message.signature = tf.JsonReader.decodeOpDef(json.signature); + message.attr = {}; + if (json.attr) { + for (const [name, value] of Object.entries(json.attr)) { + message.attr[name] = tf.JsonReader.decodeAttrValue(value); + } + } + message.node_def = (json.nodeDef || []).map((json) => tf.JsonReader.decodeNodeDef(json)); + message.ret = json.ret; + message.control_ret = json.control_ret; + return message; + } + + static decodeOpDef(json) { + const message = new tf.proto.tensorflow.OpDef(); + message.name = json.name; + message.input_arg = json.inputArg.map((json) => tf.JsonReader.decodeArgDef(json)); + message.output_arg = json.outputArg.map((json) => tf.JsonReader.decodeArgDef(json)); + return message; + } + + static decodeArgDef(json) { + const message = new tf.proto.tensorflow.OpDef.ArgDef(); + message.name = json.name; + message.description = json.description; + return message; + } +}; + +tf.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading TensorFlow model.'; + } +}; + +export const ModelFactory = tf.ModelFactory; diff --git a/tflite-metadata.json b/tflite-metadata.json new file mode 100644 index 00000000000..92b399a48ad --- /dev/null +++ b/tflite-metadata.json @@ -0,0 +1,1197 @@ +[ + { + "name": "Add", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "pot_scale_int16", "type": "boolean", "default": true } + ], + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ] + }, + { + "name": "ArgMax", + "attributes": [ + { "name": "output_type", "type": "TensorType", "default": "FLOAT32" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "dimension", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "ArgMin", + 
"attributes": [ + { "name": "output_type", "type": "TensorType", "default": "FLOAT32" } + ] + }, + { + "name": "AveragePool2D", + "category": "Pool", + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "stride_w", "type": "int32" }, + { "name": "stride_h", "type": "int32" }, + { "name": "filter_width", "type": "int32" }, + { "name": "filter_height", "type": "int32" } + ], + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "BatchMatMul", + "attributes": [ + { "name": "adj_x", "type": "boolean", "default": false }, + { "name": "adj_y", "type": "boolean", "default": false }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + ] + }, + { + "name": "BidirectionalSequenceLSTM", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "cell_clip", "type": "float32", "default": 0 }, + { "name": "proj_clip", "type": "float32", "default": 0 }, + { "name": "merge_outputs", "type": "boolean", "default": false }, + { "name": "time_major", "type": "boolean", "default": true }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + ] + }, + { + "name": "BidirectionalSequenceRNN", + "attributes": [ + { "name": "time_major", "type": "boolean", "default": false }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "merge_outputs", "type": "boolean", "default": false }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + ] + }, + { + "name": "Bucketize", + "attributes": [ + { "name": "boundaries", "type": "float32[]", "default": 0 } + ] + }, + { + "name": "Call", + "attributes": [ + { "name": "subgraph", "type": "uint32", "default": 0 } + ] + }, + { + "name": "CallOnce", + "attributes": [ + { "name": "init_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "Cast", + "attributes": [ + { "name": "in_data_type", "type": "TensorType", "default": "FLOAT32" }, + { "name": "out_data_type", "type": "TensorType", "default": "FLOAT32" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ConcatEmbeddings", + "attributes": [ + { "name": "num_channels", "type": "int32", "default": 0 }, + { "name": "num_columns_per_channel", "type": "int32[]", "default": 0 }, + { "name": "embedding_dim_per_channel", "type": "int32[]", "default": 0 } + ] + }, + { + "name": "Concatenation", + "category": "Tensor", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ], + "inputs": [ + { "name": "inputs", "list": true } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Conv2D", + "category": "Layer", + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME", "description": "`SAME`|`VALID`" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE", "description": "`NONE`|`RELU`|`RELU6`" }, + { "name": "stride_w", "type": "int32", "default": 0, "description": "stride of the filter window" }, + { "name": "stride_h", "type": "int32", "default": 0, "description": "stride of the filter window" }, + { "name": "dilation_w_factor", "type": 
"int32", "default": 1 }, + { "name": "dilation_h_factor", "type": "int32", "default": 1 }, + { "name": "quantized_bias_type", "type": "TensorType", "default": "FLOAT32" } + ], + "inputs": [ + { "name": "input", "description": "4D tensor" }, + { "name": "filter" }, + { "name": "bias", "description": "(optional)" } + ], + "outputs": [ + { "name": "output", "description": "result of 2D convolution of the input tensor" } + ] + }, + { + "name": "Cumsum", + "attributes": [ + { "name": "exclusive", "type": "boolean", "default": false }, + { "name": "reverse", "type": "boolean", "default": false } + ] + }, + { + "name": "Densify", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "DepthToSpace", + "attributes": [ + { "name": "block_size", "type": "int32", "default": 0 } + ] + }, + { + "name": "DepthwiseConv2D", + "category": "Layer", + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "stride_w", "type": "int32", "default": 0 }, + { "name": "stride_h", "type": "int32", "default": 0 }, + { "name": "depth_multiplier", "type": "int32", "default": 0 }, + { "name": "dilation_w_factor", "type": "int32", "default": 1 }, + { "name": "dilation_h_factor", "type": "int32", "default": 1 } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "weights", "type": "T" }, + { "name": "bias", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Dequantize", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Div", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ], + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ] + }, + { + "name": "EmbeddingLookupSparse", + "attributes": [ + { "name": "combiner", "type": "CombinerType", "default": "SUM" } + ] + }, + { + "name": "Exp", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ExpandDims", + "inputs": [ + { "name": "input" }, + { "name": "axis_param" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ExtractFeatures", + "inputs": [ + { "name": "ngrams" } + ], + "outputs": [ + { "name": "features" }, + { "name": "weights" } + ] + }, + { + "name": "FakeQuant", + "attributes": [ + { "name": "min", "type": "float32", "default": 0 }, + { "name": "max", "type": "float32", "default": 0 }, + { "name": "num_bits", "type": "int32", "default": 0 }, + { "name": "narrow_range", "type": "boolean", "default": false } + ] + }, + { + "name": "Fill", + "inputs": [ + { "name": "dims", "type": "T" }, + { "name": "value", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "FullyConnected", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "weights_format", "type": "FullyConnectedOptionsWeightsFormat", "default": "DEFAULT" }, + { "name": "keep_num_dims", "type": "boolean", "default": false }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false }, + { "name": "quantized_bias_type", "type": "TensorType", "default": "FLOAT32" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "weights", "type": "T" }, + { "name": 
"bias", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Gather", + "category": "Transform", + "attributes": [ + { "name": "axis", "default": 0, "type": "int32" }, + { "name": "batch_dims", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "positions" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Gelu", + "category": "Activation", + "attributes": [ + { "name": "approximate", "type": "boolean", "default": false } + ] + }, + { + "name": "HardSwish", + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Hashtable", + "attributes": [ + { "name": "table_id", "type": "int32", "default": 0 }, + { "name": "key_dtype", "type": "TensorType", "default": "FLOAT32" }, + { "name": "value_dtype", "type": "TensorType", "default": "FLOAT32" } + ] + }, + { + "name": "HashtableLookup", + "inputs": [ + { "name": "key" }, + { "name": "keys" }, + { "name": "values" } + ], + "outputs": [ + { "name": "value" }, + { "name": "hits" } + ] + }, + { + "name": "If", + "attributes": [ + { "name": "then_subgraph_index", "type": "int32", "default": 0 }, + { "name": "else_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "InstanceNorm", + "attributes": [ + { "name": "epsilon", "type": "float32", "default": 0 }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ] + }, + { + "name": "LeakyRelu", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 } + ] + }, + { + "name": "LocalResponseNormalization", + "category": "Normalization", + "attributes": [ + { "name": "radius", "type": "int32", "default": 0 }, + { "name": "bias", "type": "float32", "default": 0 }, + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LogicalOr", + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ] + }, + { + "name": "Logistic", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LogSoftmax", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "LSHProjection", + "attributes": [ + { "name": "type", "type": "LSHProjectionType", "default": "UNKNOWN" } + ], + "inputs": [ + { "name": "hash" }, + { "name": "input" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "LSTM", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "cell_clip", "type": "float32", "default": 0 }, + { "name": "proj_clip", "type": "float32", "default": 0 }, + { "name": "kernel_type", "type": "LSTMKernelType", "default": "FULL" }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input", "type": "T", "description": "Input tensor." 
}, + { "name": "input_input_weights", "type": "T", "option": "optional", "description": "Input to input weights tensor.", "visible": false }, + { "name": "input_forget_weights", "type": "T", "description": "Input to forget weights tensor.", "visible": false }, + { "name": "input_cell_weights", "type": "T", "description": "Input to cell weights tensor.", "visible": false }, + { "name": "input_output_weights", "type": "T", "description": "Input to output weights tensor.", "visible": false }, + { "name": "recurrent_input_weights", "type": "T", "option": "optional", "description": "Recurrent to input weights tensor.", "visible": false }, + { "name": "recurrent_forget_weights", "type": "T", "description": "Recurrent to forget weights tensor.", "visible": false }, + { "name": "recurrent_cell_weights", "type": "T", "description": "Recurrent to cell weights tensor.", "visible": false }, + { "name": "recurrent_output_weights", "type": "T", "description": "Recurrent to output weights tensor.", "visible": false }, + { "name": "cell_input_weights", "type": "T", "option": "optional", "description": "Cell to input weights tensor.", "visible": false }, + { "name": "cell_forget_weights", "type": "T", "option": "optional", "description": "Cell to forget weights tensor.", "visible": false }, + { "name": "cell_output_weights", "type": "T", "option": "optional", "description": "Cell to output weights tensor.", "visible": false }, + { "name": "input_gate_bias", "type": "T", "option": "optional", "description": "Input gate bias tensor.", "visible": false }, + { "name": "forget_gate_bias", "type": "T", "description": "Forget gate bias tensor.", "visible": false }, + { "name": "cell_gate_bias", "type": "T", "description": "Cell gate bias tensor.", "visible": false }, + { "name": "output_gate_bias", "type": "T", "description": "Output gate bias tensor.", "visible": false }, + { "name": "projection_weights", "type": "T", "option": "optional", "description": "Projection weights tensor.", "visible": false }, + { "name": "projection_bias", "type": "T", "option": "optional", "description": "Projection bias tensor.", "visible": false } + ], + "outputs": [ + { "name": "scratch", "type": "T" }, + { "name": "output_state", "type": "T" }, + { "name": "cell_state", "type": "T" }, + { "name": "output", "type": "T" } + ] + }, + { + "name": "Maximum", + "inputs": [ + { "name": "input1" }, + { "name": "input2" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "MaxPool2D", + "category": "Pool", + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "stride_w", "type": "int32" }, + { "name": "stride_h", "type": "int32" }, + { "name": "filter_width", "type": "int32" }, + { "name": "filter_height", "type": "int32" } + ], + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Mean", + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Minimum", + "inputs": [ + { "name": "input1" }, + { "name": "input2" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "MirrorPad", + "attributes": [ + { "name": "mode", "type": "MirrorPadMode", "default": "REFLECT" } + ] + }, + { + "name": "Mul", + "attributes": [ + { "name": 
"fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ], + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ] + }, + { + "name": "Normalize", + "category": "Normalization", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "NotEqual", + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C" } + ] + }, + { + "name": "OneHot", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "Pack", + "attributes": [ + { "name": "values_count", "type": "int32", "default": 0 }, + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "Pad", + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "paddings" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Predict", + "inputs": [ + { "name": "hashes" }, + { "name": "keys" }, + { "name": "labels" }, + { "name": "weights" } + ], + "outputs": [ + { "name": "label" }, + { "name": "weight" } + ] + }, + { + "name": "Prelu", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "slope", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Quantize", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Range", + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ], + "inputs": [ + { "name": "start", "type": "T" }, + { "name": "limit", "type": "T" }, + { "name": "delta", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "ReduceMax", + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "ReduceMin", + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "ReduceWindow", + "attributes": [ + { "name": "reduce_function", "type": "ReduceWindowFunction", "default": "UNSUPPORTED" } + ] + }, + { + "name": "Relu", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Relu6", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "attributes": [ + { "name": "new_shape", "type": "int32[]", "default": 0 } + ], + "inputs": [ + { "name": "data", "type": "T" }, + { "name": "shape", "type": "T" } + ], + "outputs": [ + { "name": "reshaped", "type": "T" } + ] + }, + { + "name": "ResizeBilinear", + "attributes": [ + { "name": "align_corners", "default": false, "type": "boolean" }, + { "name": "new_height", "type": "int32", "default": 0 }, + { "name": "new_width", "type": "int32", "default": 0 }, + { "name": "half_pixel_centers", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "size" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "ResizeNearestNeighbor", + "attributes": [ + { "name": "align_corners", "type": "boolean", "default": false }, + { "name": 
"half_pixel_centers", "type": "boolean", "default": false } + ] + }, + { + "name": "ReverseSequence", + "attributes": [ + { "name": "seq_dim", "type": "int32", "default": 0 }, + { "name": "batch_dim", "type": "int32", "default": 0 } + ] + }, + { + "name": "RNN", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "X", "type": "T" }, + { "name": "W", "type": "T" }, + { "name": "R", "type": "T" }, + { "name": "b", "type": "T" } + ], + "outputs": [ + { "name": "hidden", "type": "T" }, + { "name": "output", "type": "T" } + ] + }, + { + "name": "Rsqrt", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Shape", + "attributes": [ + { "name": "out_type", "type": "TensorType", "default": "FLOAT32" } + ] + }, + { + "name": "Sin", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "SkipGram", + "inputs": [ + { "name": "inputs" } + ], + "outputs": [ + { "name": "ngrams" } + ], + "attributes": [ + { "name": "ngram_size", "type": "int32", "default": 0 }, + { "name": "max_skip_size", "type": "int32", "default": 0 }, + { "name": "include_all_ngrams", "type": "boolean", "default": false } + ] + }, + { + "name": "Slice", + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "begin" }, + { "name": "size" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Softmax", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "beta", "type": "float32", "default": 0 } + ] + }, + { + "name": "SpaceToDepth", + "attributes": [ + { "name": "block_size", "type": "int32", "default": 0 } + ] + }, + { + "name": "SparseToDense", + "attributes": [ + { "name": "validate_indices", "type": "boolean", "default": false } + ] + }, + { + "name": "Split", + "category": "Tensor", + "inputs": [ + { "name": "axis" }, + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "num_splits", "type": "int32", "default": 0 } + ] + }, + { + "name": "SplitV", + "attributes": [ + { "name": "num_splits", "type": "int32", "default": 0 } + ] + }, + { + "name": "Squeeze", + "category": "Transform", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "squeeze_dims", "type": "int32[]", "default": 0 } + ] + }, + { + "name": "StablehloBroadcastInDim", + "attributes": [ + { "name": "broadcast_dimensions", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "StablehloCompare", + "attributes": [ + { "name": "comparison_direction", "type": "StablehloComparisonDirection", "default": "STABLEHLO_COMPARISON_DIRECTION_EQ" }, + { "name": "compare_type", "type": "StablehloComparisonType", "default": "STABLEHLO_COMPARISON_TYPE_NOTYPE" } + ] + }, + { + "name": "StablehloConcatenate", + "attributes": [ + { "name": "dimension", "type": "int64", "default": 0 } + ] + }, + { + "name": "StablehloConvolution", + "attributes": [ + { "name": "window_strides", "type": "int64[]", "default": 0 }, + { "name": "padding", "type": "int64[]", "default": 0 }, + { "name": "lhs_dilation", "type": "int64[]", "default": 0 }, + { "name": "rhs_dilation", "type": "int64[]", "default": 0 }, + { "name": 
"window_reversal", "type": "boolean", "default": false }, + { "name": "input_batch_dimension", "type": "int64", "default": 0 }, + { "name": "input_feature_dimension", "type": "int64", "default": 0 }, + { "name": "input_spatial_dimensions", "type": "int64[]", "default": 0 }, + { "name": "kernel_input_feature_dimension", "type": "int64", "default": 0 }, + { "name": "kernel_output_feature_dimension", "type": "int64", "default": 0 }, + { "name": "kernel_spatial_dimensions", "type": "int64[]", "default": 0 }, + { "name": "output_batch_dimension", "type": "int64", "default": 0 }, + { "name": "output_feature_dimension", "type": "int64", "default": 0 }, + { "name": "output_spatial_dimensions", "type": "int64[]", "default": 0 }, + { "name": "feature_group_count", "type": "int64", "default": 0 }, + { "name": "batch_group_count", "type": "int64", "default": 0 }, + { "name": "precision_config", "type": "StablehloPrecisionConfig[]", "default": "DEFAULT" } + ] + }, + { + "name": "StablehloCustomCall", + "attributes": [ + { "name": "call_target_name", "type": "string", "default": null }, + { "name": "has_side_effect", "type": "boolean", "default": false }, + { "name": "backend_config", "type": "string", "default": null }, + { "name": "api_version", "type": "int32", "default": 0 }, + { "name": "called_computations", "type": "int32[]", "default": 0 }, + { "name": "custom_attributes", "type": "uint8[]", "default": 0 } + ] + }, + { + "name": "StablehloDotGeneral", + "attributes": [ + { "name": "lhs_batching_dimensions", "type": "int64[]", "default": 0 }, + { "name": "rhs_batching_dimensions", "type": "int64[]", "default": 0 }, + { "name": "lhs_contracting_dimensions", "type": "int64[]", "default": 0 }, + { "name": "rhs_contracting_dimensions", "type": "int64[]", "default": 0 }, + { "name": "precision_config", "type": "StablehloPrecisionConfig[]", "default": "DEFAULT" } + ] + }, + { + "name": "StablehloDynamicSlice", + "attributes": [ + { "name": "slice_sizes", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "StablehloGather", + "attributes": [ + { "name": "offset_dims", "type": "int64[]", "default": 0 }, + { "name": "collapsed_slice_dims", "type": "int64[]", "default": 0 }, + { "name": "start_index_map", "type": "int64[]", "default": 0 }, + { "name": "index_vector_dim", "type": "int64", "default": 0 }, + { "name": "slice_sizes", "type": "int64[]", "default": 0 }, + { "name": "indices_are_sorted", "type": "boolean", "default": false } + ] + }, + { + "name": "StablehloIota", + "attributes": [ + { "name": "iota_dimension", "type": "int64", "default": 0 } + ] + }, + { + "name": "StablehloPad", + "attributes": [ + { "name": "edge_padding_low", "type": "int64[]", "default": 0 }, + { "name": "edge_padding_high", "type": "int64[]", "default": 0 }, + { "name": "interior_padding", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "StablehloReduce", + "attributes": [ + { "name": "dimensions", "type": "int64[]", "default": 0 }, + { "name": "body_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "StablehloReduceWindow", + "attributes": [ + { "name": "window_dimensions", "type": "int64[]", "default": 0 }, + { "name": "window_strides", "type": "int64[]", "default": 0 }, + { "name": "base_dilations", "type": "int64[]", "default": 0 }, + { "name": "window_dilations", "type": "int64[]", "default": 0 }, + { "name": "padding", "type": "int64[]", "default": 0 }, + { "name": "body_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "StablehloRngBitGenerator", + 
"attributes": [ + { "name": "algorithm", "type": "RngAlgorithm", "default": "DEFAULT" } + ] + }, + { + "name": "StablehloScatter", + "attributes": [ + { "name": "indices_are_sorted", "type": "boolean", "default": false }, + { "name": "update_window_dims", "type": "int64[]", "default": 0 }, + { "name": "inserted_window_dims", "type": "int64[]", "default": 0 }, + { "name": "scatter_dims_to_operand_dims", "type": "int64[]", "default": 0 }, + { "name": "index_vector_dim", "type": "int64", "default": 0 }, + { "name": "unique_indices", "type": "boolean", "default": false }, + { "name": "update_computation_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "StablehloSlice", + "attributes": [ + { "name": "start_indices", "type": "int64[]", "default": 0 }, + { "name": "limit_indices", "type": "int64[]", "default": 0 }, + { "name": "strides", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "StablehloSort", + "attributes": [ + { "name": "dimension", "type": "int64", "default": 0 }, + { "name": "is_stable", "type": "boolean", "default": false }, + { "name": "comparator_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "StablehloTranspose", + "attributes": [ + { "name": "permutation", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "StablehloWhile", + "attributes": [ + { "name": "cond_subgraph_index", "type": "int32", "default": 0 }, + { "name": "body_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "StridedSlice", + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "begin" }, + { "name": "end" }, + { "name": "strides" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "begin_mask", "type": "int32", "default": 0 }, + { "name": "end_mask", "type": "int32", "default": 0 }, + { "name": "ellipsis_mask", "type": "int32", "default": 0 }, + { "name": "new_axis_mask", "type": "int32", "default": 0 }, + { "name": "shrink_axis_mask", "type": "int32", "default": 0 }, + { "name": "offset", "type": "boolean", "default": false } + ] + }, + { + "name": "Sub", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "pot_scale_int16", "type": "boolean", "default": true } + ], + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ] + }, + { + "name": "Sum", + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "SVDF", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "rank", "type": "int32", "default": 0 }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "feature", "type": "T" }, + { "name": "time", "type": "T" }, + { "name": "bias", "type": "T" }, + { "name": "state", "type": "T" } + ], + "outputs": [ + { "name": "state_out", "type": "T" }, + { "name": "output", "type": "T" } + ] + }, + { + "name": "Tanh", + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + }, + { + "name": "Tile", + "inputs": [ + { "name": "input" }, + { "name": "multipliers" } + ], + "outputs": [ + { "name": "output" } + ] 
+ }, + { + "name": "Transpose", + "category": "Transform", + "inputs": [ + { "name": "input" }, + { "name": "perm" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "TransposeConv", + "category": "Layer", + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "stride_w", "type": "int32", "default": 0 }, + { "name": "stride_h", "type": "int32", "default": 0 }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "quantized_bias_type", "type": "TensorType", "default": "FLOAT32" } + ], + "inputs": [ + { "name": "output_shape" }, + { "name": "weights" }, + { "name": "input" }, + { "name": "bias", "optional": true } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "UnidirectionalSequenceLSTM", + "category": "Layer", + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "cell_clip", "type": "float32", "default": 0 }, + { "name": "proj_clip", "type": "float32", "default": 0 }, + { "name": "time_major", "type": "boolean", "default": false }, + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false }, + { "name": "diagonal_recurrent_tensors", "type": "boolean", "default": false } + ], + "inputs": [ + { "name": "input", "type": "T", "description": "Input tensor." }, + { "name": "input_input_weights", "type": "T", "option": "optional", "description": "Input to input weights tensor.", "visible": false }, + { "name": "input_forget_weights", "type": "T", "description": "Input to forget weights tensor.", "visible": false }, + { "name": "input_cell_weights", "type": "T", "description": "Input to cell weights tensor.", "visible": false }, + { "name": "input_output_weights", "type": "T", "description": "Input to output weights tensor.", "visible": false }, + { "name": "recurrent_input_weights", "type": "T", "option": "optional", "description": "Recurrent to input weights tensor.", "visible": false }, + { "name": "recurrent_forget_weights", "type": "T", "description": "Recurrent to forget weights tensor.", "visible": false }, + { "name": "recurrent_cell_weights", "type": "T", "description": "Recurrent to cell weights tensor.", "visible": false }, + { "name": "recurrent_output_weights", "type": "T", "description": "Recurrent to output weights tensor.", "visible": false }, + { "name": "cell_input_weights", "type": "T", "option": "optional", "description": "Cell to input weights tensor.", "visible": false }, + { "name": "cell_forget_weights", "type": "T", "option": "optional", "description": "Cell to forget weights tensor.", "visible": false }, + { "name": "cell_output_weights", "type": "T", "option": "optional", "description": "Cell to output weights tensor.", "visible": false }, + { "name": "input_gate_bias", "type": "T", "option": "optional", "description": "Input gate bias tensor.", "visible": false }, + { "name": "forget_gate_bias", "type": "T", "description": "Forget gate bias tensor.", "visible": false }, + { "name": "cell_gate_bias", "type": "T", "description": "Cell gate bias tensor.", "visible": false }, + { "name": "output_gate_bias", "type": "T", "description": "Output gate bias tensor.", "visible": false }, + { "name": "projection_weights", "type": "T", "option": "optional", "description": "Projection weights tensor.", "visible": false }, + { "name": "projection_bias", "type": "T", "option": "optional", "description": "Projection bias tensor.", "visible": false }, + { "name": 
"output_state_in", "type": "T", "option": "optional", "visible": false }, + { "name": "cell_state_in", "type": "T", "option": "optional", "visible": false }, + { "name": "activation", "type": "T", "option": "optional", "visible": false }, + { "name": "cell_clip", "type": "T", "option": "optional", "visible": false }, + { "name": "proj_clip", "type": "T", "option": "optional", "visible": false }, + { "name": "time_major", "type": "T", "option": "optional", "visible": false }, + { "name": "input_layer_norm_weights", "type": "T", "option": "optional", "visible": false }, + { "name": "forget_layer_norm_weights", "type": "T", "option": "optional", "visible": false }, + { "name": "cell_layer_norm_weights", "type": "T", "option": "optional", "visible": false }, + { "name": "output_layer_norm_weights", "type": "T", "option": "optional", "visible": false } + ] + }, + { + "name": "Unique", + "attributes": [ + { "name": "idx_out_type", "type": "TensorType", "default": "INT32" } + ] + }, + { + "name": "Unpack", + "attributes": [ + { "name": "num", "type": "int32", "default": 0 }, + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "UnsortedSegmentProd", + "attributes": [ + { "name": "num_segments", "type": "int32", "default": 0 } + ] + }, + { + "name": "VarHandle", + "attributes": [ + { "name": "container", "type": "string", "default": null }, + { "name": "shared_name", "type": "string", "default": null } + ] + }, + { + "name": "While", + "attributes": [ + { "name": "cond_subgraph_index", "type": "int32", "default": 0 }, + { "name": "body_subgraph_index", "type": "int32", "default": 0 } + ] + } +] \ No newline at end of file diff --git a/tflite-schema.js b/tflite-schema.js new file mode 100644 index 00000000000..7d3cc1655ae --- /dev/null +++ b/tflite-schema.js @@ -0,0 +1,3883 @@ + +import * as flatbuffers from './flatbuffers.js'; + +const $root = flatbuffers.get('tflite'); + +$root.tflite = $root.tflite || {}; + +$root.tflite.TensorType = { + FLOAT32: 0, + FLOAT16: 1, + INT32: 2, + UINT8: 3, + INT64: 4, + STRING: 5, + BOOL: 6, + INT16: 7, + COMPLEX64: 8, + INT8: 9, + FLOAT64: 10, + COMPLEX128: 11, + UINT64: 12, + RESOURCE: 13, + VARIANT: 14, + UINT32: 15, + UINT16: 16, + INT4: 17 +}; + +$root.tflite.CustomQuantization = class CustomQuantization { + + static decode(reader, position) { + const $ = new $root.tflite.CustomQuantization(); + $.custom = reader.typedArray(position, 4, Uint8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.CustomQuantization(); + $.custom = reader.typedArray(json.custom, Uint8Array); + return $; + } +}; + +$root.tflite.QuantizationDetails = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.tflite.CustomQuantization.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'CustomQuantization': return $root.tflite.CustomQuantization.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.tflite.QuantizationParameters = class QuantizationParameters { + + static decode(reader, position) { + const $ = new $root.tflite.QuantizationParameters(); + $.min = reader.typedArray(position, 4, Float32Array); + $.max = reader.typedArray(position, 6, Float32Array); + $.scale = reader.typedArray(position, 8, Float32Array); + $.zero_point = reader.int64s_(position, 10); + $.details = reader.union(position, 12, $root.tflite.QuantizationDetails.decode); + $.quantized_dimension = 
reader.int32_(position, 16, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.QuantizationParameters(); + $.min = reader.typedArray(json.min, Float32Array); + $.max = reader.typedArray(json.max, Float32Array); + $.scale = reader.typedArray(json.scale, Float32Array); + $.zero_point = reader.array(json.zero_point); + $.details = $root.tflite.QuantizationDetails.decodeText(reader, json.details, json.details_type); + $.quantized_dimension = reader.value(json.quantized_dimension, 0); + return $; + } +}; + +$root.tflite.DimensionType = { + DENSE: 0, + SPARSE_CSR: 1 +}; + +$root.tflite.Int32Vector = class Int32Vector { + + static decode(reader, position) { + const $ = new $root.tflite.Int32Vector(); + $.values = reader.typedArray(position, 4, Int32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Int32Vector(); + $.values = reader.typedArray(json.values, Int32Array); + return $; + } +}; + +$root.tflite.Uint16Vector = class Uint16Vector { + + static decode(reader, position) { + const $ = new $root.tflite.Uint16Vector(); + $.values = reader.typedArray(position, 4, Uint16Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Uint16Vector(); + $.values = reader.typedArray(json.values, Uint16Array); + return $; + } +}; + +$root.tflite.Uint8Vector = class Uint8Vector { + + static decode(reader, position) { + const $ = new $root.tflite.Uint8Vector(); + $.values = reader.typedArray(position, 4, Uint8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Uint8Vector(); + $.values = reader.typedArray(json.values, Uint8Array); + return $; + } +}; + +$root.tflite.SparseIndexVector = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.tflite.Int32Vector.decode(reader, position); + case 2: return $root.tflite.Uint16Vector.decode(reader, position); + case 3: return $root.tflite.Uint8Vector.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'Int32Vector': return $root.tflite.Int32Vector.decodeText(reader, json); + case 'Uint16Vector': return $root.tflite.Uint16Vector.decodeText(reader, json); + case 'Uint8Vector': return $root.tflite.Uint8Vector.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.tflite.DimensionMetadata = class DimensionMetadata { + + static decode(reader, position) { + const $ = new $root.tflite.DimensionMetadata(); + $.format = reader.int8_(position, 4, 0); + $.dense_size = reader.int32_(position, 6, 0); + $.array_segments = reader.union(position, 8, $root.tflite.SparseIndexVector.decode); + $.array_indices = reader.union(position, 12, $root.tflite.SparseIndexVector.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.DimensionMetadata(); + $.format = $root.tflite.DimensionType[json.format]; + $.dense_size = reader.value(json.dense_size, 0); + $.array_segments = $root.tflite.SparseIndexVector.decodeText(reader, json.array_segments, json.array_segments_type); + $.array_indices = $root.tflite.SparseIndexVector.decodeText(reader, json.array_indices, json.array_indices_type); + return $; + } +}; + +$root.tflite.SparsityParameters = class SparsityParameters { + + static decode(reader, position) { + const $ = new $root.tflite.SparsityParameters(); + $.traversal_order = reader.typedArray(position, 4, Int32Array); + $.block_map = reader.typedArray(position, 6, 
Int32Array); + $.dim_metadata = reader.tableArray(position, 8, $root.tflite.DimensionMetadata.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SparsityParameters(); + $.traversal_order = reader.typedArray(json.traversal_order, Int32Array); + $.block_map = reader.typedArray(json.block_map, Int32Array); + $.dim_metadata = reader.objectArray(json.dim_metadata, $root.tflite.DimensionMetadata.decodeText); + return $; + } +}; + +$root.tflite.VariantSubType = class VariantSubType { + + static decode(reader, position) { + const $ = new $root.tflite.VariantSubType(); + $.shape = reader.typedArray(position, 4, Int32Array); + $.type = reader.int8_(position, 6, 0); + $.has_rank = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.VariantSubType(); + $.shape = reader.typedArray(json.shape, Int32Array); + $.type = $root.tflite.TensorType[json.type]; + $.has_rank = reader.value(json.has_rank, false); + return $; + } +}; + +$root.tflite.Tensor = class Tensor { + + static decode(reader, position) { + const $ = new $root.tflite.Tensor(); + $.shape = reader.typedArray(position, 4, Int32Array); + $.type = reader.int8_(position, 6, 0); + $.buffer = reader.uint32_(position, 8, 0); + $.name = reader.string_(position, 10, null); + $.quantization = reader.table(position, 12, $root.tflite.QuantizationParameters.decode); + $.is_variable = reader.bool_(position, 14, false); + $.sparsity = reader.table(position, 16, $root.tflite.SparsityParameters.decode); + $.shape_signature = reader.typedArray(position, 18, Int32Array); + $.has_rank = reader.bool_(position, 20, false); + $.variant_tensors = reader.tableArray(position, 22, $root.tflite.VariantSubType.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Tensor(); + $.shape = reader.typedArray(json.shape, Int32Array); + $.type = $root.tflite.TensorType[json.type]; + $.buffer = reader.value(json.buffer, 0); + $.name = reader.value(json.name, null); + $.quantization = reader.object(json.quantization, $root.tflite.QuantizationParameters.decodeText); + $.is_variable = reader.value(json.is_variable, false); + $.sparsity = reader.object(json.sparsity, $root.tflite.SparsityParameters.decodeText); + $.shape_signature = reader.typedArray(json.shape_signature, Int32Array); + $.has_rank = reader.value(json.has_rank, false); + $.variant_tensors = reader.objectArray(json.variant_tensors, $root.tflite.VariantSubType.decodeText); + return $; + } +}; + +$root.tflite.BuiltinOperator = { + ADD: 0, + AVERAGE_POOL_2D: 1, + CONCATENATION: 2, + CONV_2D: 3, + DEPTHWISE_CONV_2D: 4, + DEPTH_TO_SPACE: 5, + DEQUANTIZE: 6, + EMBEDDING_LOOKUP: 7, + FLOOR: 8, + FULLY_CONNECTED: 9, + HASHTABLE_LOOKUP: 10, + L2_NORMALIZATION: 11, + L2_POOL_2D: 12, + LOCAL_RESPONSE_NORMALIZATION: 13, + LOGISTIC: 14, + LSH_PROJECTION: 15, + LSTM: 16, + MAX_POOL_2D: 17, + MUL: 18, + RELU: 19, + RELU_N1_TO_1: 20, + RELU6: 21, + RESHAPE: 22, + RESIZE_BILINEAR: 23, + RNN: 24, + SOFTMAX: 25, + SPACE_TO_DEPTH: 26, + SVDF: 27, + TANH: 28, + CONCAT_EMBEDDINGS: 29, + SKIP_GRAM: 30, + CALL: 31, + CUSTOM: 32, + EMBEDDING_LOOKUP_SPARSE: 33, + PAD: 34, + UNIDIRECTIONAL_SEQUENCE_RNN: 35, + GATHER: 36, + BATCH_TO_SPACE_ND: 37, + SPACE_TO_BATCH_ND: 38, + TRANSPOSE: 39, + MEAN: 40, + SUB: 41, + DIV: 42, + SQUEEZE: 43, + UNIDIRECTIONAL_SEQUENCE_LSTM: 44, + STRIDED_SLICE: 45, + BIDIRECTIONAL_SEQUENCE_RNN: 46, + EXP: 47, + TOPK_V2: 48, + SPLIT: 49, + LOG_SOFTMAX: 50, + DELEGATE: 51, + 
BIDIRECTIONAL_SEQUENCE_LSTM: 52, + CAST: 53, + PRELU: 54, + MAXIMUM: 55, + ARG_MAX: 56, + MINIMUM: 57, + LESS: 58, + NEG: 59, + PADV2: 60, + GREATER: 61, + GREATER_EQUAL: 62, + LESS_EQUAL: 63, + SELECT: 64, + SLICE: 65, + SIN: 66, + TRANSPOSE_CONV: 67, + SPARSE_TO_DENSE: 68, + TILE: 69, + EXPAND_DIMS: 70, + EQUAL: 71, + NOT_EQUAL: 72, + LOG: 73, + SUM: 74, + SQRT: 75, + RSQRT: 76, + SHAPE: 77, + POW: 78, + ARG_MIN: 79, + FAKE_QUANT: 80, + REDUCE_PROD: 81, + REDUCE_MAX: 82, + PACK: 83, + LOGICAL_OR: 84, + ONE_HOT: 85, + LOGICAL_AND: 86, + LOGICAL_NOT: 87, + UNPACK: 88, + REDUCE_MIN: 89, + FLOOR_DIV: 90, + REDUCE_ANY: 91, + SQUARE: 92, + ZEROS_LIKE: 93, + FILL: 94, + FLOOR_MOD: 95, + RANGE: 96, + RESIZE_NEAREST_NEIGHBOR: 97, + LEAKY_RELU: 98, + SQUARED_DIFFERENCE: 99, + MIRROR_PAD: 100, + ABS: 101, + SPLIT_V: 102, + UNIQUE: 103, + CEIL: 104, + REVERSE_V2: 105, + ADD_N: 106, + GATHER_ND: 107, + COS: 108, + WHERE: 109, + RANK: 110, + ELU: 111, + REVERSE_SEQUENCE: 112, + MATRIX_DIAG: 113, + QUANTIZE: 114, + MATRIX_SET_DIAG: 115, + ROUND: 116, + HARD_SWISH: 117, + IF: 118, + WHILE: 119, + NON_MAX_SUPPRESSION_V4: 120, + NON_MAX_SUPPRESSION_V5: 121, + SCATTER_ND: 122, + SELECT_V2: 123, + DENSIFY: 124, + SEGMENT_SUM: 125, + BATCH_MATMUL: 126, + PLACEHOLDER_FOR_GREATER_OP_CODES: 127, + CUMSUM: 128, + CALL_ONCE: 129, + BROADCAST_TO: 130, + RFFT2D: 131, + CONV_3D: 132, + IMAG: 133, + REAL: 134, + COMPLEX_ABS: 135, + HASHTABLE: 136, + HASHTABLE_FIND: 137, + HASHTABLE_IMPORT: 138, + HASHTABLE_SIZE: 139, + REDUCE_ALL: 140, + CONV_3D_TRANSPOSE: 141, + VAR_HANDLE: 142, + READ_VARIABLE: 143, + ASSIGN_VARIABLE: 144, + BROADCAST_ARGS: 145, + RANDOM_STANDARD_NORMAL: 146, + BUCKETIZE: 147, + RANDOM_UNIFORM: 148, + MULTINOMIAL: 149, + GELU: 150, + DYNAMIC_UPDATE_SLICE: 151, + RELU_0_TO_1: 152, + UNSORTED_SEGMENT_PROD: 153, + UNSORTED_SEGMENT_MAX: 154, + UNSORTED_SEGMENT_SUM: 155, + ATAN2: 156, + UNSORTED_SEGMENT_MIN: 157, + SIGN: 158, + BITCAST: 159, + BITWISE_XOR: 160, + RIGHT_SHIFT: 161, + STABLEHLO_LOGISTIC: 162, + STABLEHLO_ADD: 163, + STABLEHLO_DIVIDE: 164, + STABLEHLO_MULTIPLY: 165, + STABLEHLO_MAXIMUM: 166, + STABLEHLO_RESHAPE: 167, + STABLEHLO_CLAMP: 168, + STABLEHLO_CONCATENATE: 169, + STABLEHLO_BROADCAST_IN_DIM: 170, + STABLEHLO_CONVOLUTION: 171, + STABLEHLO_SLICE: 172, + STABLEHLO_CUSTOM_CALL: 173, + STABLEHLO_REDUCE: 174, + STABLEHLO_ABS: 175, + STABLEHLO_AND: 176, + STABLEHLO_COSINE: 177, + STABLEHLO_EXPONENTIAL: 178, + STABLEHLO_FLOOR: 179, + STABLEHLO_LOG: 180, + STABLEHLO_MINIMUM: 181, + STABLEHLO_NEGATE: 182, + STABLEHLO_OR: 183, + STABLEHLO_POWER: 184, + STABLEHLO_REMAINDER: 185, + STABLEHLO_RSQRT: 186, + STABLEHLO_SELECT: 187, + STABLEHLO_SUBTRACT: 188, + STABLEHLO_TANH: 189, + STABLEHLO_SCATTER: 190, + STABLEHLO_COMPARE: 191, + STABLEHLO_CONVERT: 192, + STABLEHLO_DYNAMIC_SLICE: 193, + STABLEHLO_DYNAMIC_UPDATE_SLICE: 194, + STABLEHLO_PAD: 195, + STABLEHLO_IOTA: 196, + STABLEHLO_DOT_GENERAL: 197, + STABLEHLO_REDUCE_WINDOW: 198, + STABLEHLO_SORT: 199, + STABLEHLO_WHILE: 200, + STABLEHLO_GATHER: 201, + STABLEHLO_TRANSPOSE: 202, + DILATE: 203, + STABLEHLO_RNG_BIT_GENERATOR: 204, + REDUCE_WINDOW: 205 +}; + +$root.tflite.BuiltinOptions = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.tflite.Conv2DOptions.decode(reader, position); + case 2: return $root.tflite.DepthwiseConv2DOptions.decode(reader, position); + case 3: return $root.tflite.ConcatEmbeddingsOptions.decode(reader, position); + case 4: return $root.tflite.LSHProjectionOptions.decode(reader, 
position); + case 5: return $root.tflite.Pool2DOptions.decode(reader, position); + case 6: return $root.tflite.SVDFOptions.decode(reader, position); + case 7: return $root.tflite.RNNOptions.decode(reader, position); + case 8: return $root.tflite.FullyConnectedOptions.decode(reader, position); + case 9: return $root.tflite.SoftmaxOptions.decode(reader, position); + case 10: return $root.tflite.ConcatenationOptions.decode(reader, position); + case 11: return $root.tflite.AddOptions.decode(reader, position); + case 12: return $root.tflite.L2NormOptions.decode(reader, position); + case 13: return $root.tflite.LocalResponseNormalizationOptions.decode(reader, position); + case 14: return $root.tflite.LSTMOptions.decode(reader, position); + case 15: return $root.tflite.ResizeBilinearOptions.decode(reader, position); + case 16: return $root.tflite.CallOptions.decode(reader, position); + case 17: return $root.tflite.ReshapeOptions.decode(reader, position); + case 18: return $root.tflite.SkipGramOptions.decode(reader, position); + case 19: return $root.tflite.SpaceToDepthOptions.decode(reader, position); + case 20: return $root.tflite.EmbeddingLookupSparseOptions.decode(reader, position); + case 21: return $root.tflite.MulOptions.decode(reader, position); + case 22: return $root.tflite.PadOptions.decode(reader, position); + case 23: return $root.tflite.GatherOptions.decode(reader, position); + case 24: return $root.tflite.BatchToSpaceNDOptions.decode(reader, position); + case 25: return $root.tflite.SpaceToBatchNDOptions.decode(reader, position); + case 26: return $root.tflite.TransposeOptions.decode(reader, position); + case 27: return $root.tflite.ReducerOptions.decode(reader, position); + case 28: return $root.tflite.SubOptions.decode(reader, position); + case 29: return $root.tflite.DivOptions.decode(reader, position); + case 30: return $root.tflite.SqueezeOptions.decode(reader, position); + case 31: return $root.tflite.SequenceRNNOptions.decode(reader, position); + case 32: return $root.tflite.StridedSliceOptions.decode(reader, position); + case 33: return $root.tflite.ExpOptions.decode(reader, position); + case 34: return $root.tflite.TopKV2Options.decode(reader, position); + case 35: return $root.tflite.SplitOptions.decode(reader, position); + case 36: return $root.tflite.LogSoftmaxOptions.decode(reader, position); + case 37: return $root.tflite.CastOptions.decode(reader, position); + case 38: return $root.tflite.DequantizeOptions.decode(reader, position); + case 39: return $root.tflite.MaximumMinimumOptions.decode(reader, position); + case 40: return $root.tflite.ArgMaxOptions.decode(reader, position); + case 41: return $root.tflite.LessOptions.decode(reader, position); + case 42: return $root.tflite.NegOptions.decode(reader, position); + case 43: return $root.tflite.PadV2Options.decode(reader, position); + case 44: return $root.tflite.GreaterOptions.decode(reader, position); + case 45: return $root.tflite.GreaterEqualOptions.decode(reader, position); + case 46: return $root.tflite.LessEqualOptions.decode(reader, position); + case 47: return $root.tflite.SelectOptions.decode(reader, position); + case 48: return $root.tflite.SliceOptions.decode(reader, position); + case 49: return $root.tflite.TransposeConvOptions.decode(reader, position); + case 50: return $root.tflite.SparseToDenseOptions.decode(reader, position); + case 51: return $root.tflite.TileOptions.decode(reader, position); + case 52: return $root.tflite.ExpandDimsOptions.decode(reader, position); + case 53: return 
$root.tflite.EqualOptions.decode(reader, position); + case 54: return $root.tflite.NotEqualOptions.decode(reader, position); + case 55: return $root.tflite.ShapeOptions.decode(reader, position); + case 56: return $root.tflite.PowOptions.decode(reader, position); + case 57: return $root.tflite.ArgMinOptions.decode(reader, position); + case 58: return $root.tflite.FakeQuantOptions.decode(reader, position); + case 59: return $root.tflite.PackOptions.decode(reader, position); + case 60: return $root.tflite.LogicalOrOptions.decode(reader, position); + case 61: return $root.tflite.OneHotOptions.decode(reader, position); + case 62: return $root.tflite.LogicalAndOptions.decode(reader, position); + case 63: return $root.tflite.LogicalNotOptions.decode(reader, position); + case 64: return $root.tflite.UnpackOptions.decode(reader, position); + case 65: return $root.tflite.FloorDivOptions.decode(reader, position); + case 66: return $root.tflite.SquareOptions.decode(reader, position); + case 67: return $root.tflite.ZerosLikeOptions.decode(reader, position); + case 68: return $root.tflite.FillOptions.decode(reader, position); + case 69: return $root.tflite.BidirectionalSequenceLSTMOptions.decode(reader, position); + case 70: return $root.tflite.BidirectionalSequenceRNNOptions.decode(reader, position); + case 71: return $root.tflite.UnidirectionalSequenceLSTMOptions.decode(reader, position); + case 72: return $root.tflite.FloorModOptions.decode(reader, position); + case 73: return $root.tflite.RangeOptions.decode(reader, position); + case 74: return $root.tflite.ResizeNearestNeighborOptions.decode(reader, position); + case 75: return $root.tflite.LeakyReluOptions.decode(reader, position); + case 76: return $root.tflite.SquaredDifferenceOptions.decode(reader, position); + case 77: return $root.tflite.MirrorPadOptions.decode(reader, position); + case 78: return $root.tflite.AbsOptions.decode(reader, position); + case 79: return $root.tflite.SplitVOptions.decode(reader, position); + case 80: return $root.tflite.UniqueOptions.decode(reader, position); + case 81: return $root.tflite.ReverseV2Options.decode(reader, position); + case 82: return $root.tflite.AddNOptions.decode(reader, position); + case 83: return $root.tflite.GatherNdOptions.decode(reader, position); + case 84: return $root.tflite.CosOptions.decode(reader, position); + case 85: return $root.tflite.WhereOptions.decode(reader, position); + case 86: return $root.tflite.RankOptions.decode(reader, position); + case 87: return $root.tflite.ReverseSequenceOptions.decode(reader, position); + case 88: return $root.tflite.MatrixDiagOptions.decode(reader, position); + case 89: return $root.tflite.QuantizeOptions.decode(reader, position); + case 90: return $root.tflite.MatrixSetDiagOptions.decode(reader, position); + case 91: return $root.tflite.HardSwishOptions.decode(reader, position); + case 92: return $root.tflite.IfOptions.decode(reader, position); + case 93: return $root.tflite.WhileOptions.decode(reader, position); + case 94: return $root.tflite.DepthToSpaceOptions.decode(reader, position); + case 95: return $root.tflite.NonMaxSuppressionV4Options.decode(reader, position); + case 96: return $root.tflite.NonMaxSuppressionV5Options.decode(reader, position); + case 97: return $root.tflite.ScatterNdOptions.decode(reader, position); + case 98: return $root.tflite.SelectV2Options.decode(reader, position); + case 99: return $root.tflite.DensifyOptions.decode(reader, position); + case 100: return $root.tflite.SegmentSumOptions.decode(reader, position); + 
case 101: return $root.tflite.BatchMatMulOptions.decode(reader, position); + case 102: return $root.tflite.CumsumOptions.decode(reader, position); + case 103: return $root.tflite.CallOnceOptions.decode(reader, position); + case 104: return $root.tflite.BroadcastToOptions.decode(reader, position); + case 105: return $root.tflite.Rfft2dOptions.decode(reader, position); + case 106: return $root.tflite.Conv3DOptions.decode(reader, position); + case 107: return $root.tflite.HashtableOptions.decode(reader, position); + case 108: return $root.tflite.HashtableFindOptions.decode(reader, position); + case 109: return $root.tflite.HashtableImportOptions.decode(reader, position); + case 110: return $root.tflite.HashtableSizeOptions.decode(reader, position); + case 111: return $root.tflite.VarHandleOptions.decode(reader, position); + case 112: return $root.tflite.ReadVariableOptions.decode(reader, position); + case 113: return $root.tflite.AssignVariableOptions.decode(reader, position); + case 114: return $root.tflite.RandomOptions.decode(reader, position); + case 115: return $root.tflite.BucketizeOptions.decode(reader, position); + case 116: return $root.tflite.GeluOptions.decode(reader, position); + case 117: return $root.tflite.DynamicUpdateSliceOptions.decode(reader, position); + case 118: return $root.tflite.UnsortedSegmentProdOptions.decode(reader, position); + case 119: return $root.tflite.UnsortedSegmentMaxOptions.decode(reader, position); + case 120: return $root.tflite.UnsortedSegmentMinOptions.decode(reader, position); + case 121: return $root.tflite.UnsortedSegmentSumOptions.decode(reader, position); + case 122: return $root.tflite.ATan2Options.decode(reader, position); + case 123: return $root.tflite.SignOptions.decode(reader, position); + case 124: return $root.tflite.BitcastOptions.decode(reader, position); + case 125: return $root.tflite.BitwiseXorOptions.decode(reader, position); + case 126: return $root.tflite.RightShiftOptions.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'Conv2DOptions': return $root.tflite.Conv2DOptions.decodeText(reader, json); + case 'DepthwiseConv2DOptions': return $root.tflite.DepthwiseConv2DOptions.decodeText(reader, json); + case 'ConcatEmbeddingsOptions': return $root.tflite.ConcatEmbeddingsOptions.decodeText(reader, json); + case 'LSHProjectionOptions': return $root.tflite.LSHProjectionOptions.decodeText(reader, json); + case 'Pool2DOptions': return $root.tflite.Pool2DOptions.decodeText(reader, json); + case 'SVDFOptions': return $root.tflite.SVDFOptions.decodeText(reader, json); + case 'RNNOptions': return $root.tflite.RNNOptions.decodeText(reader, json); + case 'FullyConnectedOptions': return $root.tflite.FullyConnectedOptions.decodeText(reader, json); + case 'SoftmaxOptions': return $root.tflite.SoftmaxOptions.decodeText(reader, json); + case 'ConcatenationOptions': return $root.tflite.ConcatenationOptions.decodeText(reader, json); + case 'AddOptions': return $root.tflite.AddOptions.decodeText(reader, json); + case 'L2NormOptions': return $root.tflite.L2NormOptions.decodeText(reader, json); + case 'LocalResponseNormalizationOptions': return $root.tflite.LocalResponseNormalizationOptions.decodeText(reader, json); + case 'LSTMOptions': return $root.tflite.LSTMOptions.decodeText(reader, json); + case 'ResizeBilinearOptions': return $root.tflite.ResizeBilinearOptions.decodeText(reader, json); + case 'CallOptions': return $root.tflite.CallOptions.decodeText(reader, json); + 
case 'ReshapeOptions': return $root.tflite.ReshapeOptions.decodeText(reader, json); + case 'SkipGramOptions': return $root.tflite.SkipGramOptions.decodeText(reader, json); + case 'SpaceToDepthOptions': return $root.tflite.SpaceToDepthOptions.decodeText(reader, json); + case 'EmbeddingLookupSparseOptions': return $root.tflite.EmbeddingLookupSparseOptions.decodeText(reader, json); + case 'MulOptions': return $root.tflite.MulOptions.decodeText(reader, json); + case 'PadOptions': return $root.tflite.PadOptions.decodeText(reader, json); + case 'GatherOptions': return $root.tflite.GatherOptions.decodeText(reader, json); + case 'BatchToSpaceNDOptions': return $root.tflite.BatchToSpaceNDOptions.decodeText(reader, json); + case 'SpaceToBatchNDOptions': return $root.tflite.SpaceToBatchNDOptions.decodeText(reader, json); + case 'TransposeOptions': return $root.tflite.TransposeOptions.decodeText(reader, json); + case 'ReducerOptions': return $root.tflite.ReducerOptions.decodeText(reader, json); + case 'SubOptions': return $root.tflite.SubOptions.decodeText(reader, json); + case 'DivOptions': return $root.tflite.DivOptions.decodeText(reader, json); + case 'SqueezeOptions': return $root.tflite.SqueezeOptions.decodeText(reader, json); + case 'SequenceRNNOptions': return $root.tflite.SequenceRNNOptions.decodeText(reader, json); + case 'StridedSliceOptions': return $root.tflite.StridedSliceOptions.decodeText(reader, json); + case 'ExpOptions': return $root.tflite.ExpOptions.decodeText(reader, json); + case 'TopKV2Options': return $root.tflite.TopKV2Options.decodeText(reader, json); + case 'SplitOptions': return $root.tflite.SplitOptions.decodeText(reader, json); + case 'LogSoftmaxOptions': return $root.tflite.LogSoftmaxOptions.decodeText(reader, json); + case 'CastOptions': return $root.tflite.CastOptions.decodeText(reader, json); + case 'DequantizeOptions': return $root.tflite.DequantizeOptions.decodeText(reader, json); + case 'MaximumMinimumOptions': return $root.tflite.MaximumMinimumOptions.decodeText(reader, json); + case 'ArgMaxOptions': return $root.tflite.ArgMaxOptions.decodeText(reader, json); + case 'LessOptions': return $root.tflite.LessOptions.decodeText(reader, json); + case 'NegOptions': return $root.tflite.NegOptions.decodeText(reader, json); + case 'PadV2Options': return $root.tflite.PadV2Options.decodeText(reader, json); + case 'GreaterOptions': return $root.tflite.GreaterOptions.decodeText(reader, json); + case 'GreaterEqualOptions': return $root.tflite.GreaterEqualOptions.decodeText(reader, json); + case 'LessEqualOptions': return $root.tflite.LessEqualOptions.decodeText(reader, json); + case 'SelectOptions': return $root.tflite.SelectOptions.decodeText(reader, json); + case 'SliceOptions': return $root.tflite.SliceOptions.decodeText(reader, json); + case 'TransposeConvOptions': return $root.tflite.TransposeConvOptions.decodeText(reader, json); + case 'SparseToDenseOptions': return $root.tflite.SparseToDenseOptions.decodeText(reader, json); + case 'TileOptions': return $root.tflite.TileOptions.decodeText(reader, json); + case 'ExpandDimsOptions': return $root.tflite.ExpandDimsOptions.decodeText(reader, json); + case 'EqualOptions': return $root.tflite.EqualOptions.decodeText(reader, json); + case 'NotEqualOptions': return $root.tflite.NotEqualOptions.decodeText(reader, json); + case 'ShapeOptions': return $root.tflite.ShapeOptions.decodeText(reader, json); + case 'PowOptions': return $root.tflite.PowOptions.decodeText(reader, json); + case 'ArgMinOptions': return 
$root.tflite.ArgMinOptions.decodeText(reader, json); + case 'FakeQuantOptions': return $root.tflite.FakeQuantOptions.decodeText(reader, json); + case 'PackOptions': return $root.tflite.PackOptions.decodeText(reader, json); + case 'LogicalOrOptions': return $root.tflite.LogicalOrOptions.decodeText(reader, json); + case 'OneHotOptions': return $root.tflite.OneHotOptions.decodeText(reader, json); + case 'LogicalAndOptions': return $root.tflite.LogicalAndOptions.decodeText(reader, json); + case 'LogicalNotOptions': return $root.tflite.LogicalNotOptions.decodeText(reader, json); + case 'UnpackOptions': return $root.tflite.UnpackOptions.decodeText(reader, json); + case 'FloorDivOptions': return $root.tflite.FloorDivOptions.decodeText(reader, json); + case 'SquareOptions': return $root.tflite.SquareOptions.decodeText(reader, json); + case 'ZerosLikeOptions': return $root.tflite.ZerosLikeOptions.decodeText(reader, json); + case 'FillOptions': return $root.tflite.FillOptions.decodeText(reader, json); + case 'BidirectionalSequenceLSTMOptions': return $root.tflite.BidirectionalSequenceLSTMOptions.decodeText(reader, json); + case 'BidirectionalSequenceRNNOptions': return $root.tflite.BidirectionalSequenceRNNOptions.decodeText(reader, json); + case 'UnidirectionalSequenceLSTMOptions': return $root.tflite.UnidirectionalSequenceLSTMOptions.decodeText(reader, json); + case 'FloorModOptions': return $root.tflite.FloorModOptions.decodeText(reader, json); + case 'RangeOptions': return $root.tflite.RangeOptions.decodeText(reader, json); + case 'ResizeNearestNeighborOptions': return $root.tflite.ResizeNearestNeighborOptions.decodeText(reader, json); + case 'LeakyReluOptions': return $root.tflite.LeakyReluOptions.decodeText(reader, json); + case 'SquaredDifferenceOptions': return $root.tflite.SquaredDifferenceOptions.decodeText(reader, json); + case 'MirrorPadOptions': return $root.tflite.MirrorPadOptions.decodeText(reader, json); + case 'AbsOptions': return $root.tflite.AbsOptions.decodeText(reader, json); + case 'SplitVOptions': return $root.tflite.SplitVOptions.decodeText(reader, json); + case 'UniqueOptions': return $root.tflite.UniqueOptions.decodeText(reader, json); + case 'ReverseV2Options': return $root.tflite.ReverseV2Options.decodeText(reader, json); + case 'AddNOptions': return $root.tflite.AddNOptions.decodeText(reader, json); + case 'GatherNdOptions': return $root.tflite.GatherNdOptions.decodeText(reader, json); + case 'CosOptions': return $root.tflite.CosOptions.decodeText(reader, json); + case 'WhereOptions': return $root.tflite.WhereOptions.decodeText(reader, json); + case 'RankOptions': return $root.tflite.RankOptions.decodeText(reader, json); + case 'ReverseSequenceOptions': return $root.tflite.ReverseSequenceOptions.decodeText(reader, json); + case 'MatrixDiagOptions': return $root.tflite.MatrixDiagOptions.decodeText(reader, json); + case 'QuantizeOptions': return $root.tflite.QuantizeOptions.decodeText(reader, json); + case 'MatrixSetDiagOptions': return $root.tflite.MatrixSetDiagOptions.decodeText(reader, json); + case 'HardSwishOptions': return $root.tflite.HardSwishOptions.decodeText(reader, json); + case 'IfOptions': return $root.tflite.IfOptions.decodeText(reader, json); + case 'WhileOptions': return $root.tflite.WhileOptions.decodeText(reader, json); + case 'DepthToSpaceOptions': return $root.tflite.DepthToSpaceOptions.decodeText(reader, json); + case 'NonMaxSuppressionV4Options': return $root.tflite.NonMaxSuppressionV4Options.decodeText(reader, json); + case 
'NonMaxSuppressionV5Options': return $root.tflite.NonMaxSuppressionV5Options.decodeText(reader, json); + case 'ScatterNdOptions': return $root.tflite.ScatterNdOptions.decodeText(reader, json); + case 'SelectV2Options': return $root.tflite.SelectV2Options.decodeText(reader, json); + case 'DensifyOptions': return $root.tflite.DensifyOptions.decodeText(reader, json); + case 'SegmentSumOptions': return $root.tflite.SegmentSumOptions.decodeText(reader, json); + case 'BatchMatMulOptions': return $root.tflite.BatchMatMulOptions.decodeText(reader, json); + case 'CumsumOptions': return $root.tflite.CumsumOptions.decodeText(reader, json); + case 'CallOnceOptions': return $root.tflite.CallOnceOptions.decodeText(reader, json); + case 'BroadcastToOptions': return $root.tflite.BroadcastToOptions.decodeText(reader, json); + case 'Rfft2dOptions': return $root.tflite.Rfft2dOptions.decodeText(reader, json); + case 'Conv3DOptions': return $root.tflite.Conv3DOptions.decodeText(reader, json); + case 'HashtableOptions': return $root.tflite.HashtableOptions.decodeText(reader, json); + case 'HashtableFindOptions': return $root.tflite.HashtableFindOptions.decodeText(reader, json); + case 'HashtableImportOptions': return $root.tflite.HashtableImportOptions.decodeText(reader, json); + case 'HashtableSizeOptions': return $root.tflite.HashtableSizeOptions.decodeText(reader, json); + case 'VarHandleOptions': return $root.tflite.VarHandleOptions.decodeText(reader, json); + case 'ReadVariableOptions': return $root.tflite.ReadVariableOptions.decodeText(reader, json); + case 'AssignVariableOptions': return $root.tflite.AssignVariableOptions.decodeText(reader, json); + case 'RandomOptions': return $root.tflite.RandomOptions.decodeText(reader, json); + case 'BucketizeOptions': return $root.tflite.BucketizeOptions.decodeText(reader, json); + case 'GeluOptions': return $root.tflite.GeluOptions.decodeText(reader, json); + case 'DynamicUpdateSliceOptions': return $root.tflite.DynamicUpdateSliceOptions.decodeText(reader, json); + case 'UnsortedSegmentProdOptions': return $root.tflite.UnsortedSegmentProdOptions.decodeText(reader, json); + case 'UnsortedSegmentMaxOptions': return $root.tflite.UnsortedSegmentMaxOptions.decodeText(reader, json); + case 'UnsortedSegmentMinOptions': return $root.tflite.UnsortedSegmentMinOptions.decodeText(reader, json); + case 'UnsortedSegmentSumOptions': return $root.tflite.UnsortedSegmentSumOptions.decodeText(reader, json); + case 'ATan2Options': return $root.tflite.ATan2Options.decodeText(reader, json); + case 'SignOptions': return $root.tflite.SignOptions.decodeText(reader, json); + case 'BitcastOptions': return $root.tflite.BitcastOptions.decodeText(reader, json); + case 'BitwiseXorOptions': return $root.tflite.BitwiseXorOptions.decodeText(reader, json); + case 'RightShiftOptions': return $root.tflite.RightShiftOptions.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.tflite.BuiltinOptions2 = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.tflite.StablehloConcatenateOptions.decode(reader, position); + case 2: return $root.tflite.StablehloBroadcastInDimOptions.decode(reader, position); + case 3: return $root.tflite.StablehloSliceOptions.decode(reader, position); + case 4: return $root.tflite.StablehloConvolutionOptions.decode(reader, position); + case 5: return $root.tflite.StablehloCustomCallOptions.decode(reader, position); + case 6: return $root.tflite.StablehloReduceOptions.decode(reader, position); + case 7: return 
$root.tflite.StablehloScatterOptions.decode(reader, position); + case 8: return $root.tflite.StablehloCompareOptions.decode(reader, position); + case 9: return $root.tflite.StablehloDynamicSliceOptions.decode(reader, position); + case 10: return $root.tflite.StablehloPadOptions.decode(reader, position); + case 11: return $root.tflite.StablehloIotaOptions.decode(reader, position); + case 12: return $root.tflite.StablehloDotGeneralOptions.decode(reader, position); + case 13: return $root.tflite.StablehloReduceWindowOptions.decode(reader, position); + case 14: return $root.tflite.StablehloSortOptions.decode(reader, position); + case 15: return $root.tflite.StablehloWhileOptions.decode(reader, position); + case 16: return $root.tflite.StablehloGatherOptions.decode(reader, position); + case 17: return $root.tflite.StablehloTransposeOptions.decode(reader, position); + case 18: return $root.tflite.DilateOptions.decode(reader, position); + case 19: return $root.tflite.StablehloRngBitGeneratorOptions.decode(reader, position); + case 20: return $root.tflite.ReduceWindowOptions.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'StablehloConcatenateOptions': return $root.tflite.StablehloConcatenateOptions.decodeText(reader, json); + case 'StablehloBroadcastInDimOptions': return $root.tflite.StablehloBroadcastInDimOptions.decodeText(reader, json); + case 'StablehloSliceOptions': return $root.tflite.StablehloSliceOptions.decodeText(reader, json); + case 'StablehloConvolutionOptions': return $root.tflite.StablehloConvolutionOptions.decodeText(reader, json); + case 'StablehloCustomCallOptions': return $root.tflite.StablehloCustomCallOptions.decodeText(reader, json); + case 'StablehloReduceOptions': return $root.tflite.StablehloReduceOptions.decodeText(reader, json); + case 'StablehloScatterOptions': return $root.tflite.StablehloScatterOptions.decodeText(reader, json); + case 'StablehloCompareOptions': return $root.tflite.StablehloCompareOptions.decodeText(reader, json); + case 'StablehloDynamicSliceOptions': return $root.tflite.StablehloDynamicSliceOptions.decodeText(reader, json); + case 'StablehloPadOptions': return $root.tflite.StablehloPadOptions.decodeText(reader, json); + case 'StablehloIotaOptions': return $root.tflite.StablehloIotaOptions.decodeText(reader, json); + case 'StablehloDotGeneralOptions': return $root.tflite.StablehloDotGeneralOptions.decodeText(reader, json); + case 'StablehloReduceWindowOptions': return $root.tflite.StablehloReduceWindowOptions.decodeText(reader, json); + case 'StablehloSortOptions': return $root.tflite.StablehloSortOptions.decodeText(reader, json); + case 'StablehloWhileOptions': return $root.tflite.StablehloWhileOptions.decodeText(reader, json); + case 'StablehloGatherOptions': return $root.tflite.StablehloGatherOptions.decodeText(reader, json); + case 'StablehloTransposeOptions': return $root.tflite.StablehloTransposeOptions.decodeText(reader, json); + case 'DilateOptions': return $root.tflite.DilateOptions.decodeText(reader, json); + case 'StablehloRngBitGeneratorOptions': return $root.tflite.StablehloRngBitGeneratorOptions.decodeText(reader, json); + case 'ReduceWindowOptions': return $root.tflite.ReduceWindowOptions.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.tflite.StablehloGatherOptions = class StablehloGatherOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloGatherOptions(); + $.offset_dims = 
reader.int64s_(position, 4);
+        $.collapsed_slice_dims = reader.int64s_(position, 6);
+        $.start_index_map = reader.int64s_(position, 8);
+        $.index_vector_dim = reader.int64_(position, 10, 0);
+        $.slice_sizes = reader.int64s_(position, 12);
+        $.indices_are_sorted = reader.bool_(position, 14, false);
+        return $;
+    }
+
+    static decodeText(reader, json) {
+        const $ = new $root.tflite.StablehloGatherOptions();
+        $.offset_dims = reader.array(json.offset_dims);
+        $.collapsed_slice_dims = reader.array(json.collapsed_slice_dims);
+        $.start_index_map = reader.array(json.start_index_map);
+        $.index_vector_dim = reader.value(json.index_vector_dim, 0);
+        $.slice_sizes = reader.array(json.slice_sizes);
+        $.indices_are_sorted = reader.value(json.indices_are_sorted, false);
+        return $;
+    }
+};
+
+$root.tflite.StablehloTransposeOptions = class StablehloTransposeOptions {
+
+    static decode(reader, position) {
+        const $ = new $root.tflite.StablehloTransposeOptions();
+        $.permutation = reader.int64s_(position, 4);
+        return $;
+    }
+
+    static decodeText(reader, json) {
+        const $ = new $root.tflite.StablehloTransposeOptions();
+        $.permutation = reader.array(json.permutation);
+        return $;
+    }
+};
+
+$root.tflite.StablehloPrecisionConfig = {
+    DEFAULT: 0,
+    HIGH: 1,
+    HIGHEST: 2
+};
+
+$root.tflite.StablehloDotGeneralOptions = class StablehloDotGeneralOptions {
+
+    static decode(reader, position) {
+        const $ = new $root.tflite.StablehloDotGeneralOptions();
+        $.lhs_batching_dimensions = reader.int64s_(position, 4);
+        $.rhs_batching_dimensions = reader.int64s_(position, 6);
+        $.lhs_contracting_dimensions = reader.int64s_(position, 8);
+        $.rhs_contracting_dimensions = reader.int64s_(position, 10);
+        $.precision_config = reader.typedArray(position, 12, Uint32Array);
+        return $;
+    }
+
+    static decodeText(reader, json) {
+        const $ = new $root.tflite.StablehloDotGeneralOptions();
+        $.lhs_batching_dimensions = reader.array(json.lhs_batching_dimensions);
+        $.rhs_batching_dimensions = reader.array(json.rhs_batching_dimensions);
+        $.lhs_contracting_dimensions = reader.array(json.lhs_contracting_dimensions);
+        $.rhs_contracting_dimensions = reader.array(json.rhs_contracting_dimensions);
+        // map enum names back to numeric values (the enum object defines no decodeText)
+        $.precision_config = (json.precision_config || []).map((name) => $root.tflite.StablehloPrecisionConfig[name]);
+        return $;
+    }
+};
+
+$root.tflite.StablehloReduceWindowOptions = class StablehloReduceWindowOptions {
+
+    static decode(reader, position) {
+        const $ = new $root.tflite.StablehloReduceWindowOptions();
+        $.window_dimensions = reader.int64s_(position, 4);
+        $.window_strides = reader.int64s_(position, 6);
+        $.base_dilations = reader.int64s_(position, 8);
+        $.window_dilations = reader.int64s_(position, 10);
+        $.padding = reader.int64s_(position, 12);
+        $.body_subgraph_index = reader.int32_(position, 14, 0);
+        return $;
+    }
+
+    static decodeText(reader, json) {
+        const $ = new $root.tflite.StablehloReduceWindowOptions();
+        $.window_dimensions = reader.array(json.window_dimensions);
+        $.window_strides = reader.array(json.window_strides);
+        $.base_dilations = reader.array(json.base_dilations);
+        $.window_dilations = reader.array(json.window_dilations);
+        $.padding = reader.array(json.padding);
+        $.body_subgraph_index = reader.value(json.body_subgraph_index, 0);
+        return $;
+    }
+};
+
+$root.tflite.StablehloWhileOptions = class StablehloWhileOptions {
+
+    static decode(reader, position) {
+        const $ = new $root.tflite.StablehloWhileOptions();
+        $.cond_subgraph_index = reader.int32_(position, 4, 0);
+        $.body_subgraph_index =
reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloWhileOptions(); + $.cond_subgraph_index = reader.value(json.cond_subgraph_index, 0); + $.body_subgraph_index = reader.value(json.body_subgraph_index, 0); + return $; + } +}; + +$root.tflite.StablehloSortOptions = class StablehloSortOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloSortOptions(); + $.dimension = reader.int64_(position, 4, 0); + $.is_stable = reader.bool_(position, 6, false); + $.comparator_subgraph_index = reader.int32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloSortOptions(); + $.dimension = reader.value(json.dimension, 0); + $.is_stable = reader.value(json.is_stable, false); + $.comparator_subgraph_index = reader.value(json.comparator_subgraph_index, 0); + return $; + } +}; + +$root.tflite.StablehloConcatenateOptions = class StablehloConcatenateOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloConcatenateOptions(); + $.dimension = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloConcatenateOptions(); + $.dimension = reader.value(json.dimension, 0); + return $; + } +}; + +$root.tflite.StablehloBroadcastInDimOptions = class StablehloBroadcastInDimOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloBroadcastInDimOptions(); + $.broadcast_dimensions = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloBroadcastInDimOptions(); + $.broadcast_dimensions = reader.array(json.broadcast_dimensions); + return $; + } +}; + +$root.tflite.StablehloComparisonDirection = { + STABLEHLO_COMPARISON_DIRECTION_EQ: 0, + STABLEHLO_COMPARISON_DIRECTION_NE: 1, + STABLEHLO_COMPARISON_DIRECTION_GE: 2, + STABLEHLO_COMPARISON_DIRECTION_GT: 3, + STABLEHLO_COMPARISON_DIRECTION_LE: 4, + STABLEHLO_COMPARISON_DIRECTION_LT: 5 +}; + +$root.tflite.StablehloComparisonType = { + STABLEHLO_COMPARISON_TYPE_NOTYPE: 0, + STABLEHLO_COMPARISON_TYPE_FLOAT: 1, + STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER: 2, + STABLEHLO_COMPARISON_TYPE_SIGNED: 3, + STABLEHLO_COMPARISON_TYPE_UNSIGNED: 4 +}; + +$root.tflite.StablehloCompareOptions = class StablehloCompareOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloCompareOptions(); + $.comparison_direction = reader.uint32_(position, 4, 0); + $.compare_type = reader.uint32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloCompareOptions(); + $.comparison_direction = $root.tflite.StablehloComparisonDirection[json.comparison_direction]; + $.compare_type = $root.tflite.StablehloComparisonType[json.compare_type]; + return $; + } +}; + +$root.tflite.StablehloDynamicSliceOptions = class StablehloDynamicSliceOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloDynamicSliceOptions(); + $.slice_sizes = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloDynamicSliceOptions(); + $.slice_sizes = reader.array(json.slice_sizes); + return $; + } +}; + +$root.tflite.StablehloPadOptions = class StablehloPadOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloPadOptions(); + $.edge_padding_low = reader.int64s_(position, 4); + 
$.edge_padding_high = reader.int64s_(position, 6); + $.interior_padding = reader.int64s_(position, 8); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloPadOptions(); + $.edge_padding_low = reader.array(json.edge_padding_low); + $.edge_padding_high = reader.array(json.edge_padding_high); + $.interior_padding = reader.array(json.interior_padding); + return $; + } +}; + +$root.tflite.StablehloIotaOptions = class StablehloIotaOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloIotaOptions(); + $.iota_dimension = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloIotaOptions(); + $.iota_dimension = reader.value(json.iota_dimension, 0); + return $; + } +}; + +$root.tflite.StablehloCustomCallOptions = class StablehloCustomCallOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloCustomCallOptions(); + $.call_target_name = reader.string_(position, 4, null); + $.has_side_effect = reader.bool_(position, 6, false); + $.backend_config = reader.string_(position, 8, null); + $.api_version = reader.int32_(position, 10, 0); + $.called_computations = reader.typedArray(position, 12, Int32Array); + $.custom_attributes = reader.typedArray(position, 14, Uint8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloCustomCallOptions(); + $.call_target_name = reader.value(json.call_target_name, null); + $.has_side_effect = reader.value(json.has_side_effect, false); + $.backend_config = reader.value(json.backend_config, null); + $.api_version = reader.value(json.api_version, 0); + $.called_computations = reader.typedArray(json.called_computations, Int32Array); + $.custom_attributes = reader.typedArray(json.custom_attributes, Uint8Array); + return $; + } +}; + +$root.tflite.StablehloReduceOptions = class StablehloReduceOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloReduceOptions(); + $.dimensions = reader.int64s_(position, 4); + $.body_subgraph_index = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloReduceOptions(); + $.dimensions = reader.array(json.dimensions); + $.body_subgraph_index = reader.value(json.body_subgraph_index, 0); + return $; + } +}; + +$root.tflite.StablehloSliceOptions = class StablehloSliceOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloSliceOptions(); + $.start_indices = reader.int64s_(position, 4); + $.limit_indices = reader.int64s_(position, 6); + $.strides = reader.int64s_(position, 8); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloSliceOptions(); + $.start_indices = reader.array(json.start_indices); + $.limit_indices = reader.array(json.limit_indices); + $.strides = reader.array(json.strides); + return $; + } +}; + +$root.tflite.StablehloConvolutionOptions = class StablehloConvolutionOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloConvolutionOptions(); + $.window_strides = reader.int64s_(position, 4); + $.padding = reader.int64s_(position, 6); + $.lhs_dilation = reader.int64s_(position, 8); + $.rhs_dilation = reader.int64s_(position, 10); + $.window_reversal = reader.bools_(position, 12); + $.input_batch_dimension = reader.int64_(position, 14, 0); + $.input_feature_dimension = reader.int64_(position, 16, 0); + $.input_spatial_dimensions = 
reader.int64s_(position, 18);
+        $.kernel_input_feature_dimension = reader.int64_(position, 20, 0);
+        $.kernel_output_feature_dimension = reader.int64_(position, 22, 0);
+        $.kernel_spatial_dimensions = reader.int64s_(position, 24);
+        $.output_batch_dimension = reader.int64_(position, 26, 0);
+        $.output_feature_dimension = reader.int64_(position, 28, 0);
+        $.output_spatial_dimensions = reader.int64s_(position, 30);
+        $.feature_group_count = reader.int64_(position, 32, 0);
+        $.batch_group_count = reader.int64_(position, 34, 0);
+        $.precision_config = reader.typedArray(position, 36, Uint32Array);
+        return $;
+    }
+
+    static decodeText(reader, json) {
+        const $ = new $root.tflite.StablehloConvolutionOptions();
+        $.window_strides = reader.array(json.window_strides);
+        $.padding = reader.array(json.padding);
+        $.lhs_dilation = reader.array(json.lhs_dilation);
+        $.rhs_dilation = reader.array(json.rhs_dilation);
+        $.window_reversal = reader.array(json.window_reversal);
+        $.input_batch_dimension = reader.value(json.input_batch_dimension, 0);
+        $.input_feature_dimension = reader.value(json.input_feature_dimension, 0);
+        $.input_spatial_dimensions = reader.array(json.input_spatial_dimensions);
+        $.kernel_input_feature_dimension = reader.value(json.kernel_input_feature_dimension, 0);
+        $.kernel_output_feature_dimension = reader.value(json.kernel_output_feature_dimension, 0);
+        $.kernel_spatial_dimensions = reader.array(json.kernel_spatial_dimensions);
+        $.output_batch_dimension = reader.value(json.output_batch_dimension, 0);
+        $.output_feature_dimension = reader.value(json.output_feature_dimension, 0);
+        $.output_spatial_dimensions = reader.array(json.output_spatial_dimensions);
+        $.feature_group_count = reader.value(json.feature_group_count, 0);
+        $.batch_group_count = reader.value(json.batch_group_count, 0);
+        // map enum names back to numeric values (the enum object defines no decodeText)
+        $.precision_config = (json.precision_config || []).map((name) => $root.tflite.StablehloPrecisionConfig[name]);
+        return $;
+    }
+};
+
+$root.tflite.StablehloScatterOptions = class StablehloScatterOptions {
+
+    static decode(reader, position) {
+        const $ = new $root.tflite.StablehloScatterOptions();
+        $.indices_are_sorted = reader.bool_(position, 4, false);
+        $.update_window_dims = reader.int64s_(position, 6);
+        $.inserted_window_dims = reader.int64s_(position, 8);
+        $.scatter_dims_to_operand_dims = reader.int64s_(position, 10);
+        $.index_vector_dim = reader.int64_(position, 12, 0);
+        $.unique_indices = reader.bool_(position, 14, false);
+        $.update_computation_subgraph_index = reader.int32_(position, 16, 0);
+        return $;
+    }
+
+    static decodeText(reader, json) {
+        const $ = new $root.tflite.StablehloScatterOptions();
+        $.indices_are_sorted = reader.value(json.indices_are_sorted, false);
+        $.update_window_dims = reader.array(json.update_window_dims);
+        $.inserted_window_dims = reader.array(json.inserted_window_dims);
+        $.scatter_dims_to_operand_dims = reader.array(json.scatter_dims_to_operand_dims);
+        $.index_vector_dim = reader.value(json.index_vector_dim, 0);
+        $.unique_indices = reader.value(json.unique_indices, false);
+        $.update_computation_subgraph_index = reader.value(json.update_computation_subgraph_index, 0);
+        return $;
+    }
+};
+
+$root.tflite.RngAlgorithm = {
+    DEFAULT: 0,
+    PHILOX: 1,
+    THREEFRY: 2
+};
+
+$root.tflite.StablehloRngBitGeneratorOptions = class StablehloRngBitGeneratorOptions {
+
+    static decode(reader, position) {
+        const $ = new $root.tflite.StablehloRngBitGeneratorOptions();
+        $.algorithm = reader.int8_(position, 4, 0);
+        return $;
+    }
+
+    static decodeText(reader,
json) { + const $ = new $root.tflite.StablehloRngBitGeneratorOptions(); + $.algorithm = $root.tflite.RngAlgorithm[json.algorithm]; + return $; + } +}; + +$root.tflite.Padding = { + SAME: 0, + VALID: 1 +}; + +$root.tflite.ActivationFunctionType = { + NONE: 0, + RELU: 1, + RELU_N1_TO_1: 2, + RELU6: 3, + TANH: 4, + SIGN_BIT: 5 +}; + +$root.tflite.Conv2DOptions = class Conv2DOptions { + + static decode(reader, position) { + const $ = new $root.tflite.Conv2DOptions(); + $.padding = reader.int8_(position, 4, 0); + $.stride_w = reader.int32_(position, 6, 0); + $.stride_h = reader.int32_(position, 8, 0); + $.fused_activation_function = reader.int8_(position, 10, 0); + $.dilation_w_factor = reader.int32_(position, 12, 1); + $.dilation_h_factor = reader.int32_(position, 14, 1); + $.quantized_bias_type = reader.int8_(position, 16, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Conv2DOptions(); + $.padding = $root.tflite.Padding[json.padding]; + $.stride_w = reader.value(json.stride_w, 0); + $.stride_h = reader.value(json.stride_h, 0); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.dilation_w_factor = reader.value(json.dilation_w_factor, 1); + $.dilation_h_factor = reader.value(json.dilation_h_factor, 1); + $.quantized_bias_type = $root.tflite.TensorType[json.quantized_bias_type]; + return $; + } +}; + +$root.tflite.Conv3DOptions = class Conv3DOptions { + + static decode(reader, position) { + const $ = new $root.tflite.Conv3DOptions(); + $.padding = reader.int8_(position, 4, 0); + $.stride_d = reader.int32_(position, 6, 0); + $.stride_w = reader.int32_(position, 8, 0); + $.stride_h = reader.int32_(position, 10, 0); + $.fused_activation_function = reader.int8_(position, 12, 0); + $.dilation_d_factor = reader.int32_(position, 14, 1); + $.dilation_w_factor = reader.int32_(position, 16, 1); + $.dilation_h_factor = reader.int32_(position, 18, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Conv3DOptions(); + $.padding = $root.tflite.Padding[json.padding]; + $.stride_d = reader.value(json.stride_d, 0); + $.stride_w = reader.value(json.stride_w, 0); + $.stride_h = reader.value(json.stride_h, 0); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.dilation_d_factor = reader.value(json.dilation_d_factor, 1); + $.dilation_w_factor = reader.value(json.dilation_w_factor, 1); + $.dilation_h_factor = reader.value(json.dilation_h_factor, 1); + return $; + } +}; + +$root.tflite.Pool2DOptions = class Pool2DOptions { + + static decode(reader, position) { + const $ = new $root.tflite.Pool2DOptions(); + $.padding = reader.int8_(position, 4, 0); + $.stride_w = reader.int32_(position, 6, 0); + $.stride_h = reader.int32_(position, 8, 0); + $.filter_width = reader.int32_(position, 10, 0); + $.filter_height = reader.int32_(position, 12, 0); + $.fused_activation_function = reader.int8_(position, 14, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Pool2DOptions(); + $.padding = $root.tflite.Padding[json.padding]; + $.stride_w = reader.value(json.stride_w, 0); + $.stride_h = reader.value(json.stride_h, 0); + $.filter_width = reader.value(json.filter_width, 0); + $.filter_height = reader.value(json.filter_height, 0); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.tflite.DepthwiseConv2DOptions = class DepthwiseConv2DOptions 
{ + + static decode(reader, position) { + const $ = new $root.tflite.DepthwiseConv2DOptions(); + $.padding = reader.int8_(position, 4, 0); + $.stride_w = reader.int32_(position, 6, 0); + $.stride_h = reader.int32_(position, 8, 0); + $.depth_multiplier = reader.int32_(position, 10, 0); + $.fused_activation_function = reader.int8_(position, 12, 0); + $.dilation_w_factor = reader.int32_(position, 14, 1); + $.dilation_h_factor = reader.int32_(position, 16, 1); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.DepthwiseConv2DOptions(); + $.padding = $root.tflite.Padding[json.padding]; + $.stride_w = reader.value(json.stride_w, 0); + $.stride_h = reader.value(json.stride_h, 0); + $.depth_multiplier = reader.value(json.depth_multiplier, 0); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.dilation_w_factor = reader.value(json.dilation_w_factor, 1); + $.dilation_h_factor = reader.value(json.dilation_h_factor, 1); + return $; + } +}; + +$root.tflite.ConcatEmbeddingsOptions = class ConcatEmbeddingsOptions { + + static decode(reader, position) { + const $ = new $root.tflite.ConcatEmbeddingsOptions(); + $.num_channels = reader.int32_(position, 4, 0); + $.num_columns_per_channel = reader.typedArray(position, 6, Int32Array); + $.embedding_dim_per_channel = reader.typedArray(position, 8, Int32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ConcatEmbeddingsOptions(); + $.num_channels = reader.value(json.num_channels, 0); + $.num_columns_per_channel = reader.typedArray(json.num_columns_per_channel, Int32Array); + $.embedding_dim_per_channel = reader.typedArray(json.embedding_dim_per_channel, Int32Array); + return $; + } +}; + +$root.tflite.LSHProjectionType = { + UNKNOWN: 0, + SPARSE: 1, + DENSE: 2 +}; + +$root.tflite.LSHProjectionOptions = class LSHProjectionOptions { + + static decode(reader, position) { + const $ = new $root.tflite.LSHProjectionOptions(); + $.type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.LSHProjectionOptions(); + $.type = $root.tflite.LSHProjectionType[json.type]; + return $; + } +}; + +$root.tflite.SVDFOptions = class SVDFOptions { + + static decode(reader, position) { + const $ = new $root.tflite.SVDFOptions(); + $.rank = reader.int32_(position, 4, 0); + $.fused_activation_function = reader.int8_(position, 6, 0); + $.asymmetric_quantize_inputs = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SVDFOptions(); + $.rank = reader.value(json.rank, 0); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.tflite.RNNOptions = class RNNOptions { + + static decode(reader, position) { + const $ = new $root.tflite.RNNOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.asymmetric_quantize_inputs = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.RNNOptions(); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.tflite.SequenceRNNOptions = class SequenceRNNOptions { + + static decode(reader, position) { + const $ = 
new $root.tflite.SequenceRNNOptions(); + $.time_major = reader.bool_(position, 4, false); + $.fused_activation_function = reader.int8_(position, 6, 0); + $.asymmetric_quantize_inputs = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SequenceRNNOptions(); + $.time_major = reader.value(json.time_major, false); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.tflite.BidirectionalSequenceRNNOptions = class BidirectionalSequenceRNNOptions { + + static decode(reader, position) { + const $ = new $root.tflite.BidirectionalSequenceRNNOptions(); + $.time_major = reader.bool_(position, 4, false); + $.fused_activation_function = reader.int8_(position, 6, 0); + $.merge_outputs = reader.bool_(position, 8, false); + $.asymmetric_quantize_inputs = reader.bool_(position, 10, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.BidirectionalSequenceRNNOptions(); + $.time_major = reader.value(json.time_major, false); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.merge_outputs = reader.value(json.merge_outputs, false); + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.tflite.FullyConnectedOptionsWeightsFormat = { + DEFAULT: 0, + SHUFFLED4x16INT8: 1 +}; + +$root.tflite.FullyConnectedOptions = class FullyConnectedOptions { + + static decode(reader, position) { + const $ = new $root.tflite.FullyConnectedOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.weights_format = reader.int8_(position, 6, 0); + $.keep_num_dims = reader.bool_(position, 8, false); + $.asymmetric_quantize_inputs = reader.bool_(position, 10, false); + $.quantized_bias_type = reader.int8_(position, 12, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.FullyConnectedOptions(); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.weights_format = $root.tflite.FullyConnectedOptionsWeightsFormat[json.weights_format]; + $.keep_num_dims = reader.value(json.keep_num_dims, false); + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + $.quantized_bias_type = $root.tflite.TensorType[json.quantized_bias_type]; + return $; + } +}; + +$root.tflite.SoftmaxOptions = class SoftmaxOptions { + + static decode(reader, position) { + const $ = new $root.tflite.SoftmaxOptions(); + $.beta = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SoftmaxOptions(); + $.beta = reader.value(json.beta, 0); + return $; + } +}; + +$root.tflite.ConcatenationOptions = class ConcatenationOptions { + + static decode(reader, position) { + const $ = new $root.tflite.ConcatenationOptions(); + $.axis = reader.int32_(position, 4, 0); + $.fused_activation_function = reader.int8_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ConcatenationOptions(); + $.axis = reader.value(json.axis, 0); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.tflite.AddOptions = class AddOptions { + + static decode(reader, position) { + const $ = new $root.tflite.AddOptions(); 
+ $.fused_activation_function = reader.int8_(position, 4, 0); + $.pot_scale_int16 = reader.bool_(position, 6, true); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.AddOptions(); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.pot_scale_int16 = reader.value(json.pot_scale_int16, true); + return $; + } +}; + +$root.tflite.MulOptions = class MulOptions { + + static decode(reader, position) { + const $ = new $root.tflite.MulOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.MulOptions(); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.tflite.L2NormOptions = class L2NormOptions { + + static decode(reader, position) { + const $ = new $root.tflite.L2NormOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.L2NormOptions(); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.tflite.LocalResponseNormalizationOptions = class LocalResponseNormalizationOptions { + + static decode(reader, position) { + const $ = new $root.tflite.LocalResponseNormalizationOptions(); + $.radius = reader.int32_(position, 4, 0); + $.bias = reader.float32_(position, 6, 0); + $.alpha = reader.float32_(position, 8, 0); + $.beta = reader.float32_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.LocalResponseNormalizationOptions(); + $.radius = reader.value(json.radius, 0); + $.bias = reader.value(json.bias, 0); + $.alpha = reader.value(json.alpha, 0); + $.beta = reader.value(json.beta, 0); + return $; + } +}; + +$root.tflite.LSTMKernelType = { + FULL: 0, + BASIC: 1 +}; + +$root.tflite.LSTMOptions = class LSTMOptions { + + static decode(reader, position) { + const $ = new $root.tflite.LSTMOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.cell_clip = reader.float32_(position, 6, 0); + $.proj_clip = reader.float32_(position, 8, 0); + $.kernel_type = reader.int8_(position, 10, 0); + $.asymmetric_quantize_inputs = reader.bool_(position, 12, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.LSTMOptions(); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.cell_clip = reader.value(json.cell_clip, 0); + $.proj_clip = reader.value(json.proj_clip, 0); + $.kernel_type = $root.tflite.LSTMKernelType[json.kernel_type]; + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.tflite.UnidirectionalSequenceLSTMOptions = class UnidirectionalSequenceLSTMOptions { + + static decode(reader, position) { + const $ = new $root.tflite.UnidirectionalSequenceLSTMOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.cell_clip = reader.float32_(position, 6, 0); + $.proj_clip = reader.float32_(position, 8, 0); + $.time_major = reader.bool_(position, 10, false); + $.asymmetric_quantize_inputs = reader.bool_(position, 12, false); + $.diagonal_recurrent_tensors = reader.bool_(position, 14, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.UnidirectionalSequenceLSTMOptions(); + $.fused_activation_function = 
$root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.cell_clip = reader.value(json.cell_clip, 0); + $.proj_clip = reader.value(json.proj_clip, 0); + $.time_major = reader.value(json.time_major, false); + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + $.diagonal_recurrent_tensors = reader.value(json.diagonal_recurrent_tensors, false); + return $; + } +}; + +$root.tflite.BidirectionalSequenceLSTMOptions = class BidirectionalSequenceLSTMOptions { + + static decode(reader, position) { + const $ = new $root.tflite.BidirectionalSequenceLSTMOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.cell_clip = reader.float32_(position, 6, 0); + $.proj_clip = reader.float32_(position, 8, 0); + $.merge_outputs = reader.bool_(position, 10, false); + $.time_major = reader.bool_(position, 12, true); + $.asymmetric_quantize_inputs = reader.bool_(position, 14, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.BidirectionalSequenceLSTMOptions(); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.cell_clip = reader.value(json.cell_clip, 0); + $.proj_clip = reader.value(json.proj_clip, 0); + $.merge_outputs = reader.value(json.merge_outputs, false); + $.time_major = reader.value(json.time_major, true); + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.tflite.ResizeBilinearOptions = class ResizeBilinearOptions { + + static decode(reader, position) { + const $ = new $root.tflite.ResizeBilinearOptions(); + $.new_height = reader.int32_(position, 4, 0); + $.new_width = reader.int32_(position, 6, 0); + $.align_corners = reader.bool_(position, 8, false); + $.half_pixel_centers = reader.bool_(position, 10, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ResizeBilinearOptions(); + $.new_height = reader.value(json.new_height, 0); + $.new_width = reader.value(json.new_width, 0); + $.align_corners = reader.value(json.align_corners, false); + $.half_pixel_centers = reader.value(json.half_pixel_centers, false); + return $; + } +}; + +$root.tflite.ResizeNearestNeighborOptions = class ResizeNearestNeighborOptions { + + static decode(reader, position) { + const $ = new $root.tflite.ResizeNearestNeighborOptions(); + $.align_corners = reader.bool_(position, 4, false); + $.half_pixel_centers = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ResizeNearestNeighborOptions(); + $.align_corners = reader.value(json.align_corners, false); + $.half_pixel_centers = reader.value(json.half_pixel_centers, false); + return $; + } +}; + +$root.tflite.CallOptions = class CallOptions { + + static decode(reader, position) { + const $ = new $root.tflite.CallOptions(); + $.subgraph = reader.uint32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.CallOptions(); + $.subgraph = reader.value(json.subgraph, 0); + return $; + } +}; + +$root.tflite.PadOptions = class PadOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.PadOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.PadOptions(); + return $; + } +}; + +$root.tflite.PadV2Options = class PadV2Options { + + static decode(/* reader, position */) { + const $ = new $root.tflite.PadV2Options(); + return $; + } + + static 
decodeText(/* reader, json */) { + const $ = new $root.tflite.PadV2Options(); + return $; + } +}; + +$root.tflite.ReshapeOptions = class ReshapeOptions { + + static decode(reader, position) { + const $ = new $root.tflite.ReshapeOptions(); + $.new_shape = reader.typedArray(position, 4, Int32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ReshapeOptions(); + $.new_shape = reader.typedArray(json.new_shape, Int32Array); + return $; + } +}; + +$root.tflite.SpaceToBatchNDOptions = class SpaceToBatchNDOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.SpaceToBatchNDOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.SpaceToBatchNDOptions(); + return $; + } +}; + +$root.tflite.BatchToSpaceNDOptions = class BatchToSpaceNDOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.BatchToSpaceNDOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.BatchToSpaceNDOptions(); + return $; + } +}; + +$root.tflite.SkipGramOptions = class SkipGramOptions { + + static decode(reader, position) { + const $ = new $root.tflite.SkipGramOptions(); + $.ngram_size = reader.int32_(position, 4, 0); + $.max_skip_size = reader.int32_(position, 6, 0); + $.include_all_ngrams = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SkipGramOptions(); + $.ngram_size = reader.value(json.ngram_size, 0); + $.max_skip_size = reader.value(json.max_skip_size, 0); + $.include_all_ngrams = reader.value(json.include_all_ngrams, false); + return $; + } +}; + +$root.tflite.SpaceToDepthOptions = class SpaceToDepthOptions { + + static decode(reader, position) { + const $ = new $root.tflite.SpaceToDepthOptions(); + $.block_size = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SpaceToDepthOptions(); + $.block_size = reader.value(json.block_size, 0); + return $; + } +}; + +$root.tflite.DepthToSpaceOptions = class DepthToSpaceOptions { + + static decode(reader, position) { + const $ = new $root.tflite.DepthToSpaceOptions(); + $.block_size = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.DepthToSpaceOptions(); + $.block_size = reader.value(json.block_size, 0); + return $; + } +}; + +$root.tflite.SubOptions = class SubOptions { + + static decode(reader, position) { + const $ = new $root.tflite.SubOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + $.pot_scale_int16 = reader.bool_(position, 6, true); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SubOptions(); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.pot_scale_int16 = reader.value(json.pot_scale_int16, true); + return $; + } +}; + +$root.tflite.DivOptions = class DivOptions { + + static decode(reader, position) { + const $ = new $root.tflite.DivOptions(); + $.fused_activation_function = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.DivOptions(); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + return $; + } +}; + +$root.tflite.TopKV2Options = class TopKV2Options { + + static decode(/* reader, position */) { + const $ = new $root.tflite.TopKV2Options(); + return $; + } + + 
static decodeText(/* reader, json */) { + const $ = new $root.tflite.TopKV2Options(); + return $; + } +}; + +$root.tflite.CombinerType = { + SUM: 0, + MEAN: 1, + SQRTN: 2 +}; + +$root.tflite.EmbeddingLookupSparseOptions = class EmbeddingLookupSparseOptions { + + static decode(reader, position) { + const $ = new $root.tflite.EmbeddingLookupSparseOptions(); + $.combiner = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.EmbeddingLookupSparseOptions(); + $.combiner = $root.tflite.CombinerType[json.combiner]; + return $; + } +}; + +$root.tflite.GatherOptions = class GatherOptions { + + static decode(reader, position) { + const $ = new $root.tflite.GatherOptions(); + $.axis = reader.int32_(position, 4, 0); + $.batch_dims = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.GatherOptions(); + $.axis = reader.value(json.axis, 0); + $.batch_dims = reader.value(json.batch_dims, 0); + return $; + } +}; + +$root.tflite.TransposeOptions = class TransposeOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.TransposeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.TransposeOptions(); + return $; + } +}; + +$root.tflite.ExpOptions = class ExpOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.ExpOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.ExpOptions(); + return $; + } +}; + +$root.tflite.CosOptions = class CosOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.CosOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.CosOptions(); + return $; + } +}; + +$root.tflite.ReducerOptions = class ReducerOptions { + + static decode(reader, position) { + const $ = new $root.tflite.ReducerOptions(); + $.keep_dims = reader.bool_(position, 4, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ReducerOptions(); + $.keep_dims = reader.value(json.keep_dims, false); + return $; + } +}; + +$root.tflite.SqueezeOptions = class SqueezeOptions { + + static decode(reader, position) { + const $ = new $root.tflite.SqueezeOptions(); + $.squeeze_dims = reader.typedArray(position, 4, Int32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SqueezeOptions(); + $.squeeze_dims = reader.typedArray(json.squeeze_dims, Int32Array); + return $; + } +}; + +$root.tflite.SplitOptions = class SplitOptions { + + static decode(reader, position) { + const $ = new $root.tflite.SplitOptions(); + $.num_splits = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SplitOptions(); + $.num_splits = reader.value(json.num_splits, 0); + return $; + } +}; + +$root.tflite.SplitVOptions = class SplitVOptions { + + static decode(reader, position) { + const $ = new $root.tflite.SplitVOptions(); + $.num_splits = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SplitVOptions(); + $.num_splits = reader.value(json.num_splits, 0); + return $; + } +}; + +$root.tflite.StridedSliceOptions = class StridedSliceOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StridedSliceOptions(); + $.begin_mask = reader.int32_(position, 4, 0); + $.end_mask = reader.int32_(position, 6, 0); + 
$.ellipsis_mask = reader.int32_(position, 8, 0); + $.new_axis_mask = reader.int32_(position, 10, 0); + $.shrink_axis_mask = reader.int32_(position, 12, 0); + $.offset = reader.bool_(position, 14, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StridedSliceOptions(); + $.begin_mask = reader.value(json.begin_mask, 0); + $.end_mask = reader.value(json.end_mask, 0); + $.ellipsis_mask = reader.value(json.ellipsis_mask, 0); + $.new_axis_mask = reader.value(json.new_axis_mask, 0); + $.shrink_axis_mask = reader.value(json.shrink_axis_mask, 0); + $.offset = reader.value(json.offset, false); + return $; + } +}; + +$root.tflite.LogSoftmaxOptions = class LogSoftmaxOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.LogSoftmaxOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.LogSoftmaxOptions(); + return $; + } +}; + +$root.tflite.CastOptions = class CastOptions { + + static decode(reader, position) { + const $ = new $root.tflite.CastOptions(); + $.in_data_type = reader.int8_(position, 4, 0); + $.out_data_type = reader.int8_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.CastOptions(); + $.in_data_type = $root.tflite.TensorType[json.in_data_type]; + $.out_data_type = $root.tflite.TensorType[json.out_data_type]; + return $; + } +}; + +$root.tflite.DequantizeOptions = class DequantizeOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.DequantizeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.DequantizeOptions(); + return $; + } +}; + +$root.tflite.MaximumMinimumOptions = class MaximumMinimumOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.MaximumMinimumOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.MaximumMinimumOptions(); + return $; + } +}; + +$root.tflite.TileOptions = class TileOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.TileOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.TileOptions(); + return $; + } +}; + +$root.tflite.ArgMaxOptions = class ArgMaxOptions { + + static decode(reader, position) { + const $ = new $root.tflite.ArgMaxOptions(); + $.output_type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ArgMaxOptions(); + $.output_type = $root.tflite.TensorType[json.output_type]; + return $; + } +}; + +$root.tflite.ArgMinOptions = class ArgMinOptions { + + static decode(reader, position) { + const $ = new $root.tflite.ArgMinOptions(); + $.output_type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ArgMinOptions(); + $.output_type = $root.tflite.TensorType[json.output_type]; + return $; + } +}; + +$root.tflite.GreaterOptions = class GreaterOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.GreaterOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.GreaterOptions(); + return $; + } +}; + +$root.tflite.GreaterEqualOptions = class GreaterEqualOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.GreaterEqualOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.GreaterEqualOptions(); + 
return $; + } +}; + +$root.tflite.LessOptions = class LessOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.LessOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.LessOptions(); + return $; + } +}; + +$root.tflite.LessEqualOptions = class LessEqualOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.LessEqualOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.LessEqualOptions(); + return $; + } +}; + +$root.tflite.NegOptions = class NegOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.NegOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.NegOptions(); + return $; + } +}; + +$root.tflite.SelectOptions = class SelectOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.SelectOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.SelectOptions(); + return $; + } +}; + +$root.tflite.SliceOptions = class SliceOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.SliceOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.SliceOptions(); + return $; + } +}; + +$root.tflite.TransposeConvOptions = class TransposeConvOptions { + + static decode(reader, position) { + const $ = new $root.tflite.TransposeConvOptions(); + $.padding = reader.int8_(position, 4, 0); + $.stride_w = reader.int32_(position, 6, 0); + $.stride_h = reader.int32_(position, 8, 0); + $.fused_activation_function = reader.int8_(position, 10, 0); + $.quantized_bias_type = reader.int8_(position, 12, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.TransposeConvOptions(); + $.padding = $root.tflite.Padding[json.padding]; + $.stride_w = reader.value(json.stride_w, 0); + $.stride_h = reader.value(json.stride_h, 0); + $.fused_activation_function = $root.tflite.ActivationFunctionType[json.fused_activation_function]; + $.quantized_bias_type = $root.tflite.TensorType[json.quantized_bias_type]; + return $; + } +}; + +$root.tflite.ExpandDimsOptions = class ExpandDimsOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.ExpandDimsOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.ExpandDimsOptions(); + return $; + } +}; + +$root.tflite.SparseToDenseOptions = class SparseToDenseOptions { + + static decode(reader, position) { + const $ = new $root.tflite.SparseToDenseOptions(); + $.validate_indices = reader.bool_(position, 4, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SparseToDenseOptions(); + $.validate_indices = reader.value(json.validate_indices, false); + return $; + } +}; + +$root.tflite.EqualOptions = class EqualOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.EqualOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.EqualOptions(); + return $; + } +}; + +$root.tflite.NotEqualOptions = class NotEqualOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.NotEqualOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.NotEqualOptions(); + return $; + } +}; + +$root.tflite.ShapeOptions = class ShapeOptions { + + static decode(reader, position) { + 
const $ = new $root.tflite.ShapeOptions(); + $.out_type = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ShapeOptions(); + $.out_type = $root.tflite.TensorType[json.out_type]; + return $; + } +}; + +$root.tflite.RankOptions = class RankOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.RankOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.RankOptions(); + return $; + } +}; + +$root.tflite.PowOptions = class PowOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.PowOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.PowOptions(); + return $; + } +}; + +$root.tflite.FakeQuantOptions = class FakeQuantOptions { + + static decode(reader, position) { + const $ = new $root.tflite.FakeQuantOptions(); + $.min = reader.float32_(position, 4, 0); + $.max = reader.float32_(position, 6, 0); + $.num_bits = reader.int32_(position, 8, 0); + $.narrow_range = reader.bool_(position, 10, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.FakeQuantOptions(); + $.min = reader.value(json.min, 0); + $.max = reader.value(json.max, 0); + $.num_bits = reader.value(json.num_bits, 0); + $.narrow_range = reader.value(json.narrow_range, false); + return $; + } +}; + +$root.tflite.PackOptions = class PackOptions { + + static decode(reader, position) { + const $ = new $root.tflite.PackOptions(); + $.values_count = reader.int32_(position, 4, 0); + $.axis = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.PackOptions(); + $.values_count = reader.value(json.values_count, 0); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.tflite.LogicalOrOptions = class LogicalOrOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.LogicalOrOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.LogicalOrOptions(); + return $; + } +}; + +$root.tflite.OneHotOptions = class OneHotOptions { + + static decode(reader, position) { + const $ = new $root.tflite.OneHotOptions(); + $.axis = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.OneHotOptions(); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.tflite.AbsOptions = class AbsOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.AbsOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.AbsOptions(); + return $; + } +}; + +$root.tflite.HardSwishOptions = class HardSwishOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.HardSwishOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.HardSwishOptions(); + return $; + } +}; + +$root.tflite.LogicalAndOptions = class LogicalAndOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.LogicalAndOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.LogicalAndOptions(); + return $; + } +}; + +$root.tflite.LogicalNotOptions = class LogicalNotOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.LogicalNotOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new 
$root.tflite.LogicalNotOptions(); + return $; + } +}; + +$root.tflite.UnpackOptions = class UnpackOptions { + + static decode(reader, position) { + const $ = new $root.tflite.UnpackOptions(); + $.num = reader.int32_(position, 4, 0); + $.axis = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.UnpackOptions(); + $.num = reader.value(json.num, 0); + $.axis = reader.value(json.axis, 0); + return $; + } +}; + +$root.tflite.FloorDivOptions = class FloorDivOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.FloorDivOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.FloorDivOptions(); + return $; + } +}; + +$root.tflite.SquareOptions = class SquareOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.SquareOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.SquareOptions(); + return $; + } +}; + +$root.tflite.ZerosLikeOptions = class ZerosLikeOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.ZerosLikeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.ZerosLikeOptions(); + return $; + } +}; + +$root.tflite.FillOptions = class FillOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.FillOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.FillOptions(); + return $; + } +}; + +$root.tflite.FloorModOptions = class FloorModOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.FloorModOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.FloorModOptions(); + return $; + } +}; + +$root.tflite.RangeOptions = class RangeOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.RangeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.RangeOptions(); + return $; + } +}; + +$root.tflite.LeakyReluOptions = class LeakyReluOptions { + + static decode(reader, position) { + const $ = new $root.tflite.LeakyReluOptions(); + $.alpha = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.LeakyReluOptions(); + $.alpha = reader.value(json.alpha, 0); + return $; + } +}; + +$root.tflite.SquaredDifferenceOptions = class SquaredDifferenceOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.SquaredDifferenceOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.SquaredDifferenceOptions(); + return $; + } +}; + +$root.tflite.MirrorPadMode = { + REFLECT: 0, + SYMMETRIC: 1 +}; + +$root.tflite.MirrorPadOptions = class MirrorPadOptions { + + static decode(reader, position) { + const $ = new $root.tflite.MirrorPadOptions(); + $.mode = reader.int8_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.MirrorPadOptions(); + $.mode = $root.tflite.MirrorPadMode[json.mode]; + return $; + } +}; + +$root.tflite.UniqueOptions = class UniqueOptions { + + static decode(reader, position) { + const $ = new $root.tflite.UniqueOptions(); + $.idx_out_type = reader.int8_(position, 4, 2); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.UniqueOptions(); + $.idx_out_type = $root.tflite.TensorType[json.idx_out_type]; 
+ return $; + } +}; + +$root.tflite.ReverseV2Options = class ReverseV2Options { + + static decode(/* reader, position */) { + const $ = new $root.tflite.ReverseV2Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.ReverseV2Options(); + return $; + } +}; + +$root.tflite.AddNOptions = class AddNOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.AddNOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.AddNOptions(); + return $; + } +}; + +$root.tflite.GatherNdOptions = class GatherNdOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.GatherNdOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.GatherNdOptions(); + return $; + } +}; + +$root.tflite.WhereOptions = class WhereOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.WhereOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.WhereOptions(); + return $; + } +}; + +$root.tflite.ReverseSequenceOptions = class ReverseSequenceOptions { + + static decode(reader, position) { + const $ = new $root.tflite.ReverseSequenceOptions(); + $.seq_dim = reader.int32_(position, 4, 0); + $.batch_dim = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ReverseSequenceOptions(); + $.seq_dim = reader.value(json.seq_dim, 0); + $.batch_dim = reader.value(json.batch_dim, 0); + return $; + } +}; + +$root.tflite.MatrixDiagOptions = class MatrixDiagOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.MatrixDiagOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.MatrixDiagOptions(); + return $; + } +}; + +$root.tflite.QuantizeOptions = class QuantizeOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.QuantizeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.QuantizeOptions(); + return $; + } +}; + +$root.tflite.MatrixSetDiagOptions = class MatrixSetDiagOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.MatrixSetDiagOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.MatrixSetDiagOptions(); + return $; + } +}; + +$root.tflite.IfOptions = class IfOptions { + + static decode(reader, position) { + const $ = new $root.tflite.IfOptions(); + $.then_subgraph_index = reader.int32_(position, 4, 0); + $.else_subgraph_index = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.IfOptions(); + $.then_subgraph_index = reader.value(json.then_subgraph_index, 0); + $.else_subgraph_index = reader.value(json.else_subgraph_index, 0); + return $; + } +}; + +$root.tflite.CallOnceOptions = class CallOnceOptions { + + static decode(reader, position) { + const $ = new $root.tflite.CallOnceOptions(); + $.init_subgraph_index = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.CallOnceOptions(); + $.init_subgraph_index = reader.value(json.init_subgraph_index, 0); + return $; + } +}; + +$root.tflite.WhileOptions = class WhileOptions { + + static decode(reader, position) { + const $ = new $root.tflite.WhileOptions(); + $.cond_subgraph_index = reader.int32_(position, 4, 0); + $.body_subgraph_index = 
reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.WhileOptions(); + $.cond_subgraph_index = reader.value(json.cond_subgraph_index, 0); + $.body_subgraph_index = reader.value(json.body_subgraph_index, 0); + return $; + } +}; + +$root.tflite.NonMaxSuppressionV4Options = class NonMaxSuppressionV4Options { + + static decode(/* reader, position */) { + const $ = new $root.tflite.NonMaxSuppressionV4Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.NonMaxSuppressionV4Options(); + return $; + } +}; + +$root.tflite.NonMaxSuppressionV5Options = class NonMaxSuppressionV5Options { + + static decode(/* reader, position */) { + const $ = new $root.tflite.NonMaxSuppressionV5Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.NonMaxSuppressionV5Options(); + return $; + } +}; + +$root.tflite.ScatterNdOptions = class ScatterNdOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.ScatterNdOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.ScatterNdOptions(); + return $; + } +}; + +$root.tflite.SelectV2Options = class SelectV2Options { + + static decode(/* reader, position */) { + const $ = new $root.tflite.SelectV2Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.SelectV2Options(); + return $; + } +}; + +$root.tflite.DensifyOptions = class DensifyOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.DensifyOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.DensifyOptions(); + return $; + } +}; + +$root.tflite.SegmentSumOptions = class SegmentSumOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.SegmentSumOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.SegmentSumOptions(); + return $; + } +}; + +$root.tflite.BatchMatMulOptions = class BatchMatMulOptions { + + static decode(reader, position) { + const $ = new $root.tflite.BatchMatMulOptions(); + $.adj_x = reader.bool_(position, 4, false); + $.adj_y = reader.bool_(position, 6, false); + $.asymmetric_quantize_inputs = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.BatchMatMulOptions(); + $.adj_x = reader.value(json.adj_x, false); + $.adj_y = reader.value(json.adj_y, false); + $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + return $; + } +}; + +$root.tflite.CumsumOptions = class CumsumOptions { + + static decode(reader, position) { + const $ = new $root.tflite.CumsumOptions(); + $.exclusive = reader.bool_(position, 4, false); + $.reverse = reader.bool_(position, 6, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.CumsumOptions(); + $.exclusive = reader.value(json.exclusive, false); + $.reverse = reader.value(json.reverse, false); + return $; + } +}; + +$root.tflite.BroadcastToOptions = class BroadcastToOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.BroadcastToOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.BroadcastToOptions(); + return $; + } +}; + +$root.tflite.Rfft2dOptions = class Rfft2dOptions { + + static decode(/* reader, position */) { + const $ = new 
$root.tflite.Rfft2dOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.Rfft2dOptions(); + return $; + } +}; + +$root.tflite.HashtableOptions = class HashtableOptions { + + static decode(reader, position) { + const $ = new $root.tflite.HashtableOptions(); + $.table_id = reader.int32_(position, 4, 0); + $.key_dtype = reader.int8_(position, 6, 0); + $.value_dtype = reader.int8_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.HashtableOptions(); + $.table_id = reader.value(json.table_id, 0); + $.key_dtype = $root.tflite.TensorType[json.key_dtype]; + $.value_dtype = $root.tflite.TensorType[json.value_dtype]; + return $; + } +}; + +$root.tflite.HashtableFindOptions = class HashtableFindOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.HashtableFindOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.HashtableFindOptions(); + return $; + } +}; + +$root.tflite.HashtableImportOptions = class HashtableImportOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.HashtableImportOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.HashtableImportOptions(); + return $; + } +}; + +$root.tflite.HashtableSizeOptions = class HashtableSizeOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.HashtableSizeOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.HashtableSizeOptions(); + return $; + } +}; + +$root.tflite.VarHandleOptions = class VarHandleOptions { + + static decode(reader, position) { + const $ = new $root.tflite.VarHandleOptions(); + $.container = reader.string_(position, 4, null); + $.shared_name = reader.string_(position, 6, null); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.VarHandleOptions(); + $.container = reader.value(json.container, null); + $.shared_name = reader.value(json.shared_name, null); + return $; + } +}; + +$root.tflite.ReadVariableOptions = class ReadVariableOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.ReadVariableOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.ReadVariableOptions(); + return $; + } +}; + +$root.tflite.AssignVariableOptions = class AssignVariableOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.AssignVariableOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.AssignVariableOptions(); + return $; + } +}; + +$root.tflite.RandomOptions = class RandomOptions { + + static decode(reader, position) { + const $ = new $root.tflite.RandomOptions(); + $.seed = reader.int64_(position, 4, 0); + $.seed2 = reader.int64_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.RandomOptions(); + $.seed = reader.value(json.seed, 0); + $.seed2 = reader.value(json.seed2, 0); + return $; + } +}; + +$root.tflite.BucketizeOptions = class BucketizeOptions { + + static decode(reader, position) { + const $ = new $root.tflite.BucketizeOptions(); + $.boundaries = reader.typedArray(position, 4, Float32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.BucketizeOptions(); + $.boundaries = reader.typedArray(json.boundaries, Float32Array); + return $; + } +}; + 
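+// Note: in these generated decoders the second argument to reader.int32_(), +// reader.bool_(), reader.string_(), etc. is the field's FlatBuffers vtable +// offset (4, 6, 8, ...) and the third is the schema default returned when the +// field is absent from the buffer. + 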
+$root.tflite.GeluOptions = class GeluOptions { + + static decode(reader, position) { + const $ = new $root.tflite.GeluOptions(); + $.approximate = reader.bool_(position, 4, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.GeluOptions(); + $.approximate = reader.value(json.approximate, false); + return $; + } +}; + +$root.tflite.DynamicUpdateSliceOptions = class DynamicUpdateSliceOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.DynamicUpdateSliceOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.DynamicUpdateSliceOptions(); + return $; + } +}; + +$root.tflite.UnsortedSegmentProdOptions = class UnsortedSegmentProdOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.UnsortedSegmentProdOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.UnsortedSegmentProdOptions(); + return $; + } +}; + +$root.tflite.UnsortedSegmentMaxOptions = class UnsortedSegmentMaxOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.UnsortedSegmentMaxOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.UnsortedSegmentMaxOptions(); + return $; + } +}; + +$root.tflite.UnsortedSegmentSumOptions = class UnsortedSegmentSumOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.UnsortedSegmentSumOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.UnsortedSegmentSumOptions(); + return $; + } +}; + +$root.tflite.ATan2Options = class ATan2Options { + + static decode(/* reader, position */) { + const $ = new $root.tflite.ATan2Options(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.ATan2Options(); + return $; + } +}; + +$root.tflite.UnsortedSegmentMinOptions = class UnsortedSegmentMinOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.UnsortedSegmentMinOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.UnsortedSegmentMinOptions(); + return $; + } +}; + +$root.tflite.SignOptions = class SignOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.SignOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.SignOptions(); + return $; + } +}; + +$root.tflite.BitcastOptions = class BitcastOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.BitcastOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.BitcastOptions(); + return $; + } +}; + +$root.tflite.BitwiseXorOptions = class BitwiseXorOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.BitwiseXorOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.BitwiseXorOptions(); + return $; + } +}; + +$root.tflite.RightShiftOptions = class RightShiftOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.RightShiftOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.RightShiftOptions(); + return $; + } +}; + +$root.tflite.DilateOptions = class DilateOptions { + + static decode(/* reader, position */) { + const $ = new $root.tflite.DilateOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new 
$root.tflite.DilateOptions(); + return $; + } +}; + +$root.tflite.ReduceWindowFunction = { + UNSUPPORTED: 0, + ADD: 1, + MUL: 2, + MINIMUM: 3, + MAXIMUM: 4, + ALL: 5, + ANY: 6 +}; + +$root.tflite.ReduceWindowOptions = class ReduceWindowOptions { + + static decode(reader, position) { + const $ = new $root.tflite.ReduceWindowOptions(); + $.reduce_function = reader.int32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ReduceWindowOptions(); + $.reduce_function = $root.tflite.ReduceWindowFunction[json.reduce_function]; + return $; + } +}; + +$root.tflite.OperatorCode = class OperatorCode { + + static decode(reader, position) { + const $ = new $root.tflite.OperatorCode(); + $.deprecated_builtin_code = reader.int8_(position, 4, 0); + $.custom_code = reader.string_(position, 6, null); + $.version = reader.int32_(position, 8, 1); + $.builtin_code = reader.int32_(position, 10, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.OperatorCode(); + $.deprecated_builtin_code = reader.value(json.deprecated_builtin_code, 0); + $.custom_code = reader.value(json.custom_code, null); + $.version = reader.value(json.version, 1); + $.builtin_code = $root.tflite.BuiltinOperator[json.builtin_code]; + return $; + } +}; + +$root.tflite.CustomOptionsFormat = { + FLEXBUFFERS: 0 +}; + +$root.tflite.Operator = class Operator { + + static decode(reader, position) { + const $ = new $root.tflite.Operator(); + $.opcode_index = reader.uint32_(position, 4, 0); + $.inputs = reader.typedArray(position, 6, Int32Array); + $.outputs = reader.typedArray(position, 8, Int32Array); + $.builtin_options = reader.union(position, 10, $root.tflite.BuiltinOptions.decode); + $.custom_options = reader.typedArray(position, 14, Uint8Array); + $.custom_options_format = reader.int8_(position, 16, 0); + $.mutating_variable_inputs = reader.bools_(position, 18); + $.intermediates = reader.typedArray(position, 20, Int32Array); + $.large_custom_options_offset = reader.uint64_(position, 22, 0); + $.large_custom_options_size = reader.uint64_(position, 24, 0); + $.builtin_options_2 = reader.union(position, 26, $root.tflite.BuiltinOptions2.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Operator(); + $.opcode_index = reader.value(json.opcode_index, 0); + $.inputs = reader.typedArray(json.inputs, Int32Array); + $.outputs = reader.typedArray(json.outputs, Int32Array); + $.builtin_options = $root.tflite.BuiltinOptions.decodeText(reader, json.builtin_options, json.builtin_options_type); + $.custom_options = reader.typedArray(json.custom_options, Uint8Array); + $.custom_options_format = $root.tflite.CustomOptionsFormat[json.custom_options_format]; + $.mutating_variable_inputs = reader.array(json.mutating_variable_inputs); + $.intermediates = reader.typedArray(json.intermediates, Int32Array); + $.large_custom_options_offset = reader.value(json.large_custom_options_offset, 0); + $.large_custom_options_size = reader.value(json.large_custom_options_size, 0); + $.builtin_options_2 = $root.tflite.BuiltinOptions2.decodeText(reader, json.builtin_options_2, json.builtin_options_2_type); + return $; + } +}; + +$root.tflite.SubGraph = class SubGraph { + + static decode(reader, position) { + const $ = new $root.tflite.SubGraph(); + $.tensors = reader.tableArray(position, 4, $root.tflite.Tensor.decode); + $.inputs = reader.typedArray(position, 6, Int32Array); + $.outputs = reader.typedArray(position, 8, Int32Array); + $.operators = 
reader.tableArray(position, 10, $root.tflite.Operator.decode); + $.name = reader.string_(position, 12, null); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SubGraph(); + $.tensors = reader.objectArray(json.tensors, $root.tflite.Tensor.decodeText); + $.inputs = reader.typedArray(json.inputs, Int32Array); + $.outputs = reader.typedArray(json.outputs, Int32Array); + $.operators = reader.objectArray(json.operators, $root.tflite.Operator.decodeText); + $.name = reader.value(json.name, null); + return $; + } +}; + +$root.tflite.Buffer = class Buffer { + + static decode(reader, position) { + const $ = new $root.tflite.Buffer(); + $.data = reader.typedArray(position, 4, Uint8Array); + $.offset = reader.uint64_(position, 6, 0); + $.size = reader.uint64_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Buffer(); + $.data = reader.typedArray(json.data, Uint8Array); + $.offset = reader.value(json.offset, 0); + $.size = reader.value(json.size, 0); + return $; + } +}; + +$root.tflite.Metadata = class Metadata { + + static decode(reader, position) { + const $ = new $root.tflite.Metadata(); + $.name = reader.string_(position, 4, null); + $.buffer = reader.uint32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Metadata(); + $.name = reader.value(json.name, null); + $.buffer = reader.value(json.buffer, 0); + return $; + } +}; + +$root.tflite.TensorMap = class TensorMap { + + static decode(reader, position) { + const $ = new $root.tflite.TensorMap(); + $.name = reader.string_(position, 4, null); + $.tensor_index = reader.uint32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.TensorMap(); + $.name = reader.value(json.name, null); + $.tensor_index = reader.value(json.tensor_index, 0); + return $; + } +}; + +$root.tflite.SignatureDef = class SignatureDef { + + static decode(reader, position) { + const $ = new $root.tflite.SignatureDef(); + $.inputs = reader.tableArray(position, 4, $root.tflite.TensorMap.decode); + $.outputs = reader.tableArray(position, 6, $root.tflite.TensorMap.decode); + $.signature_key = reader.string_(position, 8, null); + $.deprecated_tag = reader.string_(position, 10, null); + $.subgraph_index = reader.uint32_(position, 12, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SignatureDef(); + $.inputs = reader.objectArray(json.inputs, $root.tflite.TensorMap.decodeText); + $.outputs = reader.objectArray(json.outputs, $root.tflite.TensorMap.decodeText); + $.signature_key = reader.value(json.signature_key, null); + $.deprecated_tag = reader.value(json.deprecated_tag, null); + $.subgraph_index = reader.value(json.subgraph_index, 0); + return $; + } +}; + +$root.tflite.Model = class Model { + + static identifier(reader) { + return reader.identifier === 'TFL3'; + } + + static create(reader) { + return $root.tflite.Model.decode(reader, reader.root); + } + + static createText(reader) { + return $root.tflite.Model.decodeText(reader, reader.root); + } + + static decode(reader, position) { + const $ = new $root.tflite.Model(); + $.version = reader.uint32_(position, 4, 0); + $.operator_codes = reader.tableArray(position, 6, $root.tflite.OperatorCode.decode); + $.subgraphs = reader.tableArray(position, 8, $root.tflite.SubGraph.decode); + $.description = reader.string_(position, 10, null); + $.buffers = reader.tableArray(position, 12, $root.tflite.Buffer.decode); + 
$.metadata_buffer = reader.typedArray(position, 14, Int32Array); + $.metadata = reader.tableArray(position, 16, $root.tflite.Metadata.decode); + $.signature_defs = reader.tableArray(position, 18, $root.tflite.SignatureDef.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Model(); + $.version = reader.value(json.version, 0); + $.operator_codes = reader.objectArray(json.operator_codes, $root.tflite.OperatorCode.decodeText); + $.subgraphs = reader.objectArray(json.subgraphs, $root.tflite.SubGraph.decodeText); + $.description = reader.value(json.description, null); + $.buffers = reader.objectArray(json.buffers, $root.tflite.Buffer.decodeText); + $.metadata_buffer = reader.typedArray(json.metadata_buffer, Int32Array); + $.metadata = reader.objectArray(json.metadata, $root.tflite.Metadata.decodeText); + $.signature_defs = reader.objectArray(json.signature_defs, $root.tflite.SignatureDef.decodeText); + return $; + } +}; + +$root.tflite.AssociatedFileType = { + UNKNOWN: 0, + DESCRIPTIONS: 1, + TENSOR_AXIS_LABELS: 2, + TENSOR_VALUE_LABELS: 3, + TENSOR_AXIS_SCORE_CALIBRATION: 4, + VOCABULARY: 5, + SCANN_INDEX_FILE: 6 +}; + +$root.tflite.AssociatedFile = class AssociatedFile { + + static decode(reader, position) { + const $ = new $root.tflite.AssociatedFile(); + $.name = reader.string_(position, 4, null); + $.description = reader.string_(position, 6, null); + $.type = reader.int8_(position, 8, 0); + $.locale = reader.string_(position, 10, null); + $.version = reader.string_(position, 12, null); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.AssociatedFile(); + $.name = reader.value(json.name, null); + $.description = reader.value(json.description, null); + $.type = $root.tflite.AssociatedFileType[json.type]; + $.locale = reader.value(json.locale, null); + $.version = reader.value(json.version, null); + return $; + } +}; + +$root.tflite.FeatureProperties = class FeatureProperties { + + static decode(/* reader, position */) { + const $ = new $root.tflite.FeatureProperties(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.tflite.FeatureProperties(); + return $; + } +}; + +$root.tflite.ColorSpaceType = { + UNKNOWN: 0, + RGB: 1, + GRAYSCALE: 2 +}; + +$root.tflite.ImageSize = class ImageSize { + + static decode(reader, position) { + const $ = new $root.tflite.ImageSize(); + $.width = reader.uint32_(position, 4, 0); + $.height = reader.uint32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ImageSize(); + $.width = reader.value(json.width, 0); + $.height = reader.value(json.height, 0); + return $; + } +}; + +$root.tflite.ImageProperties = class ImageProperties { + + static decode(reader, position) { + const $ = new $root.tflite.ImageProperties(); + $.color_space = reader.int8_(position, 4, 0); + $.default_size = reader.table(position, 6, $root.tflite.ImageSize.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ImageProperties(); + $.color_space = $root.tflite.ColorSpaceType[json.color_space]; + $.default_size = reader.object(json.default_size, $root.tflite.ImageSize.decodeText); + return $; + } +}; + +$root.tflite.BoundingBoxType = { + UNKNOWN: 0, + BOUNDARIES: 1, + UPPER_LEFT: 2, + CENTER: 3 +}; + +$root.tflite.AudioProperties = class AudioProperties { + + static decode(reader, position) { + const $ = new $root.tflite.AudioProperties(); + $.sample_rate = reader.uint32_(position, 4, 0); + $.channels = 
reader.uint32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.AudioProperties(); + $.sample_rate = reader.value(json.sample_rate, 0); + $.channels = reader.value(json.channels, 0); + return $; + } +}; + +$root.tflite.CoordinateType = { + RATIO: 0, + PIXEL: 1 +}; + +$root.tflite.BoundingBoxProperties = class BoundingBoxProperties { + + static decode(reader, position) { + const $ = new $root.tflite.BoundingBoxProperties(); + $.index = reader.typedArray(position, 4, Uint32Array); + $.type = reader.int8_(position, 6, 0); + $.coordinate_type = reader.int8_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.BoundingBoxProperties(); + $.index = reader.typedArray(json.index, Uint32Array); + $.type = $root.tflite.BoundingBoxType[json.type]; + $.coordinate_type = $root.tflite.CoordinateType[json.coordinate_type]; + return $; + } +}; + +$root.tflite.ContentProperties = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.tflite.FeatureProperties.decode(reader, position); + case 2: return $root.tflite.ImageProperties.decode(reader, position); + case 3: return $root.tflite.BoundingBoxProperties.decode(reader, position); + case 4: return $root.tflite.AudioProperties.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'FeatureProperties': return $root.tflite.FeatureProperties.decodeText(reader, json); + case 'ImageProperties': return $root.tflite.ImageProperties.decodeText(reader, json); + case 'BoundingBoxProperties': return $root.tflite.BoundingBoxProperties.decodeText(reader, json); + case 'AudioProperties': return $root.tflite.AudioProperties.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.tflite.ValueRange = class ValueRange { + + static decode(reader, position) { + const $ = new $root.tflite.ValueRange(); + $.min = reader.int32_(position, 4, 0); + $.max = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ValueRange(); + $.min = reader.value(json.min, 0); + $.max = reader.value(json.max, 0); + return $; + } +}; + +$root.tflite.Content = class Content { + + static decode(reader, position) { + const $ = new $root.tflite.Content(); + $.content_properties = reader.union(position, 4, $root.tflite.ContentProperties.decode); + $.range = reader.table(position, 8, $root.tflite.ValueRange.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Content(); + $.content_properties = $root.tflite.ContentProperties.decodeText(reader, json.content_properties, json.content_properties_type); + $.range = reader.object(json.range, $root.tflite.ValueRange.decodeText); + return $; + } +}; + +$root.tflite.NormalizationOptions = class NormalizationOptions { + + static decode(reader, position) { + const $ = new $root.tflite.NormalizationOptions(); + $.mean = reader.typedArray(position, 4, Float32Array); + $.std = reader.typedArray(position, 6, Float32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.NormalizationOptions(); + $.mean = reader.typedArray(json.mean, Float32Array); + $.std = reader.typedArray(json.std, Float32Array); + return $; + } +}; + +$root.tflite.ScoreTransformationType = { + IDENTITY: 0, + LOG: 1, + INVERSE_LOGISTIC: 2 +}; + +$root.tflite.ScoreCalibrationOptions = class ScoreCalibrationOptions { + + static decode(reader, 
position) { + const $ = new $root.tflite.ScoreCalibrationOptions(); + $.score_transformation = reader.int8_(position, 4, 0); + $.default_score = reader.float32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ScoreCalibrationOptions(); + $.score_transformation = $root.tflite.ScoreTransformationType[json.score_transformation]; + $.default_score = reader.value(json.default_score, 0); + return $; + } +}; + +$root.tflite.ScoreThresholdingOptions = class ScoreThresholdingOptions { + + static decode(reader, position) { + const $ = new $root.tflite.ScoreThresholdingOptions(); + $.global_score_threshold = reader.float32_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ScoreThresholdingOptions(); + $.global_score_threshold = reader.value(json.global_score_threshold, 0); + return $; + } +}; + +$root.tflite.BertTokenizerOptions = class BertTokenizerOptions { + + static decode(reader, position) { + const $ = new $root.tflite.BertTokenizerOptions(); + $.vocab_file = reader.tableArray(position, 4, $root.tflite.AssociatedFile.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.BertTokenizerOptions(); + $.vocab_file = reader.objectArray(json.vocab_file, $root.tflite.AssociatedFile.decodeText); + return $; + } +}; + +$root.tflite.SentencePieceTokenizerOptions = class SentencePieceTokenizerOptions { + + static decode(reader, position) { + const $ = new $root.tflite.SentencePieceTokenizerOptions(); + $.sentencePiece_model = reader.tableArray(position, 4, $root.tflite.AssociatedFile.decode); + $.vocab_file = reader.tableArray(position, 6, $root.tflite.AssociatedFile.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SentencePieceTokenizerOptions(); + $.sentencePiece_model = reader.objectArray(json.sentencePiece_model, $root.tflite.AssociatedFile.decodeText); + $.vocab_file = reader.objectArray(json.vocab_file, $root.tflite.AssociatedFile.decodeText); + return $; + } +}; + +$root.tflite.RegexTokenizerOptions = class RegexTokenizerOptions { + + static decode(reader, position) { + const $ = new $root.tflite.RegexTokenizerOptions(); + $.delim_regex_pattern = reader.string_(position, 4, null); + $.vocab_file = reader.tableArray(position, 6, $root.tflite.AssociatedFile.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.RegexTokenizerOptions(); + $.delim_regex_pattern = reader.value(json.delim_regex_pattern, null); + $.vocab_file = reader.objectArray(json.vocab_file, $root.tflite.AssociatedFile.decodeText); + return $; + } +}; + +$root.tflite.ProcessUnitOptions = class { + + static decode(reader, position, type) { + switch (type) { + case 1: return $root.tflite.NormalizationOptions.decode(reader, position); + case 2: return $root.tflite.ScoreCalibrationOptions.decode(reader, position); + case 3: return $root.tflite.ScoreThresholdingOptions.decode(reader, position); + case 4: return $root.tflite.BertTokenizerOptions.decode(reader, position); + case 5: return $root.tflite.SentencePieceTokenizerOptions.decode(reader, position); + case 6: return $root.tflite.RegexTokenizerOptions.decode(reader, position); + default: return undefined; + } + } + + static decodeText(reader, json, type) { + switch (type) { + case 'NormalizationOptions': return $root.tflite.NormalizationOptions.decodeText(reader, json); + case 'ScoreCalibrationOptions': return $root.tflite.ScoreCalibrationOptions.decodeText(reader, 
json); + case 'ScoreThresholdingOptions': return $root.tflite.ScoreThresholdingOptions.decodeText(reader, json); + case 'BertTokenizerOptions': return $root.tflite.BertTokenizerOptions.decodeText(reader, json); + case 'SentencePieceTokenizerOptions': return $root.tflite.SentencePieceTokenizerOptions.decodeText(reader, json); + case 'RegexTokenizerOptions': return $root.tflite.RegexTokenizerOptions.decodeText(reader, json); + default: return undefined; + } + } +}; + +$root.tflite.ProcessUnit = class ProcessUnit { + + static decode(reader, position) { + const $ = new $root.tflite.ProcessUnit(); + $.options = reader.union(position, 4, $root.tflite.ProcessUnitOptions.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ProcessUnit(); + $.options = $root.tflite.ProcessUnitOptions.decodeText(reader, json.options, json.options_type); + return $; + } +}; + +$root.tflite.Stats = class Stats { + + static decode(reader, position) { + const $ = new $root.tflite.Stats(); + $.max = reader.typedArray(position, 4, Float32Array); + $.min = reader.typedArray(position, 6, Float32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.Stats(); + $.max = reader.typedArray(json.max, Float32Array); + $.min = reader.typedArray(json.min, Float32Array); + return $; + } +}; + +$root.tflite.TensorGroup = class TensorGroup { + + static decode(reader, position) { + const $ = new $root.tflite.TensorGroup(); + $.name = reader.string_(position, 4, null); + $.tensor_names = reader.strings_(position, 6); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.TensorGroup(); + $.name = reader.value(json.name, null); + $.tensor_names = reader.array(json.tensor_names); + return $; + } +}; + +$root.tflite.TensorMetadata = class TensorMetadata { + + static decode(reader, position) { + const $ = new $root.tflite.TensorMetadata(); + $.name = reader.string_(position, 4, null); + $.description = reader.string_(position, 6, null); + $.dimension_names = reader.strings_(position, 8); + $.content = reader.table(position, 10, $root.tflite.Content.decode); + $.process_units = reader.tableArray(position, 12, $root.tflite.ProcessUnit.decode); + $.stats = reader.table(position, 14, $root.tflite.Stats.decode); + $.associated_files = reader.tableArray(position, 16, $root.tflite.AssociatedFile.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.TensorMetadata(); + $.name = reader.value(json.name, null); + $.description = reader.value(json.description, null); + $.dimension_names = reader.array(json.dimension_names); + $.content = reader.object(json.content, $root.tflite.Content.decodeText); + $.process_units = reader.objectArray(json.process_units, $root.tflite.ProcessUnit.decodeText); + $.stats = reader.object(json.stats, $root.tflite.Stats.decodeText); + $.associated_files = reader.objectArray(json.associated_files, $root.tflite.AssociatedFile.decodeText); + return $; + } +}; + +$root.tflite.CustomMetadata = class CustomMetadata { + + static decode(reader, position) { + const $ = new $root.tflite.CustomMetadata(); + $.name = reader.string_(position, 4, null); + $.data = reader.typedArray(position, 6, Uint8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.CustomMetadata(); + $.name = reader.value(json.name, null); + $.data = reader.typedArray(json.data, Uint8Array); + return $; + } +}; + +$root.tflite.SubGraphMetadata = class SubGraphMetadata { + + static 
decode(reader, position) { + const $ = new $root.tflite.SubGraphMetadata(); + $.name = reader.string_(position, 4, null); + $.description = reader.string_(position, 6, null); + $.input_tensor_metadata = reader.tableArray(position, 8, $root.tflite.TensorMetadata.decode); + $.output_tensor_metadata = reader.tableArray(position, 10, $root.tflite.TensorMetadata.decode); + $.associated_files = reader.tableArray(position, 12, $root.tflite.AssociatedFile.decode); + $.input_process_units = reader.tableArray(position, 14, $root.tflite.ProcessUnit.decode); + $.output_process_units = reader.tableArray(position, 16, $root.tflite.ProcessUnit.decode); + $.input_tensor_groups = reader.tableArray(position, 18, $root.tflite.TensorGroup.decode); + $.output_tensor_groups = reader.tableArray(position, 20, $root.tflite.TensorGroup.decode); + $.custom_metadata = reader.tableArray(position, 22, $root.tflite.CustomMetadata.decode); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.SubGraphMetadata(); + $.name = reader.value(json.name, null); + $.description = reader.value(json.description, null); + $.input_tensor_metadata = reader.objectArray(json.input_tensor_metadata, $root.tflite.TensorMetadata.decodeText); + $.output_tensor_metadata = reader.objectArray(json.output_tensor_metadata, $root.tflite.TensorMetadata.decodeText); + $.associated_files = reader.objectArray(json.associated_files, $root.tflite.AssociatedFile.decodeText); + $.input_process_units = reader.objectArray(json.input_process_units, $root.tflite.ProcessUnit.decodeText); + $.output_process_units = reader.objectArray(json.output_process_units, $root.tflite.ProcessUnit.decodeText); + $.input_tensor_groups = reader.objectArray(json.input_tensor_groups, $root.tflite.TensorGroup.decodeText); + $.output_tensor_groups = reader.objectArray(json.output_tensor_groups, $root.tflite.TensorGroup.decodeText); + $.custom_metadata = reader.objectArray(json.custom_metadata, $root.tflite.CustomMetadata.decodeText); + return $; + } +}; + +$root.tflite.ModelMetadata = class ModelMetadata { + + static identifier(reader) { + return reader.identifier === 'M001'; + } + + static create(reader) { + return $root.tflite.ModelMetadata.decode(reader, reader.root); + } + + static createText(reader) { + return $root.tflite.ModelMetadata.decodeText(reader, reader.root); + } + + static decode(reader, position) { + const $ = new $root.tflite.ModelMetadata(); + $.name = reader.string_(position, 4, null); + $.description = reader.string_(position, 6, null); + $.version = reader.string_(position, 8, null); + $.subgraph_metadata = reader.tableArray(position, 10, $root.tflite.SubGraphMetadata.decode); + $.author = reader.string_(position, 12, null); + $.license = reader.string_(position, 14, null); + $.associated_files = reader.tableArray(position, 16, $root.tflite.AssociatedFile.decode); + $.min_parser_version = reader.string_(position, 18, null); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.ModelMetadata(); + $.name = reader.value(json.name, null); + $.description = reader.value(json.description, null); + $.version = reader.value(json.version, null); + $.subgraph_metadata = reader.objectArray(json.subgraph_metadata, $root.tflite.SubGraphMetadata.decodeText); + $.author = reader.value(json.author, null); + $.license = reader.value(json.license, null); + $.associated_files = reader.objectArray(json.associated_files, $root.tflite.AssociatedFile.decodeText); + $.min_parser_version = 
reader.value(json.min_parser_version, null); + return $; + } +}; diff --git a/tflite.js b/tflite.js new file mode 100644 index 00000000000..dba01ba7e48 --- /dev/null +++ b/tflite.js @@ -0,0 +1,645 @@ + +import * as flatbuffers from './flatbuffers.js'; +import * as flexbuffers from './flexbuffers.js'; +import * as zip from './zip.js'; + +const tflite = {}; + +tflite.ModelFactory = class { + + match(context) { + const tags = context.tags('flatbuffers'); + if (tags.get('file_identifier') === 'TFL3') { + return 'tflite.flatbuffers'; + } + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + const stream = context.stream; + if (extension === 'tflite' && stream.length >= 8) { + const buffer = stream.peek(Math.min(32, stream.length)); + const reader = flatbuffers.BinaryReader.open(buffer); + if (reader.root === 0x00000018) { + const version = reader.uint32_(reader.root, 4, 0); + if (version === 3) { + return 'tflite.flatbuffers'; + } + } + } + const obj = context.peek('json'); + if (obj && obj.subgraphs && obj.operator_codes) { + return 'tflite.flatbuffers.json'; + } + return undefined; + } + + async open(context, target) { + await context.require('./tflite-schema'); + tflite.schema = flatbuffers.get('tflite').tflite; + let model = null; + const attachments = new Map(); + switch (target) { + case 'tflite.flatbuffers.json': { + try { + const obj = context.peek('json'); + const reader = new flatbuffers.TextReader(obj); + model = tflite.schema.Model.createText(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new tflite.Error(`File text format is not tflite.Model (${message.replace(/\.$/, '')}).`); + } + break; + } + case 'tflite.flatbuffers': { + const stream = context.stream; + try { + const reader = flatbuffers.BinaryReader.open(stream); + model = tflite.schema.Model.create(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new tflite.Error(`File format is not tflite.Model (${message.replace(/\.$/, '')}).`); + } + try { + const archive = zip.Archive.open(stream); + if (archive) { + for (const [name, value] of archive.entries) { + attachments.set(name, value); + } + } + } catch (error) { + // continue regardless of error + } + break; + } + default: { + throw new tflite.Error(`Unsupported TensorFlow Lite format '${target}'.`); + } + } + const metadata = await context.metadata('tflite-metadata.json'); + return new tflite.Model(metadata, model); + } +}; + +tflite.Model = class { + + constructor(metadata, model) { + this._graphs = []; + this._format = 'TensorFlow Lite'; + this._format = `${this._format} v${model.version}`; + this._description = model.description || ''; + this._metadata = new Map(); + const builtinOperators = new Map(); + const upperCase = new Set([ '2D', 'LSH', 'SVDF', 'RNN', 'L2', 'LSTM' ]); + for (const key of Object.keys(tflite.schema.BuiltinOperator)) { + const value = key === 'BATCH_MATMUL' ? 'BATCH_MAT_MUL' : key; + const name = value.split('_').map((s) => (s.length < 1 || upperCase.has(s)) ? s : s[0] + s.substring(1).toLowerCase()).join(''); + const index = tflite.schema.BuiltinOperator[key]; + builtinOperators.set(index, name); + } + const operators = model.operator_codes.map((operator) => { + const code = Math.max(operator.deprecated_builtin_code, operator.builtin_code || 0); + const version = operator.version; + const custom = code === tflite.schema.BuiltinOperator.CUSTOM; + const name = custom ? 
operator.custom_code ? operator.custom_code : 'Custom' : builtinOperators.has(code) ? builtinOperators.get(code) : code.toString(); + return custom ? { name: name, version: version, custom: true } : { name: name, version: version }; + }); + let modelMetadata = null; + for (const metadata of model.metadata) { + const buffer = model.buffers[metadata.buffer]; + if (buffer) { + switch (metadata.name) { + case 'min_runtime_version': { + const data = buffer.data || new Uint8Array(0); + this._runtime = new TextDecoder().decode(data); + break; + } + case 'TFLITE_METADATA': { + const data = buffer.data || new Uint8Array(0); + const reader = flatbuffers.BinaryReader.open(data); + if (tflite.schema.ModelMetadata.identifier(reader)) { + modelMetadata = tflite.schema.ModelMetadata.create(reader); + if (modelMetadata.name) { + this._name = modelMetadata.name; + } + if (modelMetadata.version) { + this._version = modelMetadata.version; + } + if (modelMetadata.description) { + this._description = this._description ? [ this._description, modelMetadata.description ].join(' ') : modelMetadata.description; + } + if (modelMetadata.author) { + this._metadata.set('author', modelMetadata.author); + } + if (modelMetadata.license) { + this._metadata.set('license', modelMetadata.license); + } + } + break; + } + default: { + break; + } + } + } + } + const subgraphs = model.subgraphs; + const subgraphsMetadata = modelMetadata ? modelMetadata.subgraph_metadata : null; + for (let i = 0; i < subgraphs.length; i++) { + const subgraph = subgraphs[i]; + const name = subgraphs.length > 1 ? i.toString() : ''; + const subgraphMetadata = subgraphsMetadata && i < subgraphsMetadata.length ? subgraphsMetadata[i] : null; + this._graphs.push(new tflite.Graph(metadata, subgraph, subgraphMetadata, name, operators, model)); + } + } + + get format() { + return this._format; + } + + get runtime() { + return this._runtime; + } + + get name() { + return this._name; + } + + get version() { + return this._version; + } + + get description() { + return this._description; + } + + get metadata() { + return this._metadata; + } + + get graphs() { + return this._graphs; + } +}; + +tflite.Graph = class { + + constructor(metadata, subgraph, subgraphMetadata, name, operators, model) { + this._nodes = []; + this._inputs = []; + this._outputs = []; + this._name = subgraph.name || name; + const tensors = new Map(); + const args = (index) => { + if (index === -1) { + return null; + } + if (!tensors.has(index)) { + if (index < subgraph.tensors.length) { + const tensor = subgraph.tensors[index]; + const buffer = model.buffers[tensor.buffer]; + const is_variable = tensor.is_variable; + const data = buffer ? buffer.data : null; + const initializer = (data && data.length > 0) || is_variable ? new tflite.Tensor(index, tensor, buffer, is_variable) : null; + tensors.set(index, new tflite.Value(index, tensor, initializer)); + } else { + tensors.set(index, new tflite.Value(index, { name: '' }, null)); + } + } + return tensors.get(index); + };
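+ // Note: args() lazily creates one tflite.Value per tensor index; tensors backed + // by non-empty buffer data, or marked is_variable, also receive a tflite.Tensor + // initializer.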
+ for (let i = 0; i < subgraph.operators.length; i++) { + const node = subgraph.operators[i]; + const index = node.opcode_index; + const operator = index < operators.length ? operators[index] : { name: `(${index})` }; + this._nodes.push(new tflite.Node(metadata, node, operator, i.toString(), args)); + } + const applyTensorMetadata = (argument, tensorMetadata) => { + if (tensorMetadata) { + const description = tensorMetadata.description; + if (description) { + argument.description = description; + } + const content = tensorMetadata.content; + if (argument.type && content) { + let denotation = null; + const contentProperties = content.content_properties; + if (contentProperties instanceof tflite.schema.FeatureProperties) { + denotation = 'Feature'; + } else if (contentProperties instanceof tflite.schema.ImageProperties) { + denotation = 'Image'; + switch (contentProperties.color_space) { + case 0: denotation += '(Unknown)'; break; + case 1: denotation += '(RGB)'; break; + case 2: denotation += '(Grayscale)'; break; + default: throw new tflite.Error(`Unsupported image color space '${contentProperties.color_space}'.`); + } + } else if (contentProperties instanceof tflite.schema.BoundingBoxProperties) { + denotation = 'BoundingBox'; + } else if (contentProperties instanceof tflite.schema.AudioProperties) { + denotation = `Audio(${contentProperties.sample_rate},${contentProperties.channels})`; + } + if (denotation) { + argument.type.denotation = denotation; + } + } + } + }; + const inputs = subgraph.inputs; + for (let i = 0; i < inputs.length; i++) { + const input = inputs[i]; + const value = args(input); + if (subgraphMetadata && i < subgraphMetadata.input_tensor_metadata.length) { + applyTensorMetadata(value, subgraphMetadata.input_tensor_metadata[i]); + } + this._inputs.push(new tflite.Argument(value ? value.name : '?', true, value ? [ value ] : [])); + } + const outputs = subgraph.outputs; + for (let i = 0; i < outputs.length; i++) { + const output = outputs[i]; + const value = args(output); + if (subgraphMetadata && i < subgraphMetadata.output_tensor_metadata.length) { + applyTensorMetadata(value, subgraphMetadata.output_tensor_metadata[i]); + } + this._outputs.push(new tflite.Argument(value ? value.name : '?', true, value ? [ value ] : [])); + } + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +tflite.Node = class { + + constructor(metadata, node, type, location, args) { + this._location = location; + this._type = type.custom ? { name: type.name, category: 'custom' } : metadata.type(type.name); + this._inputs = []; + this._outputs = []; + this._attributes = []; + if (node) { + const inputs = Array.from(node.inputs || new Int32Array(0)); + const outputs = Array.from(node.outputs || new Int32Array(0)); + let inputIndex = 0; + while (inputIndex < inputs.length) { + let count = 1; + let name = null; + let visible = true; + const values = []; + if (this._type && this._type.inputs && inputIndex < this._type.inputs.length) { + const input = this._type.inputs[inputIndex]; + name = input.name; + if (input.list) { + count = inputs.length - inputIndex; + } + if (input.visible === false) { + visible = false; + } + } + const inputArray = inputs.slice(inputIndex, inputIndex + count); + for (const index of inputArray) { + const value = args(index); + if (value) { + values.push(value); + } + } + inputIndex += count;
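+ // If the operator schema does not name this input, fall back to a positional name.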
+ name = name ? name : inputIndex.toString(); + const argument = new tflite.Argument(name, visible, values); + this._inputs.push(argument); + } + for (let k = 0; k < outputs.length; k++) { + const index = outputs[k]; + const outputArguments = []; + const value = args(index); + if (value) { + outputArguments.push(value); + } + let outputName = k.toString(); + if (this._type && this._type.outputs && k < this._type.outputs.length) { + const output = this._type.outputs[k]; + if (output && output.name) { + outputName = output.name; + } + } + this._outputs.push(new tflite.Argument(outputName, true, outputArguments)); + } + if (type.custom && node.custom_options.length > 0) { + let decoded = false; + if (node.custom_options_format === tflite.schema.CustomOptionsFormat.FLEXBUFFERS) { + try { + const reader = flexbuffers.BinaryReader.open(node.custom_options); + if (reader) { + const custom_options = reader.read(); + if (Array.isArray(custom_options)) { + const attribute = new tflite.Attribute(null, 'custom_options', custom_options); + this._attributes.push(attribute); + decoded = true; + } else if (custom_options) { + for (const [key, value] of Object.entries(custom_options)) { + const schema = metadata.attribute(type.name, key); + const attribute = new tflite.Attribute(schema, key, value); + this._attributes.push(attribute); + } + decoded = true; + } + } + } catch (err) { + // continue regardless of error + } + } + if (!decoded) { + const schema = metadata.attribute(type.name, 'custom'); + this._attributes.push(new tflite.Attribute(schema, 'custom', Array.from(node.custom_options))); + } + } + const options = node.builtin_options; + if (options) { + for (const [name, value] of Object.entries(options)) { + if (name === 'fused_activation_function' && value) { + const activationFunctionMap = { 1: 'Relu', 2: 'ReluN1To1', 3: 'Relu6', 4: 'Tanh', 5: 'SignBit' }; + if (!activationFunctionMap[value]) { + throw new tflite.Error(`Unsupported activation function index '${JSON.stringify(value)}'.`); + } + const type = activationFunctionMap[value]; + this._chain = [ new tflite.Node(metadata, null, { name: type }, null, []) ]; + } + const schema = metadata.attribute(type.name, name); + this._attributes.push(new tflite.Attribute(schema, name, value)); + } + } + } + } + + get type() { + return this._type; + } + + get name() { + return ''; + } + + get location() { + return this._location; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get chain() { + return this._chain; + } + + get attributes() { + return this._attributes; + } +}; + +tflite.Attribute = class { + + constructor(metadata, name, value) { + this._name = name; + this._value = ArrayBuffer.isView(value) ? Array.from(value) : value; + this._type = metadata && metadata.type ? metadata.type : null; + if (this._name === 'fused_activation_function') { + this._visible = false; + } + if (this._type) { + this._value = tflite.Utility.enum(this._type, this._value); + } + if (metadata) { + if (metadata.visible === false) { + this._visible = false; + } else if (metadata.default !== undefined) { + value = this._value; + if (typeof value === 'function') { + value = value(); + } + if (value === metadata.default) { + this._visible = false; + } + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible !== false; + } +}; + +tflite.Argument = class { + + constructor(name, visible, value) { + this._name = name; + this._visible = visible; + this._value = value; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get value() { + return this._value; + } +}; + +tflite.Value = class { + + constructor(index, tensor, initializer) { + const name = tensor.name || ''; + this.name = `${name}\n${index}`; + this.location = index.toString(); + this.type = tensor.type !== undefined && tensor.shape !== undefined ? new tflite.TensorType(tensor) : null; + this.initializer = initializer; + const quantization = tensor.quantization; + if (quantization && (quantization.scale.length > 0 || quantization.zero_point.length > 0 || quantization.min.length > 0 || quantization.max.length > 0)) { + this.quantization = { + type: 'linear', + dimension: quantization.quantized_dimension, + scale: quantization.scale, + offset: quantization.zero_point.map((value) => value.toNumber()), + min: quantization.min, + max: quantization.max + }; + } + } +}; + +tflite.Tensor = class { + + constructor(index, tensor, buffer, is_variable) { + this._location = index.toString(); + this._type = new tflite.TensorType(tensor); + this._is_variable = is_variable; + this._name = tensor.name; + this._data = buffer.data.slice(0); + } + + get category() { + return this._is_variable ? 'Variable' : ''; + } + + get name() { + return this._name; + } + + get location() { + return this._location; + } + + get type() { + return this._type; + } + + get encoding() { + switch (this._type.dataType) { + case 'string': return '|'; + default: return '<'; + } + }
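+ + // Note: TFLite packs a string tensor as an int32 count, one int32 start offset + // per string, then the concatenated UTF-8 bytes; the last string ends at the + // end of the buffer, hence the extra offsetTable entry appended below.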
+ get values() { + switch (this._type.dataType) { + case 'string': { + let offset = 0; + const data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + const count = data.getInt32(0, true); + offset += 4; + const offsetTable = []; + for (let j = 0; j < count; j++) { + offsetTable.push(data.getInt32(offset, true)); + offset += 4; + } + offsetTable.push(this._data.length); + const stringTable = []; + const utf8Decoder = new TextDecoder('utf-8'); + for (let k = 0; k < count; k++) { + const textArray = this._data.subarray(offsetTable[k], offsetTable[k + 1]); + stringTable.push(utf8Decoder.decode(textArray)); + } + return stringTable; + } + default: { + return this._data; + } + } + } +}; + +tflite.TensorType = class { + + constructor(tensor) { + this._dataType = tflite.Utility.dataType(tensor.type); + this._shape = new tflite.TensorShape(Array.from(tensor.shape || [])); + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + set denotation(value) { + this._denotation = value; + } + + get denotation() { + return this._denotation; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +tflite.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (!this._dimensions || this._dimensions.length === 0) { + return ''; + } + return `[${this._dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } +}; + +tflite.Utility = class { + + static dataType(type) { + if (!tflite.Utility._tensorTypeMap) { + tflite.Utility._tensorTypeMap = new Map(Object.entries(tflite.schema.TensorType).map(([key, value]) => [ value, key.toLowerCase() ])); + tflite.Utility._tensorTypeMap.set(6, 'boolean'); + } + return tflite.Utility._tensorTypeMap.has(type) ? 
tflite.Utility._tensorTypeMap.get(type) : '?'; + } + + static enum(name, value) { + const type = name && tflite.schema ? tflite.schema[name] : undefined; + if (type) { + tflite.Utility._enums = tflite.Utility._enums || new Map(); + if (!tflite.Utility._enums.has(name)) { + const entries = new Map(Object.entries(type).map(([key, value]) => [ value, key ])); + tflite.Utility._enums.set(name, entries); + } + const map = tflite.Utility._enums.get(name); + if (map.has(value)) { + return map.get(value); + } + } + return value; + } +}; + +tflite.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading TensorFlow Lite model.'; + } +}; + +export const ModelFactory = tflite.ModelFactory; diff --git a/tnn-metadata.json b/tnn-metadata.json new file mode 100644 index 00000000000..bdd02db2122 --- /dev/null +++ b/tnn-metadata.json @@ -0,0 +1,792 @@ +[ + { + "name": "AbsVal", + "identifier": 0 + }, + { + "name": "Add", + "identifier": 79, + "attributes": [ + { "name": "weight_input_index", "default": 0 } + ] + }, + { + "name": "ArgMax", + "identifier": 1 + }, + { + "name": "BatchNormCxx", + "identifier": 2, + "category": "Normalization" + }, + { + "name": "Bias", + "identifier": 3, + "category": "Layer", + "attributes": [ + { "name": "bias_data_size", "default": 0, "visible": false } + ] + }, + { + "name": "BinaryOp", + "identifier": 40, + "attributes": [ + { "name": "op_type", "type": "int32", "default": 0 }, + { "name": "b", "type": "float32", "default": 0 } + ] + }, + { + "name": "BlobScale", + "identifier": 78, + "category": "Layer" + }, + { + "name": "BNLL", + "identifier": 4 + }, + { + "name": "Cast", + "identifier": 64, + "attributes": [ + { "name": "to", "default": 0 } + ] + }, + { + "name": "Clip", + "identifier": 54, + "attributes": [ + { "name": "min", "default": 0 }, + { "name": "max", "default": 0 } + ] + }, + { + "name": "Concat", + "identifier": 5, + "category": "Tensor", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "Conv3D", + "identifier": 68, + "category": "Layer", + "attributes": [ + { "name": "group", "type": "int32", "default": 0 }, + { "name": "input_channel", "type": "int32", "default": 0 }, + { "name": "output_channel", "type": "int32", "default": 1 }, + { "name": "kernel_w", "type": "int32", "default": 0 }, + { "name": "kernel_h", "type": "int32", "default": 0 }, + { "name": "kernel_d", "type": "int32", "default": 0 }, + { "name": "stride_h", "default": 0, "visible": false }, + { "name": "stride_w", "type": "int32", "default": 0, "visible": false }, + { "name": "stride_d", "type": "int32", "default": 0, "visible": false }, + { "name": "pad_h", "type": "int32", "default": 0 }, + { "name": "pad_w ", "default": 0 }, + { "name": "pad_d ", "default": 0 }, + { "name": "bias", "default": 0 }, + { "name": "pad_type", "default": [] }, + { "name": "dialation_w", "type": "int32", "default": 0 }, + { "name": "dialation_h", "type": "int32", "default": 1 }, + { "name": "dialation_d", "type": "int32", "default": 1 }, + { "name": "activation_type", "type": "int32", "default": 1 } + ] + }, + { + "name": "Convolution", + "identifier": 6, + "category": "Layer", + "attributes": [ + { "name": "group", "type": "int32", "default": 0 }, + { "name": "input_channel", "type": "int32", "default": 0 }, + { "name": "output_channel", "type": "int32", "default": 1 }, + { "name": "kernel_h", "type": "int32", 
"default": 0 }, + { "name": "kernel_w", "type": "int32", "default": 0 }, + { "name": "stride_h", "default": 0, "visible": false }, + { "name": "stride_w", "type": "int32", "default": 0, "visible": false }, + { "name": "pad_h", "type": "int32", "default": 0 }, + { "name": "pad_w ", "default": 0 }, + { "name": "bias", "default": 0 }, + { "name": "pad_type", "default": [] }, + { "name": "dialation_h", "type": "int32", "default": 0 }, + { "name": "dialation_w", "type": "int32", "default": 1 }, + { "name": "activation_type", "type": "int32", "default": 1 } + ] + }, + { + "name": "ConvolutionDepthWise", + "identifier": 42, + "category": "Layer", + "attributes": [ + { "name": "group", "type": "int32", "default": 0 }, + { "name": "input_channel", "type": "int32", "default": 0 }, + { "name": "output_channel", "type": "int32", "default": 1 }, + { "name": "kernel_h", "type": "int32", "default": 0 }, + { "name": "kernel_w", "type": "int32", "default": 0 }, + { "name": "stride_h", "default": 0, "visible": false }, + { "name": "stride_w", "type": "int32", "default": 0, "visible": false }, + { "name": "pad_h", "type": "int32", "default": 0 }, + { "name": "pad_w ", "default": 0 }, + { "name": "bias", "default": 0 }, + { "name": "pad_type", "default": [] }, + { "name": "dialation_h", "type": "int32", "default": 0 }, + { "name": "dialation_w", "type": "int32", "default": 1 }, + { "name": "activation_type", "type": "int32", "default": 1 } + ] + }, + { + "name": "Crop", + "identifier": 7, + "category": "Data" + }, + { + "name": "Deconvolution", + "identifier": 8, + "category": "Layer" + }, + { + "name": "DeconvolutionDepthWise", + "identifier": 51, + "category": "Layer" + }, + { + "name": "Dequantize", + "identifier": 58 + }, + { + "name": "DetectionOutput", + "identifier": 49, + "attributes": [ + { "name": "num_class", "default": 0 }, + { "name": "share_location", "default": 0 }, + { "name": "background_label_id", "default": 0 }, + { "name": "variance_encoded_in_target", "default": 0 }, + { "name": "code_type", "default": 0 }, + { "name": "keep_top_k", "default": 0 }, + { "name": "confidence_threshold", "default": 0 }, + { "name": "nms_param.nms_threshold", "default": 0 }, + { "name": "nms_param.top_k", "default": 0 }, + { "name": "eta", "default": 0 } + ] + }, + { + "name": "Div", + "identifier": 80, + "attributes": [ + { "name": "weight_input_index", "default": 0 } + ] + }, + { + "name": "Dropout", + "identifier": 9, + "category": "Dropout", + "attributes": [ + { "name": "scale", "type": "float32", "default": 1 } + ] + }, + { + "name": "Eltwise", + "identifier": 10 + }, + { + "name": "ELU", + "identifier": 11 + }, + { + "name": "Embed", + "identifier": 12, + "category": "Transform", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "input_dim", "default": 0 }, + { "name": "bias_term", "default": 0 }, + { "name": "weight_data_size", "default": 0 } + ] + }, + { + "name": "Exp", + "identifier": 13 + }, + { + "name": "Exp", + "identifier": 17 + }, + { + "name": "Exp", + "identifier": 19 + }, + { + "name": "Expand", + "identifier": 83, + "category": "Shape", + "attributes": [ + { "name": "shape_size", "default": 0 }, + { "name": "shape", "type": "int32[]", "size": "0" } + ] + }, + { + "name": "ExpandDims", + "identifier": 45 + }, + { + "name": "Flatten", + "identifier": 14, + "category": "Shape", + "attributes": [ + { "name": "axis", "default": 1 }, + { "name": "num_axis", "default": 4 } + ] + }, + { + "name": "Gather", + "identifier": 84, + "category": "Transform", + "attributes": [ + { 
"name": "axis", "default": 0 }, + { "name": "has_data", "default": 0 }, + { "name": "has_indices", "default": 0 } + ] + }, + { + "name": "HardSigmoid", + "identifier": 65, + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 } + ] + }, + { + "name": "HardSwish", + "identifier": 69, + "category": "Layer", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 1 }, + { "name": "beta", "type": "float32", "default": 1 } + ] + }, + { + "name": "HdrGuide", + "identifier": 70, + "category": "Layer" + }, + { + "name": "InnerProduct", + "identifier": 15, + "category": "Layer", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 0 }, + { "name": "has_bias ", "default": 0, "visible": false }, + { "name": "transpose", "default": 0, "visible": false }, + { "name": "axis ", "default": 0 } + ] + }, + { + "name": "Input", + "identifier": 16 + }, + { + "name": "InstanceNorm", + "identifier": 53 + }, + { + "name": "InstBatchNormCxx", + "identifier": 81, + "category": "Normalization" + }, + { + "name": "Interp", + "identifier": 50 + }, + { + "name": "LRN", + "identifier": 18, + "category": "Normalization", + "attributes": [ + { "name": "alpha", "default": 0 }, + { "name": "beta", "default": 0.75 }, + { "name": "bias", "default": 1 }, + { "name": "size", "default": 1 } + ] + }, + { + "name": "LSTM", + "identifier": 39, + "category": "Layer" + }, + { + "name": "MatMul", + "identifier": 86, + "attributes": [ + { "name": "weight_input_index", "default": 0 } + ] + }, + { + "name": "Max", + "identifier": 71, + "category": "Layer", + "attributes": [ + { "name": "weight_input_index", "default": 0 } + ] + }, + { + "name": "Min", + "identifier": 72, + "category": "Layer", + "attributes": [ + { "name": "weight_input_index", "default": 0 } + ] + }, + { + "name": "Mul", + "identifier": 73, + "attributes": [ + { "name": "weight_input_index", "default": 0 } + ] + }, + { + "name": "MVN", + "identifier": 20 + }, + { + "name": "Normalize", + "identifier": 46, + "attributes": [ + { "name": "across_spatial", "default": 0 }, + { "name": "epsilon", "type": "float32", "default": 0 }, + { "name": "channel_shared", "default": 0 }, + { "name": "axis", "default": 0 }, + { "name": "p", "default": 0 } + ] + }, + { + "name": "Packing", + "identifier": 62 + }, + { + "name": "Pad", + "identifier": 43, + "attributes": [ + { "name": " n1", "default": 0 }, + { "name": " n2", "default": 0 }, + { "name": " pad_h", "default": 0 }, + { "name": " pad_b", "default": 0 }, + { "name": " pad_w", "default": 0 }, + { "name": " pad_r", "default": 0 }, + { "name": " c1", "default": 0 }, + { "name": " c2", "default": 0 }, + { "name": " type", "default": 0 } + ] + }, + { + "name": "Permute", + "identifier": 47, + "category": "Shape", + "attributes": [ + { "name": "order_size", "default": 0 }, + { "name": "orders", "type": "int32[]", "size": "0" } + ] + }, + { + "name": "Pooling", + "identifier": 21, + "category": "Pool", + "attributes": [ + { "name": "pool_type", "default": 0 }, + { "name": "kernel_h", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "stride_h", "default": 0 }, + { "name": "stride_w ", "default": 0 }, + { "name": "pad_h", "default": 0 }, + { "name": "pad_w", "default": 0 }, + { "name": "kernel_h_index", "default": 0 }, + { "name": "kernel_w_index", "default": 0 }, + { "name": "pad_type ", "default": 0 }, + { "name": "ceil_mode ", "default": 0 } + ] + }, + { + "name": "Pooling3D", + 
"identifier": 74, + "category": "Pool", + "attributes": [ + { "name": "pool_type", "default": 0 }, + { "name": "kernel_h", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "kernel_d", "default": 0 }, + { "name": "stride_h", "default": 0 }, + { "name": "stride_w ", "default": 0 }, + { "name": "stride_d ", "default": 0 }, + { "name": "pad_h", "default": 0 }, + { "name": "pad_w", "default": 0 }, + { "name": "pad_d", "default": 0 }, + { "name": "kernel_h_index", "default": 0 }, + { "name": "kernel_w_index", "default": 0 }, + { "name": "kernel_d_index", "default": 0 }, + { "name": "pad_type ", "default": 0 }, + { "name": "ceil_mode ", "default": 0 } + ] + }, + { + "name": "Pow", + "identifier": 75, + "category": "Layer", + "attributes": [ + { "name": "exponent", "type": "float32", "default": 0 }, + { "name": "scale ", "type": "float32", "default": 0 }, + { "name": "shift ", "type": "float32", "default": 0 } + ] + }, + { + "name": "Power", + "identifier": 22 + }, + { + "name": "PReLU", + "identifier": 23, + "category": "Activation", + "attributes": [ + { "name": "channel_shared", "default": 0 }, + { "name": "has_filler", "default": 0 } + ] + }, + { + "name": "PriorBox", + "identifier": 48, + "attributes": [ + { "name": "min_size", "default": [] }, + { "name": "max_size", "default": [] }, + { "name": "clip", "default": 1 }, + { "name": "flip", "default": 1 }, + { "name": "varainces0", "type": "float32", "default": 0 }, + { "name": "varainces1", "type": "float32", "default": 0 }, + { "name": "varainces2", "type": "float32", "default": 0 }, + { "name": "varainces3", "type": "float32", "default": 0 }, + { "name": "aspect_ratios", "default": 0 }, + { "name": "img_w", "default": 0 }, + { "name": "img_h", "default": 0 }, + { "name": "step_w", "default": -233 }, + { "name": "step_h", "default": -233 }, + { "name": "offset", "default": 0 } + ] + }, + { + "name": "Proposal", + "identifier": 24 + }, + { + "name": "Quantize", + "identifier": 57 + }, + { + "name": "QuantizedConcat", + "identifier": 5, + "category": "Tensor", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + }, + { + "name": "QuantizedConvolution", + "identifier": 6, + "category": "Layer", + "attributes": [ + { "name": "group", "type": "int32", "default": 0 }, + { "name": "input_channel", "type": "int32", "default": 0 }, + { "name": "output_channel", "type": "int32", "default": 1 }, + { "name": "kernel_h", "type": "int32", "default": 0 }, + { "name": "kernel_w", "type": "int32", "default": 0 }, + { "name": "stride_h", "default": 0, "visible": false }, + { "name": "stride_w", "type": "int32", "default": 0, "visible": false }, + { "name": "pad_h", "type": "int32", "default": 0 }, + { "name": "pad_w ", "default": 0 }, + { "name": "bias", "default": 0 }, + { "name": "pad_type", "default": [] }, + { "name": "dialation_h", "type": "int32", "default": 0 }, + { "name": "dialation_w", "type": "int32", "default": 1 }, + { "name": "activation_type", "type": "int32", "default": 1 } + ] + }, + { + "name": "QuantizedPooling", + "identifier": 21, + "category": "Pool", + "attributes": [ + { "name": "pool_type", "default": 0 }, + { "name": "kernel_h", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "stride_h", "default": 0 }, + { "name": "stride_w ", "default": 0 }, + { "name": "pad_h", "default": 0 }, + { "name": "pad_w", "default": 0 }, + { "name": "kernel_h_index", "default": 0 }, + { 
"name": "kernel_w_index", "default": 0 }, + { "name": "pad_type ", "default": 0 }, + { "name": "ceil_mode ", "default": 0 } + ] + }, + { + "name": "Reduce", + "identifier": 25, + "attributes": [ + { "name": "keep_dims", "default": 0 }, + { "name": "axis", "default": 0 } + ] + }, + { + "name": "ReLU", + "identifier": 26, + "category": "Activation" + }, + { + "name": "ReLU6", + "category": "Activation" + }, + { + "name": "Reorg", + "identifier": 55, + "attributes": [ + { "name": "stride", "default": 0 }, + { "name": "reverse", "default": 0 } + ] + }, + { + "name": "Requantize", + "identifier": 63 + }, + { + "name": "Reshape", + "identifier": 28, + "category": "Shape", + "attributes": [ + { "name": "axis", "default": 0 }, + { "name": "num_axes", "default": 4 }, + { "name": "top_blob_dim_size", "default": -233 }, + { "name": "shape", "type": "int32[]", "size": 2, "default": 233 }, + { "name": "reshape_type", "default": 0 } + ] + }, + { + "name": "RNN", + "identifier": 38, + "category": "Layer" + }, + { + "name": "ROIAlign", + "identifier": 61 + }, + { + "name": "ROIPooling", + "identifier": 27, + "attributes": [ + { "name": "pool_type", "default": 0 }, + { "name": "spatial_scale", "default": 0 }, + { "name": "pooled_w ", "default": 0 }, + { "name": "pooled_h", "default": 0 }, + { "name": "pooled_d", "default": 0 } + ] + }, + { + "name": "ROIPooling", + "identifier": 60, + "attributes": [ + { "name": "pool_type", "default": 0 }, + { "name": "pool_type", "type": "float32", "default": 0 }, + { "name": "pooled_w", "default": 0 }, + { "name": "pooled_h", "default": 0 }, + { "name": "pooled_d", "default": 0 } + ] + }, + { + "name": "Scale", + "identifier": 29, + "category": "Layer", + "attributes": [ + { "name": "axis", "default": 0, "visible": false }, + { "name": "num_axes", "default": 0, "visible": false }, + { "name": "bias_term", "default": 0, "visible": false } + ] + }, + { + "name": "SELU", + "identifier": 66, + "category": "Activation", + "attributes": [ + { "name": "alpha", "default": 0 }, + { "name": "gamma", "default": 0 } + ] + }, + { + "name": "Shuffle", + "identifier": 52, + "attributes": [ + { "name": "group", "default": 1 } + ] + }, + { + "name": "Sigmoid", + "identifier": 30, + "category": "Activation" + }, + { + "name": "Slice", + "identifier": 31, + "category": "Tensor", + "attributes": [ + { "name": "slices", "default": [] }, + { "name": "axis", "default": 1 } + ] + }, + { + "name": "Softmax", + "identifier": 32, + "category": "Activation", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "SoftmaxCaffe", + "identifier": 82, + "category": "Activation", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + }, + { + "name": "Splitv", + "identifier": 33, + "category": "Tensor", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output", "option": "variadic" } + ] + }, + { + "name": "SplitV", + "identifier": 81, + "category": "Layer", + "attributes": [ + { "name": "axis", "default": 1 }, + { "name": "slice_count", "default": 1 } + ] + }, + { + "name": "SPP", + "identifier": 34, + "category": "Activation" + }, + { + "name": "Squeeze", + "identifier": 44, + "category": "Transform", + "attributes": [ + { "name": "axes_size", "default": 0 }, + { "name": "axes", "type": "int32[]", "size": "0" } + ] + }, + { + "name": "StridedSlice", + "identifier": 76, + "category": "Tensor", + "attributes": [ + { "name": "start_size", "default": 0 }, + { "name": "start", "type": "int32[]", "size": "0" }, + { "name": 
"end_size", "default": 0 }, + { "name": "end", "type": "int32[]", "size": "0" }, + { "name": "step_size", "default": 0 }, + { "name": "step", "type": "int32[]", "size": "0" } + ] + }, + { + "name": "StridedSliceV2", + "identifier": 87, + "category": "Tensor", + "attributes": [ + { "name": "start_size", "default": 0 }, + { "name": "start", "type": "int32[]", "size": "0" }, + { "name": "end_size", "default": 0 }, + { "name": "end", "type": "int32[]", "size": "0" }, + { "name": "axes_size", "default": 0 }, + { "name": "axes", "type": "int32[]", "size": "0" }, + { "name": "step_size", "default": 0 }, + { "name": "step", "type": "int32[]", "size": "0" } + ] + }, + { + "name": "Sub", + "identifier": 77, + "category": "Layer", + "attributes": [ + { "name": "weight_input_index", "default": 0 } + ] + }, + { + "name": "Sub", + "identifier": 80, + "attributes": [ + { "name": "weight_input_index", "default": 0 } + ] + }, + { + "name": "Tanh", + "identifier": 35, + "category": "Activation" + }, + { + "name": "Threshold", + "identifier": 36 + }, + { + "name": "Tile", + "identifier": 37 + }, + { + "name": "UnaryOp", + "identifier": 41 + }, + { + "name": "Unsqueeze", + "identifier": 85, + "category": "Shape", + "attributes": [ + { "name": "axes_size", "default": 0 }, + { "name": "axes", "type": "int32[]", "size": "0" } + ] + }, + { + "name": "Upsample", + "identifier": 78, + "category": "Data", + "attributes": [ + { "name": "type", "default": 0 }, + { "name": "scale_h", "type": "float32", "default": 0 }, + { "name": "scale_w", "type": "float32", "default": 0 }, + { "name": "align_corners", "default": 0 }, + { "name": "height", "default": 0 }, + { "name": "width", "default": 0 } + ] + }, + { + "name": "YoloDetectionOutput", + "identifier": 56 + }, + { + "name": "Yolov3DetectionOutput", + "identifier": 59 + } +] \ No newline at end of file diff --git a/tnn.js b/tnn.js new file mode 100644 index 00000000000..eda09c5974d --- /dev/null +++ b/tnn.js @@ -0,0 +1,642 @@ + +import * as base from './base.js'; +import * as text from './text.js'; + +const tnn = {}; + +tnn.ModelFactory = class { + + match(context) { + const identifier = context.identifier.toLowerCase(); + const stream = context.stream; + if (stream && identifier.endsWith('.tnnproto')) { + try { + const buffer = stream.peek(); + const reader = text.Reader.open(buffer, 2048); + const content = reader.read(); + if (content !== undefined) { + const line = content.trim(); + if (line.startsWith('"') && line.endsWith('"')) { + const header = line.replace(/(^")|("$)/g, '').split(',').shift().trim().split(' '); + if (header.length === 3 || (header.length >= 4 && (header[3] === '4206624770' || header[3] == '4206624772'))) { + return 'tnn.model'; + } + } + } + } catch (err) { + // continue regardless of error + } + } + if (stream && identifier.endsWith('.tnnmodel')) { + for (const signature of [ [ 0x02, 0x00, 0xbc, 0xfa ], [ 0x04, 0x00, 0xbc, 0xfa ] ]) { + if (signature.length <= stream.length && stream.peek(signature.length).every((value, index) => value === signature[index])) { + return 'tnn.params'; + } + } + } + return ''; + } + + async open(context, target) { + const metadata = await context.metadata('tnn-metadata.json'); + switch (target) { + case 'tnn.model': { + const name = `${context.identifier.substring(0, context.identifier.length - 9)}.tnnmodel`; + try { + const content = await context.fetch(name); + const buffer = content.stream.peek(); + return new tnn.Model(metadata, context.stream.peek(), buffer); + } catch (error) { + return new 
tnn.Model(metadata, context.stream.peek(), null); + } + } + case 'tnn.params': { + const name = `${context.identifier.substring(0, context.identifier.length - 9)}.tnnproto`; + const content = await context.fetch(name, null); + const buffer = content.stream.peek(); + return new tnn.Model(metadata, buffer, context.stream.peek()); + } + default: { + throw new tnn.Error(`Unsupported TNN format '${target}'.`); + } + } + } +}; + +tnn.Model = class { + + constructor(metadata, tnnproto, tnnmodel) { + this.format = 'TNN'; + this.graphs = [ + new tnn.Graph(metadata, tnnproto, tnnmodel) + ]; + } +}; + +tnn.Graph = class { + + constructor(metadata, tnnproto, tnnmodel) { + this.inputs = []; + this.outputs = []; + this.nodes = []; + const resources = new tnn.LayerResourceReader(tnnmodel); + const reader = new tnn.TextProtoReader(tnnproto); + const values = new Map(); + values.map = (name, type, tensor) => { + if (name.length === 0) { + return new tnn.Value(name, type || null, tensor || null); + } + if (!values.has(name)) { + values.set(name, new tnn.Value(name, type || null, tensor || null)); + } else if (type || tensor) { + throw new tnn.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + for (const input of reader.inputs) { + const shape = new tnn.TensorShape(input.shape); + const type = new tnn.TensorType(input.data_type, shape); + this.inputs.push(new tnn.Argument(input.name, [ values.map(input.name, type) ])); + } + for (const output of reader.outputs) { + this.outputs.push(new tnn.Argument(output.name, [ values.map(output.name) ])); + } + for (const layer of reader.layers) { + this.nodes.push(new tnn.Node(metadata, resources, layer, values)); + } + } +}; + +tnn.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +tnn.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new tnn.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +tnn.Node = class { + + constructor(metadata, resources, layer, values) { + this.inputs = []; + this.outputs = []; + this.attributes = []; + this.name = layer.name; + this.type = metadata.type(layer.type); + const attributeSchemas = this.type && this.type.attributes ? this.type.attributes.slice() : []; + const attributes = layer.attributes.slice(); + while (attributes.length > 0) { + const attributeSchema = attributeSchemas.shift(); + let value = null; + let name = ''; + if (attributeSchema && attributeSchema.type === 'int32[]' && attributeSchema.size) { + name = attributeSchema.name; + value = attributes.splice(0, layer.attr[attributeSchema.size]).map((attribute) => parseInt(attribute.value, 10)); + } else { + const attribute = attributes.shift(); + name = attribute.key; + value = attribute.value; + } + this.attributes.push(new tnn.Attribute(attributeSchema, name, value)); + } + + const inputs = layer.inputs; + let inputIndex = 0; + if (this.type && this.type.inputs) { + for (const inputDef of this.type.inputs) { + if (inputIndex < inputs.length || inputDef.option != 'optional') { + const inputCount = (inputDef.option == 'variadic') ?
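+ // a 'variadic' input definition (e.g. Concat in the metadata) consumes
+ // all remaining value names; every other definition binds exactly one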
(inputs.length - inputIndex) : 1; + const inputArguments = inputs.slice(inputIndex, inputIndex + inputCount).filter((id) => id != '' || inputDef.option != 'optional').map((id) => values.map(id)); + this.inputs.push(new tnn.Argument(inputDef.name, inputArguments)); + inputIndex += inputCount; + } + } + } else { + this.inputs.push(...inputs.slice(inputIndex).map((input, index) => { + const inputName = ((inputIndex + index) == 0) ? 'input' : (inputIndex + index).toString(); + return new tnn.Argument(inputName, [ values.map(input) ]); + })); + } + + const outputs = layer.outputs; + let outputIndex = 0; + if (this.type && this.type.outputs) { + for (const outputDef of this.type.outputs) { + if (outputIndex < outputs.length || outputDef.option != 'optional') { + const outputCount = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1; + const outputArguments = outputs.slice(outputIndex, outputIndex + outputCount).map((id) => values.map(id)); + this.outputs.push(new tnn.Argument(outputDef.name, outputArguments)); + outputIndex += outputCount; + } + } + } else { + this.outputs.push(...outputs.slice(outputIndex).map((output, index) => { + const outputName = ((outputIndex + index) == 0) ? 'output' : (outputIndex + index).toString(); + return new tnn.Argument(outputName, [ values.map(output) ]); + })); + } + const weight = (resource, name, shape) => { + const initializer = resource[name]; + if (!initializer) { + throw new tnn.Error(`Layer initializer '${resource.type}.${name}' not found.`); + } + const tensor = new tnn.Tensor(new tnn.TensorType(initializer.dataType, new tnn.TensorShape(shape)), initializer.value); + this.inputs.push(new tnn.Argument(name, [ values.map('', null, tensor) ])); + }; + switch (this.type.name) { + case 'Convolution': + case 'ConvolutionDepthWise': + case 'Deconvolution': + case 'DeconvolutionDepthWise': { + const resource = resources.read(this.name); + if (resource) { + const num_output = parseInt(layer.attr['2'] || 0, 10); + const kernel_w = parseInt(layer.attr['3'] || 0, 10); + const kernel_h = parseInt(layer.attr['4'] || kernel_w, 10); + const weight_data_size = resource.filter.length; + weight(resource, 'filter', [ num_output, weight_data_size / (num_output * kernel_w * kernel_h), kernel_w, kernel_h ]); + if (resource.bias) { + weight(resource, 'bias', [ num_output ]); + } + if (resource.quantized) { + weight(resource, 'quantized', [ num_output ]); + } + } + break; + } + case 'Conv3D': { + const resource = resources.read(this.name); + if (resource) { + const num_output = parseInt(layer.attr['2'] || 0, 10); + const kernel_w = parseInt(layer.attr['3'] || 0, 10); + const kernel_h = parseInt(layer.attr['4'] || kernel_w, 10); + const kernel_d = parseInt(layer.attr['5'] || kernel_w, 10); + const weight_data_size = resource.filter.length; + weight(resource, 'filter', [ num_output, weight_data_size / (num_output * kernel_w * kernel_h * kernel_d), kernel_w, kernel_h, kernel_d ]); + if (resource.bias) { + weight(resource, 'bias', [ num_output ]); + } + } + break; + } + case 'InnerProduct': { + const resource = resources.read(this.name); + if (resource) { + const num_output = parseInt(layer.attr['0'] || 0, 10); + const weight_data_size = resource.weight.length; + weight(resource, 'weight', [ num_output, weight_data_size / num_output ]); + weight(resource, 'bias', [ num_output ]); + if (resource.weight.dataType === 'int8') { + weight(resource, 'scale', [ num_output ]); + } + } + break; + } + case 'PReLU': { + const resource = resources.read(this.name); + if
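+ // layer resources are consumed in declaration order; without a
+ // .tnnmodel, read() yields undefined and the node keeps no initializers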
(resource) { + weight(resource, 'slope', [ resource.slope.length ]); + } + break; + } + case 'BatchNormCxx': + case 'InstBatchNormCxx': { + const resource = resources.read(this.name); + if (resource) { + weight(resource, 'scale', [ resource.scale.length ]); + weight(resource, 'bias', [ resource.bias.length ]); + } + break; + } + case 'Div': + case 'Sub': + case 'Add': + case 'Mul': + case 'MatMul': { + if (this.inputs.length === 1) { + const resource = resources.read(this.name); + if (resource) { + const num_output = resource.slope.length; + weight(resource, 'slope', [ num_output ]); + } + } + break; + } + case 'HdrGuide': { + const resource = resources.read(this.name); + if (resource) { + const weight_size = resource.ccm_weight.length; + weight(resource, 'ccm_weight', [ weight_size ]); + weight(resource, 'ccm_bias', [ weight_size ]); + weight(resource, 'shifts', [ weight_size ]); + weight(resource, 'slopes', [ weight_size ]); + weight(resource, 'projection_weight', [ weight_size ]); + weight(resource, 'projection_bias', [ weight_size ]); + } + break; + } + case 'BlobScale': { + const resource = resources.read(this.name); + if (resource) { + const scale_data_size = resource.scale.length; + weight(resource, 'scale', [ scale_data_size]); + weight(resource, 'bias', [ scale_data_size ]); + } + break; + } + case 'Gather': { + const resource = resources.read(this.name); + if (resource) { + if (resource.data) { + weight(resource, 'data', [ resource.data.length ]); + } + if (resource.indices) { + weight(resource, 'indices', [ resource.indices.length ]); + } + } + break; + } + default: { + break; + } + } + } +}; + +tnn.Attribute = class { + + constructor(metadata, key, value) { + this.type = ''; + this.name = key.toString(); + this.value = value; + if (metadata) { + this.name = metadata.name; + if (metadata.type) { + this.type = metadata.type; + } + switch (this.type) { + case '': + break; + case 'int32': + this.value = parseInt(this.value, 10); + break; + case 'float32': + this.value = parseFloat(this.value); + break; + case 'int32[]': + this.value = this.value.map((v) => parseInt(v, 10)); + break; + case 'float32[]': + this.value = this.value.map((v) => parseFloat(v)); + break; + default: + throw new tnn.Error(`Unsupported attribute type '${this.type}'.`); + } + if (metadata && metadata.visible === false) { + this.visible = false; + } else if (Object.prototype.hasOwnProperty.call(metadata, 'default')) { + if (this.value == metadata.default || (this.value && this.value.toString() == metadata.default.toString())) { + this.visible = false; + } + } + } + } +}; + +tnn.Tensor = class { + + constructor(type, values) { + this.type = type; + this.values = values; + } +}; + +tnn.TensorType = class { + + constructor(dataType, shape) { + this.dataType = dataType || '?'; + this.shape = shape; + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +tnn.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + return this.dimensions ? (`[${this.dimensions.map((dimension) => dimension ? dimension.toString() : '?').join(',')}]`) : ''; + } +}; + +tnn.TextProtoReader = class { + + constructor(buffer) { + const reader = text.Reader.open(buffer); + let lines = []; + for (;;) { + const line = reader.read(); + if (line === undefined) { + break; + } + lines.push(line.replace(/\r|"/g, '')); + } + const split = (line, delimiter, trim, ignore_blank) => { + return line.split(delimiter).map((v) => trim ? 
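+ // tokenizer for the text proto: the file appears to be one quoted,
+ // comma-separated record whose sections are re-split on ' ' or ':' below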
v.trim() : v).filter((v) => !ignore_blank || v); + }; + lines = split(lines.join(''), ',', true, false); + if (lines.length <= 5) { + throw new tnn.Error('Invalid line count.'); + } + const header = split(lines.shift(), ' ', true, false); + if (header.length < 3) { + throw new tnn.Error('Invalid header size.'); + } else if (header.length > 3 && (header[3] !== '4206624770' && header[3] !== '4206624772')) { + throw new tnn.Error(`Invalid signature '${header[3]}'.`); + } + this.inputs = split(lines.shift(), ':', true, false).map((input) => { + const array = split(input, ' ', true, false); + const name = array.shift(); + if (header[3] === '4206624772') { + const shape_size = parseInt(array.shift(), 10); + const data_type_index = parseInt(array[shape_size], 10); + return { + name: name, + data_type: [ 'float32', 'float16', 'int8', 'int32', 'bfloat16' ][data_type_index], + shape: array.slice(0, -1).map((dim) => parseInt(dim, 10)), + }; + + } + return { + name: name, + data_type: 'float32', + shape: array.map((dim) => parseInt(dim, 10)) + }; + }); + lines.shift(); + this.outputs = split(lines.shift(), ' ', true, false).map((output) => { + return { name: output }; + }); + lines.shift(); + this.layers = []; + while (lines.length > 0) { + const line = lines.shift().trim(); + if (line.length > 0) { + const array = split(line, ' ', true, true); + const layer = {}; + layer.type = array.shift(); + layer.name = array.shift(); + const inputCount = parseInt(array.shift(), 10); + const outputCount = parseInt(array.shift(), 10); + layer.inputs = array.splice(0, inputCount); + layer.outputs = array.splice(0, outputCount); + layer.attr = {}; + layer.attributes = []; + let count = 0; + for (const column of array) { + const parts = column.split(' '); + if (parts.length === 1) { + let key = count; + let value = parts.toString(); + const keyInt = parseInt(key, 10); + if (keyInt < 0) { + value = value.split(',').map((v) => v.trim()); + value.shift(); + key = (-(keyInt + 23300)).toString(); + } + layer.attr[key] = value; + layer.attributes.push({ key: key, value: value }); + count++; + } + } + this.layers.push(layer); + } + } + } +}; + +tnn.LayerResourceReader = class { + + constructor(buffer) { + this.layerResources = []; + if (buffer) { + const reader = new base.BinaryReader(buffer); + const magic_number = reader.uint32(); + if (magic_number !== 0xFABC0002 && magic_number !== 0xFABC0004) { + throw new tnn.Error(`Invalid blob header signature '${magic_number}'.`); + } + this.layerResources = new Array(reader.int32() & 0x1FFFFFFF); + const raw = (reader) => { + const magic_number = reader.uint32(); + if (magic_number !== 0xFABC0002 && magic_number !== 0xFABC0004) { + throw new tnn.Error(`Invalid raw signature '${magic_number}'.`); + } + const data_type = reader.int32(); + if (data_type > 4) { + throw new tnn.Error(`Unsupported data type '${data_type}'.`); + } + const length = reader.int32(); + if (length <= 0) { + return null; + } + let dims = null; + if (magic_number === 0xFABC0004) { + const dim_size = reader.int32(); + dims = reader.read(dim_size * 4); + } + return { + dataType: [ 'float32', 'float16', 'int8', 'int32', 'bfloat16' ][data_type], + length: length / [ 4, 2, 1, 4, 2 ][data_type], + value: reader.read(length), + shape: dims + }; + }; + const expect = (reader, name) => { + const content = reader.string(); + if (name !== content) { + throw new tnn.Error(`Invalid string '${content}' instead of '${name}'.`); + } + }; + for (let i = 0; i < this.layerResources.length; i++) { + const resource = {}; + 
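+ // each resource record: operator int32, type string, name string, then
+ // type-specific tensors via raw() (magic, data type, byte length,
+ // optional dims when the magic is 0xFABC0004, raw payload)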
resource.operator = reader.int32(); + resource.type = reader.string(); + resource.name = reader.string(); + switch (resource.type) { + case 'Convolution': + case 'ConvolutionDepthWise': + case 'Deconvolution': + case 'DeconvolutionDepthWise': { + expect(reader, resource.name); + const bias = reader.int32(); + resource.filter = raw(reader); + if (bias) { + resource.bias = raw(reader); + } + if (resource.filter.dataType === 'int8') { + resource.quantized = raw(reader); + } + break; + } + case 'Conv3D': { + expect(reader, resource.name); + const bias = reader.int32(); + resource.filter = raw(reader); + if (bias) { + resource.bias = raw(reader); + } + break; + } + case 'InnerProduct': { + expect(reader, resource.name); + resource.weight = raw(reader); + resource.bias = raw(reader); + if (resource.weight.dataType === 'int8') { + resource.scale = raw(reader); + } + break; + } + case 'PReLU': { + expect(reader, resource.name); + resource.slope = raw(reader); + break; + } + case 'Add': + case 'Div': + case 'Mul': + case 'Sub': + case 'MatMul': { + resource.slope = raw(reader); + break; + } + case 'BatchNormCxx': + case 'InstBatchNormCxx': + resource.scale = raw(reader); + resource.bias = raw(reader); + break; + case 'HdrGuide': + resource.ccm_weight = raw(reader); + resource.ccm_bias = raw(reader); + resource.shifts = raw(reader); + resource.slopes = raw(reader); + resource.projection_weight = raw(reader); + resource.projection_bias = raw(reader); + break; + case 'BlobScale': + resource.scale = raw(reader); + resource.bias = raw(reader); + break; + case 'Gather': { + // reader.expect(resource.name); + const has_data = reader.int32(); + if (has_data) { + resource.data = raw(reader); + } + const has_indices = reader.int32(); + if (has_indices) { + resource.indices = raw(reader); + } + break; + } + default: { + throw new tnn.Error(`Unsupported layer resource type '${resource.type}'.`); + } + } + this.layerResources[i] = resource; + } + if (reader.position !== reader.length) { + throw new tnn.Error("Invalid blob size."); + } + } + } + + read(name) { + const resource = this.layerResources.shift(); + if (resource && resource.name !== name) { + throw new tnn.Error(`Invalid blob layer name '${name}'.`); + } + return resource; + } +}; + +tnn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading TNN model.'; + } +}; + +export const ModelFactory = tnn.ModelFactory; diff --git a/torch-metadata.json b/torch-metadata.json new file mode 100644 index 00000000000..62130e1954a --- /dev/null +++ b/torch-metadata.json @@ -0,0 +1,471 @@ +[ + { + "name": "cudnn.BatchBRNNReLU", + "category": "Normalization" + }, + { + "name": "cudnn.BatchNormalization", + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "momentum", "default": 0.1 }, + { "name": "eps", "default": 0.00001 } + ] + }, + { + "name": "cudnn.BLSTM", + "category": "Layer" + }, + { + "name": "cudnn.LogSoftMax", + "category": "Activation" + }, + { + "name": "cudnn.ReLU", + "category": "Activation", + "attributes": [ + { "name": "threshold", "default": 0 }, + { "name": "val", "default": 0 }, + { "name": "inplace", "default": false, "visible": false }, + { "name": "mode", "default": "CUDNN_ACTIVATION_RELU" }, + { "name": "nElem", "visible": false } + ] + }, + { + "name": "cudnn.RNN", + "category": "Layer" + }, + { + "name": "cudnn.normal3DConv", + "category": "Layer" + }, + { + "name": "cudnn.SpatialAveragePooling", + "category": "Pool", + "attributes": [ + { 
"name": "ceil_mode", "default": false }, + { "name": "mode", "default": "CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING" }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "count_include_pad", "visible": false } + ] + }, + { + "name": "cudnn.SpatialBatchNormalization", + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "momentum", "default": 0.1 }, + { "name": "eps", "default": 0.00001 }, + { "name": "mode", "default": "CUDNN_BATCHNORM_SPATIAL" }, + { "name": "nDim", "default": 4 }, + { "name": "__shareGradInputKey", "visible": false } + ] + }, + { + "name": "cudnn.SpatialConvolution", + "category": "Layer", + "attributes": [ + { "name": "benchmarked", "visible": false }, + { "name": "input_offset", "visible": false }, + { "name": "output_offset", "visible": false }, + { "name": "weight_offset", "visible": false }, + { "name": "groups", "default": 1 }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "padding", "default": 0 }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false }, + { "name": "convDescData", "visible": false }, + { "name": "autotunerHash", "visible": false }, + { "name": "fmode", "visible": false }, + { "name": "bwmode", "visible": false }, + { "name": "bdmode", "visible": false } + ] + }, + { + "name": "cudnn.SpatialFullConvolution", + "category": "Layer", + "attributes": [ + { "name": "groups", "default": 1 }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "dilation", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "convDescData", "visible": false }, + { "name": "autotunerHash", "visible": false }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false }, + { "name": "input_offset", "visible": false }, + { "name": "output_offset", "visible": false }, + { "name": "weight_offset", "visible": false } + ] + }, + { + "name": "cudnn.SpatialMaxPooling", + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false }, + { "name": "mode", "default": "CUDNN_POOLING_MAX" }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "iheight", "visible": false }, + { "name": "iwidth", "visible": false } + ] + }, + { + "name": "cudnn.VolumetricBatchNormalization", + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "momentum", "default": 0.1 }, + { "name": "eps", "default": 0.00001 } + ] + }, + { + "name": "cudnn.VolumetricMaxPooling", + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false } + ] + }, + { + "name": "inn.SpatialMaxPooling", + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false }, + { "name": "mode", "default": "CUDNN_POOLING_MAX" }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "iheight", "visible": false }, + { "name": "iwidth", "visible": false } + ] + }, + { + "name": "nn.BatchNormalization", + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "momentum", "default": 0.1 }, + { "name": "eps", "default": 0.00001 } + ] + }, + { + "name": "nn.CAddTable" + }, + { + "name": "nn.Concat", + "category": "Tensor", + "attributes": [ + { "name": "outputSize", "visible": false } + ] + }, + { + "name": "nn.ConcatTable" + }, + { + "name": "nn.Contiguous" + }, + { + "name": "nn.Dropout", + "category": "Dropout", + "attributes": [ + { "name": "v2", "visible": 
false } + ] + }, + { + "name": "nn.FlattenTable" + }, + { + "name": "nn.Identity" + }, + { + "name": "nn.InstanceNormalization", + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "nOutput", "visible": false }, + { "name": "prev_batch_size", "visible": false }, + { "name": "eps", "default": 0.00001 }, + { "name": "momentum", "default": 0.1 } + ] + }, + { + "name": "nn.LeakyReLU", + "category": "Activation", + "attributes": [ + { "name": "negval", "default": 0.01, "visible": false }, + { "name": "inplace", "default": false, "visible": false } + ] + }, + { + "name": "nn.Linear", + "category": "Layer" + }, + { + "name": "nn.LinearNoBias", + "category": "Layer" + }, + { + "name": "nn.LogSoftMax", + "category": "Activation" + }, + { + "name": "nn.Mean", + "attributes": [ + { "name": "squeeze", "default": true }, + { "name": "sizeAverage", "default": false }, + { "name": "dimension", "default": 1 }, + { "name": "nInputDims", "visible": false } + ] + }, + { + "name": "nn.MM" + }, + { + "name": "nn.MulConstant", + "attributes": [ + { "name": "inplace", "default": false, "visible": false } + ] + }, + { + "name": "nn.Normalize", + "category": "Normalization", + "attributes": [] + }, + { + "name": "nn.Normalize2", + "category": "Normalization", + "attributes": [] + }, + { + "name": "nn.PReLU", + "category": "Activation" + }, + { + "name": "nn.ReLU", + "category": "Activation", + "attributes": [ + { "name": "threshold", "default": 0 }, + { "name": "val", "default": 0 }, + { "name": "inplace", "default": false, "visible": false }, + { "name": "mode", "default": "CUDNN_ACTIVATION_RELU" }, + { "name": "nElem", "visible": false } + ] + }, + { + "name": "nn.Reshape", + "category": "Shape", + "attributes": [ + { "name": "nelement", "visible": false } + ] + }, + { + "name": "nn.ScaleTable" + }, + { + "name": "nn.SelectTable" + }, + { + "name": "nn.Sequencer" + }, + { + "name": "nn.ShaveImage" + }, + { + "name": "nn.Sigmoid", + "category": "Activation", + "attributes": [ + { "name": "mode", "default": "CUDNN_ACTIVATION_SIGMOID" }, + { "name": "nElem", "visible": false } + ] + }, + { + "name": "nn.SoftMax", + "category": "Activation" + }, + { + "name": "nn.SpatialAveragePooling", + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false }, + { "name": "mode", "default": "CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING" }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "count_include_pad", "visible": false } + ] + }, + { + "name": "nn.SpatialBatchNormalization", + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "momentum", "default": 0.1 }, + { "name": "eps", "default": 0.00001 }, + { "name": "mode", "default": "CUDNN_BATCHNORM_SPATIAL" }, + { "name": "nDim", "default": 4 }, + { "name": "__shareGradInputKey", "visible": false } + ] + }, + { + "name": "nn.SpatialConvolution", + "category": "Layer", + "attributes": [ + { "name": "benchmarked", "visible": false }, + { "name": "input_offset", "visible": false }, + { "name": "output_offset", "visible": false }, + { "name": "weight_offset", "visible": false }, + { "name": "groups", "default": 1 }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "padding", "default": 0 }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false }, + { "name": "convDescData", "visible": false }, + { "name": "autotunerHash", "visible": 
false }, + { "name": "fmode", "visible": false }, + { "name": "bwmode", "visible": false }, + { "name": "bdmode", "visible": false } + ] + }, + { + "name": "nn.SpatialConvolutionMM", + "category": "Layer", + "attributes": [ + { "name": "groups", "default": 1 }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "padding", "default": 0 }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false } + ] + }, + { + "name": "nn.SpatialCrossMapLRN", + "category": "Normalization", + "attributes": [] + }, + { + "name": "nn.SpatialDilatedConvolution", + "category": "Layer", + "attributes": [ + { "name": "d", "default": [ 1, 1 ] }, + { "name": "dilation", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false } + ] + }, + { + "name": "nn.SpatialDropout", + "category": "Dropout" + }, + { + "name": "nn.SpatialFractionalMaxPooling", + "category": "Pool" + }, + { + "name": "nn.SpatialFullConvolution", + "category": "Layer", + "attributes": [ + { "name": "groups", "default": 1 }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "dilation", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "convDescData", "visible": false }, + { "name": "autotunerHash", "visible": false }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false }, + { "name": "input_offset", "visible": false }, + { "name": "output_offset", "visible": false }, + { "name": "weight_offset", "visible": false } + ] + }, + { + "name": "nn.SpatialMaxPooling", + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false }, + { "name": "mode", "default": "CUDNN_POOLING_MAX" }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "iheight", "visible": false }, + { "name": "iwidth", "visible": false } + ] + }, + { + "name": "nn.SpatialMaxUnpooling", + "category": "Pool", + "attributes": [ + { "name": "oheight", "visible": false }, + { "name": "owidth", "visible": false } + ] + }, + { + "name": "nn.SpatialReflectionPadding", + "category": "Tensor", + "attributes": [] + }, + { + "name": "nn.SpatialReplicationPadding", + "category": "Tensor", + "attributes": [] + }, + { + "name": "nn.SpatialSubtractiveNormalization", + "category": "Normalization" + }, + { + "name": "nn.SpatialZeroPadding", + "category": "Tensor", + "attributes": [] + }, + { + "name": "nn.SplitTable" + }, + { + "name": "nn.Squeeze", + "category": "Transform" + }, + { + "name": "nn.Tanh", + "category": "Activation", + "attributes": [ + { "name": "mode", "default": "CUDNN_ACTIVATION_TANH" }, + { "name": "nElem", "visible": false } + ] + }, + { + "name": "nn.TotalVariation" + }, + { + "name": "nn.Transpose", + "category": "Transform" + }, + { + "name": "nn.View" + }, + { + "name": "nn.VolumetricAveragePooling", + "category": "Pool" + }, + { + "name": "nn.VolumetricBatchNormalization", + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "momentum", "default": 0.1 }, + { "name": "eps", "default": 0.00001 } + ] + }, + { + "name": "nn.VolumetricConvolution", + "category": "Layer" + }, + { + "name": "nn.VolumetricMaxPooling", + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false } + ] + }, + { + "name": "w2nn.ScaleTable" + } +] \ No newline at end of file diff --git a/torch.js b/torch.js new file mode 100644 index 00000000000..568c1bc48f1 --- /dev/null +++ 
b/torch.js @@ -0,0 +1,1134 @@ + +import * as base from './base.js'; + +const torch = {}; + +torch.ModelFactory = class { + + match(context) { + return torch.T7Reader.open(context); + } + + async open(context, target) { + const metadata = await context.metadata('torch-metadata.json'); + const reader = target; + reader.callback = (name) => { + if (name && name != 'nn.JointTrainModule' && !name.startsWith('nn.MSDNet_') && !name.startsWith('onmt.')) { + context.exception(new torch.Error(`Unsupported type '${name}'.`)); + } + return null; + }; + const obj = reader.read(); + let graphs = []; + if (obj && Array.isArray(obj) && obj.length >= 2 && + obj.slice(0, obj.length - 1).every((item) => item.__class__) && + !obj[obj.length - 1].__class__) { + graphs = obj.slice(0, obj.length - 1); + } else { + graphs = [ obj ]; + } + return new torch.Model(metadata, graphs); + } +}; + +torch.Model = class { + + constructor(metadata, graphs) { + this.format = 'Torch v7'; + this.graphs = graphs.map((graph, index) => new torch.Graph(metadata, index.toString(), graph)); + } +}; + +torch.Graph = class { + + constructor(metadata, name, root) { + this.name = name; + this.inputs = []; + this.outputs = []; + this.nodes = []; + this.groups = false; + const values = new Map(); + values.map = (name, type, tensor) => { + if (name.length === 0 && tensor) { + return new torch.Value(name, type || null, tensor || null); + } + if (!values.has(name)) { + values.set(name, new torch.Value(name, type || null, tensor || null)); + } else if (type || tensor) { + throw new torch.Error(`Duplicate value '${name}'.`); + } + return values.get(name); + }; + if (Object.prototype.hasOwnProperty.call(root, 'model')) { + root = root.model; + } + const loadModule = (metadata, module, groups, key, inputs, outputs) => { + if (groups.length > 0) { + this.groups = true; + } + const type = module.__class__ ?
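+ // container modules (nn.Sequential, nn.Parallel, nn.Concat, ...) are
+ // flattened recursively; leaf modules become individual graph nodes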
`${module.__class__.__module__}.${module.__class__.__name__}` : ''; + switch (type) { + case 'nn.Sequential': { + groups.push(key); + let subInputs = inputs; + let subOutputs = []; + const length = module.modules.length; + let index = 0; + for (const subModule of module.modules) { + if (index == length - 1) { + subOutputs = outputs; + } + loadModule(metadata, subModule, groups, index.toString(), subInputs, subOutputs); + subInputs = subOutputs; + subOutputs = []; + index++; + } + groups.pop(); + break; + } + case 'nn.Parallel': + case 'nn.ParallelTable': + case 'nn.JointTrain': { + groups.push(key); + let newInputs = []; + let newOutputs = []; + let index = 0; + for (const subModule of module.modules) { + const subInputs = [].concat(inputs); + const subOutputs = [].concat(outputs); + loadModule(metadata, subModule, groups, index.toString(), subInputs, subOutputs); + if (inputs.length == 0) { + newInputs = newInputs.concat(subInputs); + } + if (outputs.length == 0) { + newOutputs = newOutputs.concat(subOutputs); + } + index++; + } + // inputs = inputs.concat(newInputs); + for (const newOutput of newOutputs) { + outputs.push(newOutput); + } + groups.pop(); + break; + } + case 'nn.Concat': + case 'nn.ConcatTable': { + const prefix = key; + if (inputs.length == 0) { + inputs.push(values.map(`${groups.join('/')}:${key}:in`, null, null)); + } + let concatInputs = []; + let index = 0; + for (const subModule of module.modules) { + const streamInputs = inputs.map((input) => input); + const streamOutputs = []; + loadModule(metadata, subModule, groups, `${prefix}.${index}`, streamInputs, streamOutputs); + concatInputs = concatInputs.concat(streamOutputs); + index++; + } + delete module.modules; + delete module.dimension; + const node = new torch.Node(metadata, module, groups, key, inputs, outputs, values); + this.nodes.push(node); + break; + } + case 'nn.Inception': { + delete module.modules; // TODO + delete module.module; // TODO + delete module.transfer; // TODO + delete module.pool; // TODO + const node = new torch.Node(metadata, module, groups, key, inputs, outputs, values); + this.nodes.push(node); + break; + } + case 'nn.gModule': { + /* + let index = 0; + for (const subModule of module.modules) { + subModule.modules = []; + this.loadModule(metadata, subModule, groups, index.toString(), [], []); + index++; + } + */ + const node = new torch.Node(metadata, module, groups, key, inputs, outputs, values); + this.nodes.push(node); + break; + } + default: { + const node = new torch.Node(metadata, module, groups, key, inputs, outputs, values); + this.nodes.push(node); + break; + } + } + }; + const inputs = []; + const outputs = []; + loadModule(metadata, root, [], '', inputs, outputs); + this.inputs = this.inputs.concat(inputs.map((input, index) => { + return new torch.Argument(`input${index != 0 ? (index + 1).toString() : ''}`, [ input ]); + })); + this.outputs = this.outputs.concat(outputs.map((output, index) => { + return new torch.Argument(`output${index != 0 ? (index + 1).toString() : ''}`, [ output ]); + })); + } +}; + +torch.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +torch.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new torch.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = initializer ? 
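+ // a value backed by an initializer reports the tensor's type rather
+ // than the declared one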
initializer.type : type; + this.initializer = initializer; + } +}; + +torch.Node = class { + + constructor(metadata, module, groups, name, inputs, outputs, values) { + this.group = groups.join('/'); + if (module.name && typeof module.name === 'string') { + this.name = module.name; + delete module.name; + } else { + this.name = this.group ? (`${this.group}:${name}`) : name; + } + const type = module.__class__ ? `${module.__class__.__module__}.${module.__class__.__name__}` : 'nn.Module'; + this.type = metadata.type(type); + let initializers = []; + for (const [key, obj] of Object.entries(module)) { + if (obj && obj.__class__ && obj.__class__.__module__ === 'torch' && obj.__class__.__name__.endsWith('Storage')) { + module[key] = obj.data(); + } + } + delete module.iSize; + delete module.finput; + delete module.fgradInput; + delete module.output; + delete module.gradInput; + delete module.gradWeight; + delete module.gradBias; + delete module.grad_tmp; + delete module.scaleT; + delete module._input; + delete module._output; + delete module._gradInput; + delete module._gradOutput; + delete module.buffer; + delete module.buffer2; + delete module.tmp_in; + delete module.tmp_out; + delete module.accUpdateGradParameters; + switch (this.type.name) { + case 'nn.Linear': + delete module.addBuffer; + break; + case 'nn.Normalize': + case 'nn.Normalize2': + delete module.addBuffer; + delete module.normp; + delete module.norm; + break; + case 'cudnn.SpatialConvolution': + case 'cudnn.SpatialFullConvolution': + case 'nn.SpatialConvolution': + case 'nn.SpatialConvolutionMM': + case 'nn.SpatialConvolution1_fw': + case 'nn.SpatialDilatedConvolution': + case 'nn.SpatialFullConvolution': + delete module.ones; + delete module.input_slice; + delete module.output_slice; + delete module.convDescData; + this._updateSize(module, 'adj'); + this._updateSize(module, 'd'); + this._updateSize(module, 'dilation'); + this._updateSize(module, 'k'); + this._updateSize(module, 'pad'); + break; + case 'cudnn.BatchNormalization': + case 'cudnn.SpatialBatchNormalization': + case 'nn.BatchNormalization': + case 'nn.SpatialBatchNormalization': + case 'nn.InstanceNormalization': + delete module.save_mean; + delete module.save_std; + delete module.gradWeight; + delete module.normalized; + delete module.centered; + delete module.bn; // TODO InstanceNormalization + break; + case 'nn.SpatialCrossMapLRN': + delete module.scale; + break; + case 'cudnn.SpatialMaxPooling': + case 'cudnn.SpatialAveragePooling': + case 'inn.SpatialMaxPooling': + case 'nn.SpatialMaxPooling': + case 'nn.SpatialAveragePooling': + delete module.indices; + this._updateSize(module, 'pad'); + this._updateSize(module, 'd'); + this._updateSize(module, 'k'); + break; + case 'nn.SpatialZeroPadding': + case 'nn.SpatialReflectionPadding': + case 'nn.SpatialReplicationPadding': + this._updateBox(module, 'pad'); + break; + case 'nn.Dropout': + delete module.noise; + break; + case 'nn.gModule': + delete module.forwardnodes; + delete module.backwardnodes; + break; + case 'nn.StereoJoin': + delete module.output_L; + break; + default: + break; + } + this.attributes = []; + if (module.__class__) { + for (const [key, obj] of Object.entries(module)) { + if (key == '_type') { + continue; + } + if (Array.isArray(obj) && obj.every(((item) => item && item.__class__ && item.__class__.__module__ === 'nn'))) { + continue; + } + if (obj.__class__ && obj.__class__.__module__ === 'torch' && obj.__class__.__name__.endsWith('Tensor')) { + initializers.push(new torch.Argument(key, [ 
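+ // torch.*Tensor fields become anonymous initializer values; 'weight'
+ // and 'bias' are hoisted to the front of the input list further down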
values.map('', null, new torch.Tensor(obj)) ])); + continue; + } + if (key == 'modules') { + continue; + } + if (obj.__class__ && obj.__class__.__module__ !== '' && obj.__class__.__name__ != 'LuaFunction') { + continue; + } + const attribute = new torch.Attribute(metadata, type, key, obj); + this.attributes.push(attribute); + } + } + this.inputs = []; + if (inputs.length == 0 && this.name) { + inputs.push(values.map(`${this.name}:in`)); + } + this.inputs.push(new torch.Argument('input', inputs)); + if (outputs.length == 0 && this.name) { + outputs.push(values.map(this.name)); + } + this.outputs = []; + this.outputs.push(new torch.Argument('output', outputs)); + initializers = initializers.filter((argument) => { + if (argument.name == 'weight') { + this.inputs.push(argument); + return false; + } + return true; + }); + initializers = initializers.filter((argument) => { + if (argument.name == 'bias') { + this.inputs.push(argument); + return false; + } + return true; + }); + this.inputs = this.inputs.concat(initializers); + } + + _updateSize(module, name) { + if (Object.prototype.hasOwnProperty.call(module, `${name}W`) && + Object.prototype.hasOwnProperty.call(module, `${name}H`)) { + module[name] = [ module[`${name}W`], module[`${name}H`] ]; + delete module[`${name}W`]; + delete module[`${name}H`]; + } + } + + _updateBox(module, name) { + if (Object.prototype.hasOwnProperty.call(module, `${name}_t`) && + Object.prototype.hasOwnProperty.call(module, `${name}_r`) && + Object.prototype.hasOwnProperty.call(module, `${name}_b`) && + Object.prototype.hasOwnProperty.call(module, `${name}_l`)) { + module[name] = [ module[`${name}_t`], module[`${name}_r`], module[`${name}_b`], module[`${name}_l`] ]; + delete module[`${name}_t`]; + delete module[`${name}_r`]; + delete module[`${name}_b`]; + delete module[`${name}_l`]; + } + } +}; + +torch.Attribute = class { + + constructor(metadata, type, name, value) { + this.name = name; + this.value = value; + if (name == 'train') { + this.visible = false; + } + metadata = metadata.attribute(type, name); + if (metadata) { + if (metadata.visible === false) { + this.visible = false; + } else if (Object.prototype.hasOwnProperty.call(metadata, 'default')) { + if (JSON.stringify(metadata.default) == JSON.stringify(this.value)) { + this.visible = false; + } + } + } + } +}; + +torch.Tensor = class { + + constructor(tensor) { + this.type = new torch.TensorType(tensor); + this.encoding = '|'; + this._storage = tensor.storage; + this._offset = tensor.storage_offset; + } + + get values() { + if (this.type.shape.dimensions.length === 0) { + return []; + } + if (this._storage) { + const data = this._storage.data(); + if (data) { + const size = this.type.shape.dimensions.reduce((a, b) => a * b, 1); + return data.slice(this._offset, this._offset + size); + } + } + return null; + } +}; + +torch.TensorType = class { + + constructor(tensor) { + this.dataType = tensor.dataType; + this.shape = new torch.TensorShape(tensor.size); + } + + toString() { + return (this.dataType || '?') + this.shape.toString(); + } +}; + +torch.TensorShape = class { + + constructor(dimensions) { + this.dimensions = dimensions; + } + + toString() { + if (this.dimensions) { + if (this.dimensions.length == 0) { + return ''; + } + return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } + return ''; + } +}; + +torch.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Torch model.'; + } +}; + +torch.T7Reader = class { + + static 
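+ // format sniffing: a binary T7 stream begins with a little-endian int32
+ // type tag (0..8); the ASCII variant begins with that digit plus '\n'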
open(context) { + const stream = context.stream; + if (stream && stream.length >= 4 && stream.peek(4).every((value, index) => value === 0x00 || (index == 0 && value <= 0x08))) { + const reader = new torch.BinaryReader(stream); + return new torch.T7Reader(reader); + } + if (stream && stream.length >= 2) { + const buffer = stream.peek(2); + const value = String.fromCharCode(stream.peek(1)[0]); + if (buffer[1] === 0x0a && (value >= '0' && value <= '8')) { + const reader = new torch.TextReader(stream); + return new torch.T7Reader(reader); + } + } + return null; + } + + constructor(reader) { + this._reader = reader; + this._memo = new Map(); + this._types = new Map(); + const Storage = class { + constructor(dataType, itemSize) { + this.dataType = dataType; + this.itemSize = itemSize; + } + read(reader) { + this.size = reader.int64(); + this.reader = reader.storage(this.size, this.itemSize, this.dataType); + } + data() { + if (this.reader) { + const reader = this.reader; + reader.seek(0); + const dataType = this.dataType; + const size = this.size; + const array = new Array(size); + for (let i = 0; i < size; i++) { + switch (dataType) { + case 'uint8': + array[i] = reader.byte(); + break; + case 'int8': + array[i] = reader.int8(); + break; + case 'int16': + array[i] = reader.int16(); + break; + case 'int32': + array[i] = reader.int32(); + break; + case 'int64': + array[i] = reader.int64(); + break; + case 'float32': + array[i] = reader.float32(); + break; + case 'float64': + array[i] = reader.float64(); + break; + default: + throw new torch.Error(`Unsupported data type '${dataType}'.`); + } + } + this._data = array; + delete this.reader; + } + return this._data; + } + }; + const Tensor = class { + constructor(dataType) { + this.dataType = dataType; + } + read(reader) { + const dim = reader.int32(); + this.size = reader.int64s(dim); + this.stride = reader.int64s(dim); + this.storage_offset = reader.int64() - 1; + this.storage = reader.read(); + } + }; + this.register('bnn.Binary'); + this.register('bnn.SpatialConvolution'); + this.register('cudnn.BatchNormalization'); + this.register('cudnn.BatchBRNNReLU'); + this.register('cudnn.BLSTM'); + this.register('cudnn.ReLU'); + this.register('cudnn.RNN'); + this.register('cudnn.Sigmoid'); + this.register('cudnn.SoftMax'); + this.register('cudnn.LogSoftMax'); + this.register('cudnn.normal3DConv'); + this.register('cudnn.normal3DdeConv'); + this.register('cudnn.SpatialAveragePooling'); + this.register('cudnn.SpatialBatchNormalization'); + this.register('cudnn.SpatialConvolution'); + this.register('cudnn.SpatialFullConvolution'); + this.register('cudnn.SpatialMaxPooling'); + this.register('cudnn.SpatialSoftMax'); + this.register('cudnn.Tanh'); + this.register('cudnn.VolumetricAveragePooling'); + this.register('cudnn.VolumetricBatchNormalization'); + this.register('cudnn.VolumetricConvolution'); + this.register('cudnn.VolumetricMaxPooling'); + this.register('Dict'); + this.register('inn.ConstAffine'); + this.register('inn.SpatialMaxPooling'); + this.register('nn.Abs'); + this.register('nn.AddConstant'); + this.register('nn.BatchNormalization'); + this.register('nn.BilinearSamplerBHWD'); + this.register('nn.BinActiveZ'); // allenai/XNOR-Net + this.register('nn.BCECriterion'); + this.register('nn.Bottle'); + this.register('nn.Clamp'); + this.register('nn.CMul'); + this.register('nn.CAddTable'); + this.register('nn.CDivTable'); + this.register('nn.CMulTable'); + this.register('nn.CSubTable'); + this.register('nn.Concat'); + this.register('nn.Copy'); + 
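+ // register() presumably maps each known class name to a constructor so
+ // deserialization can instantiate it; unknown names are routed through
+ // the callback installed in ModelFactory.open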
this.register('nn.ConcatTable'); + this.register('nn.Contiguous'); + this.register('nn.Constant'); + this.register('nn.CostVolMulti'); + this.register('nn.DataParallelTable'); + this.register('nn.DepthConcat'); + this.register('nn.Dropout'); + this.register('nn.Exp'); + this.register('nn.ExpOut'); + this.register('nn.FlattenTable'); + this.register('nn.GenNoise'); + this.register('nn.Identity'); + this.register('nn.Index'); + this.register('nn.Inception'); + this.register('nn.InstanceNormalization'); + this.register('nn.JoinTable'); + this.register('nn.JointTrain'); + this.register('nn.KeypointCoordinate'); + this.register('nn.LeakyReLU'); + this.register('nn.Linear'); + this.register('nn.LinearNoBias'); + this.register('nn.LogSoftMax'); + this.register('nn.LookupTable'); + this.register('nn.LSTM'); + this.register('nn.MaskZero'); + this.register('nn.MapTable'); + this.register('nn.Max'); + this.register('nn.Mean'); + this.register('nn.Min'); + this.register('nn.MulConstant'); + this.register('nn.MM'); + this.register('nn.MSECriterion'); + this.register('nn.Narrow'); + this.register('nn.NarrowTable'); + this.register('nn.Normalize'); + this.register('nn.Normalize2'); + this.register('nn.NoiseFill'); + this.register('nn.Padding'); + this.register('nn.Parallel'); + this.register('nn.ParallelCriterion'); + this.register('nn.ParallelTable'); + this.register('nn.PixelShuffle'); + this.register('nn.Power'); + this.register('nn.PReLU'); + this.register('nn.Recursor'); + this.register('nn.ReLU'); + this.register('nn.Replicate'); + this.register('nn.Reshape'); + this.register('nn.ShaveImage'); + this.register('nn.Select'); + this.register('nn.SelectTable'); + this.register('nn.Sequencer'); + this.register('nn.Sequential'); + this.register('nn.Sigmoid'); + this.register('nn.Sum'); + this.register('nn.SoftMax'); + this.register('nn.SpatialAveragePooling'); + this.register('nn.SpatialBatchNormalization'); + this.register('nn.SpatialConvolution'); + this.register('nn.SpatialConvolution1_fw'); + this.register('nn.SpatialConvolutionMM'); + this.register('nn.SpatialCrossMapLRN'); + this.register('nn.SpatialDilatedConvolution'); + this.register('nn.SpatialDropout'); + this.register('nn.SpatialFractionalMaxPooling'); + this.register('nn.SpatialFullConvolution'); + this.register('nn.SpatialLPPooling'); + this.register('nn.SpatialMaxPooling'); + this.register('nn.SpatialMaxUnpooling'); + this.register('nn.SpatialReflectionPadding'); + this.register('nn.SpatialReplicationPadding'); + this.register('nn.SpatialSoftMax'); + this.register('nn.SpatialSubtractiveNormalization'); + this.register('nn.SpatialUpSamplingBilinear'); + this.register('nn.SpatialUpSamplingNearest'); + this.register('nn.SpatialZeroPadding'); + this.register('nn.SplitTable'); + this.register('nn.Squeeze'); + this.register('nn.Square'); + this.register('nn.Sqrt'); + this.register('nn.StereoJoin'); + this.register('nn.Tanh'); + this.register('nn.Transpose'); + this.register('nn.TotalVariation'); + this.register('nn.Unpool'); + this.register('nn.View'); + this.register('nn.gModule'); + this.register('nngraph.Node'); + this.register('graph.Edge'); + this.register('graph.Graph'); + this.register('torch.ByteTensor', class extends Tensor { + constructor() { + super('uint8'); + } + }); + this.register('torch.CharTensor', class extends Tensor { + constructor() { + super('int8'); + } + }); + this.register('torch.ShortTensor', class extends Tensor { + constructor() { + super('int16'); + } + }); + this.register('torch.IntTensor', class extends Tensor { + 
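+ // The torch.*Tensor and torch.*Storage registrations map each serialized element type to a dataType string and, for storages, an item size in bytes; the Cuda* variants decode to the same host-side types.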
constructor() { + super('int32'); + } + }); + this.register('torch.LongTensor', class extends Tensor { + constructor() { + super('int64'); + } + }); + this.register('torch.FloatTensor', class extends Tensor { + constructor() { + super('float32'); + } + }); + this.register('torch.DoubleTensor', class extends Tensor { + constructor() { + super('float64'); + } + }); + this.register('torch.CudaByteTensor', class extends Tensor { + constructor() { + super('uint8'); + } + }); + this.register('torch.CudaCharTensor', class extends Tensor { + constructor() { + super('int8'); + } + }); + this.register('torch.CudaShortTensor', class extends Tensor { + constructor() { + super('int16'); + } + }); + this.register('torch.CudaIntTensor', class extends Tensor { + constructor() { + super('int32'); + } + }); + this.register('torch.CudaLongTensor', class extends Tensor { + constructor() { + super('int64'); + } + }); + this.register('torch.CudaTensor', class extends Tensor { + constructor() { + super('float32'); + } + }); + this.register('torch.CudaDoubleTensor', class extends Tensor { + constructor() { + super('float64'); + } + }); + this.register('torch.ByteStorage', class extends Storage { + constructor() { + super('uint8', 1); + } + }); + this.register('torch.CharStorage', class extends Storage { + constructor() { + super('int8', 1); + } + }); + this.register('torch.ShortStorage', class extends Storage { + constructor() { + super('int16', 2); + } + }); + this.register('torch.IntStorage', class extends Storage { + constructor() { + super('int32', 4); + } + }); + this.register('torch.LongStorage', class extends Storage { + constructor() { + super('int64', 8); + } + }); + this.register('torch.FloatStorage', class extends Storage { + constructor() { + super('float32', 4); + } + }); + this.register('torch.DoubleStorage', class extends Storage { + constructor() { + super('float64', 8); + } + }); + this.register('torch.CudaByteStorage', class extends Storage { + constructor() { + super('uint8', 1); + } + }); + this.register('torch.CudaCharStorage', class extends Storage { + constructor() { + super('int8', 1); + } + }); + this.register('torch.CudaShortStorage', class extends Storage { + constructor() { + super('int16', 2); + } + }); + this.register('torch.CudaIntStorage', class extends Storage { + constructor() { + super('int32', 4); + } + }); + this.register('torch.CudaLongStorage', class extends Storage { + constructor() { + super('int64', 8); + } + }); + this.register('torch.CudaStorage', class extends Storage { + constructor() { + super('float32', 4); + } + }); + this.register('torch.CudaDoubleStorage', class extends Storage { + constructor() { + super('float64', 8); + } + }); + this.register('w2nn.AuxiliaryLossTable'); + this.register('w2nn.InplaceClip01'); + this.register('w2nn.ScaleTable'); + this.register('LuaFunction', class { + constructor(size, dumped, upvalues) { + this.size = size; + this.dumped = dumped; + this.upvalues = upvalues; + } + }); + } + + register(name, type) { + type = type || class {}; + const parts = name.split('.'); + type.__name__ = parts.pop(); + type.__module__ = parts.join('.'); + type.prototype.__class__ = type; + this._types.set(name, type); + } + + // T7 type codes: 0 nil, 1 number, 2 string, 3 table, 4 torch object, 5 boolean, 6-8 Lua function variants (all routed to function() below). + read() { + const type = this.int32(); + switch (type) { + case 0: return null; + case 1: return this.float64(); + case 2: return this.string(); + case 3: return this.table(); + case 4: return this.object(); + case 5: return 
this.boolean(); + case 6: return this.function(); + case 7: return this.function(); + case 8: return this.function(); + default: throw new torch.Error(`File format has invalid type '${type}'.`); + } + } + + boolean() { + return this._reader.boolean(); + } + + int32() { + return this._reader.int32(); + } + + int64() { + return this._reader.int64(); + } + + int64s(size) { + return this._reader.int64s(size); + } + + float64() { + return this._reader.float64(); + } + + string() { + return this._reader.string(); + } + + object() { + const index = this.int32(); + if (this._memo.has(index)) { + return this._memo.get(index); + } + + let version = this.string(); + let name = null; + if (version.startsWith('V ')) { + name = this.string(); + version = Number(version.split(' ')[1]); + } else { + name = version; + version = 0; + } + + if (!this._types.has(name)) { + this.callback(name); + this.register(name); + } + const type = this._types.get(name); + const obj = Reflect.construct(type, []); + this._memo.set(index, obj); + if (obj.read) { + obj.read(this, version); + } else { + const attributes = this.read(); + if (attributes != null) { + for (const [key, value] of Object.entries(attributes)) { + obj[key] = value; + } + } + } + return obj; + } + + table() { + const index = this.int32(); + if (this._memo.has(index)) { + return this._memo.get(index); + } + const table = {}; + this._memo.set(index, table); + const size = this.int32(); + let convert = true; + let sum = 0; + for (let i = 0; i < size; i++) { + const key = this.read(); + const value = this.read(); + table[key] = value; + if (Number.isInteger(key) && key >= 0) { + sum += key; + } else { + convert = false; + } + } + const n = Object.keys(table).length; + if (convert && (n * (n + 1)) == (2 * sum)) { + const list = []; + for (let j = 0; j < n; j++) { + let item = table[j + 1]; + if (item == table) { + item = list; + } + list.push(item); + } + this._memo.set(index, list); + return list; + } + return table; + } + + function() { + const index = this.int32(); + if (this._memo.has(index)) { + return this._memo.get(index); + } + const size = this.int32(); + const dumped = this._reader.read(size); + const upvalues = this.read(); + const type = this._types.get('LuaFunction'); + const obj = Reflect.construct(type, [ size, dumped, upvalues ]); + this._memo.set(index, obj); + return obj; + } + + storage(size, itemSize, dataType) { + return this._reader.storage(size, itemSize, dataType); + } +}; + +torch.BinaryReader = class extends base.BinaryReader { + + constructor(data) { + super(data instanceof Uint8Array ? data : data.peek(), true); + this._textDecoder = new TextDecoder('ascii'); + } + + boolean() { + return this.int32() == 1; + } + + read(length) { + const position = this._position; + this.skip(length); + return this._buffer.subarray(position, this._position); + } + + int64s(size) { + const array = []; + for (let i = 0; i < size; i++) { + array.push(this.int64()); + } + return array; + } + + string() { + const size = this.int32(); + const buffer = this.read(size); + return this._textDecoder.decode(buffer); + } + + storage(size, itemSize) { + const buffer = this.read(size * itemSize); + return new torch.BinaryReader(buffer); + } +}; + +torch.TextReader = class { + + constructor(data, separator) { + this._buffer = data instanceof Uint8Array ? 
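+ // Assumed text layout, matching the methods below: every scalar sits on its own '\n'-terminated line, and a non-byte storage payload is one space-separated line, which is why storage() re-parses it with 0x20 as the separator.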
data : data.peek(); + this._position = 0; + this._dataView = new DataView(this._buffer.buffer, this._buffer.byteOffset, this._buffer.byteLength); + this._textDecoder = new TextDecoder('ascii'); + this._separator = separator || 0x0a; + } + + seek(position) { + this._position = position; + } + + line(size) { + const start = this._position; + while (this._position < this._buffer.length && size > -1) { + const c = this._buffer[this._position++]; + if (c == this._separator) { + return this._buffer.slice(start, this._position - 1); + } else if (this._position == this._buffer.length) { + return this._buffer.slice(start, this._position); + } + size--; + } + throw new torch.Error('Line exceeded maximum length.'); + } + + boolean() { + return this.int32() == 1; + } + + read(size) { + return this.line(size); + } + + int8() { + return this.int64(); + } + + int16() { + return this.int64(); + } + + int32() { + return this.int64(); + } + + int64() { + const token = this._textDecoder.decode(this.line(20)); + const number = Number.parseInt(token, 10); + if (Number.isNaN(token - number)) { + throw new torch.Error(`Couldn't parse int64 '${token}'.`); + } + return number; + } + + int64s(size) { + const array = []; + if (size > 0) { + const content = this._textDecoder.decode(this.line(Number.MAX_SAFE_INTEGER)); + for (const token of content.split(' ')) { + const number = Number.parseInt(token, 10); + if (Number.isNaN(token - number)) { + throw new torch.Error(`Couldn't parse int64 '${token}'.`); + } + array.push(number); + } + } + return array; + } + + float32() { + return this.float64(); + } + + float64() { + const token = this._textDecoder.decode(this.line(24)); + if (token.startsWith('-nan')) { + return -NaN; + } + if (token.startsWith('nan')) { + return NaN; + } + if (token.startsWith('inf')) { + return Infinity; + } + if (token.startsWith('-inf')) { + return -Infinity; + } + const number = Number.parseFloat(token); + if (Number.isNaN(token - number)) { + throw new torch.Error(`Couldn't parse float '${token}'.`); + } + return number; + } + + string() { + const size = this.int32(); + if (size == 0) { + return ''; + } + const data = this.line(size); + const content = this._textDecoder.decode(data); + if (size != content.length) { + throw new torch.Error('Invalid string length.'); + } + return content; + } + + storage(size, itemSize, dataType) { + if (size <= 0) { + throw new torch.Error(`Unsupported storage size '${size}'.`); + } + if (dataType === 'uint8') { + const start = this._position; + this._position += size; + const bytes = this._buffer.slice(start, this._position); + this.line(0); + return new torch.BinaryReader(bytes); + } + const data = this.line(Number.MAX_SAFE_INTEGER); + return new torch.TextReader(data, 0x20); + } +}; + +export const ModelFactory = torch.ModelFactory; + diff --git a/uff-metadata.json b/uff-metadata.json new file mode 100644 index 00000000000..ad9f28a035e --- /dev/null +++ b/uff-metadata.json @@ -0,0 +1,144 @@ +[ + { + "name": "_FlattenConcat_TRT", + "category": "Tensor", + "inputs": [ + { "name": "inputs", "list": true } + ] + }, + { + "name": "_GridAnchor_TRT", + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "_MaxPool", + "category": "Pool", + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "_NMS_TRT", + "inputs": [ + { "name": "input" }, + { "name": "?" }, + { "name": "?" 
} + ] + }, + { + "name": "Activation", + "category": "Activation", + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "BatchNorm", + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" }, + { "name": "moving_mean" }, + { "name": "moving_variance" } + ] + }, + { + "name": "Binary", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ] + }, + { + "name": "Concat", + "category": "Tensor", + "inputs": [ + { "name": "inputs", "list": true } + ] + }, + { + "name": "Conv", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "kernel" } + ] + }, + { + "name": "Flatten", + "category": "Shape" + }, + { + "name": "FullyConnected", + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" } + ] + }, + { + "name": "GatherV2", + "category": "Data", + "inputs": [ + { "name": "input" }, + { "name": "indices" } + ] + }, + { + "name": "Pool", + "category": "Pool", + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "Reshape", + "category": "Shape", + "inputs": [ + { "name": "input" }, + { "name": "shape" } + ] + }, + { + "name": "Shape", + "inputs": [ + { "name": "input" } + ] + }, + { + "name": "Softmax", + "category": "Activation" + }, + { + "name": "Squeeze", + "category": "Transform" + }, + { + "name": "Stack", + "inputs": [ + { "name": "input" }, + { "name": "?" }, + { "name": "?" }, + { "name": "?" } + ] + }, + { + "name": "StridedSlice", + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "begin" }, + { "name": "end" }, + { "name": "strides" } + ] + }, + { + "name": "Unary", + "inputs": [ + { "name": "input" } + ] + } +] \ No newline at end of file diff --git a/uff-proto.js b/uff-proto.js new file mode 100644 index 00000000000..8b89dc54f2f --- /dev/null +++ b/uff-proto.js @@ -0,0 +1,986 @@ + +import * as protobuf from './protobuf.js'; + +const $root = protobuf.get('uff'); + +$root.uff = {}; + +$root.uff.MetaGraph = class MetaGraph { + + constructor() { + this.descriptors = []; + this.graphs = []; + this.referenced_data = []; + this.extra_fields = []; + } + + static decode(reader, length) { + const message = new $root.uff.MetaGraph(); + const end = length !== undefined ? 
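+ // Standard protobuf framing: each field is preceded by a varint tag where (tag >>> 3) is the field number and (tag & 7) the wire type, so e.g. the length-delimited 'graphs' field 4 arrives as the tag byte 0x22.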
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.int64(); + break; + case 2: + message.descriptor_core_version = reader.int64(); + break; + case 3: + message.descriptors.push($root.uff.Descriptor.decode(reader, reader.uint32())); + break; + case 4: + message.graphs.push($root.uff.Graph.decode(reader, reader.uint32())); + break; + case 5: + message.referenced_data.push($root.uff.MetaGraph.ReferencedDataEntry.decode(reader, reader.uint32())); + break; + case 100: + message.extra_fields.push($root.uff.MetaGraph.ExtraFieldsEntry.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.MetaGraph(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "version": + message.version = reader.int64(); + break; + case "descriptor_core_version": + message.descriptor_core_version = reader.int64(); + break; + case "descriptors": + message.descriptors.push($root.uff.Descriptor.decodeText(reader)); + break; + case "graphs": + message.graphs.push($root.uff.Graph.decodeText(reader)); + break; + case "referenced_data": + message.referenced_data.push($root.uff.MetaGraph.ReferencedDataEntry.decodeText(reader)); + break; + case "extra_fields": + message.extra_fields.push($root.uff.MetaGraph.ExtraFieldsEntry.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.MetaGraph.prototype.version = protobuf.Int64.create(0); +$root.uff.MetaGraph.prototype.descriptor_core_version = protobuf.Int64.create(0); + +$root.uff.MetaGraph.ReferencedDataEntry = class ReferencedDataEntry { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.uff.MetaGraph.ReferencedDataEntry(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = $root.uff.Data.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.MetaGraph.ReferencedDataEntry(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.string(); + break; + case "value": + message.value = $root.uff.Data.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.MetaGraph.ReferencedDataEntry.prototype.key = ""; +$root.uff.MetaGraph.ReferencedDataEntry.prototype.value = null; + +$root.uff.MetaGraph.ExtraFieldsEntry = class ExtraFieldsEntry { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.uff.MetaGraph.ExtraFieldsEntry(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = $root.uff.Data.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.MetaGraph.ExtraFieldsEntry(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.string(); + break; + case "value": + message.value = $root.uff.Data.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.MetaGraph.ExtraFieldsEntry.prototype.key = ""; +$root.uff.MetaGraph.ExtraFieldsEntry.prototype.value = null; + +$root.uff.Descriptor = class Descriptor { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.uff.Descriptor(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.version = reader.int64(); + break; + case 3: + message.optional = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.Descriptor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "id": + message.id = reader.string(); + break; + case "version": + message.version = reader.int64(); + break; + case "optional": + message.optional = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.Descriptor.prototype.id = ""; +$root.uff.Descriptor.prototype.version = protobuf.Int64.create(0); +$root.uff.Descriptor.prototype.optional = false; + +$root.uff.Graph = class Graph { + + constructor() { + this.nodes = []; + this.extra_fields = []; + } + + static decode(reader, length) { + const message = new $root.uff.Graph(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.nodes.push($root.uff.Node.decode(reader, reader.uint32())); + break; + case 100: + message.extra_fields.push($root.uff.Graph.ExtraFieldsEntry.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.Graph(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "id": + message.id = reader.string(); + break; + case "nodes": + message.nodes.push($root.uff.Node.decodeText(reader)); + break; + case "extra_fields": + message.extra_fields.push($root.uff.Graph.ExtraFieldsEntry.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.Graph.prototype.id = ""; + +$root.uff.Graph.ExtraFieldsEntry = class ExtraFieldsEntry { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.uff.Graph.ExtraFieldsEntry(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = $root.uff.Data.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.Graph.ExtraFieldsEntry(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.string(); + break; + case "value": + message.value = $root.uff.Data.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.Graph.ExtraFieldsEntry.prototype.key = ""; +$root.uff.Graph.ExtraFieldsEntry.prototype.value = null; + +$root.uff.Node = class Node { + + constructor() { + this.inputs = []; + this.fields = []; + this.extra_fields = []; + } + + static decode(reader, length) { + const message = new $root.uff.Node(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.inputs.push(reader.string()); + break; + case 3: + message.operation = reader.string(); + break; + case 4: + message.fields.push($root.uff.Node.FieldsEntry.decode(reader, reader.uint32())); + break; + case 100: + message.extra_fields.push($root.uff.Node.ExtraFieldsEntry.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.Node(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "id": + message.id = reader.string(); + break; + case "inputs": + reader.array(message.inputs, () => reader.string()); + break; + case "operation": + message.operation = reader.string(); + break; + case "fields": + message.fields.push($root.uff.Node.FieldsEntry.decodeText(reader)); + break; + case "extra_fields": + message.extra_fields.push($root.uff.Node.ExtraFieldsEntry.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.Node.prototype.id = ""; +$root.uff.Node.prototype.operation = ""; + +$root.uff.Node.FieldsEntry = class FieldsEntry { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.uff.Node.FieldsEntry(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = $root.uff.Data.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.Node.FieldsEntry(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.string(); + break; + case "value": + message.value = $root.uff.Data.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.Node.FieldsEntry.prototype.key = ""; +$root.uff.Node.FieldsEntry.prototype.value = null; + +$root.uff.Node.ExtraFieldsEntry = class ExtraFieldsEntry { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.uff.Node.ExtraFieldsEntry(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = $root.uff.Data.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.Node.ExtraFieldsEntry(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.string(); + break; + case "value": + message.value = $root.uff.Data.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.Node.ExtraFieldsEntry.prototype.key = ""; +$root.uff.Node.ExtraFieldsEntry.prototype.value = null; + +$root.uff.Data = class Data { + + constructor() { + } + + get type() { + $root.uff.Data.typeSet = $root.uff.Data.typeSet || new Set([ "s", "s_list", "d", "d_list", "b", "b_list", "i", "i_list", "blob", "ref", "dtype", "dtype_list", "dim_orders", "dim_orders_list"]); + return Object.keys(this).find((key) => $root.uff.Data.typeSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.uff.Data(); + const end = length !== undefined ? 
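+ // uff.Data is a oneof-style union: exactly one of the members named in the type getter above is populated; the jump to field numbers 100 and up for ref/dtype/dim_orders mirrors the extra_fields convention used elsewhere in the schema.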
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.s = reader.string(); + break; + case 2: + message.s_list = $root.uff.ListString.decode(reader, reader.uint32()); + break; + case 3: + message.d = reader.double(); + break; + case 4: + message.d_list = $root.uff.ListDouble.decode(reader, reader.uint32()); + break; + case 5: + message.b = reader.bool(); + break; + case 6: + message.b_list = $root.uff.ListBool.decode(reader, reader.uint32()); + break; + case 7: + message.i = reader.int64(); + break; + case 8: + message.i_list = $root.uff.ListInt64.decode(reader, reader.uint32()); + break; + case 9: + message.blob = reader.bytes(); + break; + case 100: + message.ref = reader.string(); + break; + case 101: + message.dtype = reader.int32(); + break; + case 102: + message.dtype_list = $root.uff.ListDataType.decode(reader, reader.uint32()); + break; + case 103: + message.dim_orders = $root.uff.DimensionOrders.decode(reader, reader.uint32()); + break; + case 104: + message.dim_orders_list = $root.uff.ListDimensionOrders.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.Data(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "s": + message.s = reader.string(); + break; + case "s_list": + message.s_list = $root.uff.ListString.decodeText(reader); + break; + case "d": + message.d = reader.double(); + break; + case "d_list": + message.d_list = $root.uff.ListDouble.decodeText(reader); + break; + case "b": + message.b = reader.bool(); + break; + case "b_list": + message.b_list = $root.uff.ListBool.decodeText(reader); + break; + case "i": + message.i = reader.int64(); + break; + case "i_list": + message.i_list = $root.uff.ListInt64.decodeText(reader); + break; + case "blob": + message.blob = reader.bytes(); + break; + case "ref": + message.ref = reader.string(); + break; + case "dtype": + message.dtype = reader.enum($root.uff.DataType); + break; + case "dtype_list": + message.dtype_list = $root.uff.ListDataType.decodeText(reader); + break; + case "dim_orders": + message.dim_orders = $root.uff.DimensionOrders.decodeText(reader); + break; + case "dim_orders_list": + message.dim_orders_list = $root.uff.ListDimensionOrders.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.DataType = { + "DT_INVALID": 0, + "DT_INT8": 65544, + "DT_INT16": 65552, + "DT_INT32": 65568, + "DT_INT64": 65600, + "DT_FLOAT16": 131088, + "DT_FLOAT32": 131104 +}; + +$root.uff.OrderEnum = { + "OE_ZERO": 0, + "OE_SPECIAL": -1, + "OE_INCREMENT": 2147483647, + "OE_DECREMENT": -2147483648 +}; + +$root.uff.DimensionOrders = class DimensionOrders { + + constructor() { + this.orders = []; + } + + static decode(reader, length) { + const message = new $root.uff.DimensionOrders(); + const end = length !== undefined ? 
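+ // Note how the uff.DataType values above encode family and width: the high half is 0x1 for integers and 0x2 for floats, the low half the bit width, e.g. DT_INT32 = 0x10020 (0x20 = 32 bits) and DT_FLOAT16 = 0x20010 (0x10 = 16 bits).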
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.orders.push($root.uff.DimensionOrders.OrdersEntry.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.DimensionOrders(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "orders": + message.orders.push($root.uff.DimensionOrders.OrdersEntry.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.DimensionOrders.OrdersEntry = class OrdersEntry { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.uff.DimensionOrders.OrdersEntry(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.int32(); + break; + case 2: + message.value = $root.uff.ListInt64.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.DimensionOrders.OrdersEntry(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.enum($root.uff.OrderEnum); + break; + case "value": + message.value = $root.uff.ListInt64.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.DimensionOrders.OrdersEntry.prototype.key = 0; +$root.uff.DimensionOrders.OrdersEntry.prototype.value = null; + +$root.uff.ListString = class ListString { + + constructor() { + this.val = []; + } + + static decode(reader, length) { + const message = new $root.uff.ListString(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.ListString(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + reader.array(message.val, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.ListDouble = class ListDouble { + + constructor() { + this.val = []; + } + + static decode(reader, length) { + const message = new $root.uff.ListDouble(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.doubles(message.val, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.ListDouble(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + reader.array(message.val, () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.ListBool = class ListBool { + + constructor() { + this.val = []; + } + + static decode(reader, length) { + const message = new $root.uff.ListBool(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.array(message.val, () => reader.bool(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.ListBool(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + reader.array(message.val, () => reader.bool()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.ListInt64 = class ListInt64 { + + constructor() { + this.val = []; + } + + static decode(reader, length) { + const message = new $root.uff.ListInt64(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.array(message.val, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.ListInt64(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + reader.array(message.val, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.ListDataType = class ListDataType { + + constructor() { + this.val = []; + } + + static decode(reader, length) { + const message = new $root.uff.ListDataType(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val = reader.array(message.val, () => reader.int32(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.ListDataType(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + reader.array(message.val, () => reader.enum($root.uff.DataType)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.uff.ListDimensionOrders = class ListDimensionOrders { + + constructor() { + this.val = []; + } + + static decode(reader, length) { + const message = new $root.uff.ListDimensionOrders(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.val.push($root.uff.DimensionOrders.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.uff.ListDimensionOrders(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "val": + message.val.push($root.uff.DimensionOrders.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; diff --git a/uff.js b/uff.js new file mode 100644 index 00000000000..3d59675f828 --- /dev/null +++ b/uff.js @@ -0,0 +1,281 @@ + +import * as protobuf from './protobuf.js'; + +const uff = {}; + +uff.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'uff' || extension === 'pb') { + const tags = context.tags('pb'); + if (tags.size > 0 && + tags.has(1) && tags.get(1) === 0 && + tags.has(2) && tags.get(2) === 0 && + tags.has(3) && tags.get(3) === 2 && + tags.has(4) && tags.get(4) === 2 && + (!tags.has(5) || tags.get(5) === 2)) { + return 'uff.pb'; + } + } + if (extension === 'pbtxt' || identifier.toLowerCase().endsWith('.uff.txt')) { + const tags = context.tags('pbtxt'); + if (tags.has('version') && tags.has('descriptors') && tags.has('graphs')) { + return 'uff.pbtxt'; + } + } + return undefined; + } + + async open(context, target) { + await context.require('./uff-proto'); + uff.proto = protobuf.get('uff').uff; + let meta_graph = null; + switch (target) { + case 'uff.pb': { + try { + const stream = context.stream; + const reader = protobuf.BinaryReader.open(stream); + meta_graph = uff.proto.MetaGraph.decode(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new uff.Error(`File format is not uff.MetaGraph (${message.replace(/\.$/, '')}).`); + } + break; + } + case 'uff.pbtxt': { + try { + const stream = context.stream; + const reader = protobuf.TextReader.open(stream); + meta_graph = uff.proto.MetaGraph.decodeText(reader); + } catch (error) { + throw new uff.Error(`File text format is not uff.MetaGraph (${error.message}).`); + } + break; + } + default: { + throw new uff.Error(`Unsupported UFF format '${target}'.`); + } + } + const metadata = await context.metadata('uff-metadata.json'); + return new uff.Model(metadata, meta_graph); + } +}; + +uff.Model = class { + + constructor(metadata, meta_graph) { + const version = meta_graph.version; + this.format = `UFF${version ? 
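+ // Any field whose Data carries a 'ref' is resolved below against the MetaGraph's referenced_data table before the graphs are constructed, so weights shared by reference behave like inline blobs.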
` v${version}` : ''}`; + this.imports = meta_graph.descriptors.map((descriptor) => `${descriptor.id} v${descriptor.version}`); + const references = new Map(meta_graph.referenced_data.map((item) => [ item.key, item.value ])); + for (const graph of meta_graph.graphs) { + for (const node of graph.nodes) { + for (const field of node.fields) { + if (field.value.type === 'ref' && references.has(field.value.ref)) { + field.value = references.get(field.value.ref); + } + } + } + } + this.graphs = meta_graph.graphs.map((graph) => new uff.Graph(metadata, graph)); + } +}; + +uff.Graph = class { + + constructor(metadata, graph) { + this.name = graph.id; + this.inputs = []; + this.outputs = []; + this.nodes = []; + const values = new Map(); + const counts = new Map(); + for (const node of graph.nodes) { + for (const input of node.inputs) { + counts.set(input, counts.has(input) ? counts.get(input) + 1 : 1); + values.set(input, new uff.Value(input)); + } + if (!values.has(node.id)) { + values.set(node.id, new uff.Value(node.id)); + } + } + const value = (name) => { + return values.get(name); + }; + for (let i = graph.nodes.length - 1; i >= 0; i--) { + const node = graph.nodes[i]; + if (node.operation === 'Const' && node.inputs.length === 0 && counts.get(node.id) === 1) { + const fields = {}; + for (const field of node.fields) { + fields[field.key] = field.value; + } + if (fields.dtype && fields.shape && fields.values) { + const tensor = new uff.Tensor(fields.dtype.dtype, fields.shape, fields.values); + values.set(node.id, new uff.Value(node.id, tensor.type, tensor)); + graph.nodes.splice(i, 1); + } + } + if (node.operation === 'Input' && node.inputs.length === 0) { + const fields = {}; + for (const field of node.fields) { + fields[field.key] = field.value; + } + const type = fields.dtype && fields.shape ? new uff.TensorType(fields.dtype.dtype, fields.shape) : null; + values.set(node.id, new uff.Value(node.id, type, null)); + } + } + for (const node of graph.nodes) { + if (node.operation === 'Input') { + this.inputs.push(new uff.Argument(node.id, [ values.get(node.id) ])); + continue; + } + if (node.operation === 'MarkOutput' && node.inputs.length === 1) { + this.outputs.push(new uff.Argument(node.id, [ values.get(node.inputs[0]) ])); + continue; + } + this.nodes.push(new uff.Node(metadata, node, value)); + } + } +}; + +uff.Argument = class { + + constructor(name, value, type) { + this.name = name; + this.value = value; + if (type) { + this.type = type; + } + } +}; + +uff.Value = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new uff.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + this.type = type || null; + this.initializer = initializer || null; + } +}; + +uff.Node = class { + + constructor(metadata, node, value) { + this.name = node.id; + this.type = metadata.type(node.operation) || { name: node.operation }; + this.attributes = []; + this.inputs = []; + this.outputs = []; + if (node.inputs && node.inputs.length > 0) { + let index = 0; + if (this.type && this.type.inputs) { + for (const metadata of this.type.inputs) { + if (index < node.inputs.length || metadata.optional !== true) { + const count = metadata.list ? 
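+ // An input declared with 'list': true in uff-metadata.json (e.g. Concat's 'inputs') absorbs all remaining node inputs; scalar inputs consume exactly one each.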
(node.inputs.length - index) : 1; + const values = node.inputs.slice(index, index + count).map((name) => value(name)); + index += count; + const argument = new uff.Argument(metadata.name, values); + this.inputs.push(argument); + } + } + } + this.inputs.push(...node.inputs.slice(index).map((identifier, i) => { + const name = ((index + i) === 0) ? 'input' : (index + i).toString(); + return new uff.Argument(name, [ value(identifier) ]); + })); + } + this.outputs.push(new uff.Argument('output', [ value(node.id) ])); + for (const field of node.fields) { + let value = null; + let type = null; + switch (field.value.type) { + case 's': value = field.value.s; type = 'string'; break; + case 's_list': value = field.value.s_list.val; type = 'string[]'; break; + case 'd': value = field.value.d; type = 'float64'; break; + case 'd_list': value = field.value.d_list.val; type = 'float64[]'; break; + case 'b': value = field.value.b; type = 'boolean'; break; + case 'b_list': value = field.value.b_list.val; type = 'boolean[]'; break; + case 'i': value = field.value.i; type = 'int64'; break; + case 'i_list': value = field.value.i_list.val; type = 'int64[]'; break; + case 'blob': value = field.value.blob; break; + case 'ref': value = field.value.ref; type = 'ref'; break; + case 'dtype': value = new uff.TensorType(field.value.dtype, null).dataType; type = 'uff.DataType'; break; + case 'dtype_list': value = field.value.dtype_list.val.map((item) => new uff.TensorType(item, null).dataType); type = 'uff.DataType[]'; break; + case 'dim_orders': value = field.value.dim_orders; break; + case 'dim_orders_list': value = field.value.dim_orders_list.val; break; + default: throw new uff.Error(`Unsupported attribute '${field.key}' value '${JSON.stringify(field.value)}'.`); + } + const attribute = new uff.Argument(field.key, value, type); + this.attributes.push(attribute); + } + } +}; + +uff.Tensor = class { + + constructor(dataType, shape, values) { + this.type = new uff.TensorType(dataType, shape); + switch (values.type) { + case 'blob': this.values = values.blob; break; + default: throw new uff.Error(`Unsupported values format '${JSON.stringify(values.type)}'.`); + } + if (this.values.length > 8 && + this.values[0] === 0x28 && this.values[1] === 0x2e && this.values[2] === 0x2e && this.values[3] === 0x2e && + this.values[this.values.length - 1] === 0x29 && this.values[this.values.length - 2] === 0x2e && this.values[this.values.length - 3] === 0x2e && this.values[this.values.length - 4] === 0x2e) { + this.values = null; + } + } +}; + +uff.TensorType = class { + + constructor(dataType, shape) { + switch (dataType) { + case uff.proto.DataType.DT_INT8: this.dataType = 'int8'; break; + case uff.proto.DataType.DT_INT16: this.dataType = 'int16'; break; + case uff.proto.DataType.DT_INT32: this.dataType = 'int32'; break; + case uff.proto.DataType.DT_INT64: this.dataType = 'int64'; break; + case uff.proto.DataType.DT_FLOAT16: this.dataType = 'float16'; break; + case uff.proto.DataType.DT_FLOAT32: this.dataType = 'float32'; break; + case 7: this.dataType = '?'; break; + default: throw new uff.Error(`Unsupported data type '${JSON.stringify(dataType)}'.`); + } + this.shape = shape ? 
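+ // (The byte test in uff.Tensor above recognizes ASCII placeholders of the form "(..." ... "...)" (bytes 0x28/0x2e/0x29) left where weight blobs were stripped, and nulls the values out.)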
new uff.TensorShape(shape) : null; + } + + toString() { + return this.dataType + this.shape.toString(); + } +}; + +uff.TensorShape = class { + + constructor(shape) { + if (shape.type !== 'i_list') { + throw new uff.Error(`Unsupported shape format '${JSON.stringify(shape.type)}'.`); + } + this.dimensions = shape.i_list.val; + } + + toString() { + if (this.dimensions && this.dimensions.length > 0) { + return `[${this.dimensions.join(',')}]`; + } + return ''; + } +}; + +uff.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading UFF model.'; + } +}; + +export const ModelFactory = uff.ModelFactory; + diff --git a/view.js b/view.js new file mode 100644 index 00000000000..26a1165e77c --- /dev/null +++ b/view.js @@ -0,0 +1,5956 @@ + +import * as base from './base.js'; +import * as zip from './zip.js'; +import * as tar from './tar.js'; +import * as json from './json.js'; +import * as xml from './xml.js'; +import * as protobuf from './protobuf.js'; +import * as flatbuffers from './flatbuffers.js'; +import * as hdf5 from './hdf5.js'; +import * as python from './python.js'; +import * as grapher from './grapher.js'; + +const view = {}; +const markdown = {}; + +view.View = class { + + constructor(host) { + this._host = host; + this._defaultOptions = { + weights: true, + attributes: false, + names: false, + direction: 'vertical', + mousewheel: 'scroll' + }; + this._options = Object.assign({}, this._defaultOptions); + this._model = null; + this._graphs = []; + this._selection = []; + this._sidebar = new view.Sidebar(this._host); + this._searchText = ''; + this._modelFactoryService = new view.ModelFactoryService(this._host); + } + + async start() { + try { + await zip.Archive.import(); + await this._host.view(this); + const options = this._host.get('options') || {}; + for (const [name, value] of Object.entries(options)) { + this._options[name] = value; + } + this._element('sidebar-button').addEventListener('click', () => { + this.showModelProperties(); + }); + this._element('zoom-in-button').addEventListener('click', () => { + this.zoomIn(); + }); + this._element('zoom-out-button').addEventListener('click', () => { + this.zoomOut(); + }); + this._element('toolbar-path-back-button').addEventListener('click', () => { + this.popGraph(); + }); + // (non-passive: preventDefault must be able to cancel ctrl/shift+wheel page zoom over the sidebar) + this._element('sidebar').addEventListener('mousewheel', (e) => { + if (e.shiftKey || e.ctrlKey) { + e.preventDefault(); + } + }, { passive: false }); + this._host.document.addEventListener('keydown', () => { + if (this._graph) { + this._graph.select(null); + } + }); + const platform = this._host.environment('platform'); + this._menu = new view.Menu(this._host); + this._menu.add({ + accelerator: platform === 'darwin' ? 'Ctrl+Cmd+F' : 'F11', + execute: () => this._host.execute('fullscreen') + }); + this._menu.add({ + accelerator: 'Backspace', + execute: () => this.popGraph() + }); + if (this._host.environment('menu')) { + this._menu.attach(this._element('menu'), this._element('menu-button')); + const file = this._menu.group('&File'); + file.add({ + label: '&Open...', + accelerator: 'CmdOrCtrl+O', + execute: () => this._host.execute('open') + }); + if (this._host.type === 'Electron') { + this._recents = file.group('Open &Recent'); + file.add({ + label: '&Export...', + accelerator: 'CmdOrCtrl+Shift+E', + execute: () => this._host.execute('export'), + enabled: () => this.activeGraph + }); + file.add({ + label: platform === 'darwin' ? 
'&Close Window' : '&Close', + accelerator: 'CmdOrCtrl+W', + execute: () => this._host.execute('close'), + }); + file.add({ + label: platform === 'win32' ? 'E&xit' : '&Quit', + accelerator: platform === 'win32' ? '' : 'CmdOrCtrl+Q', + execute: () => this._host.execute('quit'), + }); + } else { + file.add({ + label: 'Export as &PNG', + accelerator: 'CmdOrCtrl+Shift+E', + execute: () => this.export(`${this._host.document.title}.png`), + enabled: () => this.activeGraph + }); + file.add({ + label: 'Export as &SVG', + accelerator: 'CmdOrCtrl+Alt+E', + execute: () => this.export(`${this._host.document.title}.svg`), + enabled: () => this.activeGraph + }); + } + const edit = this._menu.group('&Edit'); + edit.add({ + label: '&Find...', + accelerator: 'CmdOrCtrl+F', + execute: () => this.find(), + enabled: () => this.activeGraph + }); + const view = this._menu.group('&View'); + view.add({ + label: () => this.options.attributes ? 'Hide &Attributes' : 'Show &Attributes', + accelerator: 'CmdOrCtrl+D', + execute: () => this.toggle('attributes'), + enabled: () => this.activeGraph + }); + view.add({ + label: () => this.options.weights ? 'Hide &Weights' : 'Show &Weights', + accelerator: 'CmdOrCtrl+I', + execute: () => this.toggle('weights'), + enabled: () => this.activeGraph + }); + view.add({ + label: () => this.options.names ? 'Hide &Names' : 'Show &Names', + accelerator: 'CmdOrCtrl+U', + execute: () => this.toggle('names'), + enabled: () => this.activeGraph + }); + view.add({ + label: () => this.options.direction === 'vertical' ? 'Show &Horizontal' : 'Show &Vertical', + accelerator: 'CmdOrCtrl+K', + execute: () => this.toggle('direction'), + enabled: () => this.activeGraph + }); + view.add({ + label: () => this.options.mousewheel === 'scroll' ? '&Mouse Wheel: Zoom' : '&Mouse Wheel: Scroll', + accelerator: 'CmdOrCtrl+M', + execute: () => this.toggle('mousewheel'), + enabled: () => this.activeGraph + }); + view.add({}); + if (this._host.type === 'Electron') { + view.add({ + label: '&Reload', + accelerator: platform === 'darwin' ? 'CmdOrCtrl+R' : 'F5', + execute: () => this._host.execute('reload'), + enabled: () => this.activeGraph + }); + view.add({}); + } + view.add({ + label: 'Zoom &In', + accelerator: 'Shift+Up', + execute: () => this.zoomIn(), + enabled: () => this.activeGraph + }); + view.add({ + label: 'Zoom &Out', + accelerator: 'Shift+Down', + execute: () => this.zoomOut(), + enabled: () => this.activeGraph + }); + view.add({ + label: 'Actual &Size', + accelerator: 'Shift+Backspace', + execute: () => this.resetZoom(), + enabled: () => this.activeGraph + }); + view.add({}); + view.add({ + label: '&Properties...', + accelerator: 'CmdOrCtrl+Enter', + execute: () => this.showModelProperties(), + enabled: () => this.activeGraph + }); + if (this._host.type === 'Electron' && !this._host.environment('packaged')) { + view.add({}); + view.add({ + label: '&Developer Tools...', + accelerator: 'CmdOrCtrl+Alt+I', + execute: () => this._host.execute('toggle-developer-tools') + }); + } + const help = this._menu.group('&Help'); + help.add({ + label: 'Report &Issue', + execute: () => this._host.execute('report-issue') + }); + help.add({ + label: `&About ${this._host.environment('name')}`, + execute: () => this._host.execute('about') + }); + } + await this._host.start(); + } catch (err) { + this.error(err, null, null); + } + } + + show(page) { + if (!page) { + page = (!this._model && !this.activeGraph) ? 
'welcome' : 'default'; + } + this._host.event('screen_view', { + screen_name: page, + }); + if (this._sidebar) { + this._sidebar.close(); + } + if (this._menu) { + this._menu.close(); + } + this._host.document.body.classList.remove(...Array.from(this._host.document.body.classList).filter((_) => _ !== 'active')); + this._host.document.body.classList.add(...page.split(' ')); + if (page === 'default') { + this._activate(); + } else { + this._deactivate(); + } + if (page === 'welcome') { + const element = this._element('open-file-button'); + if (element) { + element.focus(); + } + } + this._page = page; + } + + progress(percent) { + const bar = this._element('progress-bar'); + if (bar) { + bar.style.width = `${percent}%`; + } + } + + cut() { + this._host.document.execCommand('cut'); + } + + copy() { + this._host.document.execCommand('copy'); + } + + paste() { + this._host.document.execCommand('paste'); + } + + selectAll() { + this._host.document.execCommand('selectall'); + } + + find() { + if (this._graph) { + this._graph.select(null); + const content = new view.FindSidebar(this._host, this.activeGraph); + content.on('search-text-changed', (sender, text) => { + this._searchText = text; + }); + content.on('select', (sender, selection) => { + this.scrollTo(this._graph.select([ selection ])); + }); + content.on('focus', (sender, selection) => { + this._graph.focus([ selection ]); + }); + content.on('blur', (sender, selection) => { + this._graph.blur([ selection ]); + }); + this._sidebar.open(content.render(), 'Find'); + content.focus(this._searchText); + } + } + + get model() { + return this._model; + } + + get options() { + return this._options; + } + + toggle(name) { + switch (name) { + case 'names': + case 'attributes': + case 'weights': + this._options[name] = !this._options[name]; + this._reload(); + break; + case 'direction': + this._options.direction = this._options.direction === 'vertical' ? 'horizontal' : 'vertical'; + this._reload(); + break; + case 'mousewheel': + this._options.mousewheel = this._options.mousewheel === 'scroll' ? 
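+ // Only options that differ from the defaults are persisted below; once every toggle is back at its default, the stored 'options' entry is deleted.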
'zoom' : 'scroll'; + break; + default: + throw new view.Error(`Unsupported toggle '${name}'.`); + } + const options = {}; + for (const [name, value] of Object.entries(this._options)) { + if (this._defaultOptions[name] !== value) { + options[name] = value; + } + } + if (Object.entries(options).length == 0) { + this._host.delete('options'); + } else { + this._host.set('options', options); + } + } + + recents(recents) { + if (this._recents) { + this._recents.clear(); + for (let i = 0; i < recents.length; i++) { + const recent = recents[i]; + this._recents.add({ + label: recent.label, + accelerator: `CmdOrCtrl+${(i + 1)}`, + execute: () => this._host.execute('open', recent.path) + }); + } + } + } + + _reload() { + this.show('welcome spinner'); + if (this._model && this._graphs.length > 0) { + this._updateGraph(this._model, this._graphs).catch((error) => { + if (error) { + this.error(error, 'Graph update failed.', 'welcome'); + } + }); + } + } + + _timeout(delay) { + return new Promise((resolve) => { + setTimeout(resolve, delay); + }); + } + + _element(id) { + return this._host.document.getElementById(id); + } + + zoomIn() { + this._updateZoom(this._zoom * 1.1); + } + + zoomOut() { + this._updateZoom(this._zoom * 0.9); + } + + resetZoom() { + this._updateZoom(1); + } + + _activate() { + if (!this._events) { + this._events = {}; + this._events.scroll = (e) => this._scrollHandler(e); + this._events.wheel = (e) => this._wheelHandler(e); + this._events.gesturestart = (e) => this._gestureStartHandler(e); + this._events.pointerdown = (e) => this._pointerDownHandler(e); + this._events.touchstart = (e) => this._touchStartHandler(e); + } + const graph = this._element('graph'); + graph.focus(); + graph.addEventListener('scroll', this._events.scroll); + graph.addEventListener('wheel', this._events.wheel, { passive: false }); + graph.addEventListener('pointerdown', this._events.pointerdown); + if (this._host.environment('agent') === 'safari') { + graph.addEventListener('gesturestart', this._events.gesturestart, false); + } else { + graph.addEventListener('touchstart', this._events.touchstart, { passive: true }); + } + } + + _deactivate() { + if (this._events) { + const graph = this._element('graph'); + graph.removeEventListener('scroll', this._events.scroll); + graph.removeEventListener('wheel', this._events.wheel); + graph.removeEventListener('pointerdown', this._events.pointerdown); + graph.removeEventListener('gesturestart', this._events.gesturestart); + graph.removeEventListener('touchstart', this._events.touchstart); + } + } + + _updateZoom(zoom, e) { + const container = this._element('graph'); + const canvas = this._element('canvas'); + const limit = this._options.direction === 'vertical' ? + container.clientHeight / this._height : + container.clientWidth / this._width; + const min = Math.min(Math.max(limit, 0.15), 1); + zoom = Math.max(min, Math.min(zoom, 1.4)); + const scrollLeft = this._scrollLeft || container.scrollLeft; + const scrollTop = this._scrollTop || container.scrollTop; + const x = (e ? e.pageX : (container.clientWidth / 2)) + scrollLeft; + const y = (e ? 
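+ // The zoom factor is clamped to [min, 1.4], where min fits the graph to the container but never drops below 0.15; the scroll math below keeps the point under the cursor (or the viewport center) fixed while the canvas rescales.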
+ _updateZoom(zoom, e) { + const container = this._element('graph'); + const canvas = this._element('canvas'); + const limit = this._options.direction === 'vertical' ? + container.clientHeight / this._height : + container.clientWidth / this._width; + const min = Math.min(Math.max(limit, 0.15), 1); + zoom = Math.max(min, Math.min(zoom, 1.4)); + const scrollLeft = this._scrollLeft || container.scrollLeft; + const scrollTop = this._scrollTop || container.scrollTop; + const x = (e ? e.pageX : (container.clientWidth / 2)) + scrollLeft; + const y = (e ? e.pageY : (container.clientHeight / 2)) + scrollTop; + const width = zoom * this._width; + const height = zoom * this._height; + canvas.style.width = `${width}px`; + canvas.style.height = `${height}px`; + this._scrollLeft = Math.max(0, ((x * zoom) / this._zoom) - (x - scrollLeft)); + this._scrollTop = Math.max(0, ((y * zoom) / this._zoom) - (y - scrollTop)); + container.scrollLeft = this._scrollLeft; + container.scrollTop = this._scrollTop; + this._zoom = zoom; + } + + // Drag-to-pan: capture the pointer, scroll the container by the drag delta, + // and swallow the click that would otherwise fire when a drag ends. + _pointerDownHandler(e) { + if (e.pointerType !== 'touch' && e.buttons === 1) { + e.target.setPointerCapture(e.pointerId); + const document = this._host.document.documentElement; + const container = this._element('graph'); + this._mousePosition = { + left: container.scrollLeft, + top: container.scrollTop, + x: e.clientX, + y: e.clientY + }; + container.style.cursor = 'grabbing'; + e.stopImmediatePropagation(); + const pointerMoveHandler = (e) => { + e.preventDefault(); + e.stopImmediatePropagation(); + if (this._mousePosition) { + const dx = e.clientX - this._mousePosition.x; + const dy = e.clientY - this._mousePosition.y; + this._mousePosition.moved = dx * dx + dy * dy > 0; + if (this._mousePosition.moved) { + const container = this._element('graph'); + container.scrollTop = this._mousePosition.top - dy; + container.scrollLeft = this._mousePosition.left - dx; + } + } + }; + const clickHandler = (e) => { + e.stopPropagation(); + document.removeEventListener('click', clickHandler, true); + }; + const pointerUpHandler = (e) => { + e.target.releasePointerCapture(e.pointerId); + container.style.removeProperty('cursor'); + container.removeEventListener('pointerup', pointerUpHandler); + container.removeEventListener('pointerleave', pointerUpHandler); + container.removeEventListener('pointermove', pointerMoveHandler); + if (this._mousePosition && this._mousePosition.moved) { + e.preventDefault(); + e.stopImmediatePropagation(); + delete this._mousePosition; + document.addEventListener('click', clickHandler, true); + } + }; + container.addEventListener('pointermove', pointerMoveHandler); + container.addEventListener('pointerup', pointerUpHandler); + container.addEventListener('pointerleave', pointerUpHandler); + } + } + + // Pinch zoom: scale by the ratio of the current to the initial distance + // between the two touches, anchored at the midpoint of the initial touches. + _touchStartHandler(e) { + if (e.touches.length === 2) { + this._touchPoints = Array.from(e.touches); + this._touchZoom = this._zoom; + } + const touchMoveHandler = (e) => { + if (Array.isArray(this._touchPoints) && this._touchPoints.length === 2 && e.touches.length === 2) { + const distance = (points) => { + const dx = (points[1].clientX - points[0].clientX); + const dy = (points[1].clientY - points[0].clientY); + return Math.sqrt(dx * dx + dy * dy); + }; + const d1 = distance(Array.from(e.touches)); + const d2 = distance(this._touchPoints); + if (d2 !== 0) { + const points = this._touchPoints; + const event = { + pageX: (points[1].pageX + points[0].pageX) / 2, + pageY: (points[1].pageY + points[0].pageY) / 2 + }; + const zoom = d1 / d2; + this._updateZoom(this._touchZoom * zoom, event); + } + } + }; + const container = this._element('graph'); + const touchEndHandler = () => { + container.removeEventListener('touchmove', touchMoveHandler, { passive: true }); + container.removeEventListener('touchcancel', touchEndHandler, { passive: true }); + container.removeEventListener('touchend', touchEndHandler, { passive: true }); + delete this._touchPoints; + delete this._touchZoom; + }; + container.addEventListener('touchmove', touchMoveHandler, { passive: true }); + container.addEventListener('touchcancel', touchEndHandler, { passive: true }); + container.addEventListener('touchend', touchEndHandler, { passive: true }); + } + + _gestureStartHandler(e) { + e.preventDefault(); + this._gestureZoom = this._zoom; + const container = this._element('graph'); + const gestureChangeHandler = (e) => { + e.preventDefault(); + this._updateZoom(this._gestureZoom * e.scale, e); + }; + const gestureEndHandler = (e) => { + container.removeEventListener('gesturechange', gestureChangeHandler, false); + container.removeEventListener('gestureend', gestureEndHandler, false); + e.preventDefault(); + if (this._gestureZoom) { + this._updateZoom(this._gestureZoom * e.scale, e); + delete this._gestureZoom; + } + }; + container.addEventListener('gesturechange', gestureChangeHandler, false); + container.addEventListener('gestureend', gestureEndHandler, false); + } + + _scrollHandler(e) { + if (this._scrollLeft && e.target.scrollLeft !== Math.floor(this._scrollLeft)) { + delete this._scrollLeft; + } + if (this._scrollTop && e.target.scrollTop !== Math.floor(this._scrollTop)) { + delete this._scrollTop; + } + } + + _wheelHandler(e) { + if (e.shiftKey || e.ctrlKey || this._options.mousewheel === 'zoom') { + const delta = -e.deltaY * (e.deltaMode === 1 ? 0.05 : e.deltaMode ? 1 : 0.002) * (e.ctrlKey ?
10 : 1); + this._updateZoom(this._zoom * Math.pow(2, delta), e); + e.preventDefault(); + } + } + + scrollTo(selection) { + if (selection && selection.length > 0) { + const container = this._element('graph'); + let x = 0; + let y = 0; + for (const element of selection) { + const rect = element.getBoundingClientRect(); + x += rect.left + (rect.width / 2); + y += rect.top + (rect.height / 2); + } + x = x / selection.length; + y = y / selection.length; + const rect = container.getBoundingClientRect(); + const left = (container.scrollLeft + x - rect.left) - (rect.width / 2); + const top = (container.scrollTop + y - rect.top) - (rect.height / 2); + container.scrollTo({ left: left, top: top, behavior: 'smooth' }); + } + } + + async error(err, name, screen) { + if (this._sidebar) { + this._sidebar.close(); + } + this._host.exception(err, false); + const knowns = [ + { name: '', message: /^Invalid value identifier/, url: 'https://github.com/lutzroeder/netron/issues/540' }, + { name: '', message: /^Cannot read property/, url: 'https://github.com/lutzroeder/netron/issues/647' }, + { name: '', message: /^Failed to render tensor/, url: 'https://github.com/lutzroeder/netron/issues/681' }, + { name: 'Error', message: /^EPERM: operation not permitted/, url: 'https://github.com/lutzroeder/netron/issues/551' }, + { name: 'Error', message: /^EACCES: permission denied/, url: 'https://github.com/lutzroeder/netron/issues/504' }, + { name: 'RangeError', message: /^Offset is outside the bounds of the DataView/, url: 'https://github.com/lutzroeder/netron/issues/563' }, + { name: 'RangeError', message: /^Maximum call stack size exceeded/, url: 'https://github.com/lutzroeder/netron/issues/589' }, + { name: 'RangeError', message: /^Invalid string length/, url: 'https://github.com/lutzroeder/netron/issues/648' }, + { name: 'Python Error', message: /^Unknown function/, url: 'https://github.com/lutzroeder/netron/issues/546' }, + { name: 'Error loading model.', message: /^Unsupported file content \(/, url: 'https://github.com/lutzroeder/netron/issues/550' }, + { name: 'Error loading model.', message: /^Unsupported Protocol Buffers content/, url: 'https://github.com/lutzroeder/netron/issues/593' }, + { name: 'Error loading model.', message: /^Unsupported Protocol Buffers text content/, url: 'https://github.com/lutzroeder/netron/issues/594' }, + { name: 'Error loading model.', message: /^Unsupported JSON content/, url: 'https://github.com/lutzroeder/netron/issues/595' }, + { name: 'Error loading Caffe model.', message: /^File format is not caffe\.NetParameter/, url: 'https://github.com/lutzroeder/netron/issues/563' }, + { name: 'Error loading DaVinci OM model.', message: /^Unsupported DaVinci OM partition type\./, url: 'https://github.com/lutzroeder/netron/issues/1154' }, + { name: 'Error loading MNN model.', message: /^File format is not mnn\.Net/, url: 'https://github.com/lutzroeder/netron/issues/746' }, + { name: 'Error loading NNEF model.', message: /^.*/, url: 'https://github.com/lutzroeder/netron/issues/992' }, + { name: 'Error loading PyTorch model.', message: /^File does not contain root module or state dictionary/, url: 'https://github.com/lutzroeder/netron/issues/543' }, + { name: 'Error loading PyTorch model.', message: /^Module does not contain modules/, url: 'https://github.com/lutzroeder/netron/issues/544' }, + { name: 'Error loading PyTorch model.', message: /^Unknown type name/, url: 'https://github.com/lutzroeder/netron/issues/969' }, + { name: 'Error loading ONNX model.', message: /^File format is not 
onnx\.ModelProto \(Unexpected end of file\)\./, url: 'https://github.com/lutzroeder/netron/issues/1155' }, + { name: 'Error loading ONNX model.', message: /^File format is not onnx\.ModelProto \(Cannot read properties of undefined \(reading 'ModelProto'\)\)\./, url: 'https://github.com/lutzroeder/netron/issues/1156' }, + { name: 'Error loading ONNX model.', message: /^File format is not onnx\.ModelProto/, url: 'https://github.com/lutzroeder/netron/issues/549' }, + { name: 'Error loading TensorFlow Lite model.', message: /^Offset is outside the bounds of the DataView/, url: 'https://github.com/lutzroeder/netron/issues/563' }, + { name: 'Error loading TensorRT model.', message: /^Invalid file content. File contains undocumented TensorRT engine data\./, url: 'https://github.com/lutzroeder/netron/issues/725' } + ]; + const known = knowns.find((known) => (known.name.length === 0 || known.name === err.name) && err.message.match(known.message)); + const url = known && known.url ? known.url : null; + const message = err.message; + name = name || err.name; + const button = await this._host.error(name, message, url === null); + if (button === 0 && (url || this._host.type == 'Electron')) { + this._host.openURL(url || `${this._host.environment('repository')}/issues`); + } + this.show(screen !== undefined ? screen : 'welcome'); + } + + accept(file, size) { + return this._modelFactoryService.accept(file, size); + } + + async open(context) { + this._sidebar.close(); + await this._timeout(2); + try { + const model = await this._modelFactoryService.open(context); + const format = []; + if (model.format) { + format.push(model.format); + } + if (model.producer) { + format.push(`(${model.producer})`); + } + if (format.length > 0) { + this._host.event('model_open', { + model_format: model.format || '', + model_producer: model.producer || '' + }); + } + await this._timeout(20); + const graphs = Array.isArray(model.graphs) && model.graphs.length > 0 ? [ model.graphs[0] ] : []; + return await this._updateGraph(model, graphs); + } catch (error) { + if (error && context.identifier) { + error.context = context.identifier; + } + throw error; + } + } + + async _updateActiveGraph(graph) { + this._sidebar.close(); + if (this._model) { + const model = this._model; + this.show('welcome spinner'); + await this._timeout(200); + try { + await this._updateGraph(model, [ graph ]); + } catch (error) { + if (error) { + this.error(error, 'Graph update failed.', 'welcome'); + } + } + } + } + + get activeGraph() { + return Array.isArray(this._graphs) && this._graphs.length > 0 ? this._graphs[0] : null; + } + + async _updateGraph(model, graphs) { + await this._timeout(100); + const graph = Array.isArray(graphs) && graphs.length > 0 ? graphs[0] : null; + if (graph && graph != this._graphs[0]) { + const nodes = graph.nodes; + if (nodes.length > 2048) { + if (!this._host.confirm('Large model detected.', 'This graph contains a large number of nodes and might take a long time to render. 
Do you want to continue?')) { + this._host.event('graph_view', { + graph_node_count: nodes.length, + graph_skip: 1 } + ); + this.show(null); + return null; + } + } + } + const update = async (model, graphs) => { + this._model = model; + this._graphs = graphs; + await this.renderGraph(this._model, this.activeGraph, this._options); + if (this._page !== 'default') { + this.show('default'); + } + const path = this._element('toolbar-path'); + const back = this._element('toolbar-path-back-button'); + while (path.children.length > 1) { + path.removeChild(path.lastElementChild); + } + if (this._graphs.length <= 1) { + back.style.opacity = 0; + } else { + back.style.opacity = 1; + const last = this._graphs.length - 2; + const count = Math.min(2, last); + if (count < last) { + const element = this._host.document.createElement('button'); + element.setAttribute('class', 'toolbar-path-name-button'); + element.innerHTML = '…'; + path.appendChild(element); + } + for (let i = count; i >= 0; i--) { + const graph = this._graphs[i]; + const element = this._host.document.createElement('button'); + element.setAttribute('class', 'toolbar-path-name-button'); + element.addEventListener('click', () => { + if (i > 0) { + this._graphs = this._graphs.slice(i); + this._updateGraph(this._model, this._graphs); + } + this.showDefinition(this._graphs[0]); + }); + const name = graph && graph.name ? graph.name : ''; + if (name.length > 24) { + element.setAttribute('title', name); + element.innerHTML = `…${name.substring(name.length - 24, name.length)}`; + } else { + element.removeAttribute('title'); + element.innerHTML = name; + } + path.appendChild(element); + } + } + }; + const lastModel = this._model; + const lastGraphs = this._graphs; + try { + await update(model, graphs); + return this._model; + } catch (error) { + await update(lastModel, lastGraphs); + throw error; + } + } + + pushGraph(graph) { + if (graph && graph !== this.activeGraph && Array.isArray(graph.nodes)) { + this._sidebar.close(); + this._updateGraph(this._model, [ graph ].concat(this._graphs)); + } + } + + popGraph() { + if (this._graphs.length > 1) { + this._sidebar.close(); + return this._updateGraph(this._model, this._graphs.slice(1)); + } + return null; + } + + async renderGraph(model, graph, options) { + this._graph = null; + const canvas = this._element('canvas'); + while (canvas.lastChild) { + canvas.removeChild(canvas.lastChild); + } + if (!graph) { + return; + } + this._zoom = 1; + const groups = graph.groups; + const nodes = graph.nodes; + this._host.event('graph_view', { + graph_node_count: nodes.length, + graph_skip: 0 + }); + const layout = {}; + layout.nodesep = 20; + layout.ranksep = 20; + const rotate = graph.nodes.every((node) => node.inputs.filter((input) => input.value.every((argument) => !argument.initializer)).length === 0 && node.outputs.length === 0); + const horizontal = rotate ? 
options.direction === 'vertical' : options.direction !== 'vertical'; + if (horizontal) { + layout.rankdir = 'LR'; + } + if (nodes.length > 3000) { + layout.ranker = 'longest-path'; + } + const viewGraph = new view.Graph(this, model, options, groups, layout); + viewGraph.add(graph); + // Workaround for Safari background drag/zoom issue: + // https://stackoverflow.com/questions/40887193/d3-js-zoom-is-not-working-with-mousewheel-in-safari + const background = this._host.document.createElementNS('http://www.w3.org/2000/svg', 'rect'); + background.setAttribute('id', 'background'); + background.setAttribute('fill', 'none'); + background.setAttribute('pointer-events', 'all'); + canvas.appendChild(background); + const origin = this._host.document.createElementNS('http://www.w3.org/2000/svg', 'g'); + origin.setAttribute('id', 'origin'); + canvas.appendChild(origin); + viewGraph.build(this._host.document, origin); + await this._timeout(20); + viewGraph.measure(); + viewGraph.layout(); + viewGraph.update(); + const elements = Array.from(canvas.getElementsByClassName('graph-input') || []); + if (elements.length === 0) { + const nodeElements = Array.from(canvas.getElementsByClassName('graph-node') || []); + if (nodeElements.length > 0) { + elements.push(nodeElements[0]); + } + } + const size = canvas.getBBox(); + const margin = 100; + const width = Math.ceil(margin + size.width + margin); + const height = Math.ceil(margin + size.height + margin); + origin.setAttribute('transform', `translate(${margin}, ${margin}) scale(1)`); + background.setAttribute('width', width); + background.setAttribute('height', height); + this._width = width; + this._height = height; + delete this._scrollLeft; + delete this._scrollTop; + canvas.setAttribute('viewBox', `0 0 ${width} ${height}`); + canvas.setAttribute('width', width); + canvas.setAttribute('height', height); + this._zoom = 1; + this._updateZoom(this._zoom); + const container = this._element('graph'); + if (elements && elements.length > 0) { + // Center view based on input elements + const xs = []; + const ys = []; + for (let i = 0; i < elements.length; i++) { + const element = elements[i]; + const rect = element.getBoundingClientRect(); + xs.push(rect.left + (rect.width / 2)); + ys.push(rect.top + (rect.height / 2)); + } + let [x] = xs; + const [y] = ys; + if (ys.every((y) => y === ys[0])) { + x = xs.reduce((a, b) => a + b, 0) / xs.length; + } + const graphRect = container.getBoundingClientRect(); + const left = (container.scrollLeft + x - graphRect.left) - (graphRect.width / 2); + const top = (container.scrollTop + y - graphRect.top) - (graphRect.height / 2); + container.scrollTo({ left: left, top: top, behavior: 'auto' }); + } else { + const canvasRect = canvas.getBoundingClientRect(); + const graphRect = container.getBoundingClientRect(); + const left = (container.scrollLeft + (canvasRect.width / 2) - graphRect.left) - (graphRect.width / 2); + const top = (container.scrollTop + (canvasRect.height / 2) - graphRect.top) - (graphRect.height / 2); + container.scrollTo({ left: left, top: top, behavior: 'auto' }); + } + this._graph = viewGraph; + }
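+ // Inline the rules of a named style sheet as element styles so a cloned + // SVG keeps its appearance when serialized outside the document.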
+ applyStyleSheet(element, name) { + let rules = []; + for (const styleSheet of this._host.document.styleSheets) { + if (styleSheet && styleSheet.href && styleSheet.href.endsWith(`/${name}`)) { + rules = styleSheet.cssRules; + break; + } + } + const nodes = element.getElementsByTagName('*'); + for (const node of nodes) { + for (const rule of rules) { + if (node.matches(rule.selectorText)) { + for (const item of rule.style) { + node.style[item] = rule.style[item]; + } + } + } + } + } + + export(file) { + const lastIndex = file.lastIndexOf('.'); + const extension = (lastIndex !== -1) ? file.substring(lastIndex + 1).toLowerCase() : 'png'; + if (this.activeGraph && (extension === 'png' || extension === 'svg')) { + const canvas = this._element('canvas'); + const clone = canvas.cloneNode(true); + this.applyStyleSheet(clone, 'grapher.css'); + clone.setAttribute('id', 'export'); + clone.removeAttribute('viewBox'); + clone.removeAttribute('width'); + clone.removeAttribute('height'); + clone.style.removeProperty('opacity'); + clone.style.removeProperty('display'); + clone.style.removeProperty('width'); + clone.style.removeProperty('height'); + const background = clone.querySelector('#background'); + const origin = clone.querySelector('#origin'); + origin.setAttribute('transform', 'translate(0,0) scale(1)'); + background.removeAttribute('width'); + background.removeAttribute('height'); + + const parent = canvas.parentElement; + parent.insertBefore(clone, canvas); + const size = clone.getBBox(); + parent.removeChild(clone); + parent.removeChild(canvas); + parent.appendChild(canvas); + const delta = (Math.min(size.width, size.height) / 2.0) * 0.1; + const width = Math.ceil(delta + size.width + delta); + const height = Math.ceil(delta + size.height + delta); + origin.setAttribute('transform', `translate(${(delta - size.x)}, ${(delta - size.y)}) scale(1)`); + clone.setAttribute('width', width); + clone.setAttribute('height', height); + background.setAttribute('width', width); + background.setAttribute('height', height); + background.setAttribute('fill', '#fff'); + + const data = new XMLSerializer().serializeToString(clone); + + if (extension === 'svg') { + const blob = new Blob([ data ], { type: 'image/svg+xml' }); + this._host.export(file, blob); + } + + if (extension === 'png') { + const image = new Image(); + image.onload = () => { + const max = Math.max(width, height); + const scale = Math.min(24000.0 / max, 2.0); + const canvas = this._host.document.createElement('canvas'); + canvas.width = Math.ceil(width * scale); + canvas.height = Math.ceil(height * scale); + const context = canvas.getContext('2d'); + context.scale(scale, scale); + context.drawImage(image, 0, 0); + canvas.toBlob((blob) => { + if (blob) { + this._host.export(file, blob); + } else { + const error = new Error('Image may be too large to render as PNG.'); + error.name = 'Error exporting image.'; + this._host.exception(error, false); + this._host.error(error.name, error.message); + } + }, 'image/png'); + }; + image.src = `data:image/svg+xml;base64,${this._host.window.btoa(unescape(encodeURIComponent(data)))}`; + } + } + } + + showModelProperties() { + if (this._model) { + try { + const modelSidebar = new view.ModelSidebar(this._host, this._model, this.activeGraph); + modelSidebar.on('update-active-graph', (sender, graph) => { + this._updateActiveGraph(graph); + }); + const content = modelSidebar.render(); + this._sidebar.open(content, 'Model Properties'); + } catch (error) { + if (error) { + error.context = this._model.identifier; + } + this.error(error, 'Error showing model properties.', null); + } + } + }
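+ // The 'export-tensor' handler below saves a tensor as a NumPy .npy file + // through the bundled Python emulation layer; the 'boolean' data type is + // mapped to the NumPy 'bool' dtype name first.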
+ showNodeProperties(node) { + if (node) { + try { + if (this._menu) { + this._menu.close(); + } + const nodeSidebar = new view.NodeSidebar(this._host, node); + nodeSidebar.on('show-documentation', (/* sender, e */) => { + this.showDefinition(node.type); + }); + nodeSidebar.on('export-tensor', (sender, tensor) => { + const defaultPath = tensor.name ? tensor.name.split('/').join('_').split(':').join('_').split('.').join('_') : 'tensor'; + this._host.save('NumPy Array', 'npy', defaultPath, (file) => { + try { + let dataType = tensor.type.dataType; + if (dataType === 'boolean') { + dataType = 'bool'; + } + const execution = new python.Execution(); + const bytes = execution.invoke('io.BytesIO', []); + const dtype = execution.invoke('numpy.dtype', [ dataType ]); + const array = execution.invoke('numpy.asarray', [ tensor.value, dtype ]); + execution.invoke('numpy.save', [ bytes, array ]); + bytes.seek(0); + const blob = new Blob([ bytes.read() ], { type: 'application/octet-stream' }); + this._host.export(file, blob); + } catch (error) { + this.error(error, 'Error saving NumPy tensor.', null); + } + }); + }); + nodeSidebar.on('error', (sender, error) => { + if (this._model) { + error.context = this._model.identifier; + } + this.error(error, null, null); + }); + nodeSidebar.on('activate', (sender, value) => { + this._graph.select([ value ]); + }); + nodeSidebar.on('deactivate', () => { + this._graph.select(null); + }); + nodeSidebar.on('select', (sender, value) => { + this.scrollTo(this._graph.activate(value)); + }); + this._sidebar.open(nodeSidebar.render(), 'Node Properties'); + } catch (error) { + if (error) { + error.context = this._model.identifier; + } + this.error(error, 'Error showing node properties.', null); + } + } + } + + showConnectionProperties(value, from, to) { + try { + if (this._menu) { + this._menu.close(); + } + const connectionSidebar = new view.ConnectionSidebar(this._host, value, from, to); + connectionSidebar.on('activate', (sender, value) => { + this._graph.select([ value ]); + }); + connectionSidebar.on('deactivate', () => { + this._graph.select(null); + }); + connectionSidebar.on('select', (sender, value) => { + this.scrollTo(this._graph.activate(value)); + }); + connectionSidebar.on('error', (sender, error) => { + if (this._model) { + error.context = this._model.identifier; + } + this.error(error, null, null); + }); + this._sidebar.open(connectionSidebar.render(), 'Connection Properties'); + } catch (error) { + if (error) { + error.context = this._model.identifier; + } + this.error(error, 'Error showing connection properties.', null); + } + } + + showDefinition(type) { + if (type && (type.description || type.inputs || type.outputs || type.attributes)) { + if (type.nodes && type.nodes.length > 0) { + this.pushGraph(type); + } + const documentationSidebar = new view.DocumentationSidebar(this._host, type); + documentationSidebar.on('navigate', (sender, e) => { + this._host.openURL(e.link); + }); + const title = type.type === 'function' ?
'Function' : 'Documentation'; + this._sidebar.push(documentationSidebar.render(), title); + } + } + + about() { + this._host.document.getElementById('version').innerText = this._host.version; + const handler = () => { + this._host.window.removeEventListener('keydown', handler); + this._host.document.body.removeEventListener('click', handler); + this._host.document.body.classList.remove('about'); + }; + this._host.window.addEventListener('keydown', handler); + this._host.document.body.addEventListener('click', handler); + this._host.document.body.classList.add('about'); + } +}; + +view.Menu = class { + + constructor(host) { + this.items = []; + this._darwin = host.environment('platform') === 'darwin'; + this._document = host.document; + this._stack = []; + this._root = []; + this._buttons = []; + this._accelerators = new Map(); + this._keyCodes = new Map([ + [ 'Backspace', 0x08 ], [ 'Enter', 0x0D ], [ 'Escape', 0x1B ], + [ 'Left', 0x25 ], [ 'Up', 0x26 ], [ 'Right', 0x27 ], [ 'Down', 0x28 ], + [ 'F5', 0x74 ], [ 'F11', 0x7a ] + ]); + this._symbols = new Map([ + [ 'Backspace', '⌫' ], [ 'Enter', '⏎' ], + [ 'Up', '↑' ], [ 'Down', '↓' ], + ]); + this._keydown = (e) => { + this._alt = false; + const code = e.keyCode | (e.altKey ? 0x0200 : 0) | (e.shiftKey ? 0x0100 : 0); + const modifier = (e.ctrlKey ? 0x0400 : 0) | (e.metaKey ? 0x0800 : 0); + if ((code | modifier) === 0x0212) { // Alt + this._alt = true; + } else { + const action = + this._accelerators.get(code | modifier) || + this._accelerators.get(code | ((e.ctrlKey && !this._darwin) || (e.metaKey && this._darwin) ? 0x1000 : 0)); + if (action && this._execute(action)) { + e.preventDefault(); + } else { + const item = this._mnemonic(code | modifier); + if (item && this._activate(item)) { + e.preventDefault(); + } + } + } + }; + this._keyup = (e) => { + const code = e.keyCode; + if (code === 0x0012 && this._alt) { // Alt + switch (this._stack.length) { + case 0: { + if (this.open()) { + e.preventDefault(); + } + break; + } + case 1: { + if (this.close()) { + e.preventDefault(); + } + break; + } + default: { + this._stack = [ this ]; + if (this._root.length > 1) { + this._root = [ this ]; + this._rebuild(); + } + this._update(); + e.preventDefault(); + break; + } + } + } + this._alt = false; + }; + this._next = () => { + const button = this._element.ownerDocument.activeElement; + const index = this._buttons.indexOf(button); + if (index !== -1 && index < this._buttons.length - 1) { + const next = this._buttons[index + 1]; + next.focus(); + } + }; + this._previous = () => { + const button = this._element.ownerDocument.activeElement; + const index = this._buttons.indexOf(button); + if (index > 0) { + const next = this._buttons[index - 1]; + next.focus(); + } + }; + this._push = () => { + const button = this._element.ownerDocument.activeElement; + if (button && button.getAttribute('data-type') === 'group') { + button.click(); + } + }; + this._pop = () => { + if (this._stack.length > 1) { + this._deactivate(); + } + }; + this._exit = () => { + this._deactivate(); + if (this._stack.length === 0) { + this.close(); + } + }; + host.window.addEventListener('keydown', this._keydown); + host.window.addEventListener('keyup', this._keyup); + } + + attach(element, button) { + this._element = element; + button.addEventListener('click', (e) => { + this.toggle(); + e.preventDefault(); + }); + } + + add(value) { + const item = new view.Menu.Command(value); + this.register(item, item.accelerator); + } + + group(label) { + const item = new view.Menu.Group(this, 
label); + item.identifier = `menu-item-${this.items.length}`; + this.items.push(item); + item.shortcut = this.register(item, item.accelerator); + return item; + } + + toggle() { + if (this._element.style.opacity >= 1) { + this.close(); + } else { + this._root = [ this ]; + this._stack = [ this ]; + this.open(); + } + } + + open() { + if (this._element) { + if (this._stack.length === 0) { + this.toggle(); + this._stack = [ this ]; + } + this._rebuild(); + this._update(); + this.register(this._exit, 'Escape'); + this.register(this._previous, 'Up'); + this.register(this._next, 'Down'); + this.register(this._pop, 'Left'); + this.register(this._push, 'Right'); + } + } + + close() { + if (this._element) { + this.unregister(this._exit); + this.unregister(this._previous); + this.unregister(this._next); + this.unregister(this._pop); + this.unregister(this._push); + this._element.style.opacity = 0; + this._element.style.left = '-17em'; + const button = this._element.ownerDocument.activeElement; + if (this._buttons.indexOf(button) > 0) { + button.blur(); + } + while (this._root.length > 1) { + this._deactivate(); + } + this._stack = []; + } + } + + register(action, accelerator) { + let shortcut = ''; + if (accelerator) { + let shift = false; + let alt = false; + let ctrl = false; + let cmd = false; + let cmdOrCtrl = false; + let key = ''; + for (const part of accelerator.split('+')) { + switch (part) { + case 'CmdOrCtrl': cmdOrCtrl = true; break; + case 'Cmd': cmd = true; break; + case 'Ctrl': ctrl = true; break; + case 'Alt': alt = true; break; + case 'Shift': shift = true; break; + default: key = part; break; + } + } + if (key !== '') { + if (this._darwin) { + shortcut += ctrl ? '⌃' : ''; + shortcut += alt ? '⌥' : ''; + shortcut += shift ? '⇧' : ''; + shortcut += cmdOrCtrl || cmd ? '⌘' : ''; + shortcut += this._symbols.has(key) ? this._symbols.get(key) : key; + } else { + shortcut += cmdOrCtrl || ctrl ? 'Ctrl+' : ''; + shortcut += alt ? 'Alt+' : ''; + shortcut += shift ? 'Shift+' : ''; + shortcut += key; + } + // Modifier bits: 0x1000 CmdOrCtrl, 0x0800 Cmd, 0x0400 Ctrl, 0x0200 Alt, 0x0100 Shift. + let code = (cmdOrCtrl ? 0x1000 : 0) | (cmd ? 0x0800 : 0) | (ctrl ? 0x0400 : 0) | (alt ? 0x0200 : 0) | (shift ? 0x0100 : 0); + code |= this._keyCodes.has(key) ? this._keyCodes.get(key) : key.charCodeAt(0); + this._accelerators.set(code, action); + } + } + return shortcut; + } + + unregister(action) { + this._accelerators = new Map(Array.from(this._accelerators.entries()).filter(([, value]) => value !== action)); + } + + _execute(action) { + if (typeof action === 'function') { + action(); + return true; + } + switch (action ? action.type : null) { + case 'group': { + while (this._stack.length > this._root.length) { + this._stack.pop(); + } + this._root.push({ items: [ action ] }); + this._stack.push(action); + this._rebuild(); + this._update(); + return true; + } + case 'command': { + this.close(); + setTimeout(() => action.execute(), 10); + return true; + } + default: { + return false; + } + } + }
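+ // Mnemonic lookup: match a plain or Alt-modified letter or digit against the + // '&'-marked character of the enabled items in the currently open group.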
+ _mnemonic(code) { + const key = /[a-zA-Z0-9]/.test(String.fromCharCode(code & 0x00FF)); + const modifier = (code & 0xFF00) !== 0; + const alt = (code & 0xFF00) === 0x0200; + if (alt && key) { + this.open(); + } + if (this._stack.length > 0 && key && (alt || !modifier)) { + const key = String.fromCharCode(code & 0x00FF); + const group = this._stack[this._stack.length - 1]; + const item = group.items.find((item) => key === item.mnemonic && (item.type === 'group' || item.type === 'command') && item.enabled); + if (item) { + return item; + } + } + return null; + } + + _activate(item) { + switch (item ? item.type : null) { + case 'group': { + this._stack.push(item); + this._rebuild(); + this._update(); + return true; + } + case 'command': { + return this._execute(item); + } + default: { + return false; + } + } + } + + _deactivate() { + if (this._root.length > 1) { + this._root.pop(); + const group = this._stack.pop(); + this._rebuild(); + this._update(); + if (group) { + const button = this._buttons.find((button) => button.getAttribute('id') === group.identifier); + if (button) { + button.focus(); + } + } + } else if (this._stack.length > 0) { + this._stack.pop(); + this._update(); + } + } + + _label(item, mnemonic) { + delete item.mnemonic; + const value = item.label; + if (value) { + const index = value.indexOf('&'); + if (index !== -1) { + if (mnemonic) { + item.mnemonic = value[index + 1].toUpperCase(); + // Underline the mnemonic character; labels are rendered via innerHTML. + return `${value.substring(0, index)}<u>${value[index + 1]}</u>${value.substring(index + 2)}`; + } + return value.substring(0, index) + value.substring(index + 1); + } + } + return value || ''; + } + + _rebuild() { + this._element.innerHTML = ''; + const root = this._root[this._root.length - 1]; + for (const group of root.items) { + const container = this._document.createElement('div'); + container.setAttribute('id', group.identifier); + container.setAttribute('class', 'menu-group'); + container.innerHTML = "<div class='menu-group-header'></div>"; // header text is set in _update() + for (const item of group.items) { + switch (item.type) { + case 'group': + case 'command': { + const button = this._document.createElement('button'); + button.setAttribute('class', 'menu-command'); + button.setAttribute('id', item.identifier); + button.setAttribute('data-type', item.type); + button.addEventListener('mouseenter', () => button.focus()); + button.addEventListener('click', () => this._execute(item)); + const accelerator = this._document.createElement('span'); + accelerator.setAttribute('class', 'menu-shortcut'); + if (item.type === 'group') { + accelerator.innerHTML = '❯'; + } else if (item.shortcut) { + accelerator.innerHTML = item.shortcut; + } + button.appendChild(accelerator); + const content = this._document.createElement('span'); + content.setAttribute('class', 'menu-label'); + button.appendChild(content); + container.appendChild(button); + break; + } + case 'separator': { + const element = this._document.createElement('div'); + element.setAttribute('class', 'menu-separator'); + element.setAttribute('id', item.identifier); + container.appendChild(element); + break; + } + default: { + break; + } + } + } + this._element.appendChild(container); + } + this._element.style.opacity = 1.0; + this._element.style.left = '0px'; + if (this._root.length > 1) { + this._element.style.width = 'auto'; + this._element.style.maxWidth = '60%'; + } else { + this._element.style.removeProperty('width'); + this._element.style.maxWidth = 'none'; + } + } + + _update() { + this._buttons = []; + const selected = this._stack.length > 0 ?
this._stack[this._stack.length - 1] : null; + const root = this._root[this._root.length - 1]; + for (const group of root.items) { + let visible = false; + let block = false; + const active = this._stack.length <= 1 || this._stack[1] === group; + const container = this._document.getElementById(group.identifier); + container.childNodes[0].innerHTML = this._label(group, this === selected); + for (const item of group.items) { + switch (item.type) { + case 'group': + case 'command': { + const label = this._label(item, group === selected); + const button = this._document.getElementById(item.identifier); + button.childNodes[1].innerHTML = label; + if (item.enabled) { + button.removeAttribute('disabled'); + button.style.display = 'block'; + visible = true; + block = true; + if (active) { + this._buttons.push(button); + } + } else { + button.setAttribute('disabled', ''); + button.style.display = 'none'; + } + break; + } + case 'separator': { + const element = this._document.getElementById(item.identifier); + element.style.display = block ? 'block' : 'none'; + block = false; + break; + } + default: { + break; + } + } + } + for (let i = group.items.length - 1; i >= 0; i--) { + const item = group.items[i]; + if ((item.type === 'group' || item.type === 'command') && item.enabled) { + break; + } else if (item.type === 'separator') { + const element = this._document.getElementById(item.identifier); + element.style.display = 'none'; + } + } + if (!visible) { + container.style.display = 'none'; + } + container.style.opacity = active ? 1 : 0; + } + const button = this._element.ownerDocument.activeElement; + const index = this._buttons.indexOf(button); + if (index === -1 && this._buttons.length > 0) { + this._buttons[0].focus(); + } + } +}; + +view.Menu.Group = class { + + constructor(parent, label) { + this.type = 'group'; + this.parent = parent; + this.label = label; + this.items = []; + } + + get enabled() { + return this.items.some((item) => item.enabled); + } + + add(value) { + const item = Object.keys(value).length > 0 ? new view.Menu.Command(value) : new view.Menu.Separator(); + item.identifier = `${this.identifier}-${this.items.length}`; + this.items.push(item); + item.shortcut = this.parent.register(item, item.accelerator); + } + + group(label) { + const item = new view.Menu.Group(this, label); + item.identifier = `${this.identifier}-${this.items.length}`; + this.items.push(item); + item.shortcut = this.parent.register(item, item.accelerator); + return item; + } + + clear() { + for (const item of this.items) { + if (item.clear) { + item.clear(); + } + this.parent.unregister(item); + } + this.items = []; + } + + register(item, accelerator) { + return this.parent.register(item, accelerator); + } + + unregister(item) { + this.parent.unregister(item); + } +}; + +view.Menu.Command = class { + + constructor(item) { + this.type = 'command'; + this.accelerator = item.accelerator; + this._label = item.label; + this._enabled = item.enabled; + this._execute = item.execute; + } + + get label() { + return typeof this._label === 'function' ? this._label() : this._label; + } + + get enabled() { + return this._enabled ? 
this._enabled() : true; + } + + execute() { + if (this._execute && this.enabled) { + this._execute(); + } + } +}; + +view.Menu.Separator = class { + + constructor() { + this.type = 'separator'; + this.enabled = false; + } +}; + +view.Graph = class extends grapher.Graph { + + constructor(view, model, options, compound, layout) { + super(compound, layout); + this.view = view; + this.model = model; + this.options = options; + this._nodeKey = 0; + this._values = new Map(); + this._table = new Map(); + this._selection = new Set(); + } + + createNode(node, type) { + if (type) { + const value = new view.Node(this, { type: type }); + value.name = (this._nodeKey++).toString(); + this._table.set(type, value); + return value; + } + const value = new view.Node(this, node); + value.name = (this._nodeKey++).toString(); + this._table.set(node, value); + return value; + } + + createInput(input) { + const value = new view.Input(this, input); + value.name = (this._nodeKey++).toString(); + this._table.set(input, value); + return value; + } + + createOutput(output) { + const value = new view.Output(this, output); + value.name = (this._nodeKey++).toString(); + this._table.set(output, value); + return value; + } + + createValue(argument) { + const name = argument.name; + if (!this._values.has(name)) { + const value = new view.Value(this, argument); + this._values.set(name, value); + this._table.set(argument, value); + } else { + // duplicate argument name + const value = this._values.get(name); + this._table.set(argument, value); + } + return this._values.get(name); + } + + add(graph) { + const clusters = new Set(); + const clusterParentMap = new Map(); + const groups = graph.groups; + if (groups) { + for (const node of graph.nodes) { + if (node.group) { + const path = node.group.split('/'); + while (path.length > 0) { + const name = path.join('/'); + path.pop(); + clusterParentMap.set(name, path.join('/')); + } + } + } + } + for (const input of graph.inputs) { + const viewInput = this.createInput(input); + this.setNode(viewInput); + for (const value of input.value) { + this.createValue(value).from = viewInput; + } + } + for (const node of graph.nodes) { + const viewNode = this.createNode(node); + this.setNode(viewNode); + const inputs = node.inputs; + for (const input of inputs) { + for (const value of input.value) { + if (value.name != '' && !value.initializer) { + this.createValue(value).to.push(viewNode); + } + } + } + let outputs = node.outputs; + if (node.chain && node.chain.length > 0) { + const chainOutputs = node.chain[node.chain.length - 1].outputs; + if (chainOutputs.length > 0) { + outputs = chainOutputs; + } + } + for (const output of outputs) { + for (const value of output.value) { + if (!value) { + const error = new view.Error('Invalid null argument.'); + error.context = this.model.identifier; + throw error; + } + if (value.name != '') { + this.createValue(value).from = viewNode; + } + } + } + + if (node.controlDependencies && node.controlDependencies.length > 0) { + for (const value of node.controlDependencies) { + this.createValue(value).controlDependency(viewNode); + } + } + const createCluster = (name) => { + if (!clusters.has(name)) { + this.setNode({ name: name, rx: 5, ry: 5 }); + clusters.add(name); + const parent = clusterParentMap.get(name); + if (parent) { + createCluster(parent); + this.setParent(name, parent); + } + } + }; + if (groups) { + let groupName = node.group; + if (groupName && groupName.length > 0) { + if (!clusterParentMap.has(groupName)) { + const lastIndex = 
groupName.lastIndexOf('/'); + if (lastIndex != -1) { + groupName = groupName.substring(0, lastIndex); + if (!clusterParentMap.has(groupName)) { + groupName = null; + } + } else { + groupName = null; + } + } + if (groupName) { + createCluster(`${groupName}\ngroup`); + this.setParent(viewNode.name, `${groupName}\ngroup`); + } + } + } + } + for (const output of graph.outputs) { + const viewOutput = this.createOutput(output); + this.setNode(viewOutput); + for (const value of output.value) { + this.createValue(value).to.push(viewOutput); + } + } + } + + build(document, origin) { + for (const value of this._values.values()) { + value.build(); + } + super.build(document, origin); + } + + select(selection) { + if (this._selection.size > 0) { + for (const element of this._selection) { + element.deselect(); + } + this._selection.clear(); + } + if (selection) { + let array = []; + for (const value of selection) { + if (this._table.has(value)) { + const element = this._table.get(value); + array = array.concat(element.select()); + this._selection.add(element); + } + } + return array; + } + return null; + } + + activate(value) { + if (this._table.has(value)) { + this.select(null); + const element = this._table.get(value); + element.activate(); + return this.select([ value ]); + } + return []; + } + + focus(selection) { + for (const value of selection) { + const element = this._table.get(value); + if (element && !this._selection.has(element)) { + element.select(); + } + } + } + + blur(selection) { + for (const value of selection) { + const element = this._table.get(value); + if (element && !this._selection.has(element)) { + element.deselect(); + } + } + } +}; + +view.Node = class extends grapher.Node { + + constructor(context, value) { + super(); + this.context = context; + this.value = value; + view.Node.counter = view.Node.counter || 0; + this.id = `node-${value.name ? `name-${value.name}` : `id-${(view.Node.counter++)}`}`; + this._add(this.value); + } + + get class() { + return 'graph-node'; + } + + get inputs() { + return this.value.inputs; + } + + get outputs() { + return this.value.outputs; + } + + _add(node) { + const options = this.context.options; + const header = this.header(); + const styles = [ 'node-item-type' ]; + const type = node.type; + const category = type && type.category ? type.category : ''; + if (category) { + styles.push(`node-item-type-${category.toLowerCase()}`); + } + if (typeof type.name !== 'string' || !type.name.split) { // #416 + const error = new view.Error(`Unsupported node type '${JSON.stringify(type.name)}'.`); + if (this.context.model && this.context.model.identifier) { + error.context = this.context.model.identifier; + } + throw error; + } + const content = options.names && (node.name || node.location) ? (node.name || node.location) : type.name.split('.').pop(); + const tooltip = options.names && (node.name || node.location) ? 
type.name : (node.name || node.location); + const title = header.add(null, styles, content, tooltip); + title.on('click', () => { + this.context.activate(node); + }); + if (Array.isArray(node.type.nodes) && node.type.nodes.length > 0) { + const definition = header.add(null, styles, '\u0192', 'Show Function Definition'); + definition.on('click', () => this.context.view.pushGraph(node.type)); + } + if (Array.isArray(node.nodes)) { + // this._expand = header.add(null, styles, '+', null); + // this._expand.on('click', () => this.toggle()); + } + const initializers = []; + let hiddenInitializers = false; + if (options.weights) { + if (Array.isArray(node.inputs)) { + for (const input of node.inputs) { + if (input.visible !== false && input.value.length === 1 && input.value[0].initializer != null) { + initializers.push(input); + } + if ((input.visible === false || input.value.length > 1) && + input.value.some((argument) => argument.initializer != null)) { + hiddenInitializers = true; + } + } + } + } + const objects = []; + const attributes = []; + if (Array.isArray(node.attributes) && node.attributes.length > 0) { + for (const attribute of node.attributes) { + switch (attribute.type) { + case 'graph': + case 'object': + case 'object[]': + case 'function': + case 'function[]': { + objects.push(attribute); + break; + } + default: { + if (options.attributes && attribute.visible !== false) { + attributes.push(attribute); + } + } + } + } + attributes.sort((a, b) => a.name.toUpperCase().localeCompare(b.name.toUpperCase())); + } + if (initializers.length > 0 || hiddenInitializers || attributes.length > 0 || objects.length > 0) { + const list = this.list(); + list.on('click', () => this.context.activate(node)); + for (const argument of initializers) { + const [value] = argument.value; + const type = value.type; + let shape = ''; + let separator = ''; + if (type && type.shape && type.shape.dimensions && Array.isArray(type.shape.dimensions)) { + shape = `\u3008${type.shape.dimensions.map((d) => (d !== null && d !== undefined) ? d : '?').join('\u00D7')}\u3009`; + if (type.shape.dimensions.length === 0 && value.initializer) { + try { + const initializer = value.initializer; + const tensor = new view.Tensor(initializer); + const encoding = tensor.encoding; + if ((encoding === '<' || encoding === '>' || encoding === '|') && !tensor.empty && tensor.type.dataType !== '?') { + shape = tensor.toString(); + if (shape && shape.length > 10) { + shape = `${shape.substring(0, 10)}\u2026`; + } + separator = ' = '; + } + } catch (err) { + let type = '?'; + try { + type = value.initializer.type.toString(); + } catch (error) { + // continue regardless of error + } + const error = new view.Error(`Failed to render tensor of type '${type}' (${err.message}).`); + if (this.context.view.model && this.context.view.model.identifier) { + error.context = this.context.view.model.identifier; + } + throw error; + } + } + } + list.add(argument.name, shape, type ? 
type.toString() : '', separator); + } + if (hiddenInitializers) { + list.add('\u3008\u2026\u3009', '', null, ''); + } + for (const attribute of attributes) { + if (attribute.visible !== false) { + let value = new view.Formatter(attribute.value, attribute.type).toString(); + if (value && value.length > 25) { + value = `${value.substring(0, 25)}\u2026`; + } + list.add(attribute.name, value, attribute.type, ' = '); + } + } + for (const attribute of objects) { + if (attribute.type === 'graph') { + const node = this.context.createNode(null, attribute.value); + list.add(attribute.name, node, '', ''); + } + if (attribute.type === 'function' || attribute.type === 'object') { + const node = this.context.createNode(attribute.value); + list.add(attribute.name, node, '', ''); + } + if (attribute.type === 'function[]' || attribute.type === 'object[]') { + const nodes = attribute.value.map((value) => this.context.createNode(value)); + list.add(attribute.name, nodes, '', ''); + } + } + } + if (Array.isArray(node.nodes) && node.nodes.length > 0) { + // this.canvas = this.canvas(); + } + if (Array.isArray(node.chain) && node.chain.length > 0) { + for (const innerNode of node.chain) { + this.context.createNode(innerNode); + this._add(innerNode); + } + } + if (node.inner) { + this.context.createNode(node.inner); + this._add(node.inner); + } + } + + toggle() { + this._expand.content = '-'; + this._graph = new view.Graph(this.context.view, this.context.model, this.context.options, false, {}); + this._graph.add(this.value); + // const document = this.element.ownerDocument; + // const parent = this.element.parentElement; + // this._graph.build(document, parent); + // this._graph.update(); + this.canvas.width = 300; + this.canvas.height = 300; + this.layout(); + this.context.update(); + } + + activate() { + this.context.view.showNodeProperties(this.value); + } + + edge(to) { + this._edges = this._edges || new Map(); + if (!this._edges.has(to)) { + this._edges.set(to, new view.Edge(this, to)); + } + return this._edges.get(to); + } +}; + +view.Input = class extends grapher.Node { + + constructor(context, value) { + super(); + this.context = context; + this.value = value; + view.Input.counter = view.Input.counter || 0; + const types = value.value.map((argument) => argument.type || '').join('\n'); + let name = value.name || ''; + if (name.length > 16) { + name = name.split('/').pop(); + } + const header = this.header(); + const title = header.add(null, [ 'graph-item-input' ], name, types); + title.on('click', () => this.context.view.showModelProperties()); + this.id = `input-${name ? 
`name-${name}` : `id-${(view.Input.counter++)}`}`; + } + + get class() { + return 'graph-input'; + } + + get inputs() { + return []; + } + + get outputs() { + return [ this.value ]; + } + + activate() { + this.context.view.showModelProperties(); + } + + edge(to) { + this._edges = this._edges || new Map(); + if (!this._edges.has(to)) { + this._edges.set(to, new view.Edge(this, to)); + } + return this._edges.get(to); + } +}; + +view.Output = class extends grapher.Node { + + constructor(context, value) { + super(); + this.context = context; + this.value = value; + const types = value.value.map((argument) => argument.type || '').join('\n'); + let name = value.name || ''; + if (name.length > 16) { + name = name.split('/').pop(); + } + const header = this.header(); + const title = header.add(null, [ 'graph-item-output' ], name, types); + title.on('click', () => this.context.view.showModelProperties()); + } + + get inputs() { + return [ this.value ]; + } + + get outputs() { + return []; + } + + activate() { + this.context.view.showModelProperties(); + } +}; + +view.Value = class { + + constructor(context, argument) { + this.context = context; + this.value = argument; + this.from = null; + this.to = []; + } + + controlDependency(node) { + this._controlDependencies = this._controlDependencies || new Set(); + this._controlDependencies.add(this.to.length); + this.to.push(node); + } + + build() { + this._edges = this._edges || []; + if (this.from && Array.isArray(this.to)) { + for (let i = 0; i < this.to.length; i++) { + const to = this.to[i]; + let content = ''; + const type = this.value.type; + if (type && + type.shape && + type.shape.dimensions && + type.shape.dimensions.length > 0 && + type.shape.dimensions.every((dim) => !dim || Number.isInteger(dim) || dim instanceof base.Int64 || (typeof dim === 'string'))) { + content = type.shape.dimensions.map((dim) => (dim !== null && dim !== undefined) ? dim : '?').join('\u00D7'); + content = content.length > 16 ? 
'' : content; + } + if (this.context.options.names) { + content = this.value.name.split('\n').shift(); // custom argument id + } + const edge = this.from.edge(to); + if (!edge.value) { + edge.value = this; + if (content) { + edge.label = content; + } + edge.id = `edge-${this.value.name}`; + if (this._controlDependencies && this._controlDependencies.has(i)) { + edge.class = 'edge-path-control-dependency'; + } + } + this.context.setEdge(edge); + this._edges.push(edge); + } + } + } + + select() { + let array = []; + if (Array.isArray(this._edges)) { + for (const edge of this._edges) { + array = array.concat(edge.select()); + } + } + return array; + } + + deselect() { + if (Array.isArray(this._edges)) { + for (const edge of this._edges) { + edge.deselect(); + } + } + } + + activate() { + if (this.value && this.from && Array.isArray(this.to)) { + const value = this.value; + const from = this.from.value; + const to = this.to.map((node) => node.value); + this.context.view.showConnectionProperties(value, from, to); + } + } +}; + +view.Edge = class extends grapher.Edge { + + constructor(from, to) { + super(from, to); + this.v = from.name; + this.w = to.name; + } + + get minlen() { + if (this.from.inputs.every((argument) => argument.value.every((value) => value.initializer))) { + return 2; + } + return 1; + } + + emit(event) { + switch (event) { + case 'pointerover': { + this.value.context.focus([ this.value.value ]); + break; + } + case 'pointerleave': { + this.value.context.blur([ this.value.value ]); + break; + } + case 'click': { + this.value.context.activate(this.value.value); + break; + } + default: + break; + } + } +}; + +view.Sidebar = class { + + constructor(host) { + this._host = host; + this._stack = []; + const pop = () => this._update(this._stack.slice(0, -1)); + this._closeSidebarHandler = () => pop(); + this._closeSidebarKeyDownHandler = (e) => { + if (e.keyCode == 27) { + e.preventDefault(); + pop(); + } + }; + const sidebar = this._element('sidebar'); + sidebar.addEventListener('transitionend', (event) => { + if (event.propertyName === 'opacity' && sidebar.style.opacity === '0') { + const content = this._element('sidebar-content'); + content.innerHTML = ''; + } + }); + } + + _element(id) { + return this._host.document.getElementById(id); + } + + open(content, title) { + this._update([ { title: title, content: content } ]); + } + + close() { + this._update([]); + } + + push(content, title) { + this._update(this._stack.concat({ title: title, content: content })); + } + + _update(stack) { + const sidebar = this._element('sidebar'); + const container = this._element('graph'); + const closeButton = this._element('sidebar-closebutton'); + closeButton.removeEventListener('click', this._closeSidebarHandler); + this._host.document.removeEventListener('keydown', this._closeSidebarKeyDownHandler); + if (stack) { + this._stack = stack; + } else if (this._stack.length > 0) { + this._stack.pop(); + } + if (this._stack.length > 0) { + const item = this._stack[this._stack.length - 1]; + this._element('sidebar-title').innerHTML = item.title || ''; + closeButton.addEventListener('click', this._closeSidebarHandler); + const content = this._element('sidebar-content'); + if (typeof item.content == 'string') { + content.innerHTML = item.content; + } else if (item.content instanceof Array) { + content.innerHTML = ''; + for (const element of item.content) { + content.appendChild(element); + } + } else { + content.innerHTML = ''; + content.appendChild(item.content); + } + sidebar.style.width = 
'min(calc(100% * 0.6), 42em)'; + sidebar.style.right = 0; + sidebar.style.opacity = 1; + this._host.document.addEventListener('keydown', this._closeSidebarKeyDownHandler); + container.style.width = 'max(40vw, calc(100vw - 42em))'; + } else { + sidebar.style.right = 'calc(0px - min(calc(100% * 0.6), 42em))'; + sidebar.style.opacity = 0; + container.style.width = '100%'; + container.focus(); + } + } +}; + +view.Control = class { + + constructor(host) { + this._host = host; + } + + createElement(tagName, className) { + const element = this._host.document.createElement(tagName); + if (className) { + element.setAttribute('class', className); + } + return element; + } + + on(event, callback) { + this._events = this._events || {}; + this._events[event] = this._events[event] || []; + this._events[event].push(callback); + } + + emit(event, data) { + if (this._events && this._events[event]) { + for (const callback of this._events[event]) { + callback(this, data); + } + } + } +}; + +view.ObjectSidebar = class extends view.Control { + + constructor(host) { + super(host); + this._container = this.createElement('div', 'sidebar-object'); + } + + add(name, item) { + const entry = new view.NameValueView(this._host, name, item); + const element = entry.render(); + this._container.appendChild(element); + } + + addProperty(name, value, style) { + const item = new view.ValueTextView(this._host, value, style); + this.add(name, item); + return item; + } + + addHeader(title) { + const element = this.createElement('div', 'sidebar-header'); + element.innerText = title; + this._container.appendChild(element); + } + + render() { + return [ this._container ]; + } +}; + +view.NodeSidebar = class extends view.ObjectSidebar { + + constructor(host, node) { + super(host); + this._node = node; + this._attributes = []; + this._inputs = []; + this._outputs = []; + if (node.type) { + const type = node.type; + const item = this.addProperty('type', node.type.identifier || node.type.name); + if (type && (type.description || type.inputs || type.outputs || type.attributes)) { + item.action(type.nodes ? '\u0192': '?', () => { + this.emit('show-documentation', null); + }); + } + const module = node.type.module; + const version = node.type.version; + const status = node.type.status; + if (module || version || status) { + const list = [ module, version ? `v${version}` : '', status ]; + this.addProperty('module', list.filter((value) => value).join(' ')); + } + } + if (node.name) { + this.addProperty('name', node.name); + } + if (node.location) { + this.addProperty('location', node.location); + } + if (node.description) { + this.addProperty('description', node.description); + } + if (node.device) { + this.addProperty('device', node.device); + } + const attributes = node.attributes; + if (Array.isArray(attributes) && attributes.length > 0) { + this.addHeader('Attributes'); + attributes.sort((a, b) => { + const au = a.name.toUpperCase(); + const bu = b.name.toUpperCase(); + return (au < bu) ? -1 : (au > bu) ? 
1 : 0; + }); + for (const attribute of attributes) { + this._addAttribute(attribute.name, attribute); + } + } + const inputs = node.inputs; + if (Array.isArray(inputs) && inputs.length > 0) { + this.addHeader('Inputs'); + for (const input of inputs) { + this._addInput(input.name, input); + } + } + const outputs = node.outputs; + if (Array.isArray(outputs) && outputs.length > 0) { + this.addHeader('Outputs'); + for (const output of outputs) { + this._addOutput(output.name, output); + } + } + } + + _addAttribute(name, attribute) { + let value = null; + switch (attribute.type) { + case 'tensor': { + value = new view.ValueView(this._host, { type: attribute.value.type, initializer: attribute.value }, ''); + value.on('export-tensor', (sender, value) => this.emit('export-tensor', value)); + value.on('error', (sender, value) => this.emit('error', value)); + break; + } + case 'tensor[]': { + const values = attribute.value.map((value) => { + return { type: value.type, initializer: value }; + }); + value = new view.ArgumentView(this._host, { value: values }, ''); + break; + } + default: { + value = new view.AttributeView(this._host, attribute); + value.on('activate', (sender, graph) => { + this.emit('activate', graph); + }); + break; + } + } + const item = new view.NameValueView(this._host, name, value); + this._attributes.push(item); + this._container.appendChild(item.render()); + } + + _addInput(name, input) { + if (input.value.length > 0) { + const value = new view.ArgumentView(this._host, input); + value.on('export-tensor', (sender, value) => this.emit('export-tensor', value)); + value.on('error', (sender, value) => this.emit('error', value)); + value.on('activate', (sender, value) => this.emit('activate', value)); + value.on('deactivate', (sender, value) => this.emit('deactivate', value)); + value.on('select', (sender, value) => this.emit('select', value)); + const item = new view.NameValueView(this._host, name, value); + this._inputs.push(item); + this._container.appendChild(item.render()); + } + } + + _addOutput(name, output) { + if (output.value.length > 0) { + const value = new view.ArgumentView(this._host, output); + value.on('activate', (sender, value) => this.emit('activate', value)); + value.on('deactivate', (sender, value) => this.emit('deactivate', value)); + value.on('select', (sender, value) => this.emit('select', value)); + const item = new view.NameValueView(this._host, name, value); + this._outputs.push(item); + this._container.appendChild(item.render()); + } + } +}; + +view.NameValueView = class extends view.Control { + + constructor(host, name, value) { + super(host); + this._host = host; + this._name = name; + this._value = value; + const nameElement = this.createElement('div', 'sidebar-item-name'); + const input = this.createElement('input'); + input.setAttribute('type', 'text'); + input.setAttribute('value', name); + input.setAttribute('title', name); + input.setAttribute('readonly', 'true'); + nameElement.appendChild(input); + const valueElement = this.createElement('div', 'sidebar-item-value-list'); + for (const element of value.render()) { + valueElement.appendChild(element); + } + this._element = this.createElement('div', 'sidebar-item'); + this._element.appendChild(nameElement); + this._element.appendChild(valueElement); + } + + get name() { + return this._name; + } + + render() { + return this._element; + } + + toggle() { + this._value.toggle(); + } +}; + +view.SelectView = class extends view.Control { + + constructor(host, values, selected) { + super(); + this._host = 
host; + this._elements = []; + this._values = values; + + const selectElement = this.createElement('select', 'sidebar-item-select'); + selectElement.addEventListener('change', (e) => { + this.emit('change', this._values[e.target.selectedIndex]); + }); + this._elements.push(selectElement); + + for (const value of values) { + const optionElement = this.createElement('option'); + optionElement.innerText = value.name || ''; + if (value == selected) { + optionElement.setAttribute('selected', 'selected'); + } + selectElement.appendChild(optionElement); + } + } + + render() { + return this._elements; + } +}; +
+view.ValueTextView = class extends view.Control { + + constructor(host, value, style) { + super(host); + this._element = this.createElement('div', 'sidebar-item-value'); + if (value) { + const list = Array.isArray(value) ? value : [ value ]; + let className = 'sidebar-item-value-line'; + for (const item of list) { + const line = this.createElement('div', className); + switch (style) { + case 'code': + line.innerHTML = `<code>${item}</code>`; + break; + case 'bold': + line.innerHTML = `<b>${item}</b>`; + break; + default: + line.innerText = item; + break; + } + this._element.appendChild(line); + className = 'sidebar-item-value-line-border'; + } + } + } + + action(text, callback) { + this._action = this.createElement('div', 'sidebar-item-value-expander'); + this._action.innerHTML = text; + this._action.addEventListener('click', () => { + callback(); + }); + this._element.insertBefore(this._action, this._element.childNodes[0]); + } + + render() { + return [ this._element ]; + } + + toggle() { + } +}; +
+view.AttributeView = class extends view.Control { + + constructor(host, attribute) { + super(host); + this._attribute = attribute; + this._element = this.createElement('div', 'sidebar-item-value'); + const type = this._attribute.type; + if (type && type !== 'tensor') { + this._expander = this.createElement('div', 'sidebar-item-value-expander'); + this._expander.innerText = '+'; + this._expander.addEventListener('click', () => { + this.toggle(); + }); + this._element.appendChild(this._expander); + } + const value = this._attribute.value; + switch (type) { + case 'graph': { + const line = this.createElement('div', 'sidebar-item-value-line-link'); + line.innerHTML = value.name || '&nbsp;'; + line.addEventListener('click', () => { + this.emit('activate', value); + }); + this._element.appendChild(line); + break; + } + case 'function': { + const line = this.createElement('div', 'sidebar-item-value-line-link'); + line.innerHTML = value.type.name; + line.addEventListener('click', () => { + this.emit('activate', value); + }); + this._element.appendChild(line); + break; + } + case 'tensor': { + throw new view.Error('Attribute view tensor not implemented.'); + } + default: { + let content = new view.Formatter(value, type).toString(); + if (content && content.length > 1000) { + content = `${content.substring(0, 1000)}\u2026`; + } + if (content && typeof content === 'string') { + content = content.split('<').join('&lt;').split('>').join('&gt;'); + } + const line = this.createElement('div', 'sidebar-item-value-line'); + line.innerHTML = content ? content : '&nbsp;'; + this._element.appendChild(line); + } + } + } + + render() { + return [ this._element ]; + } + + toggle() { + if (this._expander.innerText == '+') { + this._expander.innerText = '-'; + const type = this._attribute.type; + const value = this._attribute.value; + const content = type == 'tensor' && value && value.type ? value.type.toString() : this._attribute.type;
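+ // Expanding appends detail lines after the first two children (the expander and the value line); collapsing removes everything beyond them.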
+ const typeLine = this.createElement('div', 'sidebar-item-value-line-border'); + typeLine.innerHTML = `type: ` + `<code><b>${content}</b></code>`; + this._element.appendChild(typeLine); + const description = this._attribute.description; + if (description) { + const descriptionLine = this.createElement('div', 'sidebar-item-value-line-border'); + descriptionLine.innerHTML = description; + this._element.appendChild(descriptionLine); + } + } else { + this._expander.innerText = '+'; + while (this._element.childElementCount > 2) { + this._element.removeChild(this._element.lastChild); + } + } + } +}; +
+view.ArgumentView = class extends view.Control { + + constructor(host, argument) { + super(); + this._argument = argument; + this._elements = []; + this._items = []; + for (const value of argument.value) { + const item = new view.ValueView(host, value); + item.on('export-tensor', (sender, value) => this.emit('export-tensor', value)); + item.on('error', (sender, value) => this.emit('error', value)); + item.on('activate', (sender, value) => this.emit('activate', value)); + item.on('deactivate', (sender, value) => this.emit('deactivate', value)); + item.on('select', (sender, value) => this.emit('select', value)); + this._items.push(item); + for (const element of item.render()) { + this._elements.push(element); + } + } + } + + render() { + return this._elements; + } + + toggle() { + for (const item of this._items) { + item.toggle(); + } + } +}; +
+view.ValueView = class extends view.Control { + + constructor(host, value, name) { + super(host); + this._value = value; + this._element = this.createElement('div', 'sidebar-item-value'); + const type = this._value.type; + const initializer = this._value.initializer; + const quantization = this._value.quantization; + const location = this._value.location !== undefined; + if (initializer) { + this._element.classList.add('sidebar-item-value-dark'); + } + if (type || initializer || quantization || location || name !== undefined) { + this._expander = this.createElement('div', 'sidebar-item-value-expander'); + this._expander.innerText = '+'; + this._expander.addEventListener('click', () => { + this.toggle(); + }); + this._element.appendChild(this._expander); + } + const tensor = name !== undefined; + name = this._value.name ? this._value.name.split('\n').shift() : ''; // custom argument id + this._hasId = name && !tensor ? true : false; + this._hasCategory = initializer && initializer.category ? true : false;
+ if (this._hasId || (!this._hasCategory && !type && !tensor)) { + this._hasId = true; + const nameLine = this.createElement('div', 'sidebar-item-value-line'); + if (typeof name !== 'string') { + throw new view.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + nameLine.innerHTML = `name: ${name || '&nbsp;'}`; + nameLine.addEventListener('pointerenter', () => this.emit('activate', this._value)); + nameLine.addEventListener('pointerleave', () => this.emit('deactivate', this._value)); + if (!initializer) { + nameLine.style.cursor = 'pointer'; + nameLine.addEventListener('click', () => this.emit('select', this._value)); + } + this._element.appendChild(nameLine); + } else if (this._hasCategory) { + this._bold('category', initializer.category); + } else if (type) { + this._code('tensor', type.toString().split('<').join('&lt;').split('>').join('&gt;')); + } + } + + render() { + return [ this._element ]; + } + + toggle() { + if (this._expander) { + if (this._expander.innerText == '+') { + this._expander.innerText = '-'; + const initializer = this._value.initializer; + if (this._hasId && this._hasCategory) { + this._bold('category', initializer.category); + } + let type = null; + let denotation = null; + if (this._value.type) { + type = this._value.type.toString(); + denotation = this._value.type.denotation || null; + } + if (type && (this._hasId || this._hasCategory)) { + this._code('tensor', type.split('<').join('&lt;').split('>').join('&gt;')); + } + if (denotation) { + this._code('denotation', denotation); + } + const description = this._value.description; + if (description) { + const descriptionLine = this.createElement('div', 'sidebar-item-value-line-border'); + descriptionLine.innerHTML = description; + this._element.appendChild(descriptionLine); + } + const quantization = this._value.quantization; + if (quantization) { + if (typeof quantization.type !== 'string') { + throw new view.Error('Unsupported quantization value.'); + } + const line = this.createElement('div', 'sidebar-item-value-line-border'); + const content = [
+                        "<b>",
+                        "quantization: ",
+                        `${quantization.type}`,
+                        "</b>",
+                        "<br><pre>",
+                        new view.Quantization(quantization).toString(),
+                        "</pre>"
+ ]; + line.innerHTML = content.join(''); + this._element.appendChild(line); + } + const location = this._value.location; + if (location !== undefined) { + this._bold('location', location); + } + const layout = this._value.type ? this._value.type.layout : null; + if (layout) { + const layouts = new Map([ + [ 'sparse', 'sparse' ], + [ 'sparse.coo', 'sparse coo' ], + [ 'sparse.csr', 'sparse csr' ], + [ 'sparse.csc', 'sparse csc' ], + [ 'sparse.bsr', 'sparse bsr' ], + [ 'sparse.bsc', 'sparse bsc' ] + ]); + this._bold('layout', layouts.get(layout)); + } + if (initializer) { + this._tensor(initializer); + } + } else { + this._expander.innerText = '+'; + while (this._element.childElementCount > 2) { + this._element.removeChild(this._element.lastChild); + } + } + } + } +
+ _bold(name, value) { + const line = this.createElement('div'); + line.innerHTML = `${name}: ` + `<b>${value}</b>`; + this._add(line); + } +
+ _code(name, value) { + const line = this.createElement('div'); + line.innerHTML = `${name}: ` + `<code><b>${value}</b></code>`; + this._add(line); + } +
+ _add(child) { + child.className = this._element.childNodes.length < 2 ? 'sidebar-item-value-line' : 'sidebar-item-value-line-border'; + this._element.appendChild(child); + } +
+ _tensor(value) { + const contentLine = this.createElement('pre'); + try { + const tensor = new view.Tensor(value); + if (Array.isArray(tensor.stride) && tensor.stride.length > 0) { + this._code('stride', tensor.stride.join(',')); + } + if (tensor.encoding !== '<' && tensor.encoding !== '>' && tensor.encoding !== '|') { + contentLine.innerHTML = `Tensor encoding '${tensor.encoding}' is not implemented.`; + } else if (tensor.layout && (tensor.layout !== 'sparse' && tensor.layout !== 'sparse.coo')) { + contentLine.innerHTML = `Tensor layout '${tensor.layout}' is not implemented.`; + } else if (tensor.empty) { + contentLine.innerHTML = 'Tensor data is empty.'; + } else if (tensor.type && tensor.type.dataType === '?') { + contentLine.innerHTML = 'Tensor data type is not defined.'; + } else if (tensor.type && !tensor.type.shape) { + contentLine.innerHTML = 'Tensor shape is not defined.'; + } else { + contentLine.innerHTML = tensor.toString(); + if (this._host.save && + value.type.shape && value.type.shape.dimensions && + value.type.shape.dimensions.length > 0) { + this._saveButton = this.createElement('div', 'sidebar-item-value-expander'); + this._saveButton.innerHTML = '💾'; + this._saveButton.addEventListener('click', () => { + this.emit('export-tensor', tensor); + }); + this._element.appendChild(this._saveButton); + } + } + } catch (err) { + contentLine.innerHTML = err.toString(); + this.emit('error', err); + } + const valueLine = this.createElement('div', 'sidebar-item-value-line-border'); + valueLine.appendChild(contentLine); + this._element.appendChild(valueLine); + } +}; +
+view.NodeView = class extends view.Control { + + constructor(host, node) { + super(host); + this._node = node; + this._element = this.createElement('div', 'sidebar-item-value'); + const name = node.name; + const type = node.type ? node.type.name : '';
+ if (name && type) { + this._expander = this.createElement('div', 'sidebar-item-value-expander'); + this._expander.innerText = '+'; + this._expander.addEventListener('click', () => { + this.toggle(); + }); + this._element.appendChild(this._expander); + } + if (type) { + const element = this.createElement('div', 'sidebar-item-value-line'); + element.innerHTML = `node: ${type || '&nbsp;'}`; + element.addEventListener('pointerenter', () => this.emit('activate', this._node)); + element.addEventListener('pointerleave', () => this.emit('deactivate', this._node)); + element.addEventListener('click', () => this.emit('select', this._node)); + element.style.cursor = 'pointer'; + this._element.appendChild(element); + } else { + const element = this.createElement('div', 'sidebar-item-value-line'); + element.innerHTML = `name: ${name || '&nbsp;'}`; + element.addEventListener('pointerenter', () => this.emit('activate', this._node)); + element.addEventListener('pointerleave', () => this.emit('deactivate', this._node)); + element.addEventListener('click', () => this.emit('select', this._node)); + element.style.cursor = 'pointer'; + this._element.appendChild(element); + } + } +
+ render() { + return [ this._element ]; + } +
+ toggle() { + if (this._expander) { + if (this._expander.innerText == '+') { + this._expander.innerText = '-'; + const name = this._node.name; + const element = this.createElement('div', 'sidebar-item-value-line-border'); + element.innerHTML = `name: ${name}`; + element.addEventListener('pointerenter', () => this.emit('activate', this._node)); + element.addEventListener('pointerleave', () => this.emit('deactivate', this._node)); + element.addEventListener('click', () => this.emit('select', this._node)); + element.style.cursor = 'pointer'; + this._element.appendChild(element); + } else { + this._expander.innerText = '+'; + while (this._element.childElementCount > 2) { + this._element.removeChild(this._element.lastChild); + } + } + } + } +}; +
+view.NodeListView = class extends view.Control { + + constructor(host, list) { + super(); + this._host = host; + this._elements = []; + for (const node of list) { + const item = new view.NodeView(host, node); + item.on('activate', (sender, value) => this.emit('activate', value)); + item.on('deactivate', (sender, value) => this.emit('deactivate', value)); + item.on('select', (sender, value) => this.emit('select', value)); + item.toggle(); + for (const element of item.render()) { + this._elements.push(element); + } + } + } + + render() { + return this._elements; + } +}; +
+view.ConnectionSidebar = class extends view.ObjectSidebar { + + constructor(host, value, from, to) { + super(host); + this._host = host; + this._value = value; + this._from = from; + this._to = to; + const [name] = value.name.split('\n'); + this.addProperty('name', name); + if (value.type) { + const item = new view.ValueView(this._host, value, ''); + this.add('type', item); + item.toggle(); + } + if (from) { + this.addHeader('Inputs'); + const list = new view.NodeListView(host, [ from ]); + list.on('activate', (sender, value) => this.emit('activate', value)); + list.on('deactivate', (sender, value) => this.emit('deactivate', value)); + list.on('select', (sender, value) => this.emit('select', value)); + const item = new view.NameValueView(this._host, 'from', list); + this._container.appendChild(item.render()); + } + if (Array.isArray(to) && to.length > 0) { + this.addHeader('Outputs'); + const list = new view.NodeListView(this._host, to);
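+ // Forward hover and click events from the node list so the view can focus, blur, or select the corresponding graph node.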
+ list.on('activate', (sender, value) => this.emit('activate', value)); + list.on('deactivate', (sender, value) => this.emit('deactivate', value)); + list.on('select', (sender, value) => this.emit('select', value)); + const item = new view.NameValueView(this._host, 'to', list); + this._container.appendChild(item.render()); + } + } +}; + +view.ModelSidebar = class extends view.ObjectSidebar { + + constructor(host, model, graph) { + super(host); + this._model = model; + + if (model.format) { + this.addProperty('format', model.format); + } + if (model.producer) { + this.addProperty('producer', model.producer); + } + if (model.name) { + this.addProperty('name', model.name); + } + if (model.version) { + this.addProperty('version', model.version); + } + if (model.description) { + this.addProperty('description', model.description); + } + if (model.domain) { + this.addProperty('domain', model.domain); + } + if (model.imports) { + this.addProperty('imports', model.imports); + } + if (model.runtime) { + this.addProperty('runtime', model.runtime); + } + if (model.metadata) { + for (const [name, value] of Array.from(model.metadata)) { + this.addProperty(name, value); + } + } + const graphs = Array.isArray(model.graphs) ? model.graphs : []; + if (graphs.length === 1 && graphs[0].name) { + this.addProperty('graph', graphs[0].name); + } else if (graphs.length > 1) { + const selector = new view.SelectView(this._host, model.graphs, graph); + selector.on('change', (sender, data) => this.emit('update-active-graph', data)); + this.add('graph', selector); + } + if (graph) { + if (graph.version) { + this.addProperty('version', graph.version); + } + if (graph.type) { + this.addProperty('type', graph.type); + } + if (graph.tags) { + this.addProperty('tags', graph.tags); + } + if (graph.description) { + this.addProperty('description', graph.description); + } + if (Array.isArray(graph.inputs) && graph.inputs.length > 0) { + this.addHeader('Inputs'); + for (const input of graph.inputs) { + this.addArgument(input.name, input); + } + } + if (Array.isArray(graph.outputs) && graph.outputs.length > 0) { + this.addHeader('Outputs'); + for (const output of graph.outputs) { + this.addArgument(output.name, output); + } + } + } + } + + render() { + return [ this._container ]; + } + + addArgument(name, argument) { + const value = new view.ArgumentView(this._host, argument); + value.toggle(); + const item = new view.NameValueView(this._host, name, value); + this._container.appendChild(item.render()); + } +}; + +view.DocumentationSidebar = class extends view.Control { + + constructor(host, type) { + super(); + this._host = host; + this._type = type; + } + + render() { + if (!this._elements) { + this._elements = []; + const type = view.Documentation.format(this._type); + const element = this.createElement('div', 'sidebar-documentation'); + this._append(element, 'h1', type.name); + if (type.summary) { + this._append(element, 'p', type.summary); + } + if (type.description) { + this._append(element, 'p', type.description); + } + if (Array.isArray(type.attributes) && type.attributes.length > 0) { + this._append(element, 'h2', 'Attributes'); + const attributes = this._append(element, 'dl'); + for (const attribute of type.attributes) { + this._append(attributes, 'dt', attribute.name + (attribute.type ? 
`: ${attribute.type}` : '')); + this._append(attributes, 'dd', attribute.description); + } + } + if (Array.isArray(type.inputs) && type.inputs.length > 0) { + this._append(element, 'h2', `Inputs${type.inputs_range ? ` (${type.inputs_range})` : ''}`); + const inputs = this._append(element, 'dl'); + for (const input of type.inputs) { + this._append(inputs, 'dt', input.name + (input.type ? `: ${input.type}` : '') + (input.option ? ` (${input.option})` : '')); + this._append(inputs, 'dd', input.description); + } + } + if (Array.isArray(type.outputs) && type.outputs.length > 0) { + this._append(element, 'h2', `Outputs${type.outputs_range ? ` (${type.outputs_range})` : ''}`); + const outputs = this._append(element, 'dl'); + for (const output of type.outputs) { + this._append(outputs, 'dt', output.name + (output.type ? `: ${output.type}` : '') + (output.option ? ` (${output.option})` : '')); + this._append(outputs, 'dd', output.description); + } + } + if (Array.isArray(type.type_constraints) && type.type_constraints.length > 0) { + this._append(element, 'h2', 'Type Constraints'); + const type_constraints = this._append(element, 'dl'); + for (const type_constraint of type.type_constraints) { + this._append(type_constraints, 'dt', `${type_constraint.type_param_str}: ${type_constraint.allowed_type_strs.map((item) => `<code>${item}</code>`).join(', ')}`); + this._append(type_constraints, 'dd', type_constraint.description); + } + } + if (Array.isArray(type.examples) && type.examples.length > 0) { + this._append(element, 'h2', 'Examples'); + for (const example of type.examples) { + this._append(element, 'h3', example.summary); + this._append(element, 'pre', example.code); + } + } + if (Array.isArray(type.references) && type.references.length > 0) { + this._append(element, 'h2', 'References'); + const references = this._append(element, 'ul'); + for (const reference of type.references) { + this._append(references, 'li', reference.description); + } + } + if (this._host.type === 'Electron') { + element.addEventListener('click', (e) => { + if (e.target && e.target.href) { + const url = e.target.href; + if (url.startsWith('http://') || url.startsWith('https://')) { + e.preventDefault(); + this.emit('navigate', { link: url }); + } + } + }); + } + this._elements = [ element ]; + } + return this._elements; + } +
+ _append(parent, type, content) { + const element = this.createElement(type); + if (content) { + element.innerHTML = content; + } + parent.appendChild(element); + return element; + } +}; +
+view.FindSidebar = class extends view.Control { + + constructor(host, graph) { + super(host); + this._graph = graph; + this._table = new Map(); + this._searchElement = this.createElement('input', 'sidebar-find-search'); + this._searchElement.setAttribute('id', 'search'); + this._searchElement.setAttribute('type', 'text'); + this._searchElement.setAttribute('spellcheck', 'false'); + this._searchElement.setAttribute('placeholder', 'Search'); + this._searchElement.addEventListener('input', (e) => { + this.update(e.target.value); + this.emit('search-text-changed', e.target.value); + }); + this._searchElement.addEventListener('keydown', (e) => { + if (e.keyCode === 0x08 && !e.altKey && !e.ctrlKey && !e.shiftKey && !e.metaKey) { + e.stopPropagation(); + } + }); + this._contentElement = this.createElement('ol', 'sidebar-find-content'); + this._contentElement.addEventListener('click', (e) => { + const identifier = e.target.getAttribute('data'); + if (this._table.has(identifier)) { + this.emit('select', this._table.get(identifier)); + } + }); + }
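+ // A quoted search term ('name' or "name") requires an exact match; otherwise the text is split into lowercase terms that must all occur in a candidate name.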
+ + focus(searchText) { + this._searchElement.focus(); + this._searchElement.value = ''; + this._searchElement.value = searchText; + this.update(searchText); + } +
+ update(searchText) { + while (this._contentElement.lastChild) { + this._contentElement.removeChild(this._contentElement.lastChild); + } + this._table.clear(); + let index = 0; + const add = (value, content) => { + const key = index.toString(); + index++; + this._table.set(key, value); + const element = this.createElement('li'); + element.innerText = content; + element.setAttribute('data', key); + element.addEventListener('pointerover', (e) => { + const identifier = e.target.getAttribute('data'); + if (this._table.has(identifier)) { + this.emit('focus', this._table.get(identifier)); + } + }); + element.addEventListener('pointerleave', (e) => { + const identifier = e.target.getAttribute('data'); + if (this._table.has(identifier)) { + this.emit('blur', this._table.get(identifier)); + } + }); + this._contentElement.appendChild(element); + }; + let terms = null; + let match = null; + const unquote = searchText.match(/^(?:'(.*)'|"(.*)")$/); + if (unquote) { + const term = unquote[1] || unquote[2]; + terms = [ term ]; + match = (name) => { + return term === name; + }; + } else { + terms = searchText.trim().toLowerCase().split(' ').map((term) => term.trim()).filter((term) => term.length > 0); + match = (name) => { + return terms.every((term) => name && name.toLowerCase().indexOf(term) !== -1); + }; + } + const edges = new Set(); + const matchValue = (value) => { + if (terms.length === 0) { + return true; + } + if (value.name && match(value.name.split('\n').shift())) { + return true; + } + if (value.location && match(value.location)) { + return true; + } + if (value.type) { + for (const term of terms) { + if (value.type.dataType && term === value.type.dataType.toLowerCase()) { + return true; + } + if (value.type.shape) { + if (term === value.type.shape.toString().toLowerCase()) { + return true; + } + if (value.type.shape && Array.isArray(value.type.shape.dimensions)) { + const dimensions = value.type.shape.dimensions.map((dimension) => dimension ? dimension.toString().toLowerCase() : '');
+ if (term === dimensions.join(',')) { + return true; + } + if (dimensions.some((dimension) => term === dimension)) { + return true; + } + } + } + } + } + return false; + }; + const edge = (value) => { + if (value.name && !edges.has(value.name) && matchValue(value)) { + add(value, `\u2192 ${value.name.split('\n').shift()}`); // split custom argument id + edges.add(value.name); + } + }; + for (const input of this._graph.inputs) { + for (const value of input.value) { + edge(value); + } + } + for (const node of this._graph.nodes) { + const initializers = []; + for (const input of node.inputs) { + for (const value of input.value) { + if (value.initializer) { + initializers.push(value); + } else { + edge(value); + } + } + } + const name = node.name; + const type = node.type.name; + const location = node.location; + if ((name && match(name)) || (type && match(type)) || (location && match(location))) { + add(node, `\u25A2 ${name || `[${type}]`}`); + } + for (const value of initializers) { + if (value.name && !edges.has(value.name) && matchValue(value)) { + add(node, `\u25A0 ${value.name.split('\n').shift()}`); // split custom argument id + } + } + } + for (const output of this._graph.outputs) { + for (const value of output.value) { + edge(value); + } + } + this._contentElement.style.display = this._contentElement.childNodes.length != 0 ? 'block' : 'none'; + } +
+ render() { + return [ this._searchElement, this._contentElement ]; + } +}; +
+view.Tensor = class { + + constructor(tensor) { + this._tensor = tensor; + this._type = tensor.type; + this._encoding = tensor.encoding; + this._layout = tensor.type.layout; + this._stride = tensor.stride; + switch (this._encoding) { + case undefined: + case '': + case '<': { + this._data = this._tensor.values; + this._encoding = '<'; + this._littleEndian = true; + break; + } + case '>': { + this._data = this._tensor.values; + this._encoding = '>'; + this._littleEndian = false; + break; + } + case '|': { + this._values = this._tensor.values; + this._encoding = '|'; + break; + } + default: { + throw new view.Error(`Unsupported tensor encoding '${this._encoding}'.`); + } + } + switch (this._layout) { + case 'sparse': + case 'sparse.coo': { + this._indices = this._tensor.indices; + this._values = this._tensor.values; + break; + } + default: { + break; + } + } + view.Tensor.dataTypes = view.Tensor.dataTypes || new Map([ + [ 'boolean', 1 ], + [ 'qint8', 1 ], [ 'qint16', 2 ], [ 'qint32', 4 ], + [ 'quint8', 1 ], [ 'quint16', 2 ], [ 'quint32', 4 ], + [ 'xint8', 1 ], + [ 'int8', 1 ], [ 'int16', 2 ], [ 'int32', 4 ], [ 'int64', 8 ], + [ 'uint8', 1 ], [ 'uint16', 2 ], [ 'uint32', 4 ], [ 'uint64', 8 ], + [ 'float16', 2 ], [ 'float32', 4 ], [ 'float64', 8 ], [ 'bfloat16', 2 ], + [ 'complex64', 8 ], [ 'complex128', 16 ], + [ 'float8e4m3fn', 1 ], [ 'float8e4m3fnuz', 1 ], [ 'float8e5m2', 1 ], [ 'float8e5m2fnuz', 1 ] + ]); + } +
+ get type() { + return this._type; + } + + get encoding() { + return this._encoding; + } + + get layout() { + return this._layout; + } + + get stride() { + return this._stride; + } +
+ get empty() { + switch (this._layout) { + case 'sparse': + case 'sparse.coo': { + return !this._values || !this._indices || this._values.values === null || this._values.values.length === 0; + } + default: { + switch (this._encoding) { + case '<': + case '>': + return !(Array.isArray(this._data) || this._data instanceof Uint8Array || this._data instanceof Int8Array) || this._data.length === 0; + case '|': + return !(Array.isArray(this._values) || ArrayBuffer.isView(this._values)) || this._values.length === 0; + default: + throw new Error(`Unsupported tensor encoding '${this._encoding}'.`); + } + } + } + }
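+ // Encodings: '<' and '>' hold raw little- and big-endian bytes decoded through a DataView; '|' holds an array of already decoded values.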
+ + get value() { + const context = this._context(); + context.limit = Number.MAX_SAFE_INTEGER; + switch (context.encoding) { + case '<': + case '>': { + return this._decodeData(context, 0, 0); + } + case '|': { + return this._decodeValues(context, 0, 0); + } + default: { + throw new Error(`Unsupported tensor encoding '${context.encoding}'.`); + } + } + } +
+ toString() { + const context = this._context(); + context.limit = 10000; + switch (context.encoding) { + case '<': + case '>': { + const value = this._decodeData(context, 0, 0); + return view.Tensor._stringify(value, '', ' '); + } + case '|': { + const value = this._decodeValues(context, 0, 0); + return view.Tensor._stringify(value, '', ' '); + } + default: { + throw new Error(`Unsupported tensor encoding '${context.encoding}'.`); + } + } + } +
+ _context() { + if (this._encoding !== '<' && this._encoding !== '>' && this._encoding !== '|') { + throw new Error(`Tensor encoding '${this._encoding}' is not supported.`); + } + if (this._layout && (this._layout !== 'sparse' && this._layout !== 'sparse.coo')) { + throw new Error(`Tensor layout '${this._layout}' is not supported.`); + } + const dataType = this._type.dataType; + const context = {}; + context.encoding = this._encoding; + context.dimensions = this._type.shape.dimensions.map((value) => !Number.isInteger(value) && value.toNumber ? value.toNumber() : value); + context.dataType = dataType; + const shape = context.dimensions; + context.stride = this._stride; + if (!Array.isArray(context.stride)) { + context.stride = new Array(shape.length); + let value = 1; + for (let i = shape.length - 1; i >= 0; i--) { + context.stride[i] = value; + value *= shape[i]; + } + } + switch (this._layout) { + case 'sparse': { + const indices = new view.Tensor(this._indices).value; + const values = new view.Tensor(this._values).value; + context.data = this._decodeSparse(dataType, context.dimensions, indices, values); + context.encoding = '|'; + break; + } + case 'sparse.coo': { + const values = new view.Tensor(this._values).value; + const data = new view.Tensor(this._indices).value; + const dimensions = context.dimensions.length; + let stride = 1; + const strides = context.dimensions.slice().reverse().map((dim) => { + const value = stride; + stride *= dim; + return value; + }).reverse(); + const indices = new Uint32Array(values.length); + for (let i = 0; i < dimensions; i++) { + const dimension = data[i]; + for (let j = 0; j < indices.length; j++) { + indices[j] += dimension[j] * strides[i]; + } + } + context.data = this._decodeSparse(dataType, context.dimensions, indices, values); + context.encoding = '|'; + break; + } + default: { + switch (this._encoding) { + case '<': + case '>': { + context.data = (this._data instanceof Uint8Array || this._data instanceof Int8Array) ? this._data : this._data.peek(); + context.view = new DataView(context.data.buffer, context.data.byteOffset, context.data.byteLength); + if (view.Tensor.dataTypes.has(dataType)) { + const itemsize = view.Tensor.dataTypes.get(dataType); + const length = context.data.length; + const stride = context.stride; + if (length < (itemsize * shape.reduce((a, v) => a * v, 1))) { + const max = stride.reduce((a, v, i) => v > stride[a] ? i : a, 0);
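+ // Under-sized data is accepted only when it exactly covers the dimension with the largest stride.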
+ if (length !== (itemsize * stride[max] * shape[max])) { + throw new Error('Invalid tensor data size.'); + } + } + context.itemsize = itemsize; + context.stride = stride.map((v) => v * itemsize); + } else if (dataType.startsWith('uint') && !isNaN(parseInt(dataType.substring(4), 10))) { + context.dataType = 'uint'; + context.bits = parseInt(dataType.substring(4), 10); + context.itemsize = 1; + } else if (dataType.startsWith('int') && !isNaN(parseInt(dataType.substring(3), 10))) { + context.dataType = 'int'; + context.bits = parseInt(dataType.substring(3), 10); + context.itemsize = 1; + } else { + throw new Error(`Tensor data type '${dataType}' is not implemented.`); + } + break; + } + case '|': { + context.data = this._values; + if (!view.Tensor.dataTypes.has(dataType) && dataType !== 'string' && dataType !== 'object') { + throw new Error(`Tensor data type '${dataType}' is not implemented.`); + } + const size = context.dimensions.reduce((a, v) => a * v, 1); + if (size !== this._values.length) { + throw new Error('Invalid tensor data length.'); + } + break; + } + default: { + throw new view.Error(`Unsupported tensor encoding '${this._encoding}'.`); + } + } + } + } + context.index = 0; + context.count = 0; + return context; + } +
+ _decodeSparse(dataType, dimensions, indices, values) { + const size = dimensions.reduce((a, b) => a * b, 1); + const array = new Array(size); + switch (dataType) { + case 'boolean': + array.fill(false); + break; + default: + array.fill(0); + break; + } + if (indices.length > 0) { + if (Object.prototype.hasOwnProperty.call(indices[0], 'low')) { + for (let i = 0; i < indices.length; i++) { + const index = indices[i]; + array[index.high === 0 ? index.low : index.toNumber()] = values[i]; + } + } else { + for (let i = 0; i < indices.length; i++) { + array[indices[i]] = values[i]; + } + } + } + return array; + } +
+ _decodeData(context, dimension, offset) { + const results = []; + const shape = context.dimensions.length == 0 ? [ 1 ] : context.dimensions; + const size = shape[dimension]; + const dataType = context.dataType; + const view = context.view; + const stride = context.stride[dimension]; + if (dimension == shape.length - 1) { + const ellipsis = (context.count + size) > context.limit; + const length = ellipsis ? context.limit - context.count : size; + const max = offset + (length * context.itemsize); + switch (dataType) { + case 'boolean': + for (; offset < max; offset += stride) { + results.push(view.getUint8(offset) === 0 ? false : true); + } + break;
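+ // The remaining cases follow the same loop, selecting the DataView getter for each element type and honoring the tensor byte order.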
+ case 'qint8': + case 'xint8': + case 'int8': + for (; offset < max; offset += stride) { + results.push(view.getInt8(offset)); + } + break; + case 'qint16': + case 'int16': + for (; offset < max; offset += stride) { + results.push(view.getInt16(offset, this._littleEndian)); + } + break; + case 'qint32': + case 'int32': + for (; offset < max; offset += stride) { + results.push(view.getInt32(offset, this._littleEndian)); + } + break; + case 'int64': + for (; offset < max; offset += stride) { + results.push(view.getInt64(offset, this._littleEndian)); + } + break; + case 'int': + for (; offset < max; offset += stride) { + results.push(view.getIntBits(offset, context.bits, this._littleEndian)); + } + break; + case 'quint8': + case 'uint8': + for (; offset < max; offset += stride) { + results.push(view.getUint8(offset)); + } + break; + case 'quint16': + case 'uint16': + for (; offset < max; offset += stride) { + results.push(view.getUint16(offset, this._littleEndian)); + } + break; + case 'quint32': + case 'uint32': + for (; offset < max; offset += stride) { + results.push(view.getUint32(offset, this._littleEndian)); + } + break; + case 'uint64': + for (; offset < max; offset += stride) { + results.push(view.getUint64(offset, this._littleEndian)); + } + break; + case 'uint': + for (; offset < max; offset += stride) { + results.push(view.getUintBits(offset, context.bits, this._littleEndian)); + } + break; + case 'float16': + for (; offset < max; offset += stride) { + results.push(view.getFloat16(offset, this._littleEndian)); + } + break; + case 'float32': + for (; offset < max; offset += stride) { + results.push(view.getFloat32(offset, this._littleEndian)); + } + break; + case 'float64': + for (; offset < max; offset += stride) { + results.push(view.getFloat64(offset, this._littleEndian)); + } + break; + case 'bfloat16': + for (; offset < max; offset += stride) { + results.push(view.getBfloat16(offset, this._littleEndian)); + } + break; + case 'complex64': + for (; offset < max; offset += stride) { + results.push(view.getComplex64(offset, this._littleEndian)); + } + break; + case 'complex128': + for (; offset < max; offset += stride) { + results.push(view.getComplex128(offset, this._littleEndian)); + } + break; + case 'float8e4m3fn': + for (; offset < max; offset += stride) { + results.push(view.getFloat8e4m3(offset, true, false)); + } + break; + case 'float8e4m3fnuz': + for (; offset < max; offset += stride) { + results.push(view.getFloat8e4m3(offset, true, true)); + } + break; + case 'float8e5m2': + for (; offset < max; offset += stride) { + results.push(view.getFloat8e5m2(offset, false, false)); + } + break; + case 'float8e5m2fnuz': + for (; offset < max; offset += stride) { + results.push(view.getFloat8e5m2(offset, true, true)); + } + break; + default: + throw new Error(`Unsupported tensor data type '${dataType}'.`); + } + context.count += length; + if (ellipsis) { + results.push('...'); + } + } else { + for (let j = 0; j < size; j++) { + if (context.count >= context.limit) { + results.push('...'); + return results; + } + const nextOffset = offset + (j * stride); + results.push(this._decodeData(context, dimension + 1, nextOffset)); + } + } + if (context.dimensions.length == 0) { + return results[0]; + } + return results; + } +
+ _decodeValues(context, dimension, position) { + const results = []; + const shape = (context.dimensions.length == 0) ? [ 1 ] : context.dimensions;
+ const size = shape[dimension]; + const dataType = context.dataType; + const stride = context.stride[dimension]; + if (dimension == shape.length - 1) { + const ellipsis = (context.count + size) > context.limit; + const length = ellipsis ? context.limit - context.count : size; + const data = context.data; + for (let i = 0; i < length; i++) { + if (context.count >= context.limit) { + results.push('...'); + return results; + } + switch (dataType) { + case 'boolean': + results.push(data[position] === 0 ? false : true); + break; + default: + results.push(data[position]); + break; + } + position += stride; + context.count++; + } + } else { + for (let i = 0; i < size; i++) { + if (context.count >= context.limit) { + results.push('...'); + return results; + } + const nextPosition = position + (i * stride); + results.push(this._decodeValues(context, dimension + 1, nextPosition)); + } + } + if (context.dimensions.length == 0) { + return results[0]; + } + return results; + } +
+ static _stringify(value, indentation, indent) { + if (Array.isArray(value)) { + const result = []; + result.push(`${indentation}[`); + const items = value.map((item) => view.Tensor._stringify(item, indentation + indent, indent)); + if (items.length > 0) { + result.push(items.join(',\n')); + } + result.push(`${indentation}]`); + return result.join('\n'); + } + if (value === null) { + return `${indentation}null`; + } + switch (typeof value) { + case 'boolean': + return indentation + value.toString(); + case 'string': + return `${indentation}"${value}"`; + case 'number': + if (value == Infinity) { + return `${indentation}Infinity`; + } + if (value == -Infinity) { + return `${indentation}-Infinity`; + } + if (isNaN(value)) { + return `${indentation}NaN`; + } + return indentation + value.toString(); + default: + if (value && value.toString) { + return indentation + value.toString(); + } + return `${indentation}(undefined)`; + } + } +}; +
+view.Quantization = class { + + constructor(quantization) { + Object.assign(this, quantization); + } +
+ toString() { + if (this.type === 'linear' || /^quant\d\d?_.*$/.test(this.type)) { + const content = []; + const scale = this.scale || []; + const offset = this.offset || []; + const bias = this.bias || []; + const max = this.max || []; + const min = this.min || []; + const length = Math.max(scale.length, offset.length, bias.length, min.length, max.length); + const size = length.toString().length; + for (let i = 0; i < length; i++) { + let s = 'q'; + let bracket = false; + if (i < offset.length && offset[i] !== undefined && offset[i] !== 0) { + const value = offset[i]; + s = value > 0 ? `${s} - ${value}` : `${s} + ${-value}`; + bracket = true; + } + if (i < scale.length && scale[i] !== undefined && scale[i] !== 0) { + const value = scale[i]; + s = bracket ? `(${s})` : s; + s = `${value} * ${s}`; + bracket = true; + } + if (i < bias.length && bias[i] !== undefined && bias[i] !== 0) { + const value = bias[i]; + s = bracket ? `(${s})` : s; + s = value < 0 ? `${s} - ${-value}` : `${s} + ${value}`; + } + if (i < min.length && min[i] !== undefined && min[i] !== 0) { + s = `${min[i]} \u2264 ${s}`; + } + if (i < max.length && max[i] !== undefined && max[i] !== 0) { + s = `${s} \u2264 ${max[i]}`; + } + content.push(length > 1 ? `${i.toString().padStart(size, ' ')}: ${s}` : `${s}`); + } + return content.join('\n');
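+ // For example, scale 0.5 and offset 128 render as '0.5 * (q - 128)'; bias, min and max extend the expression when present.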
+ } else if (this.type === 'lookup') { + const size = this.value.length.toString().length; + return this.value.map((value, index) => `${index.toString().padStart(size, ' ')}: ${value}`).join('\n'); + } else if (this.type === 'annotation') { + return Array.from(this.value).map(([name, value]) => `${name} = ${value}`).join('\n'); + } else if (/^q\d_[01k]$/.test(this.type)) { + return ''; + } + throw new view.Error(`Unknown quantization type '${this.type}'.`); + } +}; +
+view.Documentation = class { + + static format(source) { + if (source) { + const generator = new markdown.Generator(); + const target = {}; + if (source.name !== undefined) { + target.name = source.name; + } + if (source.module !== undefined) { + target.module = source.module; + } + if (source.category !== undefined) { + target.category = source.category; + } + if (source.summary !== undefined) { + target.summary = generator.html(source.summary); + } + if (source.description !== undefined) { + target.description = generator.html(source.description); + } + if (Array.isArray(source.attributes)) { + target.attributes = source.attributes.map((source) => { + const target = {}; + target.name = source.name; + if (source.type !== undefined) { + target.type = source.type; + } + if (source.option !== undefined) { + target.option = source.option; + } + if (source.optional !== undefined) { + target.optional = source.optional; + } + if (source.required !== undefined) { + target.required = source.required; + } + if (source.minimum !== undefined) { + target.minimum = source.minimum; + } + if (source.src !== undefined) { + target.src = source.src; + } + if (source.src_type !== undefined) { + target.src_type = source.src_type; + } + if (source.description !== undefined) { + target.description = generator.html(source.description); + } + if (source.default !== undefined) { + target.default = source.default; + } + if (source.visible !== undefined) { + target.visible = source.visible; + } + return target; + }); + } + if (Array.isArray(source.inputs)) { + target.inputs = source.inputs.map((source) => { + const target = {}; + target.name = source.name; + if (source.type !== undefined) { + target.type = source.type; + } + if (source.description !== undefined) { + target.description = generator.html(source.description); + } + if (source.default !== undefined) { + target.default = source.default; + } + if (source.src !== undefined) { + target.src = source.src; + } + if (source.list !== undefined) { + target.list = source.list; + } + if (source.isRef !== undefined) { + target.isRef = source.isRef; + } + if (source.typeAttr !== undefined) { + target.typeAttr = source.typeAttr; + } + if (source.numberAttr !== undefined) { + target.numberAttr = source.numberAttr; + } + if (source.typeListAttr !== undefined) { + target.typeListAttr = source.typeListAttr; + } + if (source.option !== undefined) { + target.option = source.option; + } + if (source.optional !== undefined) { + target.optional = source.optional; + } + if (source.visible !== undefined) { + target.visible = source.visible; + } + return target; + }); + } + if (Array.isArray(source.outputs)) { + target.outputs = source.outputs.map((source) => { + const target = {}; + target.name = source.name; + if (source.type) { + target.type = source.type; + } + if (source.description !== undefined) { + target.description = generator.html(source.description); + } + if (source.list !== undefined) { + target.list = source.list;
} + if (source.typeAttr !== undefined) { + target.typeAttr = source.typeAttr; + } + if (source.typeListAttr !== undefined) { + target.typeListAttr = source.typeListAttr; + } + if (source.numberAttr !== undefined) { + target.numberAttr = source.numberAttr; + } + if (source.isRef !== undefined) { + target.isRef = source.isRef; + } + if (source.option !== undefined) { + target.option = source.option; + } + return target; + }); + } + if (Array.isArray(source.references)) { + target.references = source.references.map((source) => { + const target = {}; + if (source) { + target.description = generator.html(source.description); + } + return target; + }); + } + if (source.version !== undefined) { + target.version = source.version; + } + if (source.operator !== undefined) { + target.operator = source.operator; + } + if (source.identifier !== undefined) { + target.identifier = source.identifier; + } + if (source.package !== undefined) { + target.package = source.package; + } + if (source.support_level !== undefined) { + target.support_level = source.support_level; + } + if (source.min_input !== undefined) { + target.min_input = source.min_input; + } + if (source.max_input !== undefined) { + target.max_input = source.max_input; + } + if (source.min_output !== undefined) { + target.min_output = source.min_output; + } + if (source.max_output !== undefined) { + target.max_output = source.max_output; + } + if (source.inputs_range !== undefined) { + target.inputs_range = source.inputs_range; + } + if (source.outputs_range !== undefined) { + target.outputs_range = source.outputs_range; + } + if (source.examples !== undefined) { + target.examples = source.examples; + } + if (source.constants !== undefined) { + target.constants = source.constants; + } + if (source.type_constraints !== undefined) { + target.type_constraints = source.type_constraints; + } + return target; + } + return ''; + } +}; +
+view.Formatter = class { + + constructor(value, type, quote) { + this._value = value; + this._type = type; + this._quote = quote; + this._values = new Set(); + } +
+ toString() { + return this._format(this._value, this._type, this._quote); + } +
+ _format(value, type, quote) { + + if (value && value.__class__ && value.__class__.__module__ === 'builtins' && value.__class__.__name__ === 'type') { + return `${value.__module__}.${value.__name__}`; + } + if (value && value.__class__ && value.__class__.__module__ === 'builtins' && value.__class__.__name__ === 'function') { + return `${value.__module__}.${value.__name__}`; + } + if (typeof value === 'function') { + return value(); + } + if (value && (value instanceof base.Int64 || value instanceof base.Uint64)) { + return value.toString(); + } + if (Number.isNaN(value)) { + return 'NaN'; + } + switch (type) { + case 'shape': + return value ? value.toString() : '(null)'; + case 'shape[]': + if (value && !Array.isArray(value)) { + throw new Error(`Invalid shape '${JSON.stringify(value)}'.`); + } + return value ? value.map((item) => item.toString()).join(', ') : '(null)'; + case 'graph': + return value ? value.name : '(null)'; + case 'graph[]': + return value ? value.map((graph) => graph.name).join(', ') : '(null)'; + case 'tensor': + if (value && value.type && value.type.shape && value.type.shape.dimensions && value.type.shape.dimensions.length == 0) { + return value.toString(); + } + return '[...]'; + case 'object': + case 'function': + return value.type.name; + case 'object[]': + case 'function[]': + return value ? value.map((item) => item.type.name).join(', ') : '(null)';
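+ // Typed values get a compact one-line rendering; everything else falls through to the generic formatting below, which quotes strings, truncates long arrays and guards against reference cycles.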
+ case 'type': + return value ? value.toString() : '(null)'; + case 'type[]': + return value ? value.map((item) => item.toString()).join(', ') : '(null)'; + default: + break; + } + if (typeof value === 'string' && (!type || type != 'string')) { + return quote ? `"${value}"` : value; + } + if (Array.isArray(value)) { + if (value.length == 0) { + return quote ? '[]' : ''; + } + let ellipsis = false; + if (value.length > 1000) { + value = value.slice(0, 1000); + ellipsis = true; + } + const itemType = (type && type.endsWith('[]')) ? type.substring(0, type.length - 2) : null; + const array = value.map((item) => { + if (item && (item instanceof base.Int64 || item instanceof base.Uint64)) { + return item.toString(); + } + if (Number.isNaN(item)) { + return 'NaN'; + } + const quote = !itemType || itemType === 'string'; + return this._format(item, itemType, quote); + }); + if (ellipsis) { + array.push('\u2026'); + } + return quote ? [ '[', array.join(', '), ']' ].join(' ') : array.join(', '); + } + if (value === null) { + return quote ? 'null' : ''; + } + if (value === undefined) { + return 'undefined'; + } + if (value !== Object(value)) { + return value.toString(); + } + if (this._values.has(value)) { + return '\u2026'; + } + this._values.add(value); + let list = null; + const entries = Object.entries(value).filter(([name]) => !name.startsWith('__') && !name.endsWith('__')); + if (entries.length == 1) { + list = [ this._format(entries[0][1], null, true) ]; + } else { + list = entries.map(([name, value]) => `${name}: ${this._format(value, null, true)}`); + } + let objectType = value.__type__; + if (!objectType && value.constructor.name && value.constructor.name !== 'Object') { + objectType = value.constructor.name; + } + if (objectType) { + return objectType + (list.length == 0 ? '()' : [ '(', list.join(', '), ')' ].join('')); + } + switch (list.length) { + case 0: + return quote ? '()' : ''; + case 1: + return list[0]; + default: + return quote ? [ '(', list.join(', '), ')' ].join(' ') : list.join(', '); + } + } +}; +
+markdown.Generator = class { + + constructor() { + this._newlineRegExp = /^\n+/; + this._codeRegExp = /^( {4}[^\n]+\n*)+/; + this._fencesRegExp = /^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: {0,3}\1[~`]* *(?:\n+|$)|$)/; + this._hrRegExp = /^ {0,3}((?:- *){3,}|(?:_ *){3,}|(?:\* *){3,})(?:\n+|$)/; + this._headingRegExp = /^ {0,3}(#{1,6}) +([^\n]*?)(?: +#+)? *(?:\n+|$)/; + this._blockquoteRegExp = /^( {0,3}> ?(([^\n]+(?:\n(?! {0,3}((?:- *){3,}|(?:_ *){3,}|(?:\* *){3,})(?:\n+|$)| {0,3}#{1,6} | {0,3}>| {0,3}(?:`{3,}(?=[^`\n]*\n)|~{3,})[^\n]*\n| {0,3}(?:[*+-]|1[.)]) |<\/?(?:address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul)(?: +|\n|\/?>)|<(?:script|pre|style|!--))[^\n]+)*)|[^\n]*)(?:\n|$))+/; + this._listRegExp = /^( {0,3})((?:[*+-]|\d{1,9}[.)])) [\s\S]+?(?:\n+(?=\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\* *){3,})(?:\n+|$))|\n+(?= {0,3}\[((?!\s*\])(?:\\[[\]]|[^[\]])+)\]: *\n? *<?([^\s>]+)>?(?:(?: +\n? *| *\n *)((?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))))? *(?:\n+|$))|\n{2,}(?! )(?!\1(?:[*+-]|\d{1,9}[.)]) )\n*|\s*$)/;
+ this._htmlRegExp = /^ {0,3}(?:<(script|pre|style)[\s>][\s\S]*?(?:<\/\1>[^\n]*\n+|$)|<!--(?!-?>)[\s\S]*?(?:-->|$)[^\n]*(\n+|$)|<\?[\s\S]*?(?:\?>\n*|$)|<![A-Z][\s\S]*?(?:>\n*|$)|<!\[CDATA\[[\s\S]*?(?:\]\]>\n*|$)|<\/?(address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul)(?: +|\n|\/?>)[\s\S]*?(?:\n{2,}|$)|<(?!script|pre|style)([a-z][\w-]*)(?: +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?)*? *\/?>(?=[ \t]*(?:\n|$))[\s\S]*?(?:\n{2,}|$)|<\/(?!script|pre|style)[a-z][\w-]*\s*>(?=[ \t]*(?:\n|$))[\s\S]*?(?:\n{2,}|$))/i; + this._defRegExp = /^ {0,3}\[((?!\s*\])(?:\\[[\]]|[^[\]])+)\]: *\n? *<?([^\s>]+)>?(?:(?: +\n? *| *\n *)((?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))))? *(?:\n+|$)/; + this._nptableRegExp = /^ *([^|\n ].*\|.*)\n {0,3}([-:]+ *\|[-| :]*)(?:\n((?:(?!\n| {0,3}((?:- *){3,}|(?:_ *){3,}|(?:\* *){3,})(?:\n+|$)| {0,3}#{1,6} | {0,3}>| {4}[^\n]| {0,3}(?:`{3,}(?=[^`\n]*\n)|~{3,})[^\n]*\n| {0,3}(?:[*+-]|1[.)]) |<\/?(?:address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul)(?: +|\n|\/?>)|<(?:script|pre|style|!--)).*(?:\n|$))*)\n*|$)/; + this._tableRegExp = /^ *\|(.+)\n {0,3}\|?( *[-:]+[-| :]*)(?:\n *((?:(?!\n| {0,3}((?:- *){3,}|(?:_ *){3,}|(?:\* *){3,})(?:\n+|$)| {0,3}#{1,6} | {0,3}>| {4}[^\n]| {0,3}(?:`{3,}(?=[^`\n]*\n)|~{3,})[^\n]*\n| {0,3}(?:[*+-]|1[.)]) |<\/?(?:address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul)(?: +|\n|\/?>)|<(?:script|pre|style|!--)).*(?:\n|$))*)\n*|$)/; + this._lheadingRegExp = /^([^\n]+)\n {0,3}(=+|-+) *(?:\n+|$)/; + this._textRegExp = /^[^\n]+/; + this._bulletRegExp = /(?:[*+-]|\d{1,9}[.)])/; + this._itemRegExp = /^( *)((?:[*+-]|\d{1,9}[.)])) ?[^\n]*(?:\n(?!\1(?:[*+-]|\d{1,9}[.)]) ?)[^\n]*)*/gm; + this._paragraphRegExp = /^([^\n]+(?:\n(?! 
{0,3}((?:- *){3,}|(?:_ *){3,}|(?:\* *){3,})(?:\n+|$)| {0,3}#{1,6} | {0,3}>| {0,3}(?:`{3,}(?=[^`\n]*\n)|~{3,})[^\n]*\n| {0,3}(?:[*+-]|1[.)]) |<\/?(?:address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul)(?: +|\n|\/?>)|<(?:script|pre|style|!--))[^\n]+)*)/; + this._backpedalRegExp = /(?:[^?!.,:;*_~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_~)]+(?!$))+/; + this._escapeRegExp = /^\\([!"#$%&'()*+,\-./:;<=>?@[\]\\^_`{|}~~|])/; + this._escapesRegExp = /\\([!"#$%&'()*+,\-./:;<=>?@[\]\\^_`{|}~])/g; + /* eslint-disable no-control-regex */ + this._autolinkRegExp = /^<([a-zA-Z][a-zA-Z0-9+.-]{1,31}:[^\s\x00-\x1f<>]*|[a-zA-Z0-9.!#$%&'*+/=?_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_]))>/; + this._linkRegExp = /^!?\[((?:\[(?:\\.|[^[\]\\])*\]|\\.|`[^`]*`|[^[\]\\`])*?)\]\(\s*(<(?:\\[<>]?|[^\s<>\\])*>|[^\s\x00-\x1f]*)(?:\s+("(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)))?\s*\)/; + /* eslint-enable no-control-regex */ + this._urlRegExp = /^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9-]+\.?)+[^\s<]*|^[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/i; + this._tagRegExp = /^|^<\/[a-zA-Z][\w:-]*\s*>|^<[a-zA-Z][\w-]*(?:\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?)*?\s*\/?>|^<\?[\s\S]*?\?>|^|^/; + this._reflinkRegExp = /^!?\[((?:\[(?:\\.|[^[\]\\])*\]|\\.|`[^`]*`|[^[\]\\`])*?)\]\[(?!\s*\])((?:\\[[\]]?|[^[\]\\])+)\]/; + this._nolinkRegExp = /^!?\[(?!\s*\])((?:\[[^[\]]*\]|\\[[\]]|[^[\]])*)\](?:\[\])?/; + this._reflinkSearchRegExp = /!?\[((?:\[(?:\\.|[^[\]\\])*\]|\\.|`[^`]*`|[^[\]\\`])*?)\]\[(?!\s*\])((?:\\[[\]]?|[^[\]\\])+)\]|!?\[(?!\s*\])((?:\[[^[\]]*\]|\\[[\]]|[^[\]])*)\](?:\[\])?(?!\()/g; + this._strongStartRegExp = /^(?:(\*\*(?=[*!"#$%&'()+\-.,/:;<=>?@[\]`{|}~]))|\*\*)(?![\s])|__/; + this._strongMiddleRegExp = /^\*\*(?:(?:(?!__[^_]*?__|\*\*\[^\*\]*?\*\*)(?:[^*]|\\\*)|__[^_]*?__|\*\*\[^\*\]*?\*\*)|\*(?:(?!__[^_]*?__|\*\*\[^\*\]*?\*\*)(?:[^*]|\\\*)|__[^_]*?__|\*\*\[^\*\]*?\*\*)*?\*)+?\*\*$|^__(?![\s])((?:(?:(?!__[^_]*?__|\*\*\[^\*\]*?\*\*)(?:[^_]|\\_)|__[^_]*?__|\*\*\[^\*\]*?\*\*)|_(?:(?!__[^_]*?__|\*\*\[^\*\]*?\*\*)(?:[^_]|\\_)|__[^_]*?__|\*\*\[^\*\]*?\*\*)*?_)+?)__$/; + this._strongEndAstRegExp = /[^!"#$%&'()+\-.,/:;<=>?@[\]`{|}~\s]\*\*(?!\*)|[!"#$%&'()+\-.,/:;<=>?@[\]`{|}~]\*\*(?!\*)(?:(?=[!"#$%&'()+\-.,/:;<=>?@[\]`{|}~_\s]|$))/g; + this._strongEndUndRegExp = /[^\s]__(?!_)(?:(?=[!"#$%&'()+\-.,/:;<=>?@[\]`{|}~*\s])|$)/g; + this._emStartRegExp = /^(?:(\*(?=[!"#$%&'()+\-.,/:;<=>?@[\]`{|}~]))|\*)(?![*\s])|_/; + this._emMiddleRegExp = /^\*(?:(?:(?!__[^_]*?__|\*\*\[^\*\]*?\*\*)(?:[^*]|\\\*)|__[^_]*?__|\*\*\[^\*\]*?\*\*)|\*(?:(?!__[^_]*?__|\*\*\[^\*\]*?\*\*)(?:[^*]|\\\*)|__[^_]*?__|\*\*\[^\*\]*?\*\*)*?\*)+?\*$|^_(?![_\s])(?:(?:(?!__[^_]*?__|\*\*\[^\*\]*?\*\*)(?:[^_]|\\_)|__[^_]*?__|\*\*\[^\*\]*?\*\*)|_(?:(?!__[^_]*?__|\*\*\[^\*\]*?\*\*)(?:[^_]|\\_)|__[^_]*?__|\*\*\[^\*\]*?\*\*)*?_)+?_$/; + this._emEndAstRegExp = /[^!"#$%&'()+\-.,/:;<=>?@[\]`{|}~\s]\*(?!\*)|[!"#$%&'()+\-.,/:;<=>?@[\]`{|}~]\*(?!\*)(?:(?=[!"#$%&'()+\-.,/:;<=>?@[\]`{|}~_\s]|$))/g; + this._emEndUndRegExp = /[^\s]_(?!_)(?:(?=[!"#$%&'()+\-.,/:;<=>?@[\]`{|}~*\s])|$)/g; + this._codespanRegExp = /^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/; + 
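// Note: these tokenizer patterns mirror the block and inline grammars of marked.js (GFM flavor).
// The strong/em start, middle, and end expressions implement marked's two-phase delimiter search,
// which runs against a masked copy of the source (see _tokenizeInline below).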
this._brRegExp = /^( {2,}|\\)\n(?!\s*$)/; + this._delRegExp = /^~+(?=\S)([\s\S]*?\S)~+/; + this._textspanRegExp = /^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\<![`*~_]|\b_|https?:\/\/|ftp:\/\/|www\.|$)|[^ ](?= {2,}\n)))/; + this._punctuationRegExp = /^([\s!"#$%&'()+\-.,/:;<=>?@[\]`{|}~])/; + this._blockSkipRegExp = /\[[^\]]*?\]\([^)]*?\)|`[^`]*?`|<[^>]*?>/g; + this._escapeTestRegExp = /[&<>"']/; + this._escapeReplaceRegExp = /[&<>"']/g; + this._escapeTestNoEncodeRegExp = /[<>"']|&(?!#?\w+;)/; + this._escapeReplaceNoEncodeRegExp = /[<>"']|&(?!#?\w+;)/g; + this._escapeReplacementsMap = { '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;' }; + } + + html(source) { + const tokens = []; + const links = new Map(); + source = source.replace(/\r\n|\r/g, '\n').replace(/\t/g, '    '); + this._tokenize(source, tokens, links, true); + this._tokenizeBlock(tokens, links); + const result = this._render(tokens, true); + return result; + } + + _tokenize(source, tokens, links, top) { + source = source.replace(/^ +$/gm, ''); + while (source) { + let match = this._newlineRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + if (match[0].length > 1) { + tokens.push({ type: 'space' }); + } + continue; + } + match = this._codeRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + const lastToken = tokens[tokens.length - 1]; + if (lastToken && lastToken.type === 'paragraph') { + lastToken.text += `\n${match[0].trimRight()}`; + } else { + const text = match[0].replace(/^ {4}/gm, '').replace(/\n*$/, ''); + tokens.push({ type: 'code', text: text }); + } + continue; + } + match = this._fencesRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + const language = match[2] ? match[2].trim() : match[2]; + let content = match[3] || ''; + const matchIndent = match[0].match(/^(\s+)(?:```)/); + if (matchIndent !== null) { + const [, indent] = matchIndent; + content = content.split('\n').map((node) => { + const match = node.match(/^\s+/); + return (match !== null && match[0].length >= indent.length) ? node.slice(indent.length) : node; + }).join('\n'); + } + tokens.push({ type: 'code', language: language, text: content }); + continue; + } + match = this._headingRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + tokens.push({ type: 'heading', depth: match[1].length, text: match[2] }); + continue; + } + match = this._nptableRegExp.exec(source); + if (match) { + const header = this._splitCells(match[1].replace(/^ *| *\| *$/g, '')); + const align = match[2].replace(/^ *|\| *$/g, '').split(/ *\| */); + if (header.length === align.length) { + const cells = match[3] ?
match[3].replace(/\n$/, '').split('\n') : []; + const token = { type: 'table', header: header, align: align, cells: cells, raw: match[0] }; + for (let i = 0; i < token.align.length; i++) { + if (/^ *-+: *$/.test(token.align[i])) { + token.align[i] = 'right'; + } else if (/^ *:-+: *$/.test(token.align[i])) { + token.align[i] = 'center'; + } else if (/^ *:-+ *$/.test(token.align[i])) { + token.align[i] = 'left'; + } else { + token.align[i] = null; + } + } + token.cells = token.cells.map((cell) => this._splitCells(cell, token.header.length)); + source = source.substring(token.raw.length); + tokens.push(token); + continue; + } + } + match = this._hrRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + tokens.push({ type: 'hr' }); + continue; + } + match = this._blockquoteRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + const text = match[0].replace(/^ *> ?/gm, ''); + tokens.push({ type: 'blockquote', text: text, tokens: this._tokenize(text, [], links, top) }); + continue; + } + match = this._listRegExp.exec(source); + if (match) { + const [value, , bull] = match; + const ordered = bull.length > 1; + const parent = bull[bull.length - 1] === ')'; + let raw = value; + const list = { type: 'list', raw: raw, ordered: ordered, start: ordered ? +bull.slice(0, -1) : '', loose: false, items: [] }; + const itemMatch = value.match(this._itemRegExp); + let next = false; + const length = itemMatch.length; + for (let i = 0; i < length; i++) { + let item = itemMatch[i]; + raw = item; + let space = item.length; + item = item.replace(/^ *([*+-]|\d+[.)]) ?/, ''); + if (~item.indexOf('\n ')) { + space -= item.length; + item = item.replace(new RegExp(`^ {1,${space}}`, 'gm'), ''); + } + if (i !== length - 1) { + const [bullet] = this._bulletRegExp.exec(itemMatch[i + 1]); + if (ordered ? bullet.length === 1 || (!parent && bullet[bullet.length - 1] === ')') : (bullet.length > 1)) { + const addBack = itemMatch.slice(i + 1).join('\n'); + list.raw = list.raw.substring(0, list.raw.length - addBack.length); + i = length - 1; + } + } + let loose = next || /\n\n(?!\s*$)/.test(item); + if (i !== length - 1) { + next = item.charAt(item.length - 1) === '\n'; + if (!loose) { + loose = next; + } + } + if (loose) { + list.loose = true; + } + const task = /^\[[ xX]\] /.test(item); + let checked = undefined; + if (task) { + checked = item[1] !== ' '; + item = item.replace(/^\[[ xX]\] +/, ''); + } + list.items.push({ type: 'list_item', raw, task: task, checked: checked, loose: loose, text: item }); + } + source = source.substring(list.raw.length); + for (const item of list.items) { + item.tokens = this._tokenize(item.text, [], links, false); + } + tokens.push(list); + continue; + } + match = this._htmlRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + tokens.push({ type: 'html', pre: (match[1] === 'pre' || match[1] === 'script' || match[1] === 'style'), text: match[0] }); + continue; + } + if (top) { + match = this._defRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + match[3] = match[3] ? 
match[3].substring(1, match[3].length - 1) : match[3]; + const tag = match[1].toLowerCase().replace(/\s+/g, ' '); + if (!links.has(tag)) { + links.set(tag, { href: match[2], title: match[3] }); + } + continue; + } + } + match = this._tableRegExp.exec(source); + if (match) { + const header = this._splitCells(match[1].replace(/^ *| *\| *$/g, '')); + const align = match[2].replace(/^ *|\| *$/g, '').split(/ *\| */); + if (header.length === align.length) { + const cells = match[3] ? match[3].replace(/\n$/, '').split('\n') : []; + const token = { type: 'table', header: header, align: align, cells: cells, raw: match[0] }; + for (let i = 0; i < token.align.length; i++) { + if (/^ *-+: *$/.test(token.align[i])) { + token.align[i] = 'right'; + } else if (/^ *:-+: *$/.test(token.align[i])) { + token.align[i] = 'center'; + } else if (/^ *:-+ *$/.test(token.align[i])) { + token.align[i] = 'left'; + } else { + token.align[i] = null; + } + } + token.cells = token.cells.map((cell) => this._splitCells(cell.replace(/^ *\| *| *\| *$/g, ''), token.header.length)); + source = source.substring(token.raw.length); + tokens.push(token); + continue; + } + } + match = this._lheadingRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + tokens.push({ type: 'heading', depth: match[2].charAt(0) === '=' ? 1 : 2, text: match[1] }); + continue; + } + if (top) { + match = this._paragraphRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + tokens.push({ type: 'paragraph', text: match[1].charAt(match[1].length - 1) === '\n' ? match[1].slice(0, -1) : match[1] }); + continue; + } + } + match = this._textRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + const lastToken = tokens[tokens.length - 1]; + if (lastToken && lastToken.type === 'text') { + lastToken.text += `\n${match[0]}`; + } else { + tokens.push({ type: 'text', text: match[0] }); + } + continue; + } + throw new Error(`Unexpected '${source.charCodeAt(0)}'.`); + } + return tokens; + } + + _tokenizeInline(source, links, inLink, inRawBlock, prevChar) { + const tokens = []; + let maskedSource = source; + if (links.size > 0) { + while (maskedSource) { + const match = this._reflinkSearchRegExp.exec(maskedSource); + if (match) { + if (links.has(match[0].slice(match[0].lastIndexOf('[') + 1, -1))) { + maskedSource = `${maskedSource.slice(0, match.index)}[${'a'.repeat(match[0].length - 2)}]${maskedSource.slice(this._reflinkSearchRegExp.lastIndex)}`; + } + continue; + } + break; + } + } + while (maskedSource) { + const match = this._blockSkipRegExp.exec(maskedSource); + if (match) { + maskedSource = `${maskedSource.slice(0, match.index)}[${'a'.repeat(match[0].length - 2)}]${maskedSource.slice(this._blockSkipRegExp.lastIndex)}`; + continue; + } + break; + } + while (source) { + let match = this._escapeRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + tokens.push({ type: 'escape', text: this._escape(match[1]) }); + continue; + } + match = this._tagRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + if (!inLink && /^/i.test(match[0])) { + inLink = false; + } + if (!inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i.test(match[0])) { + inRawBlock = true; + } else if (inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i.test(match[0])) { + inRawBlock = false; + } + tokens.push({ type: 'html', raw: match[0], text: match[0] }); + continue; + } + match = this._linkRegExp.exec(source); + if (match) { + let index = -1; + const [, , ref] = 
match; + if (ref.indexOf(')') !== -1) { + let level = 0; + for (let i = 0; i < ref.length; i++) { + switch (ref[i]) { + case '\\': + i++; + break; + case '(': + level++; + break; + case ')': + level--; + if (level < 0) { + index = i; + i = ref.length; + } + break; + default: + break; + } + } + } + if (index > -1) { + const length = (match[0].indexOf('!') === 0 ? 5 : 4) + match[1].length + index; + match[2] = match[2].substring(0, index); + match[0] = match[0].substring(0, length).trim(); + match[3] = ''; + } + const title = (match[3] ? match[3].slice(1, -1) : '').replace(this._escapesRegExp, '$1'); + const href = match[2].trim().replace(/^<([\s\S]*)>$/, '$1').replace(this._escapesRegExp, '$1'); + const token = this._outputLink(match, href, title); + source = source.substring(match[0].length); + if (token.type === 'link') { + token.tokens = this._tokenizeInline(token.text, links, true, inRawBlock, ''); + } + tokens.push(token); + continue; + } + match = this._reflinkRegExp.exec(source) || this._nolinkRegExp.exec(source); + if (match) { + let link = (match[2] || match[1]).replace(/\s+/g, ' '); + link = links.get(link.toLowerCase()); + if (!link || !link.href) { + const text = match[0].charAt(0); + source = source.substring(text.length); + tokens.push({ type: 'text', text: text }); + } else { + source = source.substring(match[0].length); + const token = this._outputLink(match, link); + if (token.type === 'link') { + token.tokens = this._tokenizeInline(token.text, links, true, inRawBlock, ''); + } + tokens.push(token); + } + continue; + } + match = this._strongStartRegExp.exec(source); + if (match && (!match[1] || (match[1] && (prevChar === '' || this._punctuationRegExp.exec(prevChar))))) { + const masked = maskedSource.slice(-1 * source.length); + const endReg = match[0] === '**' ? this._strongEndAstRegExp : this._strongEndUndRegExp; + endReg.lastIndex = 0; + let cap; + while ((match = endReg.exec(masked)) != null) { + cap = this._strongMiddleRegExp.exec(masked.slice(0, match.index + 3)); + if (cap) { + break; + } + } + if (cap) { + const text = source.substring(2, cap[0].length - 2); + source = source.substring(cap[0].length); + tokens.push({ type: 'strong', text: text, tokens: this._tokenizeInline(text, links, inLink, inRawBlock, '') }); + continue; + } + } + match = this._emStartRegExp.exec(source); + if (match && (!match[1] || (match[1] && (prevChar === '' || this._punctuationRegExp.exec(prevChar))))) { + const masked = maskedSource.slice(-1 * source.length); + const endReg = match[0] === '*' ? 
this._emEndAstRegExp : this._emEndUndRegExp; + endReg.lastIndex = 0; + let cap; + while ((match = endReg.exec(masked)) != null) { + cap = this._emMiddleRegExp.exec(masked.slice(0, match.index + 2)); + if (cap) { + break; + } + } + if (cap) { + const text = source.slice(1, cap[0].length - 1); + source = source.substring(cap[0].length); + tokens.push({ type: 'em', text: text, tokens: this._tokenizeInline(text, links, inLink, inRawBlock, '') }); + continue; + } + } + match = this._codespanRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + let content = match[2].replace(/\n/g, ' '); + if (/[^ ]/.test(content) && content.startsWith(' ') && content.endsWith(' ')) { + content = content.substring(1, content.length - 1); + } + tokens.push({ type: 'codespan', text: this._encode(content) }); + continue; + } + match = this._brRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + tokens.push({ type: 'br' }); + continue; + } + match = this._delRegExp.exec(source); + if (match) { + const [value, text] = match; + source = source.substring(value.length); + tokens.push({ type: 'del', text: text, tokens: this._tokenizeInline(text, links, inLink, inRawBlock, '') }); + continue; + } + match = this._autolinkRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + const text = this._escape(match[1]); + const href = match[2] === '@' ? `mailto:${text}` : text; + tokens.push({ type: 'link', text: text, href: href, tokens: [ { type: 'text', raw: text, text } ] }); + continue; + } + if (!inLink) { + match = this._urlRegExp.exec(source); + if (match) { + const email = match[2] === '@'; + let [value] = match; + if (!email) { + let prevCapZero; + do { + prevCapZero = value; + [value] = this._backpedalRegExp.exec(value); + } while (prevCapZero !== value); + } + const text = this._escape(value); + const href = email ? (`mailto:${text}`) : (match[1] === 'www.' ? `http://${text}` : text); + source = source.substring(value.length); + tokens.push({ type: 'link', text: text, href: href, tokens: [ { type: 'text', text: text } ] }); + continue; + } + } + match = this._textspanRegExp.exec(source); + if (match) { + source = source.substring(match[0].length); + prevChar = match[0].slice(-1); + tokens.push({ type: 'text' , text: inRawBlock ? match[0] : this._escape(match[0]) }); + continue; + } + throw new Error(`Unexpected '${source.charCodeAt(0)}'.`); + } + return tokens; + } + + _tokenizeBlock(tokens, links) { + for (const token of tokens) { + switch (token.type) { + case 'paragraph': + case 'text': + case 'heading': { + token.tokens = this._tokenizeInline(token.text, links, false, false, ''); + break; + } + case 'table': { + token.tokens = {}; + token.tokens.header = token.header.map((header) => this._tokenizeInline(header, links, false, false, '')); + token.tokens.cells = token.cells.map((cell) => cell.map((row) => this._tokenizeInline(row, links, false, false, ''))); + break; + } + case 'blockquote': { + this._tokenizeBlock(token.tokens, links); + break; + } + case 'list': { + for (const item of token.items) { + this._tokenizeBlock(item.tokens, links); + } + break; + } + default: { + break; + } + } + } + } + + _render(tokens, top) { + let html = ''; + while (tokens.length > 0) { + const token = tokens.shift(); + switch (token.type) { + case 'space': { + continue; + } + case 'hr': { + html += '
<hr/>\n'; + continue; + } + case 'heading': { + const level = token.depth; + html += `<h${level}>${this._renderInline(token.tokens)}</h${level}>\n`; + continue; + } + case 'code': { + const code = token.text; + const [language] = (token.language || '').match(/\S*/); + html += `<pre><code${language ? ` class="language-${this._escape(language)}"` : ''}>
${token.escaped ? code : this._encode(code)}
</code></pre>\n`; + continue; + } + case 'table': { + let header = ''; + let cell = ''; + for (let j = 0; j < token.header.length; j++) { + const content = this._renderInline(token.tokens.header[j]); + const align = token.align[j]; + cell += `<th${align ? ` align="${align}"` : ''}>${content}</th>\n`; + } + header += `<tr>\n${cell}</tr>\n`; + let body = ''; + for (let j = 0; j < token.cells.length; j++) { + const row = token.tokens.cells[j]; + cell = ''; + for (let k = 0; k < row.length; k++) { + const content = this._renderInline(row[k]); + const align = token.align[k]; + cell += `<td${align ? ` align="${align}"` : ''}>${content}</td>\n`; + } + body += `<tr>\n${cell}</tr>\n`; + } + html += `<table>\n<thead>\n${header}</thead>\n${body ? `<tbody>${body}</tbody>` : body}
</table>\n`; + continue; + } + case 'blockquote': { + html += `<blockquote>
\n${this._render(token.tokens, true)}
</blockquote>\n`; + continue; + } + case 'list': { + const ordered = token.ordered; + const start = token.start; + const loose = token.loose; + let body = ''; + for (const item of token.items) { + let itemBody = ''; + if (item.task) { + const checkbox = `<input ${item.checked ? 'checked="" ' : ''}disabled="" type="checkbox"> `; + if (loose) { + if (item.tokens.length > 0 && item.tokens[0].type === 'text') { + item.tokens[0].text = `${checkbox} ${item.tokens[0].text}`; + if (item.tokens[0].tokens && item.tokens[0].tokens.length > 0 && item.tokens[0].tokens[0].type === 'text') { + item.tokens[0].tokens[0].text = `${checkbox} ${item.tokens[0].tokens[0].text}`; + } + } else { + item.tokens.unshift({ type: 'text', text: checkbox }); + } + } else { + itemBody += checkbox; + } + } + itemBody += this._render(item.tokens, loose);
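// GFM task-list items ('[ ]' / '[x]') were flagged during tokenization; for loose lists the
// checkbox is merged into the item's first text token so it renders inside the generated <p>,
// otherwise it is emitted directly before the item body.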
body += `<li>${itemBody}</li>\n`; + } + const type = (ordered ? 'ol' : 'ul'); + html += `<${type}${ordered && start !== 1 ? (` start="${start}"`) : ''}>\n${body}</${type}>\n`; + continue; + } + case 'html': { + html += token.text; + continue; + }
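// A 'paragraph' token wraps its inline children in a single <p> element.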

case 'paragraph': { + html += `<p>${this._renderInline(token.tokens)}</p>\n`; + continue; + } + case 'text': {
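// Bare 'text' tokens at the top level are folded together and wrapped in <p>; nested calls
// (for example tight list items, where top is false) emit the text without the wrapper.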

html += top ? '<p>' : ''; + html += token.tokens ? this._renderInline(token.tokens) : token.text; + while (tokens.length > 0 && tokens[0].type === 'text') { + const token = tokens.shift(); + html += `\n${token.tokens ? this._renderInline(token.tokens) : token.text}`; + } + html += top ? '</p>\n' : ''; + continue; + } + default: { + throw new Error(`Unexpected token type '${token.type}'.`); + } + } + } + return html; + }
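// Inline rendering: 'escape', 'html', and 'text' tokens were already escaped or passed through
// during tokenization and are concatenated verbatim; the remaining token types wrap their
// recursively rendered children in the corresponding HTML element.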
_renderInline(tokens) { + let html = ''; + for (const token of tokens) { + switch (token.type) { + case 'escape': + case 'html': + case 'text': { + html += token.text; + break; + } + case 'link': { + const text = this._renderInline(token.tokens); + html += `<a href="${token.href}"${token.title ? ` title="${token.title}"` : ''} target="_blank">${text}</a>`; + break; + } + case 'image': { + html += `<img src="${token.href}"${token.title ? ` title="${token.title}"` : ''} alt="${token.text}">`; + break; + } + case 'strong': { + const text = this._renderInline(token.tokens); + html += `<strong>${text}</strong>`; + break; + } + case 'em': { + const text = this._renderInline(token.tokens); + html += `<em>${text}</em>`; + break; + } + case 'codespan': { + html += `<code>${token.text}</code>`; + break; + } + case 'br': { + html += '<br>
    '; + break; + } + case 'del': { + const text = this._renderInline(token.tokens); + html += `${text}`; + break; + } + default: { + throw new Error(`Unexpected token type '${token.type}'.`); + } + } + } + return html; + } + + _outputLink(match, href, title) { + title = title ? this._escape(title) : null; + const text = match[1].replace(/\\([[\]])/g, '$1'); + return match[0].charAt(0) !== '!' ? + { type: 'link', href: href, title: title, text: text } : + { type: 'image', href: href, title: title, text: this._escape(text) }; + } + + _splitCells(tableRow, count) { + const row = tableRow.replace(/\|/g, (match, offset, str) => { + let escaped = false; + let position = offset; + while (--position >= 0 && str[position] === '\\') { + escaped = !escaped; + } + return escaped ? '|' : ' |'; + }); + const cells = row.split(/ \|/); + if (cells.length > count) { + cells.splice(count); + } else { + while (cells.length < count) { + cells.push(''); + } + } + return cells.map((cell) => cell.trim().replace(/\\\|/g, '|')); + } + + _encode(content) { + if (this._escapeTestRegExp.test(content)) { + return content.replace(this._escapeReplaceRegExp, (ch) => this._escapeReplacementsMap[ch]); + } + return content; + } + + _escape(content) { + if (this._escapeTestNoEncodeRegExp.test(content)) { + return content.replace(this._escapeReplaceNoEncodeRegExp, (ch) => this._escapeReplacementsMap[ch]); + } + return content; + } +}; + +view.Context = class { + + constructor(context, identifier, stream) { + this._context = context; + this._tags = new Map(); + this._content = new Map(); + this._identifier = typeof identifier === 'string' ? identifier : context.identifier; + this._stream = stream || context.stream; + } + + get identifier() { + return this._identifier; + } + + get stream() { + return this._stream; + } + + get reader() { + return new base.StreamReader(this._stream); + } + + async request(file) { + return this._context.request(file, 'utf-8', null); + } + + async fetch(file) { + const stream = await this._context.request(file, null); + return new view.Context(this, file, stream, new Map()); + } + + async require(id) { + return this._context.require(id); + } + + exception(error, fatal) { + if (error && this.identifier) { + error.context = this.identifier; + } + this._context.exception(error, fatal); + } + + peek(type) { + if (!this._content.has(type)) { + this._content.set(type, undefined); + const stream = this.stream; + if (stream) { + const position = stream.position; + const match = (buffer, signature) => { + return signature.length <= buffer.length && buffer.every((value, index) => signature[index] === undefined || signature[index] === value); + }; + const buffer = stream.peek(Math.min(stream.length, 16)); + const skip = + match(buffer, [ 0x80, undefined, 0x8a, 0x0a, 0x6c, 0xfc, 0x9c, 0x46, 0xf9, 0x20, 0x6a, 0xa8, 0x50, 0x19 ]) || // PyTorch + (type !== 'npz' && type !== 'zip' && match(buffer, [ 0x50, 0x4B, 0x03, 0x04 ])) || // Zip + (type !== 'hdf5' && match(buffer, [ 0x89, 0x48, 0x44, 0x46, 0x0D, 0x0A, 0x1A, 0x0A ])) || // \x89HDF\r\n\x1A\n + Array.from(this._tags).some(([key, value]) => key !== 'flatbuffers' && value.size > 0) || + Array.from(this._content.values()).some((obj) => obj !== undefined); + if (!skip) { + switch (type) { + case 'json': { + try { + const buffer = stream.peek(Math.min(this.stream.length, 0x1000)); + if (stream.length < 0x7ffff000 && + (buffer.length < 8 || String.fromCharCode.apply(null, buffer.slice(0, 8)) !== '\x89HDF\r\n\x1A\n') && + (buffer.some((v) => v === 0x22 || v === 0x5b 
|| v === 0x5d || v === 0x7b || v === 0x7d))) { + const reader = json.TextReader.open(stream); + if (reader) { + const obj = reader.read(); + this._content.set(type, obj); + } + } + } catch (error) { + // continue regardless of error + } + break; + } + case 'json.gz': { + try { + const entries = this.peek('gzip'); + if (entries && entries.size === 1) { + const stream = entries.values().next().value; + const reader = json.TextReader.open(stream); + if (reader) { + const obj = reader.read(); + this._content.set(type, obj); + } + } + } catch (error) { + // continue regardless of error + } + break; + } + case 'pkl': { + let unpickler = null; + const types = new Set(); + try { + const archive = zip.Archive.open(stream, 'zlib'); + const data = archive ? archive.entries.get('') : stream; + let condition = false; + if (data.length > 2) { + const head = data.peek(2); + condition = head[0] === 0x80 && head[1] < 7; + if (!condition) { + data.seek(-1); + const tail = data.peek(1); + data.seek(0); + condition = tail[0] === 0x2e; + } + } + if (condition) { + const execution = new python.Execution(); + execution.on('resolve', (_, name) => types.add(name)); + const pickle = execution.__import__('pickle'); + unpickler = new pickle.Unpickler(data); + } + } catch (error) { + // continue regardless of error + } + if (unpickler) { + const storages = new Map(); + unpickler.persistent_load = (saved_id) => { + if (Array.isArray(saved_id) && saved_id.length > 3) { + switch (saved_id[0]) { + case 'storage': { + const [, storage_type, key, , size] = saved_id; + if (!storages.has(key)) { + const storage = new storage_type(size); + storages.set(key, storage); + } + return storages.get(key); + } + default: { + throw new python.Error(`Unsupported persistent load type '${saved_id[0]}'.`); + } + } + } + throw new view.Error("Unsupported 'persistent_load'."); + }; + try { + const obj = unpickler.load(); + this._content.set(type, obj); + } catch (error) { + this._content.set(type, error); + } + if (Array.from(types).every((name) => !name.startsWith('__torch__.'))) { + for (const name of types) { + this.exception(new view.Error(`Unknown type name '${name}'.`)); + } + } else { + this._content.set(type, new view.Error("PyTorch standalone 'data.pkl' format not supported.")); + } + } + break; + } + case 'hdf5': { + const file = hdf5.File.open(stream); + if (file) { + try { + this._content.set(type, file.read()); + } catch (error) { + this._content.set(type, error); + } + } + break; + } + case 'zip': + case 'tar': + case 'gzip': { + this._content.set('zip', undefined); + this._content.set('tar', undefined); + this._content.set('gzip', undefined); + let stream = this._stream; + try { + const archive = zip.Archive.open(this._stream, 'gzip'); + if (archive) { + let entries = archive.entries; + if (entries.size === 1) { + const key = entries.keys().next().value; + stream = entries.values().next().value; + const name = key === '' ? 
' ? this.identifier.replace(/\.gz$/, '') : key; + entries = new Map([[ name, stream ]]); + } + this._content.set('gzip', entries); + } + } catch (error) { + this._content.set('gzip', error); + } + let skipTar = false; + try { + const archive = zip.Archive.open(stream, 'zip'); + if (archive) { + this._content.set('zip', archive.entries); + skipTar = true; + } + } catch (error) { + this._content.set('zip', error); + } + if (!skipTar) { + try { + const archive = tar.Archive.open(stream); + if (archive) { + this._content.set('tar', archive.entries); + } + } catch (error) { + this._content.set('tar', error); + } + } + break; + } + case 'npz': { + try { + const content = new Map(); + const entries = this.peek('zip'); + if (entries instanceof Map && entries.size > 0 && + Array.from(entries.keys()).every((name) => name.endsWith('.npy'))) { + const execution = new python.Execution(); + for (const [name, stream] of entries) { + const buffer = stream.peek(); + const bytes = execution.invoke('io.BytesIO', [ buffer ]); + const array = execution.invoke('numpy.load', [ bytes ]); + content.set(name, array); + } + this._content.set(type, content); + } + } catch (error) { + // continue regardless of error + } + break; + } + default: { + throw new view.Error(`Unsupported open format type '${type}'.`); + } + } + } + if (stream.position !== position) { + stream.seek(0); + } + } + } + return this._content.get(type); + } + + read(type) { + if (!this._content.has(type)) { + switch (type) { + case 'json': { + const reader = json.TextReader.open(this._stream); + if (reader) { + const obj = reader.read(); + this._content.set('json', obj); + return obj; + } + throw new view.Error('Invalid JSON content.'); + } + default: { + break; + } + } + } + return this.peek(type); + } + + tags(type) { + if (!this._tags.has(type)) { + let tags = new Map(); + const stream = this.stream; + if (stream) { + const position = stream.position; + const signatures = [ + [ 0x89, 0x48, 0x44, 0x46, 0x0D, 0x0A, 0x1A, 0x0A ], // HDF5 + [ 0x80, undefined, 0x8a, 0x0a, 0x6c, 0xfc, 0x9c, 0x46, 0xf9, 0x20, 0x6a, 0xa8, 0x50, 0x19 ], // PyTorch + [ 0x50, 0x4b ], // Zip + [ 0x1f, 0x8b ] // Gzip + ]; + const skip = + signatures.some((signature) => signature.length <= stream.length && stream.peek(signature.length).every((value, index) => signature[index] === undefined || signature[index] === value)) || + (Array.from(this._tags).some(([key, value]) => key !== 'flatbuffers' && value.size > 0) && type !== 'pb+') || + Array.from(this._content.values()).some((obj) => obj !== undefined) || + (stream.length < 0x7ffff000 && json.TextReader.open(stream)); + if (!skip && stream.length < 0x7ffff000) { + try { + switch (type) { + case 'pbtxt': { + const reader = protobuf.TextReader.open(stream); + tags = reader ?
reader.signature() : tags; + break; + } + case 'pb': { + const reader = protobuf.BinaryReader.open(stream); + tags = reader.signature(); + break; + } + case 'pb+': { + const reader = protobuf.BinaryReader.open(stream); + tags = reader.decode(); + break; + } + case 'flatbuffers': { + if (stream.length >= 8) { + const buffer = stream.peek(Math.min(32, stream.length)); + const reader = flatbuffers.BinaryReader.open(buffer); + const identifier = reader.identifier; + if (identifier.length > 0) { + tags.set('file_identifier', identifier); + } + } + break; + } + case 'xml': { + const reader = xml.TextReader.open(stream); + if (reader) { + const document = reader.peek(); + const element = document.documentElement; + const namespaceURI = element.namespaceURI; + const localName = element.localName; + const name = namespaceURI ? `${namespaceURI}:${localName}` : localName; + tags.set(name, element); + } + break; + } + default: { + throw new view.Error(`Unsupported tags format type '${type}'.`); + } + } + } catch (error) { + tags.clear(); + } + } + if (stream.position !== position) { + stream.seek(position); + } + } + this._tags.set(type, tags); + } + return this._tags.get(type); + } + + metadata(name) { + return view.Metadata.open(this, name); + } +}; + +view.EntryContext = class { + + constructor(host, entries) { + this._host = host; + this._entries = entries; + } + + async request(file, encoding, base) { + if (base === undefined) { + const stream = this._entries.get(file); + if (!stream) { + throw new view.Error('File not found.'); + } + if (encoding) { + const decoder = new TextDecoder(encoding); + const buffer = stream.peek(); + const value = decoder.decode(buffer); + return value; + } + return stream; + } + return this._host.request(file, encoding, base); + } + + async require(id) { + return this._host.require(id); + } + + exception(error, fatal) { + this._host.exception(error, fatal); + } +}; + +view.ArchiveError = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading archive.'; + } +}; + +view.ModelFactoryService = class { + + constructor(host) { + this._host = host; + this._patterns = new Set([ '.zip', '.tar', '.tar.gz', '.tgz', '.gz' ]); + this._factories = []; + this.register('./server', [ '.netron']); + this.register('./pytorch', [ '.pt', '.pth', '.ptl', '.pt1', '.pyt', '.pyth', '.pkl', '.pickle', '.h5', '.t7', '.model', '.dms', '.tar', '.ckpt', '.chkpt', '.tckpt', '.bin', '.pb', '.zip', '.nn', '.torchmodel', '.torchscript', '.pytorch', '.ot', '.params', '.trt', '.ff', '.ptmf', '.jit', '.pte', '.bin.index.json', 'serialized_exported_program.json' ], [ '.model', '.pt2' ]); + this.register('./onnx', [ '.onnx', '.onn', '.pb', '.onnxtxt', '.pbtxt', '.prototxt', '.txt', '.model', '.pt', '.pth', '.pkl', '.ort', '.ort.onnx', 'onnxmodel', 'ngf', 'json' ]); + this.register('./mxnet', [ '.json', '.params' ], [ '.mar']); + this.register('./coreml', [ '.mlmodel', '.bin', 'manifest.json', 'metadata.json', 'featuredescriptions.json', '.pb', '.pbtxt' ], [ '.mlpackage' ]); + this.register('./caffe', [ '.caffemodel', '.pbtxt', '.prototxt', '.pt', '.txt' ]); + this.register('./caffe2', [ '.pb', '.pbtxt', '.prototxt' ]); + this.register('./torch', [ '.t7', '.net' ]); + this.register('./tflite', [ '.tflite', '.lite', '.tfl', '.bin', '.pb', '.tmfile', '.h5', '.model', '.json', '.txt', '.dat', '.nb', '.ckpt' ]); + this.register('./tf', [ '.pb', '.meta', '.pbtxt', '.prototxt', '.txt', '.pt', '.json', '.index', '.ckpt', '.graphdef', '.pbmm', 
/.data-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]$/, /^events.out.tfevents./ ], [ '.zip' ]); + this.register('./tensorrt', [ '.trt', '.trtmodel', '.engine', '.model', '.txt', '.uff', '.pb', '.tmfile', '.onnx', '.pth', '.dnn', '.plan', '.pt', '.dat' ]); + this.register('./keras', [ '.h5', '.hd5', '.hdf5', '.keras', '.json', '.cfg', '.model', '.pb', '.pth', '.weights', '.pkl', '.lite', '.tflite', '.ckpt', '.pb', 'model.weights.npz' ], [ '.zip' ]); + this.register('./numpy', [ '.npz', '.npy', '.pkl', '.pickle', '.model', '.model2', '.mge', '.joblib' ]); + this.register('./lasagne', [ '.pkl', '.pickle', '.joblib', '.model', '.pkl.z', '.joblib.z' ]); + this.register('./lightgbm', [ '.txt', '.pkl', '.model' ]); + this.register('./sklearn', [ '.pkl', '.pickle', '.joblib', '.model', '.meta', '.pb', '.pt', '.h5', '.pkl.z', '.joblib.z', '.pickle.dat' ]); + this.register('./megengine', [ '.tm', '.mge' ]); + this.register('./pickle', [ '.pkl', '.pickle', '.joblib', '.model', '.meta', '.pb', '.pt', '.h5', '.pkl.z', '.joblib.z', '.pdstates', '.mge' ]); + this.register('./cntk', [ '.model', '.cntk', '.cmf', '.dnn' ]); + this.register('./uff', [ '.uff', '.pb', '.pbtxt', '.uff.txt', '.trt', '.engine' ]); + this.register('./paddle', [ '.pdmodel', '.pdiparams', '.pdparams', '.pdopt', '.paddle', '__model__', '.__model__', '.pbtxt', '.txt', '.tar', '.tar.gz', '.nb' ]); + this.register('./bigdl', [ '.model', '.bigdl' ]); + this.register('./darknet', [ '.cfg', '.model', '.txt', '.weights' ]); + this.register('./mediapipe', [ '.pbtxt' ]); + this.register('./rknn', [ '.rknn', '.nb', '.onnx', '.json' ]); + this.register('./dlc', [ '.dlc', 'model', '.params' ]); + this.register('./armnn', [ '.armnn', '.json' ]); + this.register('./mnn', ['.mnn']); + this.register('./ncnn', [ '.param', '.bin', '.cfg.ncnn', '.weights.ncnn', '.ncnnmodel' ]); + this.register('./tnn', [ '.tnnproto', '.tnnmodel' ]); + this.register('./tengine', ['.tmfile']); + this.register('./mslite', [ '.ms']); + this.register('./barracuda', [ '.nn' ]); + this.register('./circle', [ '.circle' ]); + this.register('./dnn', [ '.dnn' ]); + this.register('./xmodel', [ '.xmodel' ]); + this.register('./kmodel', [ '.kmodel' ]); + this.register('./flux', [ '.bson' ]); + this.register('./dl4j', [ '.json', '.bin' ]); + this.register('./openvino', [ '.xml', '.bin' ]); + this.register('./mlnet', [ '.zip', '.mlnet' ]); + this.register('./acuity', [ '.json' ]); + this.register('./imgdnn', [ '.dnn', 'params', '.json' ]); + this.register('./flax', [ '.msgpack' ]); + this.register('./om', [ '.om', '.onnx', '.pb', '.engine' ]); + this.register('./gguf', [ '.gguf', /^[^.]+$/ ]); + this.register('./nnabla', [ '.nntxt' ], [ '.nnp' ]); + this.register('./hickle', [ '.h5', '.hkl' ]); + this.register('./nnef', [ '.nnef', '.dat' ]); + this.register('./onednn', [ '.json']); + this.register('./mlir', [ '.mlir']); + this.register('./sentencepiece', [ '.model' ]); + this.register('./hailo', [ '.hn', '.har', '.metadata.json' ]); + this.register('./nnc', [ '.nnc' ]); + this.register('./safetensors', [ '.safetensors', '.safetensors.index.json' ]); + this.register('./modular', [ '.maxviz' ]); + this.register('./cambricon', [ '.cambricon' ]); + this.register('./weka', [ '.model' ]); + } + + register(module, factories, containers) { + for (const pattern of factories) { + this._factories.push({ pattern: pattern, module: module }); + this._patterns.add(pattern); + } + for (const pattern of containers || []) { + this._patterns.add(pattern); + } + } + + async open(context) { + try 
{ + await this._openSignature(context); + const content = new view.Context(context); + const model = await this._openContext(content); + if (!model) { + const check = (obj) => { + if (obj instanceof Error) { + throw obj; + } + return obj instanceof Map && obj.size > 0; + }; + let entries = context.entries; + if (!check(entries)) { + entries = content.peek('zip'); + if (!check(entries)) { + entries = content.peek('tar'); + if (!check(entries)) { + entries = content.peek('gzip'); + } + } + } + if (!check(entries)) { + this._unsupported(content); + } + const entryContext = await this._openEntries(entries); + if (!entryContext) { + this._unsupported(content); + } + return this._openContext(entryContext); + } + return model; + } catch (error) { + if (error && context.identifier) { + error.context = context.identifier; + } + throw error; + } + } + + _unsupported(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + const stream = context.stream; + const callbacks = [ + (stream) => zip.Archive.open(stream, 'zip'), + (stream) => tar.Archive.open(stream), + (stream) => zip.Archive.open(stream, 'gzip') + ]; + for (const callback of callbacks) { + let archive = null; + try { + /* eslint-disable no-await-in-loop */ + archive = callback(stream); + /* eslint-enable no-await-in-loop */ + } catch (error) { + // continue regardless of error + } + if (archive) { + throw new view.Error("Archive contains no model files."); + } + } + const json = () => { + const obj = context.peek('json'); + if (obj) { + const formats = [ + { name: 'Netron metadata', tags: [ '[].name', '[].schema' ] }, + { name: 'Netron metadata', tags: [ '[].name', '[].attributes' ] }, + { name: 'Netron metadata', tags: [ '[].name', '[].category' ] }, + { name: 'Netron test data', tags: [ '[].type', '[].target', '[].source', '[].format', '[].link' ] }, + { name: 'Darkflow metadata', tags: [ 'net', 'type', 'model' ] }, + { name: 'keras-yolo2 configuration', tags: [ 'model', 'train', 'valid' ] }, + { name: 'Vulkan SwiftShader ICD manifest', tags: [ 'file_format_version', 'ICD' ] }, + { name: 'DeepLearningExamples configuration', tags: [ 'attention_probs_dropout_prob', 'hidden_act', 'hidden_dropout_prob', 'hidden_size', ] }, + { name: 'GitHub page data', tags: [ 'payload', 'title' ] }, + { name: 'NuGet assets', tags: [ 'version', 'targets', 'packageFolders' ] }, + { name: 'NuGet data', tags: [ 'format', 'restore', 'projects' ] }, + { name: 'NPM package', tags: [ 'name', 'version', 'dependencies' ] }, + { name: 'NetworkX adjacency_data', tags: [ 'directed', 'graph', 'nodes' ] }, + { name: 'Waifu2x data', tags: [ 'name', 'arch_name', 'channels' ] }, + { name: 'Waifu2x data', tags: [ '[].nInputPlane', '[].nOutputPlane', '[].weight', '[].bias' ] }, + { name: 'Brain.js data', tags: [ 'type', 'sizes', 'layers' ] }, + { name: 'Custom Vision metadata', tags: [ 'CustomVision.Metadata.Version' ] }, + { name: 'W&B metadata', tags: [ 'program', 'host', 'executable' ] } + ]; + const match = (obj, tag) => { + if (tag.startsWith('[].')) { + tag = tag.substring(3); + return (Array.isArray(obj) && obj.some((item) => Object.prototype.hasOwnProperty.call(item, tag))); + } + return Object.prototype.hasOwnProperty.call(obj, tag); + }; + for (const format of formats) { + if (format.tags.every((tag) => match(obj, tag))) { + throw new view.Error(`Invalid file content. 
File contains ${format.name}.`); + } + } + const content = `${JSON.stringify(obj).substring(0, 100).replace(/\s/, '').substring(0, 48)}...`; + throw new view.Error(`Unsupported JSON content '${content.length > 64 ? `${content.substring(0, 100)}...` : content}' for extension '.${extension}'.`); + } + }; + const pbtxt = () => { + const formats = [ + { name: 'ImageNet LabelMap data', tags: [ 'entry', 'entry.target_class' ] }, + { name: 'StringIntLabelMapProto data', tags: [ 'item', 'item.id', 'item.name' ] }, + { name: 'caffe.LabelMap data', tags: [ 'item', 'item.name', 'item.label' ] }, + { name: 'Triton Inference Server configuration', tags: [ 'name', 'platform', 'input', 'output' ] }, + { name: 'TensorFlow OpList data', tags: [ 'op', 'op.name', 'op.input_arg' ] }, + { name: 'vitis.ai.proto.DpuModelParamList data', tags: [ 'model', 'model.name', 'model.kernel' ] }, + { name: 'object_detection.protos.DetectionModel data', tags: [ 'model', 'model.ssd' ] }, + { name: 'object_detection.protos.DetectionModel data', tags: [ 'model', 'model.faster_rcnn' ] }, + { name: 'tensorflow.CheckpointState data', tags: [ 'model_checkpoint_path', 'all_model_checkpoint_paths' ] }, + { name: 'apollo.perception.camera.traffic_light.detection.DetectionParam data', tags: [ 'min_crop_size', 'crop_method' ] }, + { name: 'tidl_meta_arch.TIDLMetaArch data', tags: [ 'caffe_ssd' ] }, // https://github.com/TexasInstruments/edgeai-mmdetection/blob/master/mmdet/utils/proto/mmdet_meta_arch.proto + { name: 'tidl_meta_arch.TIDLMetaArch data', tags: [ 'tf_od_api_ssd' ] }, + { name: 'tidl_meta_arch.TIDLMetaArch data', tags: [ 'tidl_ssd' ] }, + { name: 'tidl_meta_arch.TIDLMetaArch data', tags: [ 'tidl_faster_rcnn' ] }, + { name: 'tidl_meta_arch.TIDLMetaArch data', tags: [ 'tidl_yolo' ] }, + { name: 'tidl_meta_arch.TIDLMetaArch data', tags: [ 'tidl_retinanet' ] }, + { name: 'domi.InsertNewOps data', tags: [ 'aipp_op' ] } // https://github.com/Ascend/parser/blob/development/parser/proto/insert_op.proto + ]; + const tags = context.tags('pbtxt'); + if (tags.size > 0) { + for (const format of formats) { + if (format.tags.every((tag) => tags.has(tag))) { + const error = new view.Error(`Invalid file content. File contains ${format.name}.`); + error.context = context.identifier; + throw error; + } + } + const entries = []; + entries.push(...Array.from(tags).filter(([key]) => key.toString().indexOf('.') === -1)); + entries.push(...Array.from(tags).filter(([key]) => key.toString().indexOf('.') !== -1)); + const content = entries.map(([key, value]) => value === true ? key : `${key}:${JSON.stringify(value)}`).join(','); + throw new view.Error(`Unsupported Protocol Buffers text content '${content.length > 64 ? 
`${content.substring(0, 100)}...` : content}' for extension '.${extension}'.`); + } + }; + const pb = () => { + const tags = context.tags('pb+'); + if (Object.keys(tags).length > 0) { + const formats = [ + { name: 'sentencepiece.ModelProto data', tags: [[1,[[1,2],[2,5],[3,0]]],[2,[[1,2],[2,2],[3,0],[4,0],[5,2],[6,0],[7,2],[10,5],[16,0],[40,0],[41,0],[42,0],[43,0]]],[3,[]],[4,[]],[5,[]]] }, + { name: 'mediapipe.BoxDetectorIndex data', tags: [[1,[[1,[[1,[[1,5],[2,5],[3,5],[4,5],[6,0],[7,5],[8,5],[10,5],[11,0],[12,0]]],[2,5],[3,[]]]],[2,false],[3,false],[4,false],[5,false]]],[2,false],[3,false]] }, + { name: 'third_party.tensorflow.python.keras.protobuf.SavedMetadata data', tags: [[1,[[1,[[1,0],[2,0]]],[2,0],[3,2],[4,2],[5,2]]]] }, + { name: 'pblczero.Net data', tags: [[1,5],[2,2],[3,[[1,0],[2,0],[3,0]],[10,[[1,[]],[2,[]],[3,[]],[4,[]],[5,[]],[6,[]]]],[11,[]]]] }, // https://github.com/LeelaChessZero/lczero-common/blob/master/proto/net.proto + { name: 'optimization_guide.proto.PageTopicsOverrideList', tags: [[1,[[1,2],[2,[]]]]] }, // https://github.com/chromium/chromium/blob/main/components/optimization_guide/proto/page_topics_override_list.proto + { name: 'optimization_guide.proto.ModelInfo', tags: [[1,0],[2,0],[4,0],[6,[]],[7,[]],[9,0]] } // https://github.com/chromium/chromium/blob/22b0d711657b451b61d50dd2e242b3c6e38e6ef5/components/optimization_guide/proto/models.proto#L80 + ]; + const match = (tags, schema) => { + for (const [key, inner] of schema) { + const value = tags[key]; + if (value === undefined) { + continue; + } + if (inner === false) { + return false; + } + if (Array.isArray(inner)) { + if (typeof value !== 'object' || !match(value, inner)) { + return false; + } + } else if (inner !== value) { + if (inner === 2 && !Array.isArray(value) && Object(value) === (value) && Object.keys(value).length === 0) { + return true; + } + return false; + } + } + return true; + }; + const tags = context.tags('pb+'); + for (const format of formats) { + if (match(tags, format.tags)) { + const error = new view.Error(`Invalid file content. File contains ${format.name}.`); + error.context = context.identifier; + throw error; + } + } + const format = (tags) => { + const content = Object.entries(tags).map(([key, value]) => { + return `${key}:${Object(value) === value ? `{${format(value)}}` : value}`; + }); + return content.join(','); + }; + const content = format(tags); + throw new view.Error(`Unsupported Protocol Buffers content '${content.length > 64 ? `${content.substring(0, 100)}...` : content}' for extension '.${extension}'.`); + } + }; + const flatbuffers = () => { + const tags = context.tags('flatbuffers'); + if (tags.has('file_identifier')) { + const file_identifier = tags.get('file_identifier'); + const formats = [ + { name: 'onnxruntime.experimental.fbs.InferenceSession data', identifier: 'ORTM' }, + { name: 'tflite.Model data', identifier: 'TFL3' } + ]; + for (const format of formats) { + if (file_identifier === format.identifier) { + throw new view.Error(`Invalid file content. File contains ${format.name}.`); + } + } + } + }; + const xml = () => { + const tags = context.tags('xml'); + if (tags.size > 0) { + const formats = [ + { name: 'OpenCV storage data', tags: [ 'opencv_storage' ] }, + { name: 'XHTML markup', tags: [ 'http://www.w3.org/1999/xhtml:html' ] } + ]; + for (const format of formats) { + if (format.tags.some((tag) => tags.has(tag))) { + const error = new view.Error(`Invalid file content. 
File contains ${format.name}.`); + error.content = context.identifier; + throw error; + } + } + throw new view.Error(`Unsupported XML content '${tags.keys().next().value}'.`); + } + }; + const hdf5 = () => { + const obj = context.peek('hdf5'); + if (obj instanceof Error) { + throw obj; + } + }; + const unknown = () => { + if (stream) { + stream.seek(0); + const buffer = stream.peek(Math.min(16, stream.length)); + const bytes = Array.from(buffer).map((c) => (c < 16 ? '0' : '') + c.toString(16)).join(''); + const content = stream.length > 268435456 ? `(${bytes}) [${stream.length}]`: `(${bytes})`; + throw new view.Error(`Unsupported file content ${content} for extension '.${extension}'.`); + } + throw new view.Error("Unsupported file directory."); + }; + json(); + pbtxt(); + pb(); + flatbuffers(); + xml(); + hdf5(); + unknown(); + } + + async _openContext(context) { + const modules = this._filter(context).filter((module) => module && module.length > 0); + const errors = []; + let success = false; + const next = async () => { + if (modules.length > 0) { + let module = null; + try { + const id = modules.shift(); + module = await this._host.require(id); + if (!module.ModelFactory) { + throw new view.Error(`Failed to load module '${id}'.`); + } + } catch (error) { + success = true; + modules.splice(0, modules.length); + errors.push(error); + } + if (module) { + try { + const modelFactory = new module.ModelFactory(); + const target = modelFactory.match(context); + if (target) { + success = true; + const model = await modelFactory.open(context, target); + if (!model.identifier) { + model.identifier = context.identifier; + } + return model; + } + } catch (error) { + if (context.stream && context.stream.position !== 0) { + context.stream.seek(0); + } + errors.push(error); + } + } + return await next(); + } + if (success) { + if (errors.length === 1) { + throw errors[0]; + } + throw new view.Error(errors.map((err) => err.message).join('\n')); + } + return null; + }; + return await next(); + } + + async _openEntries(entries) { + try { + const rootFolder = (files) => { + const map = files.map((file) => file.split('/').slice(0, -1)); + const at = (index) => (list) => list[index]; + const rotate = (list) => list.length === 0 ? [] : list[0].map((item, index) => list.map(at(index))); + const equals = (list) => list.every((item) => item === list[0]); + const folder = rotate(map).filter(equals).map(at(0)).join('/'); + return folder.length === 0 ? 
folder : `${folder}/`; + }; + const list = Array.from(entries).map(([name, stream]) => { + return { name: name, stream: stream }; + }); + const files = list.filter((entry) => { + if (entry.name.endsWith('/')) { + return false; + } + if (entry.name.split('/').pop().startsWith('.')) { + return false; + } + if (!entry.name.startsWith('./') && entry.name.startsWith('.')) { + return false; + } + return true; + }); + const folder = rootFolder(files.map((entry) => entry.name)); + const filter = async (queue, entries) => { + entries = new Map(Array.from(entries) + .filter(([path]) => path.startsWith(folder)) + .map(([path, stream]) => [ path.substring(folder.length), stream ])); + const entryContext = new view.EntryContext(this._host, entries); + let matches = []; + for (const entry of queue) { + const identifier = entry.name.substring(folder.length); + const context = new view.Context(entryContext, identifier, entry.stream); + const modules = this._filter(context); + for (const id of modules) { + /* eslint-disable no-await-in-loop */ + const module = await this._host.require(id); + /* eslint-enable no-await-in-loop */ + if (!module.ModelFactory) { + throw new view.ArchiveError(`Failed to load module '${id}'.`, null); + } + const modelFactory = new module.ModelFactory(); + if (modelFactory.match(context)) { + matches.push(context); + break; + } + } + } + if (matches.length === 0) { + return null; + } + // PyTorch + if (matches.length === 2 && + matches.some((context) => context.identifier === 'serialized_exported_program.json') && + matches.some((context) => context.identifier === 'serialized_state_dict.pt')) { + matches = matches.filter((context) => context.identifier === 'serialized_exported_program.json'); + } + if (matches.length === 3 && + matches.some((context) => context.identifier === 'serialized_exported_program.json') && + matches.some((context) => context.identifier === 'serialized_state_dict.pt') && + matches.some((context) => context.identifier === 'serialized_constants.pt')) { + matches = matches.filter((context) => context.identifier === 'serialized_exported_program.json'); + } + // MXNet + if (matches.length === 2 && + matches.some((context) => context.identifier.toLowerCase().endsWith('.params')) && + matches.some((context) => context.identifier.toLowerCase().endsWith('-symbol.json'))) { + matches = matches.filter((context) => context.identifier.toLowerCase().endsWith('.params')); + } + // TensorFlow.js + if (matches.length > 0 && + matches.some((context) => context.identifier.toLowerCase().endsWith('.bin')) && + matches.some((context) => context.identifier.toLowerCase().endsWith('.json'))) { + matches = matches.filter((context) => context.identifier.toLowerCase().endsWith('.json')); + } + // ncnn + if (matches.length > 0 && + matches.some((context) => context.identifier.toLowerCase().endsWith('.bin')) && + matches.some((context) => context.identifier.toLowerCase().endsWith('.param'))) { + matches = matches.filter((context) => context.identifier.toLowerCase().endsWith('.param')); + } + // ncnn + if (matches.length > 0 && + matches.some((context) => context.identifier.toLowerCase().endsWith('.bin')) && + matches.some((context) => context.identifier.toLowerCase().endsWith('.param.bin'))) { + matches = matches.filter((context) => context.identifier.toLowerCase().endsWith('.param.bin')); + } + // NNEF + if (matches.length > 0 && + matches.some((context) => context.identifier.toLowerCase().endsWith('.nnef')) && + matches.some((context) => 
context.identifier.toLowerCase().endsWith('.dat'))) { + matches = matches.filter((context) => context.identifier.toLowerCase().endsWith('.nnef')); + } + // Paddle + if (matches.length > 0 && + matches.some((context) => context.identifier.toLowerCase().endsWith('.pdmodel')) && + (matches.some((context) => context.identifier.toLowerCase().endsWith('.pdparams')) || + matches.some((context) => context.identifier.toLowerCase().endsWith('.pdopt')) || + matches.some((context) => context.identifier.toLowerCase().endsWith('.pdiparams')))) { + matches = matches.filter((context) => context.identifier.toLowerCase().endsWith('.pdmodel')); + } + // Paddle Lite + if (matches.length > 0 && + matches.some((context) => context.identifier.toLowerCase().split('/').pop() === '__model__.nb') && + matches.some((context) => context.identifier.toLowerCase().split('/').pop() === 'param.nb')) { + matches = matches.filter((context) => context.identifier.toLowerCase().split('/').pop() == '__model__.nb'); + } + // TensorFlow Bundle + if (matches.length > 1 && + matches.some((context) => context.identifier.toLowerCase().endsWith('.data-00000-of-00001'))) { + matches = matches.filter((context) => !context.identifier.toLowerCase().endsWith('.data-00000-of-00001')); + } + // TensorFlow SavedModel + if (matches.length === 2 && + matches.some((context) => context.identifier.toLowerCase().split('/').pop() === 'keras_metadata.pb')) { + matches = matches.filter((context) => context.identifier.toLowerCase().split('/').pop() !== 'keras_metadata.pb'); + } + // Keras + if (matches.length === 3 && + matches.some((context) => context.identifier.toLowerCase().split('/').pop() === 'model.weights.h5' || context.identifier.toLowerCase().split('/').pop() === 'model.weights.npz') && + matches.some((context) => context.identifier.toLowerCase().split('/').pop() === 'config.json') && + matches.some((context) => context.identifier.toLowerCase().split('/').pop() === 'metadata.json')) { + matches = matches.filter((context) => context.identifier.toLowerCase().split('/').pop() == 'model.weights.h5' || context.identifier.toLowerCase().split('/').pop() === 'model.weights.npz'); + } + if (matches.length === 2 && + matches.some((context) => context.identifier.toLowerCase().split('/').pop() === 'model.weights.h5' || context.identifier.toLowerCase().split('/').pop() === 'model.weights.npz') && + matches.some((context) => context.identifier.toLowerCase().split('/').pop() === 'config.json')) { + matches = matches.filter((context) => context.identifier.toLowerCase().split('/').pop() == 'model.weights.h5' || context.identifier.toLowerCase().split('/').pop() === 'model.weights.npz'); + } + if (matches.length === 2 && + matches.some((context) => context.identifier.toLowerCase().split('/').pop() === 'config.json') && + matches.some((context) => context.identifier.toLowerCase().split('/').pop() === 'metadata.json')) { + matches = matches.filter((context) => context.identifier.toLowerCase().split('/').pop() == 'config.json'); + } + // Hailo + if (matches.length >= 2 && + matches.some((context) => context.identifier.toLowerCase().endsWith('.metadata.json'))) { + matches = matches.filter((context) => context.identifier.toLowerCase().endsWith('.metadata.json')); + } + if (matches.length > 1) { + throw new view.ArchiveError('Archive contains multiple model files.'); + } + const match = matches.shift(); + return match; + }; + const queue = files.slice(0).filter((entry) => entry.name.substring(folder.length).indexOf('/') < 0); + const context = await filter(queue, 
+        const queue = files.slice(0).filter((entry) => entry.name.substring(folder.length).indexOf('/') < 0);
+        const context = await filter(queue, entries);
+        if (!context) {
+            const queue = files.slice(0).filter((entry) => entry.name.substring(folder.length).indexOf('/') >= 0);
+            return await filter(queue, entries);
+        }
+        return context;
+        } catch (error) {
+            throw new view.ArchiveError(error.message);
+        }
+    }
+
+    accept(identifier, size) {
+        const extension = identifier.indexOf('.') === -1 ? '' : identifier.split('.').pop().toLowerCase();
+        identifier = identifier.toLowerCase().split('/').pop();
+        let accept = false;
+        for (const pattern of this._patterns) {
+            if ((typeof pattern === 'string' && identifier.endsWith(pattern)) ||
+                (pattern instanceof RegExp && pattern.exec(identifier))) {
+                accept = true;
+                break;
+            }
+        }
+        this._host.event('model_file', {
+            file_extension: extension,
+            file_size: size || 0,
+            file_accept: accept ? 1 : 0
+        });
+        return accept;
+    }
+
+    _filter(context) {
+        const identifier = context.identifier.toLowerCase().split('/').pop();
+        const list = this._factories.filter((entry) =>
+            (typeof entry.pattern === 'string' && identifier.endsWith(entry.pattern)) ||
+            (entry.pattern instanceof RegExp && entry.pattern.test(identifier)));
+        return Array.from(new Set(list.map((entry) => entry.module)));
+    }
+
+    async _openSignature(context) {
+        const stream = context.stream;
+        if (stream) {
+            let empty = true;
+            let position = 0;
+            while (position < stream.length) {
+                const buffer = stream.read(Math.min(4096, stream.length - position));
+                position += buffer.length;
+                if (!buffer.every((value) => value === 0x00)) {
+                    empty = false;
+                    break;
+                }
+            }
+            stream.seek(0);
+            if (empty) {
+                throw new view.Error('File has no content.');
+            }
+            /* eslint-disable no-control-regex */
+            const entries = [
+                { name: 'ELF executable', value: /^\x7FELF/ },
+                { name: 'PNG image', value: /^\x89PNG/ },
+                { name: 'Git LFS header', value: /^version https:\/\/git-lfs.github.com/ },
+                { name: 'Git LFS header', value: /^\s*oid sha256:/ },
+                { name: 'GGML data', value: /^lmgg|fmgg|tjgg|algg|fugg/ },
+                { name: 'HTML markup', value: /^\s*<html/ },
+                { name: 'HTML markup', value: /^\s*<!doctype html/ },
+                { name: 'HTML markup', value: /^\s*<!DOCTYPE html/ },
+                { name: 'HTML markup', value: /^\s*<!DOCTYPE HTML/ },
+                { name: 'HTML markup', value: /^\s*<\?xml/ }
+            ];
+            /* eslint-enable no-control-regex */
+            const content = String.fromCharCode.apply(null, stream.peek(Math.min(4096, stream.length)));
+            for (const entry of entries) {
+                if (entry.value.test(content)) {
+                    throw new view.Error(`Invalid file content. File contains ${entry.name}.`);
+                }
+            }
+        }
+        return context;
+    }
+};
diff --git a/weka.js b/weka.js
new file mode 100644
--- /dev/null
+++ b/weka.js
+
+const weka = {};
+const java = {};
+
+weka.ModelFactory = class {
+
+    match(context) {
+        try {
+            const stream = context.stream;
+            if (stream && stream.length >= 5) {
+                const signature = [ 0xac, 0xed ];
+                if (stream.peek(2).every((value, index) => value === signature[index])) {
+                    const reader = new java.io.InputObjectStream(stream);
+                    const obj = reader.read();
+                    if (obj && obj.$class && obj.$class.name) {
+                        return 'weka';
+                    }
+                }
+            }
+        } catch (err) {
+            // continue regardless of error
+        }
+        return undefined;
+    }
+
+    async open(context) {
+        const reader = new java.io.InputObjectStream(context.stream);
+        const obj = reader.read();
+        throw new weka.Error(`Unsupported type '${obj.$class.name}'.`);
+    }
+};
+
+weka.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'Error loading Weka model.';
+    }
+};
+
+java.io = {};
+
+java.io.InputObjectStream = class {
+
+    constructor(stream) {
+        // Object Serialization Stream Protocol
+        // https://www.cis.upenn.edu/~bcpierce/courses/629/jdkdocs/guide/serialization/spec/protocol.doc.html
+        if (stream.length < 5) {
+            throw new java.io.Error('Invalid stream size');
+        }
+        const signature = [ 0xac, 0xed ];
+        if (!stream.peek(2).every((value, index) => value === signature[index])) {
+            throw new java.io.Error('Invalid stream signature');
+        }
+        this._reader = new java.io.InputObjectStream.BinaryReader(stream.peek());
+        this._references = [];
+        this._reader.skip(2);
+        const version = this._reader.uint16();
+        if (version !== 0x0005) {
+            throw new java.io.Error(`Unsupported version '${version}'.`);
+        }
+    }
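+
+    // For orientation, the start of a typical stream this reader accepts
+    // (an assumed example, not taken from the source):
+    //   ac ed        STREAM_MAGIC
+    //   00 05        STREAM_VERSION
+    //   73           TC_OBJECT
+    //   72 ...       TC_CLASSDESC (name, serialVersionUID, flags, fields)
+    // read() below decodes exactly one such top-level object.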
+
+    read() {
+        return this._object();
+    }
+
+    _object() {
+        const code = this._reader.byte();
+        switch (code) {
+            case 0x73: { // TC_OBJECT
+                const obj = {};
+                obj.$class = this._classDesc();
+                this._newHandle(obj);
+                this._classData(obj);
+                return obj;
+            }
+            case 0x74: { // TC_STRING
+                return this._newString(false);
+            }
+            default: {
+                throw new java.io.Error(`Unsupported code '${code}'.`);
+            }
+        }
+    }
+
+    _classDesc() {
+        const code = this._reader.byte();
+        switch (code) {
+            case 0x72: // TC_CLASSDESC
+                this._reader.skip(-1);
+                return this._newClassDesc();
+            case 0x71: // TC_REFERENCE
+                return this._references[this._reader.uint32() - 0x7e0000];
+            case 0x70: // TC_NULL
+                return null;
+            default:
+                throw new java.io.Error(`Unsupported code '${code}'.`);
+        }
+    }
+
+    _newClassDesc() {
+        const code = this._reader.byte();
+        switch (code) {
+            case 0x72: { // TC_CLASSDESC
+                const classDesc = {};
+                classDesc.name = this._reader.string();
+                classDesc.id = this._reader.uint64().toString();
+                this._newHandle(classDesc);
+                classDesc.flags = this._reader.byte();
+                classDesc.fields = [];
+                const count = this._reader.uint16();
+                for (let i = 0; i < count; i++) {
+                    const field = {};
+                    field.type = String.fromCharCode(this._reader.byte());
+                    field.name = this._reader.string();
+                    if (field.type === '[' || field.type === 'L') {
+                        field.classname = this._object();
+                    }
+                    classDesc.fields.push(field);
+                }
+                if (this._reader.byte() !== 0x78) {
+                    throw new java.io.Error('Expected TC_ENDBLOCKDATA.');
+                }
+                classDesc.superClass = this._classDesc();
+                return classDesc;
+            }
+            case 0x7D: // TC_PROXYCLASSDESC
+                return null;
+            default:
+                throw new java.io.Error(`Unsupported code '${code}'.`);
+        }
+    }
+
+    _classData(/* obj */) {
+        /*
+        const classname = obj.$class.name;
+        let flags = obj.$class.flags;
+        let superClass = obj.$class.superClass;
+        while (superClass) {
+            flags |= superClass.flags;
+            superClass = superClass.superClass;
+        }
+        if (flags & 0x02) { // SC_SERIALIZABLE
+            const customObject = objects[classname];
+            const hasReadObjectMethod = customObject && customObject.readObject;
+            if (flags & 0x01) { // SC_WRITE_METHOD
+                if (!hasReadObjectMethod) {
+                    throw new Error('Class "' + classname + '" does not implement readObject().');
+                }
+                customObject.readObject(this, obj);
+                if (this._reader.byte() !== 0x78) { // TC_ENDBLOCKDATA
+                    throw new java.io.Error('Expected TC_ENDBLOCKDATA.');
+                }
+            } else {
+                if (hasReadObjectMethod) {
+                    customObject.readObject(this, obj);
+                    if (this._reader.byte() !== 0x78) { // TC_ENDBLOCKDATA
+                        throw new java.io.Error('Expected TC_ENDBLOCKDATA.');
+                    }
+                } else {
+                    this._nowrclass(obj);
+                }
+            }
+        } else if (flags & 0x04) { // SC_EXTERNALIZABLE
+            if (flags & 0x08) { // SC_BLOCK_DATA
+                this._objectAnnotation(obj);
+            } else {
+                this._externalContents();
+            }
+        } else {
+            throw new Error('Illegal flags: ' + flags);
+        }
+        */
+    }
+
+    _newString(long) {
+        const value = this._reader.string(long);
+        this._newHandle(value);
+        return value;
+    }
+
+    _newHandle(obj) {
+        this._references.push(obj);
+    }
+};
+
+java.io.InputObjectStream.BinaryReader = class {
+
+    constructor(buffer) {
+        this._buffer = buffer;
+        this._position = 0;
+        this._length = buffer.length;
+        this._view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
+    }
+
+    skip(offset) {
+        this._position += offset;
+        if (this._position > this._length) {
+            throw new java.io.Error(`Expected ${this._position - this._length} more 
bytes. The file might be corrupted. Unexpected end of file.`); + } + } + + byte() { + const position = this._position; + this.skip(1); + return this._buffer[position]; + } + + uint16() { + const position = this._position; + this.skip(2); + return this._view.getUint16(position, false); + } + + uint32() { + const position = this._position; + this.skip(4); + return this._view.getUint32(position, false); + } + + uint64() { + const position = this._position; + this.skip(8); + return this._view.getUint64(position, false); + } + + string(long) { + const size = long ? this.uint64().toNumber() : this.uint16(); + const position = this._position; + this.skip(size); + this._decoder = this._decoder || new TextDecoder('utf-8'); + return this._decoder.decode(this._buffer.subarray(position, this._position)); + } +}; + +java.io.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Object Serialization Stream Protocol.'; + } +}; + +export const ModelFactory = weka.ModelFactory; diff --git a/xml.js b/xml.js new file mode 100644 index 00000000000..cf82e3c1cc3 --- /dev/null +++ b/xml.js @@ -0,0 +1,1753 @@ + +const xml = {}; +import * as text from './text.js'; + +// https://www.w3.org/TR/xml + +xml.TextReader = class { + + static open(data, callback) { + const decoder = text.Decoder.open(data); + for (;;) { + const c = decoder.decode(); + if (c === '<') { + break; + } + if (c === ' ' || c === '\n' || c === '\r' || c === '\t') { + continue; + } + return null; + } + return new xml.TextReader(data, callback); + } + + constructor(data, callback) { + this._data = data; + this._callback = callback; + this._entities = new Map([ [ 'quot', '"' ], [ 'amp', '&' ], [ 'apos', "'" ], [ 'lt', '<' ], [ 'gt', '>' ] ]); + this._nameStartCharRegExp = /[:A-Z_a-z\xC0-\xD6\xD8-\xF6\xF8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/; + this._nameCharRegExp = new RegExp(`[-.0-9\\xB7${this._nameStartCharRegExp.source.slice(1, -1)}]`); + xml.Utility.nameStartCharRegExp = this._nameStartCharRegExp; + } + + peek() { + this._peek = true; + const value = this.read(); + delete this._peek; + return value; + } + + read() { + this._stack = []; + this._context = []; + this._pushBuffer(this._data, '', '', false); + this._version = 0; + /* eslint-disable */ + this._charRegExp = /[\x09\x0a\x0d\x20-\uD7FF\uE000-\uFFFD]/; + /* eslint-enable */ + this._parameterEntities = false; + this._characterData = true; + this._push(new xml.Document()); + const document = this._document(); + for (;;) { + this._start = this._position; + switch (this._char) { + case '<': { + this._next(); + switch (this._char) { + case '?': { + this._processingInstruction(); + break; + } + case '!': { + this._next(); + if (this._match('--')) { + this._comment(); + } else if (this._match('[CDATA')) { + this._assert(this._stack.length > 1); + this._characterData = true; + this._expect('['); + const data = this._terminal(']]>'); + const node = document.createCDATASection(data); + this._appendChild(node); + } else if (this._match('DOCTYPE')) { + this._assert(this._stack.length > 1 || !document.documentElement || !document.documentType); + this._whitespace(1); + const name = this._name(); + this._assert(name !== null); + let systemId = ''; + let publicId = ''; + let whitespace = this._whitespace(0); + if (whitespace && this._match('SYSTEM')) { + this._whitespace(1); + systemId = this._systemLiteral(); + this._whitespace(0); + whitespace = true; + } else if (whitespace && 
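/* external ID follows: 'PUBLIC' PubidLiteral SystemLiteral */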
this._match('PUBLIC')) {
+                                    this._whitespace(1);
+                                    publicId = this._pubidLiteral();
+                                    this._whitespace(1);
+                                    systemId = this._systemLiteral();
+                                    this._whitespace(0);
+                                    whitespace = true;
+                                }
+                                const node = document.createDocumentType(name, publicId, systemId);
+                                this._appendChild(node);
+                                this._push(node);
+                                node.parameterEntities = new xml.NamedNodeMap();
+                                node.elements = new xml.NamedNodeMap();
+                                this._parameterEntities = true;
+                                this._characterData = false;
+                                const internalSubset = whitespace && this._match('[');
+                                if (internalSubset) {
+                                    this._internalSubset(']');
+                                }
+                                if (systemId && !this._standalone) {
+                                    this._pushResource(systemId, '', true);
+                                    this._internalSubset(undefined);
+                                    this._popContext();
+                                }
+                                this._characterData = true;
+                                this._parameterEntities = false;
+                                const values = node.entities.filter((entity) => entity.value).map((entity) => entity.value);
+                                for (const entity of node.entities.filter((entity) => entity.notationName)) {
+                                    const reference = `&${entity.localName};`;
+                                    if (values.some((value) => value.indexOf(reference) >= 0)) {
+                                        this._error(`Entity references unparsed entity '${entity.localName}'`);
+                                    }
+                                }
+                                if (internalSubset) {
+                                    this._expect(']');
+                                    this._whitespace(0);
+                                }
+                                this._expect('>');
+                                this._assert(this._pop().nodeType === xml.NodeType.DocumentType);
+                            } else {
+                                this._unexpected();
+                            }
+                            break;
+                        }
+                        case '/': {
+                            this._next();
+                            const name = this._name();
+                            this._assert(name !== null);
+                            this._whitespace(0);
+                            this._expect('>');
+                            const node = this._pop();
+                            const nodeName = node.prefix ? `${node.prefix}:${node.localName}` : node.localName;
+                            if (name !== nodeName) {
+                                this._error(`Opening tag <${nodeName}> and ending tag mismatch`, this._start);
+                            }
+                            break;
+                        }
+                        default: {
+                            this._assert(this._stack.length > 1 || !document.documentElement);
+                            const name = this._name();
+                            this._assert(name !== null);
+                            this._assert(!name.startsWith('xmlns:'));
+                            const attributes = [];
+                            let whitespace = this._whitespace(0);
+                            if (whitespace) {
+                                while (this._char !== '/' && this._char !== '>') {
+                                    if (!whitespace) {
+                                        this._unexpected();
+                                    }
+                                    const position = this._position;
+                                    const name = this._name();
+                                    if (!name) {
+                                        this._unexpected();
+                                    }
+                                    this._whitespace(0);
+                                    this._expect('=');
+                                    this._whitespace(0);
+                                    const valuePosition = this._position;
+                                    const value = this._attributeValue();
+                                    attributes.push({
+                                        qualifiedName: name,
+                                        value: value,
+                                        position: position,
+                                        valuePosition: valuePosition
+                                    });
+                                    whitespace = this._whitespace(0);
+                                    if (name === 'xmlns' && (!this._validateNamespace(value) || value === 'http://www.w3.org/2000/xmlns/' || value === 'http://www.w3.org/XML/1998/namespace')) {
+                                        this._error(`Invalid namespace '${value}'`, valuePosition);
+                                    }
+                                    if (name === 'xml:space' && value !== 'preserve' && value !== 'default') {
+                                        this._error(`Unexpected xml:space attribute value '${value}'`, position);
+                                    }
+                                }
+                            }
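+                            // Declarations collected above are applied innermost-first: an
+                            // 'xmlns:p' on this element overrides any inherited 'p' binding.
+                            // Sketch (assumed input): in <a xmlns:p="urn:x"><p:b/></a> the
+                            // element 'p:b' resolves to namespaceURI 'urn:x'.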
+                            const namespaces = new Map();
+                            for (const entry of attributes.reverse()) {
+                                const name = entry.qualifiedName;
+                                const value = entry.value;
+                                this._assert(name !== 'xmlns:');
+                                const [prefix, localName] = xml.Utility.split(name);
+                                entry.prefix = prefix;
+                                entry.localName = localName;
+                                if (entry.prefix !== null) {
+                                    this._assert(entry.localName !== '');
+                                    if (entry.prefix === 'xmlns' && entry.localName) {
+                                        if (!this._validateNamespace(value) || value === 'http://www.w3.org/2000/xmlns/') {
+                                            this._error(`Invalid namespace '${value}'`, entry.valuePosition);
+                                        }
+                                        if (entry.localName === 'xmlns' || (entry.localName === 'xml' && value !== 'http://www.w3.org/XML/1998/namespace') || (entry.localName !== 'xml' && value === 'http://www.w3.org/XML/1998/namespace')) {
+                                            this._error(`Invalid namespace prefix '${entry.localName}'`, entry.position);
+                                        }
+                                        if (this._version === 0 && value.length === 0) {
+                                            this._error('Invalid namespace declaration', entry.position);
+                                        }
+                                        namespaces.set(entry.localName, value);
+                                    }
+                                } else if (entry.localName === 'xmlns') {
+                                    namespaces.set('', value);
+                                }
+                            }
+                            const pair = xml.Utility.split(name);
+                            const prefix = pair[0] || '';
+                            const namespaceURI = namespaces.has(prefix) ? namespaces.get(prefix) : this._lookupNamespaceURI(prefix);
+                            let element = null;
+                            const documentType = document.documentType;
+                            const elementType = documentType ? documentType.elements.getNamedItem(name) : null;
+                            if (namespaceURI !== null) {
+                                this._assert(name === ':' || (!name.endsWith(':') && !name.startsWith(':')));
+                                if (prefix && namespaceURI === '') {
+                                    this._error(`Invalid namespace prefix '${prefix}'`, this._start);
+                                }
+                                element = document.createElementNS(namespaceURI, name);
+                            } else {
+                                this._assert((pair[0] === null && !name.endsWith(':')) || name === ':' || elementType !== null);
+                                element = document.createElement(name);
+                            }
+                            const parent = this._node();
+                            if (parent.nodeType === xml.NodeType.Document && parent.documentElement !== null) {
+                                this._error('Duplicate document element', this._start);
+                            }
+                            this._appendChild(element);
+                            const keys = new Set();
+                            for (const attr of attributes) {
+                                const name = attr.qualifiedName;
+                                const prefix = attr.prefix || '';
+                                const namespaceURI = namespaces.has(prefix) ? namespaces.get(prefix) : this._lookupNamespaceURI(prefix);
+                                let attribute = null;
+                                if (namespaceURI) {
+                                    attribute = document.createAttributeNS(namespaceURI, name);
+                                } else {
+                                    const attributeType = elementType ? elementType.attributes.getNamedItem(name) : null;
+                                    this._assert(name.indexOf(':') === -1 || attributeType);
+                                    attribute = document.createAttribute(name);
+                                }
+                                const key = `${attribute.namespaceURI || ''}|${attribute.localName}`;
+                                this._assert(!keys.has(key));
+                                keys.add(key);
+                                attribute.value = attr.value;
+                                attribute.ownerElement = element;
+                                element.setAttributeNode(attribute);
+                            }
+                            const close = this._match('/');
+                            this._expect('>');
+                            if (this._peek && this._stack.length === 1 && this._nodeType() === xml.NodeType.Document) {
+                                return this._pop();
+                            }
+                            if (!close) {
+                                this._push(element);
+                            }
+                            break;
+                        }
+                    }
+                    break;
+                }
+                default: {
+                    while (this._char === undefined && this._context.length > 0) {
+                        this._popContext();
+                    }
+                    if (this._char === undefined) {
+                        if (this._stack.length === 1 && this._nodeType() === xml.NodeType.Document) {
+                            this._assert(document.documentElement);
+                            const documentType = document.documentType;
+                            if (documentType) {
+                                delete documentType.parameterEntities;
+                                delete documentType.elements;
+                            }
+                            const value = this._pop();
+                            for (const key of Object.keys(this)) {
+                                if (key !== '_data' && key !== '_callback' && key !== '_entities' && !key.startsWith('_name')) {
+                                    delete this[key];
+                                }
+                            }
+                            return value;
+                        }
+                        this._unexpected();
+                    }
+                    const node = this._node();
+                    if (node.nodeType === xml.NodeType.Element) {
+                        const documentType = document.documentType;
+                        const name = node.prefix ? `${node.prefix}:${node.localName}` : node.localName;
+                        const elementType = documentType ? documentType.elements.getNamedItem(name) : null;
+                        this._characterData = elementType ?
elementType.characterData : false; + this._seek(this._position); + const data = []; + while (this._char !== '<' && this._char !== undefined) { + if (this._char === ']' && this._match(']]>')) { + this._unexpected(); + } + data.push(this._content()); + if (data.length > 65536) { + this._error('Invalid character data buffer size.'); + } + } + if (data.length > 0) { + const content = data.splice(0, data.length).join(''); + if (content.trim().length > 0) { + const node = document.createTextNode(content); + this._appendChild(node); + } + } + continue; + } + if (!this._whitespace(0)) { + this._unexpected(); + } + break; + } + } + } + } + + _internalSubset(terminal) { + for (;;) { + this._start = this._position; + switch (this._char) { + case '<': { + this._next(); + switch (this._char) { + case '?': { + this._processingInstruction(); + break; + } + case '!': { + this._next(); + if (this._match('--')) { + this._parameterEntities = false; + this._characterData = true; + this._comment(); + this._parameterEntities = true; + } else if (this._match('ENTITY')) { + const documentType = this._node(); + this._assert(documentType.nodeType === xml.NodeType.DocumentType); + this._parameterEntities = false; + this._whitespace(1); + const parameter = this._char === '%'; + if (parameter) { + this._next(); + this._whitespace(1); + } + this._parameterEntities = true; + const name = this._entityName(); + const node = documentType.createEntity(name); + let whitespace = this._whitespace(0); + if (whitespace && (this._char === '"' || this._char === "'")) { + node.value = this._entityValue(); + this._whitespace(0); + } else { + if (whitespace && this._match('SYSTEM')) { + this._whitespace(1); + node.systemId = this._systemLiteral(); + whitespace = this._whitespace(0); + } else if (whitespace && this._match('PUBLIC')) { + this._whitespace(1); + node.publicId = this._pubidLiteral(); + this._whitespace(1); + node.systemId = this._systemLiteral(); + whitespace = this._whitespace(0); + } else { + this._unexpected(); + } + if (whitespace && !parameter) { + if (this._match('NDATA')) { + this._whitespace(1); + const name = this._name(); + this._assert(name !== null); + node.notationName = name; + this._whitespace(0); + } + } + } + this._expect('>'); + if (parameter) { + documentType.parameterEntities.setNamedItem(node); + } else { + this._appendChild(node); + } + } else if (this._match('ELEMENT')) { + const documentType = this._node(); + this._assert(documentType.nodeType === xml.NodeType.DocumentType); + this._whitespace(1); + const name = this._name(); + this._assert(name !== null); + this._whitespace(1); + const elementType = this._elementType(name); + if (this._match('EMPTY')) { + this._whitespace(0); + } else if (this._match('ANY')) { + this._whitespace(0); + } else { + this._expect('('); + this._whitespace(0); + if (this._match('#PCDATA')) { + elementType.characterData = true; + this._whitespace(0); + if (this._match(')')) { + this._match('*'); + } else { + this._whitespace(0); + while (this._match('|')) { + this._whitespace(0); + const name = this._name(); + this._assert(name); + this._whitespace(0); + } + this._expect(')*'); + } + } else { + this._elementChildren(); + } + } + this._whitespace(0); + this._expect('>'); + } else if (this._match('ATTLIST')) { + const documentType = this._node(); + this._assert(documentType.nodeType === xml.NodeType.DocumentType); + this._whitespace(1); + const name = this._name(); + this._assert(name !== null); + const elementType = this._elementType(name); + while (this._whitespace(0)) { 
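+                                    // Each iteration consumes one attribute definition of the
+                                    // form 'Name Type Default', e.g.: version CDATA #REQUIRED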
+                                    const attributeType = this._attributeDefinition();
+                                    if (!attributeType) {
+                                        break;
+                                    }
+                                    elementType.attributes.setNamedItem(attributeType);
+                                }
+                                this._whitespace(0);
+                                this._expect('>');
+                            } else if (this._match('NOTATION')) {
+                                this._assert(this._nodeType() === xml.NodeType.DocumentType);
+                                const notation = { systemId: null, publicId: null };
+                                this._whitespace(1);
+                                notation.name = this._entityName();
+                                let whitespace = this._whitespace(0);
+                                if (whitespace && this._match('SYSTEM')) {
+                                    this._whitespace(1);
+                                    notation.systemId = this._systemLiteral();
+                                    whitespace = this._whitespace(0);
+                                }
+                                if (whitespace && this._match('PUBLIC')) {
+                                    this._whitespace(1);
+                                    notation.publicId = this._pubidLiteral();
+                                    if (this._whitespace(0) && (this._char === '"' || this._char === "'")) {
+                                        notation.systemId = this._systemLiteral();
+                                        this._whitespace(0);
+                                    }
+                                }
+                                this._assert(notation.systemId || notation.publicId);
+                                this._expect('>');
+                            } else if (this._match('[')) {
+                                this._whitespace(0);
+                                if (this._match('INCLUDE')) {
+                                    this._assert(this._context.length > 0);
+                                    this._whitespace(0);
+                                    this._expect('[');
+                                    this._internalSubset(']');
+                                    this._expect(']]>');
+                                } else if (this._match('IGNORE')) {
+                                    this._whitespace(0);
+                                    this._expect('[');
+                                    this._ignoreSectContents();
+                                }
+                            } else {
+                                this._unexpected();
+                            }
+                            break;
+                        }
+                        default: {
+                            break;
+                        }
+                    }
+                    break;
+                }
+                case '%': {
+                    this._resolveParameterEntityReference();
+                    break;
+                }
+                default: {
+                    if (this._char === terminal) {
+                        return;
+                    }
+                    if (!this._whitespace(0)) {
+                        this._unexpected();
+                    }
+                    break;
+                }
+            }
+        }
+    }
+
+    _ignoreSectContents() {
+        while (!this._match(']]>')) {
+            if (this._match('<![')) {
+                this._ignoreSectContents();
+            } else {
+                this._next();
+            }
+        }
+    }
+
+    _name() {
+        const position = this._position;
+        const name = [];
+        const c = this._char.codePointAt(0);
+        if (this._nameStartCharRegExp.test(this._char) || (c >= 0x10000 && c <= 0xEFFFF)) {
+            name.push(this._char);
+            this._next();
+            if (this._char !== undefined) {
+                let c = this._char.codePointAt(0);
+                while (this._nameCharRegExp.test(this._char) || (c >= 0x300 && c <= 0x36f) || (c >= 0x203F && c <= 0x2040)) {
+                    name.push(this._char);
+                    this._next();
+                    if (this._char === undefined || this._implicitSpace) {
+                        break;
+                    }
+                    c = this._char.codePointAt(0);
+                }
+            }
+        }
+        if (name.length > 0) {
+            return name.join('');
+        }
+        this._seek(position);
+        return null;
+    }
+
+    _nmtoken() {
+        const position = this._position;
+        const name = [];
+        let c = this._char.codePointAt(0);
+        while (this._nameCharRegExp.test(this._char) || (c >= 0x300 && c <= 0x36f) || (c >= 0x203F && c <= 0x2040)) {
+            name.push(this._char);
+            this._next();
+            if (this._char === undefined) {
+                break;
+            }
+            c = this._char.codePointAt(0);
+        }
+        if (name.length > 0) {
+            return name.join('');
+        }
+        this._seek(position);
+        return null;
+    }
+
+    _entityName() {
+        const position = this._position;
+        const name = this._name();
+        if (name === null) {
+            this._error('Expected entity name', position);
+        }
+        if (!name.endsWith(':') && name.indexOf(':') !== -1) {
+            this._error('Invalid colon in entity name', position);
+        }
+        return name;
+    }
+
+    _entityValue() {
+        const quote = this._char;
+        this._parameterEntities = false;
+        this._characterData = true;
+        const decoder = this._decoder;
+        const position = this._position;
+        this._next();
+        while (this._char !== quote) {
+            if (this._char === undefined) {
+                this._unexpected();
+            }
+            this._next();
+        }
+        const end = this._position;
+        this._parameterEntities = true;
+        this._seek(position);
+        this._next();
+        const data = [];
+        while (this._position !== end || this._decoder !== decoder) {
+            if (this._char === undefined) {
+                this._unexpected();
+            }
+            if (this._char === '%') {
+                if (this._context.length === 0) {
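+                    // '%name;' is only legal in entity values read from an external
+                    // context; in the internal subset it is rejected here.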
this._error('Invalid parameter entity reference in internal subset'); + } + this._assert(); + this._resolveParameterEntityReference(); + continue; + } + if (this._char === '&') { + data.push(this._entityReference()); + this._expect(';'); + continue; + } + + data.push(this._char); + this._next(); + } + this._next(); + this._parameterEntities = true; + this._characterData = false; + return data.join(''); + } + + _elementType(name) { + const documentType = this._document().documentType; + let elementType = documentType.elements.getNamedItem(name); + if (!elementType) { + elementType = { localName: name, characterData: false, attributes: new xml.NamedNodeMap() }; + documentType.elements.setNamedItem(elementType); + } + return elementType; + } + + _elementChildren() { + let separator = undefined; + const choice = new Set(); + for (;;) { + const name = this._name(); + if (name) { + this._assert(separator !== '|' || !choice.has(name)); + choice.add(name); + this._match('?') || this._match('*') || this._match('+'); + this._whitespace(0); + } else if (this._match('(')) { + this._elementChildren(); + this._whitespace(0); + } else { + this._unexpected(); + } + if (this._match(')')) { + break; + } + if (separator && separator !== this._char) { + this._unexpected(); + } + if (this._char !== '|' && this._char !== ',') { + this._unexpected(); + } + separator = this._char; + this._next(); + this._whitespace(0); + } + this._match('?') || this._match('*') || this._match('+'); + } + + _attributeDefinition() { + this._whitespace(0); + const name = this._name(); + if (name) { + this._whitespace(1); + if (this._match('CDATA') || this._match('IDREFS') || this._match('IDREF') || this._match('ID') || this._match('ENTITIES') || this._match('ENTITY') || this._match('NMTOKENS') || this._match('NMTOKEN') || + this._enumeratedType()) { + this._whitespace(1); + if (!this._match('#REQUIRED') && !this._match('#IMPLIED')) { + if (this._match('#FIXED')) { + this._whitespace(1); + } + this._parameterEntities = false; + this._attributeValue(); + this._parameterEntities = true; + } + return { localName: name }; + } + this._assert(false); + } + return null; + } + + _enumeratedType() { + if (this._match('NOTATION')) { + this._whitespace(1); + this._expect('('); + do { + this._whitespace(0); + const name = this._name(); + this._assert(name); + this._whitespace(0); + } + while (this._match('|')); + this._expect(')'); + return true; + } + if (this._match('(')) { + do { + this._whitespace(0); + const name = this._nmtoken(); + this._assert(name); + this._whitespace(0); + } + while (this._match('|')); + this._expect(')'); + return true; + } + return false; + } + + _content() { + const c = this._char !== '&' ? 
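/* a plain character, or the single-character expansion of an entity reference */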
this._char : this._resolveEntityReference();
+        if (c === undefined) {
+            return '';
+        }
+        const code = c.codePointAt(0);
+        if (!this._charRegExp.test(c) && (code < 0x10000 || code > 0x10FFFF)) {
+            this._unexpected();
+        }
+        this._next();
+        return c;
+    }
+
+    _attributeValue() {
+        const quote = this._char;
+        if (quote !== '"' && quote !== "'") {
+            this._unexpected();
+        }
+        this._characterData = true;
+        const decoder = this._decoder;
+        const position = this._position;
+        this._next();
+        while (this._char !== quote) {
+            if (this._char === undefined || this._char === '<') {
+                this._unexpected();
+            }
+            this._next();
+        }
+        const end = this._position;
+        this._characterData = false;
+        this._seek(position);
+        this._next();
+        const data = [];
+        while (this._position !== end || this._decoder !== decoder) {
+            if (this._char === undefined && this._context.length > 0) {
+                this._popContext();
+                continue;
+            }
+            if (this._char === '<') {
+                this._unexpected();
+            }
+            data.push(this._content());
+            if (data.length > 65536) {
+                this._error('Invalid character data buffer size.');
+            }
+        }
+        this._characterData = true;
+        this._next();
+        return data.join('');
+    }
+
+    _validateNamespace(value) {
+        if (value && (value.startsWith('#') || value.indexOf(':') === -1)) {
+            return false;
+        }
+        if (this._version > 0) {
+            return true;
+        }
+        return /^[A-Za-z0-9-._~:/?#[\]@!$&'()*+,;%=]*$/.exec(value) !== null;
+    }
+
+    _pubidLiteral() {
+        const quote = this._char;
+        if (quote !== '"' && quote !== "'") {
+            this._unexpected();
+        }
+        this._next();
+        const data = [];
+        while (this._char !== quote) {
+            if (/[a-zA-Z0-9-'()+,./:=?;!*#@$_%]/.test(this._char) || this._char === ' ' || this._char === '\r' || this._char === '\n') {
+                data.push(this._char);
+                this._next();
+                if (this._char === undefined) {
+                    this._unexpected();
+                }
+                continue;
+            }
+            this._unexpected();
+        }
+        this._next();
+        return data.join('');
+    }
+
+    _systemLiteral() {
+        const quote = this._char;
+        if (quote !== '"' && quote !== "'") {
+            this._unexpected();
+        }
+        this._next();
+        const data = [];
+        while (this._char !== quote) {
+            data.push(this._char);
+            this._next();
+            if (this._char === undefined) {
+                this._unexpected();
+            }
+        }
+        this._next();
+        const value = data.join('');
+        if (value.indexOf('#') >= 0) {
+            this._unexpected();
+        }
+        const match = /(.*\/)[^/]*/.exec(this._base);
+        return (match ?
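/* resolve the literal against the directory of the current base identifier */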
match[1] : '') + value; + } + + _terminal(terminal) { + const data = []; + while (!this._match(terminal)) { + if (this._char === undefined) { + this._unexpected(); + } + const c = this._char.codePointAt(0); + if (c !== 0x09 && c !== 0x0A && c !== 0x0D && (c < 0x20 || c > 0xD7FF) && (c < 0xE000 || c > 0xFFFD) && (c < 0x10000 || c > 0x10FFFF)) { + this._unexpected(); + } + data.push(this._char); + this._next(); + } + return data.join(''); + } + + _resolveParameterEntityReference() { + const position = this._position; + this._next(); + const name = this._name(); + this._assert(name !== null); + if (this._char === ';') { + const entity = this._document().documentType.parameterEntities.getNamedItem(name); + if (entity) { + const implicitSpace = !this._entity && !this._context.some((context) => context.entity); + if (entity.systemId) { + this._pushResource(entity.systemId, name, false); + } else { + this._pushString(entity.value, name, false); + } + if (implicitSpace) { + this._implicitSpace = true; + } + return; + } + this._error(`Undefined ENTITY '${name}'`, position); + } + this._unexpected(); + } + + _resolveEntityReference() { + const position = this._position; + let entity = this._entityReference(); + const name = entity.substring(1, entity.length - 1); + if (name.startsWith('#x')) { + const value = parseInt(name.substring(2), 16); + return String.fromCodePoint(value); + } else if (name.startsWith('#')) { + const value = parseInt(name.substring(1), 10); + return String.fromCodePoint(value); + } else if (this._entities.has(name)) { + return this._entities.get(name); + } + const documentType = this._document().documentType; + entity = documentType ? documentType.entities.getNamedItem(name) : null; + if (entity) { + if (entity.systemId) { + this._pushResource(entity.systemId, name, true); + } else { + this._pushString(entity.value, name, true); + } + } else if (this._context.length !== 0 || !documentType || documentType.parameterEntities.length === 0) { + this._error(`Undefined ENTITY '${name}'`, position); + } + return undefined; + } + + /* eslint-disable consistent-return */ + _entityReference() { + if (this._char === '&') { + const position = this._position; + this._next(); + if (this._match('#x')) { + const data = []; + while (/[0-9a-fA-F]/.test(this._char)) { + data.push(this._char); + this._next(); + if (this._char === undefined) { + this._unexpected(); + } + } + this._assert(this._char === ';'); + if (data.length > 0) { + const text = data.join(''); + const value = parseInt(text, 16); + this._assert(value <= 0x10FFFF, `Invalid value '&#x${text};'`, position); + return `&#x${text};`; + } + } else if (this._match('#')) { + const data = []; + while (/[0-9]/.test(this._char)) { + data.push(this._char); + this._next(); + if (this._char === undefined) { + this._unexpected(); + } + } + this._assert(this._char === ';'); + if (data.length > 0) { + const text = data.join(''); + const value = parseInt(text, 10); + this._assert(value <= 0x10FFFF, `Invalid value '&#${text};'`, position); + return `&#${text};`; + } + } else { + const name = this._name(); + this._assert(name !== null); + this._assert(this._char === ';'); + return `&${name};`; + } + } + this._unexpected(); + } + /* eslint-enable consistent-return */ + + _comment() { + const data = this._terminal('--'); + const node = this._document().createComment(data); + this._appendChild(node); + this._expect('>'); + } + + _processingInstruction() { + this._next(); + const name = this._entityName(); + let whitespace = this._char === '?' ? 
false : this._whitespace(1);
+        const position = this._position;
+        const data = this._terminal('?>');
+        if (name.toLowerCase() === 'xml') {
+            this._seek(position);
+            this._assert(name === 'xml', `'${name}' must be lower case`);
+            this._assert(this._start === this._prolog, 'Prolog must start with XML declaration', this._start);
+            this._assert(typeof this._data !== 'string', 'Invalid text declaration', this._start);
+            const obj = { version: '', encoding: '', standalone: 'no' };
+            for (const name of Object.keys(obj)) {
+                const expect = (name === 'version' && this._context.length === 0) || (name === 'encoding' && this._context.length > 0);
+                if ((whitespace || expect) && (expect ? this._expect(name) : this._match(name))) {
+                    this._whitespace(0);
+                    this._expect('=');
+                    this._whitespace(0);
+                    obj[name] = this._attributeValue();
+                    whitespace = this._whitespace(0);
+                }
+            }
+            this._expect('?>');
+            obj.encoding = obj.encoding.toLowerCase();
+            if (this._decoder.encoding && obj.encoding !== this._decoder.encoding) {
+                const position = this._position;
+                this._decoder = text.Decoder.open(this._data, obj.encoding);
+                this._seek(position);
+            }
+            if (obj.version.length > 0) {
+                const match = /^(\d)\.(\d)$/.exec(obj.version);
+                this._assert(match && match[1] === '1', `Invalid XML version '${obj.version}'`);
+                const version = Number.parseInt(match[2], 10);
+                if (version > this._version) {
+                    /* eslint-disable */
+                    this._charRegExp = /[\x01-\uD7FF\uE000-\uFFFD]/;
+                    /* eslint-enable */
+                    this._version = version;
+                }
+                this._assert(this._context.length === 0 || this._context.some((context) => context.version >= this._version));
+            }
+            this._assert(obj.standalone === 'no' || (obj.standalone === 'yes' && !this._entity && this._context.length === 0));
+            this._standalone = obj.standalone === 'yes';
+        }
+        const node = this._document().createProcessingInstruction(name, data);
+        this._appendChild(node);
+    }
+
+    _whitespace(count) {
+        const position = this._position;
+        let index = 0;
+        if (this._implicitSpace) {
+            index++;
+            this._implicitSpace = false;
+        }
+        while (this._char === ' ' || this._char === '\n' || this._char === '\r' || this._char === '\t' || (this._version > 0 && this._char === '\x85')) {
+            index++;
+            this._next();
+        }
+        if (index < count) {
+            this._seek(position);
+            this._unexpected();
+        }
+        return index > 0;
+    }
+
+    _pushResource(identifier, entity, stop) {
+        const content = this._callback(identifier);
+        this._pushBuffer(content, identifier, entity, stop);
+    }
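+
+    // Entity and DTD expansion behaves like an include stack: _pushContext saves
+    // the current (decoder, position, base, entity) frame before switching input,
+    // and _popContext restores it when the replacement text is exhausted.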
+
+    _pushBuffer(data, base, entity, stop) {
+        const signature = text.Decoder.open(data);
+        const decoder = signature.encoding === 'utf-8' ? text.Decoder.open(data, 'utf-8') : signature;
+        this._pushContext(decoder, data, base, entity, stop);
+        this._data = data;
+    }
+
+    _pushString(value, entity, stop) {
+        const decoder = text.Decoder.open(value);
+        this._pushContext(decoder, value, this._base, entity, stop);
+    }
+
+    _pushContext(decoder, data, base, entity, stop) {
+        if (this._context.some((context) => context && context.base === base && context.entity === entity)) {
+            this._assert(!entity, `Recursive entity '${entity}'`);
+            this._assert(!base, `Recursive base '${base}'`);
+        }
+        if (base.length !== 0 || entity.length !== 0) {
+            this._context.push(this._state);
+        }
+        this._stop = stop;
+        this._entity = entity;
+        this._base = base;
+        this._data = data;
+        this._decoder = decoder;
+        this._prolog = this._decoder.position;
+        this._char = '';
+        this._next();
+    }
+
+    _popContext() {
+        const entity = this._entity;
+        this._state = this._context.pop();
+        if (entity) {
+            this._expect(';');
+            this._implicitSpace = !this._context.some((context) => context.entity);
+        }
+    }
+
+    get _state() {
+        return {
+            base: this._base,
+            data: this._data,
+            decoder: this._decoder,
+            position: this._position,
+            version: this._version,
+            entity: this._entity,
+            prolog: this._prolog,
+            stop: this._stop
+        };
+    }
+
+    set _state(value) {
+        this._stop = value.stop;
+        this._base = value.base;
+        this._data = value.data;
+        this._decoder = value.decoder;
+        this._seek(value.position);
+        this._version = value.version;
+        this._entity = value.entity;
+        this._prolog = value.prolog;
+    }
+
+    _next() {
+        if (this._char === undefined) {
+            this._unexpected();
+        }
+        this._position = this._decoder.position;
+        this._char = this._decoder.decode();
+        this._implicitSpace = false;
+        if (this._parameterEntities && this._char === '%' && (this._entity || this._base)) {
+            this._resolveParameterEntityReference();
+        }
+        if (!this._characterData) {
+            if (this._char === '&' && (this._entity || this._base)) {
+                const c = this._resolveEntityReference();
+                if (c !== undefined) {
+                    this._char = c;
+                }
+            }
+        }
+        if (this._char === '\uffff' || this._char === '\ufffe' || (this._version > 0 && this._char >= '\x7f' && this._char <= '\x9f' && this._char !== '\x85')) {
+            this._unexpected();
+        }
+        if (this._char === undefined) {
+            if (!this._stop && this._context.length > 0) {
+                this._popContext();
+            }
+        }
+    }
+
+    _seek(position) {
+        this._decoder.position = position;
+        this._char = '';
+        this._next();
+    }
+
+    _expect(value) {
+        if (!this._match(value)) {
+            this._unexpected();
+        }
+        return true;
+    }
+
+    _match(value) {
+        if (this._char !== value[0]) {
+            return false;
+        }
+        if (value.length === 1) {
+            this._next();
+            return true;
+        }
+        if (this._context.length === 0) {
+            const position = this._position;
+            for (let i = 0; i < value.length; i++) {
+                if (this._char !== value[i]) {
+                    this._seek(position);
+                    return false;
+                }
+                this._next();
+            }
+            return true;
+        }
+        const context = Array.from(this._context);
+        const state = this._state;
+        for (let i = 0; i < value.length; i++) {
+            if (this._char !== value[i]) {
+                this._context = context;
+                this._state = state;
+                return false;
+            }
+            this._next();
+        }
+        return true;
+    }
+
+    _assert(value, message, position) {
+        if (value === false || value === undefined || value === null) {
+            this._error(message, position);
+        }
+    }
+
+    _error(message, position) {
+        if (position) {
+            this._parameterEntities = false;
+            this._characterData = true;
+            this._seek(position);
+        }
+        if (message) {
+            throw new xml.Error(`${message} ${this._location()}`);
+        }
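+        // No message given: fall through and report the current character instead.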
this._unexpected(); + } + + _unexpected() { + let c = this._char; + if (c === undefined) { + throw new xml.Error('Unexpected end of XML input.'); + } else if (c === '"') { + c = 'string'; + } else if ((c >= '0' && c <= '9') || c === '-') { + c = 'number'; + } else { + if (c < ' ' || c > '\x7F') { + c = c.codePointAt(0); + if (c < 0x0100) { + c = `\\x${(`0${c.toString(16)}`).slice(-2)}`; + } else if (c < 0x010000) { + c = `\\u${(`000${c.toString(16)}`).slice(-4)}`; + } else { + c = `\\u${(`00000${c.toString(16)}`).slice(-6)}`; + } + } + c = `token '${c}'`; + } + this._error(`Unexpected ${c}`); + } + + _location() { + while (typeof this._data === 'string') { + this._popContext(); + } + this._parameterEntities = false; + this._characterData = true; + let line = 1; + let column = 1; + this._decoder.position = 0; + let c; + do { + if (this._decoder.position === this._position) { + break; + } + c = this._decoder.decode(); + if (c === '\n') { + line++; + column = 1; + } else { + column++; + } + } + while (c !== undefined); + const file = this._base ? `${this._base}:` : ''; + return `at ${file}${line}:${column}.`; + } +}; + +xml.NodeList = class extends Array { + + constructor() { + super(); + } + + item(index) { + return this[index] || null; + } +}; + +xml.Node = class { + + constructor(document, nodeType) { + this._ownerDocument = document; + this._nodeType = nodeType; + this._childNodes = new xml.NodeList(); + } + + get ownerDocument() { + return this._ownerDocument; + } + + get nodeType() { + return this._nodeType; + } + + get localName() { + throw new xml.Error('Not implemented.'); + } + + get namespaceURI() { + return null; + } + + get childNodes() { + return this._childNodes; + } + + get parentNode() { + return this._parentNode; + } + + set parentNode(value) { + this._parentNode = value; + } + + get firstChild() { + return this._firstChild; + } + + set firstChild(value) { + this._firstChild = value; + } + + get lastChild() { + return this._lastChild || null; + } + + set lastChild(value) { + this._lastChild = value; + } + + get previousSibling() { + return this._previousSibling; + } + + set previousSibling(value) { + this._previousSibling = value; + } + + get nextSibling() { + return this._nextSibling; + } + + set nextSibling(value) { + this._nextSibling = value; + } + + appendChild(newChild) { + this.firstChild = this.firstChild || newChild; + newChild.previousSibling = this.lastChild; + if (newChild.previousSibling) { + newChild.previousSibling.nextSibling = newChild; + } + this.lastChild = newChild; + this.childNodes[this.childNodes.length] = newChild; + newChild.parentNode = this; + } + + lookupNamespaceURI(prefix) { + switch (prefix) { + case 'xml': + return 'http://www.w3.org/XML/1998/namespace'; + case 'xmlns': + return 'http://www.w3.org/2000/xmlns/'; + default: + return null; + } + } +}; + +xml.Element = class extends xml.Node { + + constructor(document, namespaceURI, qualifiedName) { + super(document, xml.NodeType.Element); + this._namespaces = new Map(); + this._attributes = new xml.NamedNodeMap(); + this._namespaceURI = namespaceURI; + if (namespaceURI === null) { + this._prefix = null; + this._localName = qualifiedName; + } else { + const [prefix, localName] = xml.Utility.split(qualifiedName); + this._prefix = prefix; + this._localName = localName; + } + } + + get localName() { + return this._localName; + } + + get prefix() { + return this._prefix; + } + + get namespaceURI() { + return this._namespaceURI; + } + + get attributes() { + return this._attributes; + } + + get 
textContent() { + return this.childNodes.map((node) => node.nodeType === xml.NodeType.ProcessingInstruction || node.nodeType === xml.NodeType.Comment ? '' : node.textContent).join(''); + } + + getElementsByTagName(tagName) { + const list = new xml.NodeList(); + let node = this.firstChild; + while (node) { + if (node.nodeType === xml.NodeType.Element && (tagName === '*' || tagName === (node.prefix ? `${node.prefix}:${node.localName}` : node.localName))) { + list.push(node); + } + node = node.nextSibling; + } + return list; + } + + getAttribute(name) { + const node = this.getAttributeNode(name); + return node ? node.value || '' : ''; + } + + getAttributeNode(name) { + return this.attributes.getNamedItem(name); + } + + setAttributeNode(node) { + const oldNode = this.attributes.setNamedItem(node); + if (node.namespaceURI === 'http://www.w3.org/2000/xmlns/') { + const prefix = node.prefix ? node.localName : ''; + this._namespaces.set(prefix, node.value); + } + return oldNode; + } + + lookupNamespaceURI(prefix) { + if (this._namespaces.has(prefix)) { + return this._namespaces.get(prefix); + } + if (this.parentNode) { + return this.parentNode.lookupNamespaceURI(prefix); + } + return super.lookupNamespaceURI(prefix); + } +}; + +xml.Attribute = class extends xml.Node { + + constructor(document, namespaceURI, qualifiedName) { + super(document, xml.NodeType.Attribute); + this._namespaceURI = namespaceURI; + if (namespaceURI === null) { + this._prefix = null; + this._localName = qualifiedName; + } else { + const [prefix, localName] = xml.Utility.split(qualifiedName); + this._prefix = prefix; + this._localName = localName; + } + } + + get ownerElement() { + return this._ownerElement; + } + + set ownerElement(value) { + this._ownerElement = value; + } + + get localName() { + return this._localName; + } + + get prefix() { + return this._prefix; + } + + get namespaceURI() { + return this._namespaceURI; + } + + get value() { + return this._value; + } + + set value(value) { + this._value = value; + } +}; + +xml.CharacterData = class extends xml.Node { + + constructor(document, nodeType, data) { + super(document, nodeType); + this._data = data; + } + + get data() { + return this._data; + } + + get textContent() { + return this._data; + } +}; + +xml.Text = class extends xml.CharacterData { + + constructor(document, data) { + super(document, xml.NodeType.Text, data); + } + + get localName() { + return '#text'; + } +}; + +xml.CDataSection = class extends xml.CharacterData { + + constructor(document, data) { + super(document, xml.NodeType.CDATA, data); + } +}; + +xml.Entity = class extends xml.Node { + + constructor(document, name) { + super(document, xml.NodeType.Entity); + this._name = name; + this._publicId = ''; + this._systemId = ''; + this._notationName = ''; + this._value = ''; + } + + get localName() { + return this._name; + } + + get publicId() { + return this._publicId; + } + + set publicId(value) { + this._publicId = value; + } + + get systemId() { + return this._systemId; + } + + set systemId(value) { + this._systemId = value; + } + + get notationName() { + return this._notationName; + } + + set notationName(value) { + this._notationName = value; + } + + set value(value) { + this._value = value; + } + + get value() { + return this._value; + } +}; + +xml.ProcessingInstruction = class extends xml.Node { + + constructor(document, target, data) { + super(document, xml.NodeType.ProcessingInstruction); + this._target = target; + this._data = data; + } + + get localName() { + return this._target; + } + + 
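+    // 'target' and 'data' mirror the DOM ProcessingInstruction interface,
+    // e.g. <?xml-stylesheet href="style.xsl"?> has target 'xml-stylesheet'.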
get target() {
+        return this._target;
+    }
+
+    get data() {
+        return this._data;
+    }
+};
+
+xml.Comment = class extends xml.CharacterData {
+
+    constructor(document, data) {
+        super(document, xml.NodeType.Comment, data);
+    }
+
+    get localName() {
+        return '#comment';
+    }
+};
+
+xml.Document = class extends xml.Node {
+
+    constructor() {
+        super(null, xml.NodeType.Document);
+        this._documentElement = null;
+        this._documentType = null;
+    }
+
+    get documentElement() {
+        return this._documentElement;
+    }
+
+    get documentType() {
+        return this._documentType;
+    }
+
+    appendChild(newChild) {
+        super.appendChild(newChild);
+        if (newChild.nodeType === xml.NodeType.Element) {
+            this._documentElement = newChild;
+        }
+        if (newChild.nodeType === xml.NodeType.DocumentType) {
+            this._documentType = newChild;
+        }
+    }
+
+    createElement(localName) {
+        return new xml.Element(this, null, localName);
+    }
+
+    createElementNS(namespaceURI, qualifiedName) {
+        return new xml.Element(this, namespaceURI, qualifiedName);
+    }
+
+    createAttribute(localName) {
+        return new xml.Attribute(this, null, localName);
+    }
+
+    createAttributeNS(namespaceURI, qualifiedName) {
+        return new xml.Attribute(this, namespaceURI, qualifiedName);
+    }
+
+    createTextNode(data) {
+        return new xml.Text(this, data);
+    }
+
+    createCDATASection(data) {
+        return new xml.CDataSection(this, data);
+    }
+
+    createProcessingInstruction(target, data) {
+        return new xml.ProcessingInstruction(this, target, data);
+    }
+
+    createComment(data) {
+        return new xml.Comment(this, data);
+    }
+
+    createDocumentType(qualifiedName, publicId, systemId) {
+        return new xml.DocumentType(this, qualifiedName, publicId, systemId);
+    }
+};
+
+xml.DocumentType = class extends xml.Node {
+
+    constructor(document, qualifiedName, publicId, systemId) {
+        super(document, xml.NodeType.DocumentType);
+        this._name = qualifiedName;
+        this._publicId = publicId;
+        this._systemId = systemId;
+        this._entities = new xml.NamedNodeMap();
+    }
+
+    get name() {
+        return this._name;
+    }
+
+    get publicId() {
+        return this._publicId;
+    }
+
+    get systemId() {
+        return this._systemId;
+    }
+
+    get entities() {
+        return this._entities;
+    }
+
+    appendChild(newChild) {
+        if (newChild.nodeType === xml.NodeType.Entity) {
+            this._entities.setNamedItem(newChild);
+        }
+    }
+
+    createEntity(name) {
+        return new xml.Entity(this.ownerDocument, name);
+    }
+};
+
+xml.NamedNodeMap = class extends Array {
+
+    getNamedItem(qualifiedName) {
+        for (let i = this.length - 1; i >= 0; i--) {
+            const node = this[i];
+            const key = node.prefix ? `${node.prefix}:${node.localName}` : node.localName;
+            if (qualifiedName === key) {
+                return node;
+            }
+        }
+        return null;
+    }
+
+    getNamedItemNS(namespaceURI, localName) {
+        for (let i = this.length - 1; i >= 0; i--) {
+            const node = this[i];
+            if (localName === node.localName && namespaceURI === node.namespaceURI) {
+                return node;
+            }
+        }
+        return null;
+    }
+
+    setNamedItem(node) {
+        const qualifiedName = node.prefix ? `${node.prefix}:${node.localName}` : node.localName;
+        for (let i = this.length - 1; i >= 0; i--) {
+            const entry = this[i];
+            const key = entry.prefix ? `${entry.prefix}:${entry.localName}` : entry.localName;
+            if (qualifiedName === key) {
+                const oldNode = entry;
+                this[i] = node;
+                return oldNode;
+            }
+        }
+        this.push(node);
+        return null;
+    }
+};
+
+xml.NodeType = {
+    None: 0,
+    Element: 1,
+    Attribute: 2,
+    Text: 3,
+    CDATA: 4,
+    EntityReference: 5,
+    Entity: 6,
+    ProcessingInstruction: 7,
+    Comment: 8,
+    Document: 9,
+    DocumentType: 10,
+    DocumentFragment: 11,
+    Notation: 12
+};
+
+xml.Utility = class {
+
+    static split(name) {
+        const index = name.indexOf(':');
+        if (index < 0 || index === name.length - 1) {
+            return [ null, name ];
+        }
+        const localName = name.substring(index + 1);
+        const c = localName.codePointAt(0);
+        if (localName.indexOf(':') !== -1 || (!xml.Utility.nameStartCharRegExp.test(String.fromCodePoint(c)) && (c < 0x10000 || c > 0xEFFFF))) {
+            return [ null, name ];
+        }
+        const prefix = name.substring(0, index);
+        return [ prefix, localName ];
+    }
+};
+
+xml.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'XML Error';
+    }
+};
+
+export const TextReader = xml.TextReader;
+
diff --git a/xmodel-proto.js b/xmodel-proto.js
new file mode 100644
index 00000000000..1e1069642f9
--- /dev/null
+++ b/xmodel-proto.js
@@ -0,0 +1,1652 @@
+
+import * as protobuf from './protobuf.js';
+
+const $root = protobuf.get('xmodel');
+
+$root.serial_v2 = {};
+
+$root.serial_v2.Graph = class Graph {
+
+    constructor() {
+        this.op_node = [];
+        this.graph_attr = {};
+        this.op_defs = [];
+    }
+
+    static decode(reader, length) {
+        const message = new $root.serial_v2.Graph();
+        const end = length !== undefined ? reader.position + length : reader.length;
+        while (reader.position < end) {
+            const tag = reader.uint32();
+            switch (tag >>> 3) {
+                case 1:
+                    message.graph_name = reader.string();
+                    break;
+                case 5:
+                    message.op_node.push($root.serial_v2.OPNode.decode(reader, reader.uint32()));
+                    break;
+                case 10:
+                    message.subg_root = $root.serial_v2.SubGraph.decode(reader, reader.uint32());
+                    break;
+                case 11:
+                    reader.entry(message.graph_attr, () => reader.string(), () => $root.serial_v2.AttrValue.decode(reader, reader.uint32()));
+                    break;
+                case 101:
+                    message.op_defs.push($root.serial_v2.OpDef.decode(reader, reader.uint32()));
+                    break;
+                default:
+                    reader.skipType(tag & 7);
+                    break;
+            }
+        }
+        return message;
+    }
+
+    static decodeText(reader) {
+        const message = new $root.serial_v2.Graph();
+        reader.start();
+        while (!reader.end()) {
+            const tag = reader.tag();
+            switch (tag) {
+                case "graph_name":
+                    message.graph_name = reader.string();
+                    break;
+                case "op_node":
+                    message.op_node.push($root.serial_v2.OPNode.decodeText(reader));
+                    break;
+                case "subg_root":
+                    message.subg_root = $root.serial_v2.SubGraph.decodeText(reader);
+                    break;
+                case "graph_attr":
+                    reader.entry(message.graph_attr, () => reader.string(), () => $root.serial_v2.AttrValue.decodeText(reader));
+                    break;
+                case "op_defs":
+                    message.op_defs.push($root.serial_v2.OpDef.decodeText(reader));
+                    break;
+                default:
+                    reader.field(tag, message);
+                    break;
+            }
+        }
+        return message;
+    }
+};
+
+$root.serial_v2.Graph.prototype.graph_name = "";
+$root.serial_v2.Graph.prototype.subg_root = null;
+
+$root.serial_v2.OPNode = class OPNode {
+
+    constructor() {
+        this.op_attr = {};
+        this.args = [];
+    }
+
+    static decode(reader, length) {
+        const message = new $root.serial_v2.OPNode();
+        const end = length !== undefined ?
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.op_name = reader.string(); + break; + case 2: + message.op_type = reader.string(); + break; + case 3: + reader.entry(message.op_attr, () => reader.string(), () => $root.serial_v2.AttrValue.decode(reader, reader.uint32())); + break; + case 4: + message.args.push($root.serial_v2.OpArg.decode(reader, reader.uint32())); + break; + case 5: + message.output_tensor = $root.serial_v2.Tensor.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.OPNode(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "op_name": + message.op_name = reader.string(); + break; + case "op_type": + message.op_type = reader.string(); + break; + case "op_attr": + reader.entry(message.op_attr, () => reader.string(), () => $root.serial_v2.AttrValue.decodeText(reader)); + break; + case "args": + message.args.push($root.serial_v2.OpArg.decodeText(reader)); + break; + case "output_tensor": + message.output_tensor = $root.serial_v2.Tensor.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.OPNode.prototype.op_name = ""; +$root.serial_v2.OPNode.prototype.op_type = ""; +$root.serial_v2.OPNode.prototype.output_tensor = null; + +$root.serial_v2.OpArg = class OpArg { + + constructor() { + this.arg_ops = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.OpArg(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.arg_name = reader.string(); + break; + case 2: + message.arg_ops.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.OpArg(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "arg_name": + message.arg_name = reader.string(); + break; + case "arg_ops": + reader.array(message.arg_ops, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.OpArg.prototype.arg_name = ""; + +$root.serial_v2.Tensor = class Tensor { + + constructor() { + this.tensor_dim = []; + this.tensor_attr = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.Tensor(); + const end = length !== undefined ? 
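/* length-delimited sub-message: decoding stops at reader.position + length */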
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor_name = reader.string(); + break; + case 2: + message.tensor_dim = reader.array(message.tensor_dim, () => reader.uint32(), tag); + break; + case 5: + message.data_type = reader.int32(); + break; + case 6: + message.tensor_bit_width = reader.int32(); + break; + case 10: + reader.entry(message.tensor_attr, () => reader.string(), () => $root.serial_v2.AttrValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.Tensor(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "tensor_name": + message.tensor_name = reader.string(); + break; + case "tensor_dim": + reader.array(message.tensor_dim, () => reader.uint32()); + break; + case "data_type": + message.data_type = reader.int32(); + break; + case "tensor_bit_width": + message.tensor_bit_width = reader.int32(); + break; + case "tensor_attr": + reader.entry(message.tensor_attr, () => reader.string(), () => $root.serial_v2.AttrValue.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.Tensor.prototype.tensor_name = ""; +$root.serial_v2.Tensor.prototype.data_type = 0; +$root.serial_v2.Tensor.prototype.tensor_bit_width = 0; + +$root.serial_v2.SubGraph = class SubGraph { + + constructor() { + this.op_name = []; + this.subg_attr = {}; + this.subg_child = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.SubGraph(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.subgraph_name = reader.string(); + break; + case 3: + message.op_name.push(reader.string()); + break; + case 5: + reader.entry(message.subg_attr, () => reader.string(), () => $root.serial_v2.AttrValue.decode(reader, reader.uint32())); + break; + case 10: + message.subg_child.push($root.serial_v2.SubGraph.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.SubGraph(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "subgraph_name": + message.subgraph_name = reader.string(); + break; + case "op_name": + reader.array(message.op_name, () => reader.string()); + break; + case "subg_attr": + reader.entry(message.subg_attr, () => reader.string(), () => $root.serial_v2.AttrValue.decodeText(reader)); + break; + case "subg_child": + message.subg_child.push($root.serial_v2.SubGraph.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.SubGraph.prototype.subgraph_name = ""; + +$root.serial_v2.OpDef = class OpDef { + + constructor() { + this.input_args = []; + this.attrs = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.OpDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.input_args.push($root.serial_v2.OpArgDef.decode(reader, reader.uint32())); + break; + case 3: + message.attrs.push($root.serial_v2.AttrDef.decode(reader, reader.uint32())); + break; + case 4: + message.annotation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.OpDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "input_args": + message.input_args.push($root.serial_v2.OpArgDef.decodeText(reader)); + break; + case "attrs": + message.attrs.push($root.serial_v2.AttrDef.decodeText(reader)); + break; + case "annotation": + message.annotation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.OpDef.prototype.name = ""; +$root.serial_v2.OpDef.prototype.annotation = ""; + +$root.serial_v2.AttrDef = class AttrDef { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.serial_v2.AttrDef(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 3: + message.occur_type = reader.int32(); + break; + case 4: + message.default_value = $root.serial_v2.AttrValue.decode(reader, reader.uint32()); + break; + case 6: + message.list_length = reader.int32(); + break; + case 7: + message.annotation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.AttrDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "occur_type": + message.occur_type = reader.enum($root.serial_v2.AttrDef.OccurType); + break; + case "default_value": + message.default_value = $root.serial_v2.AttrValue.decodeText(reader); + break; + case "list_length": + message.list_length = reader.int32(); + break; + case "annotation": + message.annotation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.AttrDef.prototype.name = ""; +$root.serial_v2.AttrDef.prototype.occur_type = 0; +$root.serial_v2.AttrDef.prototype.default_value = null; +$root.serial_v2.AttrDef.prototype.list_length = 0; +$root.serial_v2.AttrDef.prototype.annotation = ""; + +$root.serial_v2.AttrDef.OccurType = { + "REQUIRED": 0, + "OPTIONAL": 1 +}; + +$root.serial_v2.OpArgDef = class OpArgDef { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.serial_v2.OpArgDef(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.occur_type = reader.int32(); + break; + case 3: + message.data_type = reader.int32(); + break; + case 4: + message.annotation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.OpArgDef(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "occur_type": + message.occur_type = reader.enum($root.serial_v2.OpArgDef.OccurType); + break; + case "data_type": + message.data_type = reader.int32(); + break; + case "annotation": + message.annotation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.OpArgDef.prototype.name = ""; +$root.serial_v2.OpArgDef.prototype.occur_type = 0; +$root.serial_v2.OpArgDef.prototype.data_type = 0; +$root.serial_v2.OpArgDef.prototype.annotation = ""; + +$root.serial_v2.OpArgDef.OccurType = { + "REQUIRED": 0, + "OPTIONAL": 1, + "REPEATED": 2, + "REQUIRED_AND_REPEATED": 3 +}; + +$root.serial_v2.AttrValue = class AttrValue { + + constructor() { + } + + get value() { + $root.serial_v2.AttrValue.valueSet = $root.serial_v2.AttrValue.valueSet || new Set([ "bool_value", "int32_value", "uint32_value", "int64_value", "uint64_value", "float_value", "double_value", "string_value", "bytes_value", "bool_vec_value", "int32_vec_value", "uint32_vec_value", "int64_vec_value", "uint64_vec_value", "float_vec_value", "double_vec_value", "string_vec_value", "bytes_vec_value", "map_string_2_int32_value", "map_string_2_uint32_value", "map_string_2_int64_value", "map_string_2_uint64_value", "map_string_2_string_value", "map_string_2_bytes_value", "map_string_2_int32_vec_value", "map_string_2_uint32_vec_value", "map_string_2_int64_vec_value", "map_string_2_uint64_vec_value", "map_string_2_string_vec_value"]); + return Object.keys(this).find((key) => $root.serial_v2.AttrValue.valueSet.has(key) && this[key] != null); + } + + static decode(reader, length) { + const message = new $root.serial_v2.AttrValue(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 4: + message.bool_value = reader.bool(); + break; + case 5: + message.int32_value = reader.int32(); + break; + case 6: + message.uint32_value = reader.uint32(); + break; + case 7: + message.int64_value = reader.int64(); + break; + case 8: + message.uint64_value = reader.uint64(); + break; + case 9: + message.float_value = reader.float(); + break; + case 10: + message.double_value = reader.double(); + break; + case 11: + message.string_value = reader.string(); + break; + case 12: + message.bytes_value = $root.serial_v2.Bytes.decode(reader, reader.uint32()); + break; + case 13: + message.bool_vec_value = $root.serial_v2.BoolVec.decode(reader, reader.uint32()); + break; + case 14: + message.int32_vec_value = $root.serial_v2.Int32Vec.decode(reader, reader.uint32()); + break; + case 15: + message.uint32_vec_value = $root.serial_v2.Uint32Vec.decode(reader, reader.uint32()); + break; + case 16: + message.int64_vec_value = $root.serial_v2.Int64Vec.decode(reader, reader.uint32()); + break; + case 17: + message.uint64_vec_value = $root.serial_v2.Uint64Vec.decode(reader, reader.uint32()); + break; + case 18: + message.float_vec_value = $root.serial_v2.FloatVec.decode(reader, reader.uint32()); + break; + case 19: + message.double_vec_value = $root.serial_v2.DoubleVec.decode(reader, reader.uint32()); + break; + case 20: + message.string_vec_value = $root.serial_v2.StringVec.decode(reader, reader.uint32()); + break; + case 21: + message.bytes_vec_value = $root.serial_v2.BytesVec.decode(reader, reader.uint32()); + break; + case 22: + message.map_string_2_int32_value = $root.serial_v2.MapString2Int32.decode(reader, reader.uint32()); + break; + case 23: + message.map_string_2_uint32_value = $root.serial_v2.MapString2Uint32.decode(reader, reader.uint32()); + break; + case 24: + message.map_string_2_int64_value = $root.serial_v2.MapString2Int64.decode(reader, reader.uint32()); + break; + case 25: + message.map_string_2_uint64_value = $root.serial_v2.MapString2Uint64.decode(reader, reader.uint32()); + break; + case 26: + message.map_string_2_string_value = $root.serial_v2.MapString2String.decode(reader, reader.uint32()); + break; + case 27: + message.map_string_2_bytes_value = $root.serial_v2.MapString2Bytes.decode(reader, reader.uint32()); + break; + case 28: + message.map_string_2_int32_vec_value = $root.serial_v2.MapString2Int32Vec.decode(reader, reader.uint32()); + break; + case 29: + message.map_string_2_uint32_vec_value = $root.serial_v2.MapString2Uint32Vec.decode(reader, reader.uint32()); + break; + case 30: + message.map_string_2_int64_vec_value = $root.serial_v2.MapString2Int64Vec.decode(reader, reader.uint32()); + break; + case 31: + message.map_string_2_uint64_vec_value = $root.serial_v2.MapString2Uint64Vec.decode(reader, reader.uint32()); + break; + case 32: + message.map_string_2_string_vec_value = $root.serial_v2.MapString2StringVec.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.AttrValue(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "bool_value": + message.bool_value = reader.bool(); + break; + case "int32_value": + message.int32_value = reader.int32(); + break; + case "uint32_value": + message.uint32_value = reader.uint32(); + break; + case "int64_value": + 
message.int64_value = reader.int64(); + break; + case "uint64_value": + message.uint64_value = reader.uint64(); + break; + case "float_value": + message.float_value = reader.float(); + break; + case "double_value": + message.double_value = reader.double(); + break; + case "string_value": + message.string_value = reader.string(); + break; + case "bytes_value": + message.bytes_value = $root.serial_v2.Bytes.decodeText(reader); + break; + case "bool_vec_value": + message.bool_vec_value = $root.serial_v2.BoolVec.decodeText(reader); + break; + case "int32_vec_value": + message.int32_vec_value = $root.serial_v2.Int32Vec.decodeText(reader); + break; + case "uint32_vec_value": + message.uint32_vec_value = $root.serial_v2.Uint32Vec.decodeText(reader); + break; + case "int64_vec_value": + message.int64_vec_value = $root.serial_v2.Int64Vec.decodeText(reader); + break; + case "uint64_vec_value": + message.uint64_vec_value = $root.serial_v2.Uint64Vec.decodeText(reader); + break; + case "float_vec_value": + message.float_vec_value = $root.serial_v2.FloatVec.decodeText(reader); + break; + case "double_vec_value": + message.double_vec_value = $root.serial_v2.DoubleVec.decodeText(reader); + break; + case "string_vec_value": + message.string_vec_value = $root.serial_v2.StringVec.decodeText(reader); + break; + case "bytes_vec_value": + message.bytes_vec_value = $root.serial_v2.BytesVec.decodeText(reader); + break; + case "map_string_2_int32_value": + message.map_string_2_int32_value = $root.serial_v2.MapString2Int32.decodeText(reader); + break; + case "map_string_2_uint32_value": + message.map_string_2_uint32_value = $root.serial_v2.MapString2Uint32.decodeText(reader); + break; + case "map_string_2_int64_value": + message.map_string_2_int64_value = $root.serial_v2.MapString2Int64.decodeText(reader); + break; + case "map_string_2_uint64_value": + message.map_string_2_uint64_value = $root.serial_v2.MapString2Uint64.decodeText(reader); + break; + case "map_string_2_string_value": + message.map_string_2_string_value = $root.serial_v2.MapString2String.decodeText(reader); + break; + case "map_string_2_bytes_value": + message.map_string_2_bytes_value = $root.serial_v2.MapString2Bytes.decodeText(reader); + break; + case "map_string_2_int32_vec_value": + message.map_string_2_int32_vec_value = $root.serial_v2.MapString2Int32Vec.decodeText(reader); + break; + case "map_string_2_uint32_vec_value": + message.map_string_2_uint32_vec_value = $root.serial_v2.MapString2Uint32Vec.decodeText(reader); + break; + case "map_string_2_int64_vec_value": + message.map_string_2_int64_vec_value = $root.serial_v2.MapString2Int64Vec.decodeText(reader); + break; + case "map_string_2_uint64_vec_value": + message.map_string_2_uint64_vec_value = $root.serial_v2.MapString2Uint64Vec.decodeText(reader); + break; + case "map_string_2_string_vec_value": + message.map_string_2_string_vec_value = $root.serial_v2.MapString2StringVec.decodeText(reader); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.Bytes = class Bytes { + + constructor() { + } + + static decode(reader, length) { + const message = new $root.serial_v2.Bytes(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.Bytes(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + message.value = reader.bytes(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.Bytes.prototype.value = new Uint8Array([]); + +$root.serial_v2.BoolVec = class BoolVec { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.BoolVec(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.array(message.value, () => reader.bool(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.BoolVec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.bool()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.Int32Vec = class Int32Vec { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.Int32Vec(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.array(message.value, () => reader.int32(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.Int32Vec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.int32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.Uint32Vec = class Uint32Vec { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.Uint32Vec(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.array(message.value, () => reader.uint32(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.Uint32Vec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.uint32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.Int64Vec = class Int64Vec { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.Int64Vec(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.array(message.value, () => reader.int64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.Int64Vec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.Uint64Vec = class Uint64Vec { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.Uint64Vec(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.array(message.value, () => reader.uint64(), tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.Uint64Vec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.FloatVec = class FloatVec { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.FloatVec(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.floats(message.value, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.FloatVec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.float()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.DoubleVec = class DoubleVec { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.DoubleVec(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.doubles(message.value, tag); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.DoubleVec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.double()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.StringVec = class StringVec { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.StringVec(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.StringVec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.array(message.value, () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.BytesVec = class BytesVec { + + constructor() { + this.value = []; + } + + static decode(reader, length) { + const message = new $root.serial_v2.BytesVec(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value.push($root.serial_v2.Bytes.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.BytesVec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + message.value.push($root.serial_v2.Bytes.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2Int32 = class MapString2Int32 { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2Int32(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => reader.int32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2Int32(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => reader.int32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2Uint32 = class MapString2Uint32 { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2Uint32(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2Uint32(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => reader.uint32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2Int64 = class MapString2Int64 { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2Int64(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2Int64(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2Uint64 = class MapString2Uint64 { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2Uint64(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2Uint64(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2Bytes = class MapString2Bytes { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2Bytes(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.Bytes.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2Bytes(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.Bytes.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2String = class MapString2String { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2String(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2String(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2Int32Vec = class MapString2Int32Vec { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2Int32Vec(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.Int32Vec.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2Int32Vec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.Int32Vec.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2Uint32Vec = class MapString2Uint32Vec { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2Uint32Vec(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.Uint32Vec.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2Uint32Vec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.Uint32Vec.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2Int64Vec = class MapString2Int64Vec { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2Int64Vec(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.Int64Vec.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2Int64Vec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.Int64Vec.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2Uint64Vec = class MapString2Uint64Vec { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2Uint64Vec(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.Uint64Vec.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2Uint64Vec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.Uint64Vec.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2BytesVec = class MapString2BytesVec { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2BytesVec(); + const end = length !== undefined ? reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.BytesVec.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2BytesVec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.BytesVec.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; + +$root.serial_v2.MapString2StringVec = class MapString2StringVec { + + constructor() { + this.value = {}; + } + + static decode(reader, length) { + const message = new $root.serial_v2.MapString2StringVec(); + const end = length !== undefined ? 
reader.position + length : reader.length; + while (reader.position < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.StringVec.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + } + + static decodeText(reader) { + const message = new $root.serial_v2.MapString2StringVec(); + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + switch (tag) { + case "value": + reader.entry(message.value, () => reader.string(), () => $root.serial_v2.StringVec.decodeText(reader)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + } +}; diff --git a/xmodel.js b/xmodel.js new file mode 100644 index 00000000000..c2bcddfde3e --- /dev/null +++ b/xmodel.js @@ -0,0 +1,387 @@ + +import * as protobuf from './protobuf.js'; + +const xmodel = {}; + +xmodel.ModelFactory = class { + + match(context) { + const tags = context.tags('pb'); + if (tags.get(5) === 2) { + return 'xmodel.pb'; + } + return undefined; + } + + async open(context) { + await context.require('./xmodel-proto'); + let graph = null; + try { + xmodel.proto = protobuf.get('xmodel').serial_v2; + const stream = context.stream; + const reader = protobuf.BinaryReader.open(stream); + graph = xmodel.proto.Graph.decode(reader); + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new xmodel.Error(`File format is not serial_v2.Graph (${message.replace(/\.$/, '')}).`); + } + return new xmodel.Model(graph); + } +}; + +xmodel.Model = class { + + constructor(graph) { + this.name = graph.graph_name || ''; + this.format = 'xmodel'; + this.producer = graph && graph.graph_attr && graph.graph_attr.origin && graph.graph_attr.origin.string_value ? graph.graph_attr.origin.string_value : ''; + this.graphs = [ new xmodel.Graph(graph) ]; + } +}; + +xmodel.Graph = class { + + constructor(graph) { + const metadata = new xmodel.Metadata(graph.op_defs); + this.inputs = []; + this.outputs = []; + const counts = new Map(); + for (const op_node of graph.op_node) { + for (const arg of op_node.args) { + for (const arg_op of arg.arg_ops) { + counts.set(arg_op, counts.has(arg_op) ? 
counts.get(arg_op) + 1 : 1); + } + } + } + const values = new Map(); + values.map = (name, node, initializer) => { + if (!values.has(name)) { + values.set(name, new xmodel.Value(name, node, initializer)); + } + return values.get(name); + }; + const nodes = []; + for (const node of graph.op_node) { + if (node.args.length === 0) { + if (node.op_type === 'data' || node.op_type === 'data-fix') { + const value = values.map(node.op_name, node); + this.inputs.push(new xmodel.Argument(node.op_name, [ value ])); + continue; + } + } + if (node.args.length === 0 && counts.get(node.op_name) === 1) { + if (node.op_type === 'const' || node.op_type === 'const-fix') { + values.map(node.op_name, node, true); + continue; + } + } + values.map(node.op_name, node); + nodes.push(node); + } + this.nodes = nodes.map((node) => new xmodel.Node(metadata, node, values)); + } +}; + +xmodel.Argument = class { + + constructor(name, value) { + this.name = name; + this.value = value; + } +}; + +xmodel.Value = class { + + constructor(name, node, initializer) { + if (typeof name !== 'string') { + throw new xmodel.Error(`Invalid value identifier '${JSON.stringify(name)}'.`); + } + this.name = name; + if (node) { + const tensor = node.output_tensor; + if (tensor && tensor.tensor_attr && tensor.data_type) { + if (initializer) { + this.initializer = new xmodel.Tensor(node); + this.type = this.initializer.type; + } else { + this.type = new xmodel.TensorType(tensor); + } + } + } + } +}; + +xmodel.Node = class { + + constructor(metadata, op_node, values) { + this.name = op_node.op_name || ''; + this.type = metadata.type(op_node.op_type); + this.inputs = []; + this.outputs = []; + this.attributes = []; + this.chain = []; + if (op_node.op_attr) { + for (const [name, obj] of Object.entries(op_node.op_attr)) { + if (name === 'device') { + this.device = obj.string_value; + continue; + } + if (name === 'workload') { + continue; + } + if (name.startsWith('quant_in_') || name.startsWith('quant_out_')) { + continue; + } + const value = xmodel.Utility.attribute(obj); + if (name === 'nonlinear' && value.value && value.value !== 'NONE' && value.value !== 0) { + let activation = value.value; + if (typeof activation === 'string') { + activation = activation.toLowerCase(); + } else if (Number.isInteger(activation) && activation < 5) { + activation = [ 'none', 'relu', 'prelu', 'leakyrelu', 'relu6' ][activation]; + } else { + activation = JSON.stringify(activation); + } + this.chain.push(new xmodel.Node(metadata, { op_type: activation }, values)); + continue; + } + const attribute = new xmodel.Attribute(metadata.attribute(this.type, name), name, value); + this.attributes.push(attribute); + } + } + if (op_node.args) { + for (const input of op_node.args) { + const args = input.arg_ops.map((arg_op) => values.map(arg_op)); + const argument = new xmodel.Argument(input.arg_name, args); + this.inputs.push(argument); + } + } + if (op_node.op_name) { + const argument = new xmodel.Argument('output', [ values.map(op_node.op_name) ]); + this.outputs.push(argument); + } + } +}; + +xmodel.Attribute = class { + + constructor(metadata, name, attribute) { + this.name = name; + this.type = attribute.type; + this.value = attribute.value; + if (metadata) { + if (metadata.default !== undefined) { + if (metadata.default === this.value) { + this.visible = false; + } + if (Array.isArray(metadata.default) && Array.isArray(this.value) && + metadata.default.length === this.value.length && metadata.default.every((value, index) => value === this.value[index])) { + 
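// an array-valued default hides the attribute only when it matches the value element for element +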
this.visible = false; + } + } + } + } +}; + +xmodel.TensorType = class { + + constructor(tensor) { + switch (tensor.data_type) { + case 0: this.dataType = 'int'; break; + case 1: this.dataType = 'uint'; break; + case 2: this.dataType = 'xint'; break; + case 4: this.dataType = 'float'; break; + case 3: this.dataType = 'xuint'; break; + default: throw new xmodel.Error(`Unsupported data type '${tensor.data_type}'.`); + } + this.dataType += tensor.tensor_bit_width.toString(); + this.shape = new xmodel.TensorShape(tensor.tensor_dim); + if (tensor.tensor_attr) { + const attr = {}; + for (const [key, obj] of Object.entries(tensor.tensor_attr)) { + const value = obj[obj.value]; + if (key.startsWith('quant_')) { + continue; + } + attr[key] = value; + const denotation = []; + if (attr.fix_point !== undefined) { + denotation.push(`${attr.fix_point}.`); + } + if (attr.round_mode !== undefined) { + denotation.push(attr.round_mode.toString()); + } + if (denotation.length > 0) { + this.denotation = denotation.join(' '); + } + } + } + } + + toString() { + return (this.dataType || '?') + this.shape.toString(); + } +}; + +xmodel.TensorShape = class { + + constructor(dimensions) { + this.dimensions = Array.from(dimensions); + } + + toString() { + if (!this.dimensions || this.dimensions.length == 0) { + return ''; + } + return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`; + } +}; + +xmodel.Tensor = class { + + constructor(node) { + this.type = new xmodel.TensorType(node.output_tensor); + this.category = node.op_type; + if (node.op_attr && node.op_attr.data) { + const data = node.op_attr.data; + if (data.bytes_value && data.bytes_value.value) { + this.encoding = '<'; + this.values = data.bytes_value.value; + } + } + } +}; + +xmodel.Utility = class { + + static attribute(attr_value) { + const key = attr_value.value; + const type = key.replace(/_value$/, ''); + const value = attr_value[attr_value.value]; + switch (type) { + case 'bool': return { type: 'boolean', value: value }; + case 'int32': return { type: 'int32', value: value }; + case 'int32_vec': return { type: 'int32[]', value: value.value }; + case 'uint32': return { type: 'uint32', value: value }; + case 'uint32_vec': return { type: 'uint32[]', value: value.value }; + case 'int64': return { type: 'int64', value: value }; + case 'uint64': return { type: 'uint64', value: value }; + case 'float': return { type: 'float32', value: value }; + case 'float_vec': return { type: 'float32[]', value: value.value }; + case 'double': return { type: 'float64', value: value }; + case 'string': return { type: 'string', value: value }; + case 'string_vec': return { type: 'string[]', value: value.value }; + case 'bytes': return { type: 'byte[]', value: value.value }; + case 'map_string_2_int32': return { type: 'map', value: value.value }; + default: throw new xmodel.Error(`Unsupported attribute type '${type}'.`); + } + } +}; + +xmodel.Metadata = class { + + constructor(op_defs) { + this._types = new Map(); + this._attributes = new Map(); + const categories = [ + [ 'avgpool2d', 'Pool' ], + [ 'batchnorm', 'Normalization' ], + [ 'celu', 'Activation' ], + [ 'concat-fix', 'Tensor' ], + [ 'concat', 'Tensor' ], + [ 'conv2d-fix', 'Layer' ], + [ 'conv2d', 'Layer' ], + [ 'depthwise-conv2d-fix', 'Layer' ], + [ 'depthwise-conv2d', 'Layer' ], + [ 'elu', 'Activation' ], + [ 'fix', 'Quantization' ], + [ 'fix2float', 'Quantization' ], + [ 'flatten', 'Shape' ], + [ 'float2fix', 'Quantization' ], + [ 'gelu', 'Activation' ], + [ 'hard-sigmoid', 'Activation' ], + 
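// ops with a '-fix' suffix appear to be the fixed-point (quantized) variants; they share the category of their float counterparts +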
[ 'hard-sigmoid-fix', 'Activation' ], + [ 'hard-swish', 'Activation' ], + [ 'hard-tanh', 'Activation' ], + [ 'identity', 'Control' ], + [ 'inner-product', 'Layer' ], + [ 'l2_normalize', 'Normalization' ], + [ 'leaky-relu', 'Activation' ], + [ 'leakyrelu', 'Activation' ], + [ 'maxpool2d', 'Pool' ], + [ 'pool-fix', 'Pool' ], + [ 'relu', 'Activation' ], + [ 'relu6', 'Activation' ], + [ 'reshape-fix', 'Shape' ], + [ 'reshape', 'Shape' ], + [ 'scale', 'Layer' ], + [ 'selu', 'Activation' ], + [ 'shape', 'Shape' ], + [ 'sigmoid', 'Activation' ], + [ 'softmax', 'Activation' ], + [ 'squeeze', 'Transform' ], + [ 'stack', 'Tensor' ], + [ 'strided_slice', 'Tensor' ], + [ 'swish', 'Activation' ], + [ 'tanh', 'Activation' ], + [ 'threshold', 'Quantization' ], + [ 'transpose', 'Tensor' ], + [ 'transposed-conv2d', 'Layer' ], + [ 'transposed-conv2d-fix', 'Layer' ], + [ 'transposed-depthwise-conv2d', 'Layer' ], + [ 'transposed-depthwise-conv2d-fix', 'Layer' ], + [ 'upsample-fix', 'Data' ], + ]; + this._types = new Map(categories.map(([name, category]) => [ name, { name: name, category: category } ])); + for (const op_def of op_defs) { + const type = this._types.get(op_def.name) || { name: op_def.name }; + if (op_def.annotation) { + type.description = op_def.annotation; + } + type.inputs = op_def.input_args.map((input_arg) => { + const input = {}; + input.name = input_arg.name; + if (input_arg.annotation) { + input.description = input_arg.annotation; + } + return input; + }); + type.attributes = op_def.attrs.map((attr) => { + const attribute = {}; + attribute.name = attr.name; + attribute.default = xmodel.Utility.attribute(attr.default_value).value; + if (attr.annotation) { + attribute.description = attr.annotation; + } + return attribute; + }); + for (const attribute of type.attributes) { + this._attributes.set(`${type.name}:${attribute.name}`, attribute); + } + this._types.set(type.name, type); + } + } + + type(name) { + if (!this._types.has(name)) { + this._types.set(name, { name: name }); + } + return this._types.get(name); + } + + attribute(type, name) { + const key = `${type}:${name}`; + return this._attributes.get(key); + } +}; + +xmodel.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading xmodel.'; + } +}; + +export const ModelFactory = xmodel.ModelFactory; + diff --git a/zip.js b/zip.js new file mode 100644 index 00000000000..02e4e5b02bf --- /dev/null +++ b/zip.js @@ -0,0 +1,947 @@ + +const zip = {}; +const gzip = {}; +const zlib = {}; + +zip.Archive = class { + + static async import() { + if (typeof process === 'object' && typeof process.versions == 'object' && typeof process.versions.node !== 'undefined') { + zip.zlib = await import('zlib'); + } + } + + static open(data, format) { + const stream = data instanceof Uint8Array ? new zip.BinaryReader(data) : data; + if (stream && stream.length > 2) { + const buffer = stream.peek(Math.min(512, stream.length)); + if (buffer.length >= 512) { + // Reject tar with Zip content + const sum = buffer.map((value, index) => (index >= 148 && index < 156) ? 
32 : value).reduce((a, b) => a + b, 0); + const checksum = parseInt(Array.from(buffer.slice(148, 156)).map((c) => String.fromCharCode(c)).join('').split('\0').shift(), 8); + if (!isNaN(checksum) && sum === checksum) { + return null; + } + } + if ((!format || format === 'zlib') && buffer[0] === 0x78) { // zlib + const check = (buffer[0] << 8) + buffer[1]; + if (check % 31 === 0) { + return new zlib.Archive(stream); + } + } + if ((!format || format === 'gzip') && buffer.length > 18 && buffer[0] === 0x1f && buffer[1] === 0x8b) { // gzip + return new gzip.Archive(stream); + } + if (!format || format === 'zip') { + const search = buffer[0] === 0x50 && buffer[1] === 0x4B; + const location = stream.position; + const seek = (signature, size) => { + signature = Array.from(signature, (c) => c.charCodeAt(0)); + let position = stream.length; + do { + position = Math.max(0, position - 66000); + stream.seek(position); + const length = Math.min(stream.length - position, 66000 + size); + const buffer = stream.read(length); + for (let i = buffer.length - size; i >= 0; i--) { + if (signature[0] === buffer[i] && signature[1] === buffer[i + 1] && + signature[2] === buffer[i + 2] && signature[3] === buffer[i + 3]) { + stream.seek(position + i); + return new zip.BinaryReader(buffer.subarray(i, i + size)); + } + } + if (!search) { + break; + } + } + while (position > 0); + return null; + }; + const read = (signature, size) => { + if ((stream.position - size) > 0) { + stream.skip(-size); + signature = Array.from(signature, (c) => c.charCodeAt(0)); + const buffer = stream.peek(size); + if (buffer[0] === signature[0] && buffer[1] === signature[1] && buffer[2] === signature[2] && buffer[3] === signature[3]) { + return new zip.BinaryReader(buffer); + } + } + return null; + }; + const header = {}; + let position = -1; + // scan backwards for the end of central directory record + let reader = seek('PK\x05\x06', 22); + if (reader) { + position = stream.position; + reader.skip(4); + header.disk = reader.uint16(); + header.startDisk = reader.uint16(); + header.diskRecords = reader.uint16(); + header.totalRecords = reader.uint16(); + header.size = reader.uint32(); + header.offset = reader.uint32(); + header.commentLength = reader.uint16(); + reader = null; + if (read('PK\x06\x07', 20)) { + reader = read('PK\x06\x06', 56); + if (!reader) { + stream.seek(location); + throw new zip.Error('Zip64 end of central directory not found.'); + } + } + } else { + reader = seek('PK\x06\x06', 56); + if (!reader) { + stream.seek(location); + if (search) { + throw new zip.Error('Zip end of central directory not found.'); + } + return null; + } + } + // parse the Zip64 end of central directory record + if (reader) { + position = stream.position; + reader.skip(4); + header.recordSize = reader.uint64(); + header.version = reader.uint16(); + header.minVersion = reader.uint16(); + header.disks = reader.uint32(); + header.startDisk = reader.uint32(); + header.diskRecords = reader.uint64(); + header.totalRecords = reader.uint64(); + header.size = reader.uint64(); + header.offset = reader.uint64(); + if (header.offset === undefined) { + stream.seek(location); + throw new zip.Error('Zip 64-bit central directory offset not supported.'); + } + } + position -= header.size; + if (position < 0 || position > stream.length) { + stream.seek(location); + throw new zip.Error('Invalid Zip central directory size.'); + } + if (position < header.offset) { + stream.seek(location); + throw new zip.Error('Invalid Zip central directory offset.'); + } + stream.seek(position); + position -= header.offset; + const archive = new zip.Archive(stream, position); +
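// restore the caller's stream position before returning the archive +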
stream.seek(location); + return archive; + } + } + return null; + } + + constructor(stream, offset) { + offset = offset || 0; + this._entries = new Map(); + const headers = []; + const signature = Array.from('PK\x01\x02', (c) => c.charCodeAt(0)); + while (stream.position + 4 < stream.length && stream.read(4).every((value, index) => value === signature[index])) { + const header = {}; + const reader = new zip.BinaryReader(stream.read(42)); + reader.uint16(); // version made by + reader.skip(2); // version needed to extract + const flags = reader.uint16(); + if ((flags & 1) == 1) { + throw new zip.Error('Encrypted Zip entries not supported.'); + } + header.encoding = flags & 0x800 ? 'utf-8' : 'ascii'; + header.compressionMethod = reader.uint16(); + header.date = reader.uint32(); // date + header.crc32 = reader.uint32(); // crc32 + header.compressedSize = reader.uint32(); + header.size = reader.uint32(); + header.nameLength = reader.uint16(); // file name length + const extraDataLength = reader.uint16(); + const commentLength = reader.uint16(); + header.disk = reader.uint16(); // disk number start + reader.uint16(); // internal file attributes + reader.uint32(); // external file attributes + header.localHeaderOffset = reader.uint32(); + const nameBuffer = stream.read(header.nameLength); + const decoder = new TextDecoder(header.encoding); + header.name = decoder.decode(nameBuffer); + const extraData = stream.read(extraDataLength); + if (extraData.length > 0) { + const reader = new zip.BinaryReader(extraData); + while (reader.position < reader.length) { + const type = reader.uint16(); + const length = reader.uint16(); + switch (type) { + case 0x0001: + if (header.size === 0xffffffff) { + header.size = reader.uint64(); + if (header.size === undefined) { + throw new zip.Error('Zip 64-bit size not supported.'); + } + } + if (header.compressedSize === 0xffffffff) { + header.compressedSize = reader.uint64(); + if (header.compressedSize === undefined) { + throw new zip.Error('Zip 64-bit compressed size not supported.'); + } + } + if (header.localHeaderOffset === 0xffffffff) { + header.localHeaderOffset = reader.uint64(); + if (header.localHeaderOffset === undefined) { + throw new zip.Error('Zip 64-bit offset not supported.'); + } + } + if (header.disk === 0xffff) { + header.disk = reader.uint32(); + } + break; + default: + reader.skip(length); + break; + } + } + } + stream.read(commentLength); // comment + headers.push(header); + } + for (const header of headers) { + if (header.size === 0 && header.name.endsWith('/')) { + continue; + } + const entry = new zip.Entry(stream, header, offset); + this._entries.set(entry.name, entry.stream); + } + } + + get entries() { + return this._entries; + } +}; + +zip.Entry = class { + + constructor(stream, header, offset) { + offset = offset || 0; + this._name = header.name; + stream.seek(offset + header.localHeaderOffset); + const signature = Array.from('PK\x03\x04', (c) => c.charCodeAt(0)); + if (stream.position + 4 > stream.length || !stream.read(4).every((value, index) => value === signature[index])) { + this._stream = new zip.ErrorStream(header.size, 'Invalid Zip local file header signature.'); + } else { + const reader = new zip.BinaryReader(stream.read(26)); + reader.skip(22); + header.nameLength = reader.uint16(); + const extraDataLength = reader.uint16(); + header.nameBuffer = stream.read(header.nameLength); + stream.skip(extraDataLength); + const decoder = new TextDecoder(header.encoding); + this._name = decoder.decode(header.nameBuffer); + this._stream = 
stream.stream(header.compressedSize); + switch (header.compressionMethod) { + case 0: { // stored + if (header.size !== header.compressedSize) { + this._stream = new zip.ErrorStream(header.size, 'Invalid compression size.'); + } + break; + } + case 8: { // deflate + this._stream = new zip.InflaterStream(this._stream, header.size); + break; + } + default: { + this._stream = new zip.ErrorStream(header.size, 'Invalid compression method.'); + } + } + } + } + + get name() { + return this._name; + } + + get stream() { + return this._stream; + } +}; + +zip.Inflater = class { + + inflateRaw(data, length) { + let buffer = null; + if (zip.zlib) { + buffer = zip.zlib.inflateRawSync(data); + } else { + const reader = new zip.BitReader(data); + const writer = length === undefined ? new zip.BlockWriter() : new zip.BufferWriter(length); + if (!zip.Inflater._staticLengthTree) { + zip.Inflater._codeLengths = new Uint8Array(19); + zip.Inflater._codeOrder = [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ]; + zip.Inflater._lengthBase = [ 24, 32, 40, 48, 56, 64, 72, 80, 89, 105, 121, 137, 154, 186, 218, 250, 283, 347, 411, 475, 540, 668, 796, 924, 1053, 1309, 1565, 1821, 2064, 7992, 7992, 7992 ]; + zip.Inflater._distanceBase = [ 16, 32, 48, 64, 81, 113, 146, 210, 275, 403, 532, 788, 1045, 1557, 2070, 3094, 4119, 6167, 8216, 12312, 16409, 24601, 32794, 49178, 65563, 98331, 131100, 196636, 262173, 393245, 1048560, 1048560 ]; + } + let type; + do { + type = reader.bits(3); + switch (type >>> 1) { + case 0: { // uncompressed block + this._copyUncompressedBlock(reader, writer); + break; + } + case 1: { // block with fixed huffman trees + if (!zip.Inflater._staticLengthTree) { + zip.Inflater._staticLengthTree = zip.HuffmanTree.create(new Uint8Array([].concat.apply([], [[144, 8], [112, 9], [24, 7], [8, 8]].map((x) => [...Array(x[0])].map(() => x[1]))))); + zip.Inflater._staticDistanceTree = zip.HuffmanTree.create(new Uint8Array([...Array(32)].map(() => 5))); + } + this._lengthTree = zip.Inflater._staticLengthTree; + this._distanceTree = zip.Inflater._staticDistanceTree; + this._inflateBlock(reader, writer); + break; + } + case 2: { // block with dynamic huffman trees + this._decodeTrees(reader); + this._inflateBlock(reader, writer); + break; + } + default: { + throw new zip.Error('Unsupported block type.'); + } + } + } while ((type & 1) == 0); + if (length !== undefined && length !== writer.length) { + throw new zip.Error('Invalid uncompressed size.'); + } + buffer = writer.toBuffer(); + } + if (length !== undefined && length !== buffer.length) { + throw new zip.Error('Invalid uncompressed size.'); + } + return buffer; + } + + _copyUncompressedBlock(reader, writer) { + const length = reader.uint16(); + const inverseLength = reader.uint16(); + if (length !== (~inverseLength & 0xffff)) { + throw new zip.Error('Invalid uncompressed block length.'); + } + writer.write(reader.read(length)); + } + + _decodeTrees(reader) { + const hlit = reader.bits(5) + 257; + const hdist = reader.bits(5) + 1; + const hclen = reader.bits(4) + 4; + const codeLengths = zip.Inflater._codeLengths; + for (let i = 0; i < codeLengths.length; i++) { + codeLengths[i] = 0; + } + const codeOrder = zip.Inflater._codeOrder; + for (let i = 0; i < hclen; i++) { + codeLengths[codeOrder[i]] = reader.bits(3); + } + const codeTree = zip.HuffmanTree.create(codeLengths); + const codeMask = codeTree.length - 1; + const lengths = new Uint8Array(hlit + hdist); + let value = 0; + let length = 0; + for (let i = 0; i < hlit + hdist;) { +
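// code lengths are run-length encoded per RFC 1951: symbol 16 repeats the previous length, 17 and 18 emit runs of zeros +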
const code = codeTree[reader.bits16() & codeMask]; + reader.position += code & 0x0f; + const literal = code >>> 4; + switch (literal) { + case 16: length = reader.bits(2) + 3; break; + case 17: length = reader.bits(3) + 3; value = 0; break; + case 18: length = reader.bits(7) + 11; value = 0; break; + default: length = 1; value = literal; break; + } + for (; length > 0; length--) { + lengths[i++] = value; + } + } + this._lengthTree = zip.HuffmanTree.create(lengths.subarray(0, hlit)); + this._distanceTree = zip.HuffmanTree.create(lengths.subarray(hlit, hlit + hdist)); + } + + _inflateBlock(reader, writer) { + const lengthTree = this._lengthTree; + const distanceTree = this._distanceTree; + const lengthMask = lengthTree.length - 1; + const distanceMask = distanceTree.length - 1; + const buffer = writer.buffer; + const threshold = writer.threshold !== undefined ? writer.threshold : writer.length; + let position = writer.position; + for (;;) { + if (position > threshold) { + position = writer.push(position); + } + const code = lengthTree[reader.bits16() & lengthMask]; + reader.position += code & 0x0f; + const literal = code >>> 4; + if (literal < 256) { + buffer[position++] = literal; + } else if (literal === 256) { + writer.push(position); + return; + } else { + let length = literal - 254; + if (literal > 264) { + const lengthBase = zip.Inflater._lengthBase[literal - 257]; + length = (lengthBase >>> 3) + reader.bits(lengthBase & 0x07); + } + const code = distanceTree[reader.bits16() & distanceMask]; + reader.position += code & 0x0f; + const distanceBase = zip.Inflater._distanceBase[code >>> 4]; + const bits = distanceBase & 0x0f; + const distance = (distanceBase >>> 4) + (reader.bits16() & ((1 << bits) - 1)); + reader.position += bits; + let offset = position - distance; + for (let i = 0; i < length; i++) { + buffer[position++] = buffer[offset++]; + } + } + } + } +}; + +zip.HuffmanTree = class { + + static create(tree) { + const bits = Math.max.apply(null, tree); + // Algorithm from https://github.com/photopea/UZIP.js + let rev15 = zip.HuffmanTree._rev15; + if (!rev15) { + const length = 1 << 15; + rev15 = new Uint16Array(length); + for (let i = 0; i < length; i++) { + let x = i; + x = (((x & 0xaaaaaaaa) >>> 1) | ((x & 0x55555555) << 1)); + x = (((x & 0xcccccccc) >>> 2) | ((x & 0x33333333) << 2)); + x = (((x & 0xf0f0f0f0) >>> 4) | ((x & 0x0f0f0f0f) << 4)); + x = (((x & 0xff00ff00) >>> 8) | ((x & 0x00ff00ff) << 8)); + rev15[i] = (((x >>> 16) | (x << 16))) >>> 17; + } + zip.HuffmanTree._rev15 = rev15; + zip.HuffmanTree._bitLengthCounts = new Uint16Array(16); + zip.HuffmanTree._nextCodes = new Uint16Array(16); + } + const length = tree.length; + const bitLengthCounts = zip.HuffmanTree._bitLengthCounts; + for (let i = 0; i < 16; i++) { + bitLengthCounts[i] = 0; + } + for (let i = 0; i < length; i++) { + bitLengthCounts[tree[i]]++; + } + const nextCodes = zip.HuffmanTree._nextCodes; + let code = 0; + bitLengthCounts[0] = 0; + for (let i = 0; i < bits; i++) { + code = (code + bitLengthCounts[i]) << 1; + nextCodes[i + 1] = code; + } + const codes = new Uint16Array(length); + for (let i = 0; i < length; i++) { + const index = tree[i]; + if (index !== 0) { + codes[i] = nextCodes[index]; + nextCodes[index]++; + } + } + const shift = 15 - bits; + const table = new Uint16Array(1 << bits); + for (let i = 0; i < length; i++) { + const c = tree[i]; + if (c !== 0) { + const value = (i << 4) | c; + const rest = bits - c; + let index = codes[i] << rest; + const max = index + (1 << rest); + for (; index != max; 
+
+zip.BitReader = class {
+
+    constructor(buffer) {
+        this.buffer = buffer;
+        this.position = 0;
+    }
+
+    bits(count) {
+        const offset = Math.floor(this.position / 8);
+        const shift = (this.position & 7);
+        this.position += count;
+        return ((this.buffer[offset] | (this.buffer[offset + 1] << 8)) >>> shift) & ((1 << count) - 1);
+    }
+
+    bits16() {
+        const offset = Math.floor(this.position / 8);
+        return ((this.buffer[offset] | (this.buffer[offset + 1] << 8) | (this.buffer[offset + 2] << 16)) >>> (this.position & 7));
+    }
+
+    read(length) {
+        const remainder = this.position & 7;
+        if (remainder !== 0) {
+            this.position += (8 - remainder);
+        }
+        const offset = Math.floor(this.position / 8);
+        this.position += length * 8;
+        return this.buffer.subarray(offset, offset + length);
+    }
+
+    uint16() {
+        const remainder = this.position & 7;
+        if (remainder !== 0) {
+            this.position += (8 - remainder);
+        }
+        const offset = Math.floor(this.position / 8);
+        this.position += 16;
+        return this.buffer[offset] | (this.buffer[offset + 1] << 8);
+    }
+};
+
+zip.BlockWriter = class {
+
+    constructor() {
+        this.blocks = [];
+        this.buffer = new Uint8Array(65536);
+        this.position = 0;
+        this.length = 0;
+        this.threshold = 0xf400;
+    }
+
+    push(position) {
+        this.blocks.push(new Uint8Array(this.buffer.subarray(this.position, position)));
+        this.length += position - this.position;
+        this.position = position;
+        return this._reset();
+    }
+
+    write(buffer) {
+        this.blocks.push(buffer);
+        const length = buffer.length;
+        this.length += length;
+        if (length > 32768) {
+            this.buffer.set(buffer.subarray(length - 32768, length), 0);
+            this.position = 32768;
+        } else {
+            this._reset();
+            this.buffer.set(buffer, this.position);
+            this.position += length;
+        }
+    }
+
+    toBuffer() {
+        const buffer = new Uint8Array(this.length);
+        let offset = 0;
+        for (const block of this.blocks) {
+            buffer.set(block, offset);
+            offset += block.length;
+        }
+        return buffer;
+    }
+
+    _reset() {
+        if (this.position > 32768) {
+            this.buffer.set(this.buffer.subarray(this.position - 32768, this.position), 0);
+            this.position = 32768;
+        }
+        return this.position;
+    }
+};
+
+zip.BufferWriter = class {
+
+    constructor(length) {
+        this.buffer = new Uint8Array(length);
+        this.length = length;
+        this.position = 0;
+    }
+
+    push(position) {
+        this.position = position;
+        if (this.position > this.length) {
+            throw new zip.Error('Invalid size.');
+        }
+        return this.position;
+    }
+
+    write(buffer) {
+        this.buffer.set(buffer, this.position);
+        this.position += buffer.length;
+        if (this.position > this.length) {
+            throw new zip.Error('Invalid size.');
+        }
+        return this.position;
+    }
+
+    toBuffer() {
+        return this.buffer;
+    }
+};
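+
+// Usage sketch: DEFLATE packs bits least-significant-bit first, which is why
+// bits() shifts right by the current bit offset rather than left. With a
+// made-up single byte 0b10110100:
+//
+//   const reader = new zip.BitReader(new Uint8Array([ 0b10110100 ]));
+//   reader.bits(3); // -> 0b100 = 4, the three low-order bits
+//   reader.bits(2); // -> 0b10  = 2, resuming at bit offset 3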
+
+zip.InflaterStream = class {
+
+    constructor(stream, length) {
+        this._stream = stream;
+        this._offset = this._stream.position;
+        this._position = 0;
+        this._length = length;
+    }
+
+    get position() {
+        return this._position;
+    }
+
+    get length() {
+        if (this._length === undefined) {
+            this._inflate();
+        }
+        return this._length;
+    }
+
+    seek(position) {
+        if (this._buffer === undefined) {
+            this._inflate();
+        }
+        this._position = position >= 0 ? position : this._length + position;
+    }
+
+    skip(offset) {
+        if (this._buffer === undefined) {
+            this._inflate();
+        }
+        this._position += offset;
+    }
+
+    peek(length) {
+        const position = this._position;
+        length = length !== undefined ? length : this.length - position;
+        this.skip(length);
+        const end = this._position;
+        this.seek(position);
+        if (position === 0 && length === this.length) {
+            return this._buffer;
+        }
+        return this._buffer.subarray(position, end);
+    }
+
+    read(length) {
+        const position = this._position;
+        length = length !== undefined ? length : this.length - position;
+        this.skip(length);
+        if (position === 0 && length === this.length) {
+            return this._buffer;
+        }
+        return this._buffer.subarray(position, this._position);
+    }
+
+    stream(length) {
+        const buffer = this.read(length);
+        return new zip.BinaryReader(buffer);
+    }
+
+    byte() {
+        const position = this._position;
+        this.skip(1);
+        return this._buffer[position];
+    }
+
+    _inflate() {
+        if (this._buffer === undefined) {
+            const position = this._stream.position;
+            this._stream.seek(this._offset);
+            const buffer = this._stream.peek();
+            this._buffer = new zip.Inflater().inflateRaw(buffer, this._length);
+            this._length = this._buffer.length;
+            this._stream.seek(position);
+            delete this._stream;
+        }
+    }
+};
+
+zip.ErrorStream = class {
+
+    constructor(size, message) {
+        this._message = message;
+        this._position = 0;
+        this._length = size;
+    }
+
+    get position() {
+        return this._position;
+    }
+
+    get length() {
+        return this._length;
+    }
+
+    seek(position) {
+        this._position = position >= 0 ? position : this._length + position;
+    }
+
+    skip(offset) {
+        this._position += offset;
+    }
+
+    peek(/* length */) {
+        this._throw();
+    }
+
+    read(/* length */) {
+        this._throw();
+    }
+
+    stream(/* length */) {
+        this._throw();
+    }
+
+    byte() {
+        this._throw();
+    }
+
+    _throw() {
+        throw new zip.Error(this._message);
+    }
+};
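+
+// Usage sketch: zip.InflaterStream above defers decompression until the first
+// seek/skip/read touches the data, so enumerating archive entries stays
+// cheap. 'raw' (a Uint8Array holding a raw DEFLATE stream) and 'size' (its
+// known uncompressed size) are hypothetical.
+//
+//   const stream = new zip.InflaterStream(new zip.BinaryReader(raw), size);
+//   const data = stream.read(); // first access triggers _inflate()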
+
+zip.BinaryReader = class {
+
+    constructor(buffer) {
+        this._buffer = buffer;
+        this._length = buffer.length;
+        this._position = 0;
+        this._view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
+    }
+
+    get position() {
+        return this._position;
+    }
+
+    get length() {
+        return this._length;
+    }
+
+    create(buffer) {
+        return new zip.BinaryReader(buffer);
+    }
+
+    stream(length) {
+        return this.create(this.read(length));
+    }
+
+    seek(position) {
+        this._position = position >= 0 ? position : this._length + position;
+    }
+
+    skip(offset) {
+        this._position += offset;
+    }
+
+    peek(length) {
+        if (this._position === 0 && length === undefined) {
+            return this._buffer;
+        }
+        const position = this._position;
+        this.skip(length !== undefined ? length : this._length - this._position);
+        const end = this._position;
+        this.seek(position);
+        return this._buffer.subarray(position, end);
+    }
+
+    read(length) {
+        if (this._position === 0 && length === undefined) {
+            this._position = this._length;
+            return this._buffer;
+        }
+        const position = this._position;
+        this.skip(length !== undefined ? length : this._length - this._position);
+        return this._buffer.subarray(position, this._position);
+    }
+
+    byte() {
+        const position = this._position;
+        this.skip(1);
+        return this._buffer[position];
+    }
+
+    uint16() {
+        const position = this._position;
+        this.skip(2);
+        return this._view.getUint16(position, true);
+    }
+
+    uint32() {
+        const position = this._position;
+        this.skip(4);
+        return this._view.getUint32(position, true);
+    }
+
+    uint64() {
+        const lo = this.uint32();
+        const hi = this.uint32();
+        if (hi > 0xffff) {
+            return undefined;
+        }
+        return hi * 4294967296 + lo;
+    }
+};
+
+zlib.Archive = class {
+
+    constructor(stream) {
+        const position = stream.position;
+        stream.read(2);
+        this._entries = new Map([ [ '', new zip.InflaterStream(stream) ] ]);
+        stream.seek(position);
+    }
+
+    get entries() {
+        return this._entries;
+    }
+};
+
+zip.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'Zip Error';
+    }
+};
+
+gzip.Archive = class {
+
+    constructor(stream) {
+        const position = stream.position;
+        const signature = [ 0x1f, 0x8b ];
+        if (stream.position + 2 > stream.length ||
+            !stream.read(2).every((value, index) => value === signature[index])) {
+            throw new gzip.Error('Invalid gzip signature.');
+        }
+        const string = () => {
+            let content = '';
+            while (stream.position < stream.length) {
+                const value = stream.byte();
+                if (value === 0x00) {
+                    break;
+                }
+                content += String.fromCharCode(value);
+            }
+            return content;
+        };
+        const reader = new zip.BinaryReader(stream.read(8));
+        const compressionMethod = reader.byte();
+        if (compressionMethod != 8) {
+            stream.seek(position);
+            throw new gzip.Error(`Invalid compression method '${compressionMethod}'.`);
+        }
+        const flags = reader.byte();
+        reader.uint32(); // MTIME
+        reader.byte(); // XFL
+        reader.byte(); // OS
+        if ((flags & 4) != 0) { // FEXTRA
+            const xlen = stream.byte() | (stream.byte() << 8);
+            stream.skip(xlen);
+        }
+        const name = (flags & 8) != 0 ? string() : ''; // FNAME
+        if ((flags & 16) != 0) { // FCOMMENT
+            string();
+        }
+        if ((flags & 1) != 0) { // FHCRC
+            stream.skip(2);
+        }
+        this._entries = new Map([ [ name, new gzip.InflaterStream(stream) ] ]);
+        stream.seek(position);
+    }
+
+    get entries() {
+        return this._entries;
+    }
+};
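+
+// Usage sketch: the constructor above follows the RFC 1952 member layout:
+// magic 0x1f 0x8b, CM = 8 (deflate), FLG, 4-byte MTIME, XFL, OS, then the
+// optional FEXTRA/FNAME/FCOMMENT/FHCRC fields gated by FLG bits. 'data' is a
+// hypothetical Uint8Array holding a .gz file.
+//
+//   const archive = new gzip.Archive(new zip.BinaryReader(data));
+//   archive.entries; // Map from FNAME (or '') to a gzip.InflaterStream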
+
+gzip.InflaterStream = class {
+
+    constructor(stream) {
+        this._stream = stream.stream(stream.length - stream.position - 8);
+        const reader = new zip.BinaryReader(stream.read(8));
+        reader.uint32(); // CRC32
+        this._length = reader.uint32(); // ISIZE
+        this._position = 0;
+    }
+
+    get position() {
+        return this._position;
+    }
+
+    get length() {
+        return this._length;
+    }
+
+    seek(position) {
+        if (this._buffer === undefined) {
+            this._inflate();
+        }
+        this._position = position >= 0 ? position : this._length + position;
+    }
+
+    skip(offset) {
+        if (this._buffer === undefined) {
+            this._inflate();
+        }
+        this._position += offset;
+    }
+
+    stream(length) {
+        return new zip.BinaryReader(this.read(length));
+    }
+
+    peek(length) {
+        const position = this._position;
+        length = length !== undefined ? length : this._length - this._position;
+        this.skip(length);
+        const end = this._position;
+        this.seek(position);
+        if (position === 0 && length === this._length) {
+            return this._buffer;
+        }
+        return this._buffer.subarray(position, end);
+    }
+
+    read(length) {
+        const position = this._position;
+        length = length !== undefined ? length : this._length - this._position;
+        this.skip(length);
+        if (position === 0 && length === this._length) {
+            return this._buffer;
+        }
+        return this._buffer.subarray(position, this._position);
+    }
+
+    byte() {
+        const position = this._position;
+        this.skip(1);
+        return this._buffer[position];
+    }
+
+    _inflate() {
+        if (this._buffer === undefined) {
+            const buffer = this._stream.peek();
+            this._buffer = new zip.Inflater().inflateRaw(buffer, this._length);
+            delete this._stream;
+        }
+    }
+};
+
+gzip.Error = class extends Error {
+
+    constructor(message) {
+        super(message);
+        this.name = 'Gzip Error';
+    }
+};
+
+export const Archive = zip.Archive;
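+
+// Usage sketch: end-to-end gzip decompression with this module; 'bytes' is a
+// hypothetical Uint8Array containing a .gz file.
+//
+//   const archive = new gzip.Archive(new zip.BinaryReader(bytes));
+//   for (const [ name, stream ] of archive.entries) {
+//       const content = stream.peek(); // inflated lazily on first access
+//   }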